/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low-level functions that manipulate and analyze the
   CFG and that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
         delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
         insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
         purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
         fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to quickly determine the basic block of an insn
         compute_bb_for_insn, update_bb_for_insn, set_block_for_insn
     - Edge redirection with updating and optimizing of insn chain
         block_label, tidy_fallthru_edge, force_nonfallthru  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "alias.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "bb-reorder.h"
#include "regs.h"
#include "flags.h"
#include "except.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "insn-attr.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "target.h"
#include "common/common-target.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "print-rtl.h"

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const rtx_note *);
static int can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, int);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
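
/* A minimal sketch of how a client typically drives the edge-insertion
   API listed above: queue patterns on interesting edges, then commit
   them all at once.  This is a hypothetical illustration only (hence
   the "#if 0"); the pass context and the use of a bare nop are
   assumptions, real callers queue profiling or fixup code.  */
#if 0
static void
example_instrument_edges (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      /* Abnormal critical edges cannot carry insertions.  */
      if (!(e->flags & EDGE_ABNORMAL))
	insert_insn_on_edge (gen_nop (), e);

  /* Materialize everything queued above, splitting edges as needed.  */
  commit_edge_insertions ();
}
#endif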
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static int
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static int
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
	  /* User declared labels must be preserved.  */
	  && LABEL_NAME (label) == 0
	  && !in_insn_list_p (forced_labels, label));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx uncast_insn)
{
  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
	 might be referenced via variables, the constant pool etc.
	 Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
	{
	  const char *name = LABEL_NAME (insn);
	  basic_block bb = BLOCK_FOR_INSN (insn);
	  rtx_insn *bb_note = NEXT_INSN (insn);

	  really_delete = false;
	  PUT_CODE (insn, NOTE);
	  NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (insn) = name;

	  /* If the note following the label starts a basic block, and the
	     label is a member of the same basic block, interchange the two.  */
	  if (bb_note != NULL_RTX
	      && NOTE_INSN_BASIC_BLOCK_P (bb_note)
	      && bb != NULL
	      && bb == BLOCK_FOR_INSN (bb_note))
	    {
	      reorder_insns_nobb (insn, insn, bb_note);
	      BB_HEAD (bb) = bb_note;
	      if (BB_END (bb) == bb_note)
		BB_END (bb) = insn;
	    }
	}

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
	df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
	  && LABEL_P (JUMP_LABEL (insn)))
	LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
	      = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
	     && LABEL_P (XEXP (note, 0)))
	{
	  LABEL_NUSES (XEXP (note, 0))--;
	  remove_note (insn, note);
	}
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
	 && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
	{
	  rtx label = XEXP (RTVEC_ELT (vec, i), 0);

	  /* When deleting code in bulk (e.g. removing many unreachable
	     blocks) we can delete a label that's a target of the vector
	     before deleting the vector itself.  */
	  if (!NOTE_P (label))
	    LABEL_NUSES (label)--;
	}
    }
}

/* Like delete_insn but also purge dead edges from BB.  */

void
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (INSN_P (insn)
      && BLOCK_FOR_INSN (insn)
      && BB_END (BLOCK_FOR_INSN (insn)) == insn)
    purge = true;
  delete_insn (insn);
  if (purge)
    purge_dead_edges (BLOCK_FOR_INSN (insn));
}
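
/* Illustrative sketch (a hypothetical caller, disabled): deleting the
   jump at the end of a block with delete_insn_and_edges lets the
   function above purge the now-dead outgoing edges in the same step,
   instead of calling purge_dead_edges by hand.  */
#if 0
static void
example_remove_trailing_jump (basic_block bb)
{
  rtx_insn *end = BB_END (bb);

  if (JUMP_P (end))
    delete_insn_and_edges (end);
}
#endif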

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to
   NULL for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx finish, bool clear_bb)
{
  rtx_insn *prev, *current;

  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  current = safe_as_a <rtx_insn *> (finish);
  while (1)
    {
      prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
	;
      else
	delete_insn (current);

      if (clear_bb && !current->deleted ())
	set_block_for_insn (current, NULL);

      if (current == start)
	break;
      current = prev;
    }
}
\f
/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction -
   it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain, and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block we should be put
   after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
			      basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
	after = head;
      else
	{
	  after = PREV_INSN (head);
	  head = bb_note;
	}

      if (after != bb_note && NEXT_INSN (after) != bb_note)
	reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
	head = end = bb_note
	  = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
	{
	  bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
	  if (head == end)
	    end = bb_note;
	}
      else
	{
	  bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
	  head = bb_note;
	  if (!end)
	    end = head;
	}

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD
   and END and place it in the BB chain after block AFTER.  END can be
   NULL to create a new empty basic block before HEAD.  Both END and HEAD
   can be NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    {
      size_t new_size =
	(last_basic_block_for_fn (cfun)
	 + (last_basic_block_for_fn (cfun) + 3) / 4);
      vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
    }

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}
\f
/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.

   ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}
\f
/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
	{
	  BLOCK_FOR_INSN (insn) = bb;
	  if (insn == end)
	    break;
	}
    }
}
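
/* Sketch of the usual pairing (hypothetical pass body, disabled):
   passes that rely on BLOCK_FOR_INSN recompute the mapping up front
   and release it with free_bb_for_insn below when done.  */
#if 0
static unsigned int
example_pass_using_bb_for_insn (void)
{
  compute_bb_for_insn ();
  /* ... work that queries BLOCK_FOR_INSN (insn) goes here ...  */
  free_bb_for_insn ();
  return 0;
}
#endif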

/* Release the basic_block_for_insn array.  */

unsigned int
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
  return 0;
}

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
  /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the insn after which code should be emitted when we want to
   emit code at the entry of the function.  */
rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
	  BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */
void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

\f
/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
	  || (JUMP_P (insn) && simplejump_p (insn))
	  || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
	return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
	return false;
    }

  return true;
}
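
/* Illustrative use of the predicate above (hypothetical helper,
   disabled): count the forwarder blocks in the current function.  */
#if 0
static int
example_count_forwarder_blocks (void)
{
  basic_block bb;
  int count = 0;

  FOR_EACH_BB_FN (bb, cfun)
    if (forwarder_block_p (bb))
      count++;
  return count;
}
#endif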

/* Return nonzero if we can reach target from src by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return nonzero if we could reach target from src by falling through,
   if the target was made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}
\f
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Creates a new basic block just after basic block BB by splitting
   everything after specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
	{
	  rtx_insn *next = insn;

	  insn = PREV_INSN (insn);

	  /* If the block contains only debug insns, insn would have
	     been NULL in a non-debug compilation, and then we'd end
	     up emitting a DELETED note.  For -fcompare-debug
	     stability, emit the note too.  */
	  if (insn != BB_END (bb)
	      && DEBUG_INSN_P (next)
	      && DEBUG_INSN_P (BB_END (bb)))
	    {
	      while (next != BB_END (bb) && DEBUG_INSN_P (next))
		next = NEXT_INSN (next);

	      if (next == BB_END (bb))
		emit_note_after (NOTE_INSN_DELETED, next);
	    }
	}
      else
	insn = get_last_insn ();
    }

  /* We probably should check type of the insn so that we do not create
     inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && INSN_LOCATION (insn) == goto_locus)
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
	insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
	  && INSN_LOCATION (insn) == goto_locus)
	return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
  int b_empty = 0;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
	     a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
	 in particular at the end of a function.  */
      if (b_head == b_end)
	b_empty = 1;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
	b_empty = 1;
      if (! del_last)
	del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
	if (!NOTE_P (prev)
	    || NOTE_INSN_BASIC_BLOCK_P (prev)
	    || prev == BB_HEAD (a))
	  break;

      del_first = a_end;

      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (HAVE_cc0 && only_sets_cc0_p (prev))
	{
	  rtx_insn *tmp = prev;

	  prev = prev_nonnote_insn (prev);
	  if (!prev)
	    prev = BB_HEAD (a);
	  del_first = tmp;
	}

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* When not optimizing and the edge is the only place in RTL which holds
     some unique locus, emit a nop with that locus in between.  */
  if (!optimize)
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
	 and the debug insns that make up B after the debug insns,
	 bringing the debug insns into A while keeping the notes after
	 the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
	reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
			    b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  /* If B was a forwarder block, propagate the locus on the edge.  */
  if (forwarder_p
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
	  && single_succ (a) == b
	  && single_pred_p (b)
	  && a != b
	  /* Must be simple edge.  */
	  && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
	  && a->next_bb == b
	  && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  /* If the jump insn has side effects,
	     we can't kill the edge.  */
	  && (!JUMP_P (BB_END (a))
	      || (reload_completed
		  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
\f
/* Return the label in the head of basic block BLOCK.  Create one if it
   doesn't exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));

  return as_a <rtx_code_label *> (BB_HEAD (block));
}
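
/* Sketch of the common idiom around block_label (hypothetical caller,
   disabled): a jump is retargeted by first materializing a label on
   the destination block, exactly as the redirection code below does.  */
#if 0
static bool
example_retarget_jump (rtx_jump_insn *jump, basic_block target)
{
  rtx_code_label *label = block_label (target);

  /* block_label returns NULL only for the exit block.  */
  if (!label)
    return false;
  return redirect_jump (jump, label, 0);
}
#endif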

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump
   completely.  This can apply only if all edges now point to the same
   block.  The parameters and return values are equivalent to
   redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src), *kill_from;
  rtx set;
  int fallthru = 0;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
	 edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
	  && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
      && only_sets_cc0_p (PREV_INSN (insn)))
    kill_from = PREV_INSN (insn);

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
	fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink whole insn chain.  */
      if (in_cfglayout)
	{
	  rtx_insn *insn = BB_FOOTER (src);

	  delete_insn_chain (kill_from, BB_END (src), false);

	  /* Remove barriers but keep jumptables.  */
	  while (insn)
	    {
	      if (BARRIER_P (insn))
		{
		  if (PREV_INSN (insn))
		    SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
		  else
		    BB_FOOTER (src) = NEXT_INSN (insn);
		  if (NEXT_INSN (insn))
		    SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
		}
	      if (LABEL_P (insn))
		break;
	      insn = NEXT_INSN (insn);
	    }
	}
      else
	delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
			   false);
    }

  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
	return NULL;
      if (dump_file)
	fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
		 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			  block_label (target), 0))
	{
	  gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
	  return NULL;
	}
    }

  /* Cannot do anything for target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
	fprintf (dump_file, "Replacing insn %i by jump %i\n",
		 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (kill_from, insn, false);

      /* Recognize a tablejump that we are converting to a
	 simple jump and remove its associated CODE_LABEL
	 and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
	delete_insn_chain (label, table, false);

      barrier = next_nonnote_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
	emit_barrier_after (BB_END (src));
      else
	{
	  if (barrier != NEXT_INSN (BB_END (src)))
	    {
	      /* Move the jump before barrier so that the notes
		 which originally were or were created before jump table are
		 inside the basic block.  */
	      rtx_insn *new_insn = BB_END (src);

	      update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
					PREV_INSN (barrier), src);

	      SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
	      SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

	      SET_NEXT_INSN (new_insn) = barrier;
	      SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

	      SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
	      SET_PREV_INSN (barrier) = new_insn;
	    }
	}
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this
   only when it originally jumped to OLD_LABEL.  Return true if this
   worked or the original target wasn't OLD_LABEL, return false if
   redirection doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
	if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
	  {
	    RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
	    --LABEL_NUSES (old_label);
	    ++LABEL_NUSES (new_label);
	  }

      /* Handle casesi dispatch insns.  */
      if ((tmp = single_set (insn)) != NULL
	  && SET_DEST (tmp) == pc_rtx
	  && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	  && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
	  && LABEL_REF_LABEL (XEXP (SET_SRC (tmp), 2)) == old_label)
	{
	  XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
						       new_label);
	  --LABEL_NUSES (old_label);
	  ++LABEL_NUSES (new_label);
	}
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
	{
	  rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
	  gcc_assert (GET_CODE (old_ref) == LABEL_REF);
	  if (XEXP (old_ref, 0) == old_label)
	    {
	      ASM_OPERANDS_LABEL (tmp, i)
		= gen_rtx_LABEL_REF (Pmode, new_label);
	      --LABEL_NUSES (old_label);
	      ++LABEL_NUSES (new_label);
	    }
	}

      if (JUMP_LABEL (insn) == old_label)
	{
	  JUMP_LABEL (insn) = new_label;
	  note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	  if (note)
	    remove_note (insn, note);
	}
      else
	{
	  note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	  if (note)
	    remove_note (insn, note);
	  if (JUMP_LABEL (insn) != new_label
	      && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
	    add_reg_note (insn, REG_LABEL_TARGET, new_label);
	}
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
	     != NULL_RTX)
	XEXP (note, 0) = new_label;
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
	 one basic block to the other in case only one computed_jump is
	 available.  */
      if (computed_jump_p (insn)
	  /* A return instruction can't be redirected.  */
	  || returnjump_p (insn))
	return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
	{
	  /* If the insn doesn't go where we think, we're confused.  */
	  gcc_assert (JUMP_LABEL (insn) == old_label);

	  /* If the substitution doesn't succeed, die.  This can happen
	     if the back end emitted unrecognizable instructions or if
	     target is exit block on some arches.  */
	  if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			      block_label (new_bb), 0))
	    {
	      gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
	      return false;
	    }
	}
    }
  return true;
}

/* Redirect edge representing branch of (un)conditional jump or tablejump,
   NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
	return NULL;
    }
  else
    /* When expanding this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
					     old_label, target))
	return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
	     e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
      == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src))
	  && !CROSSING_JUMP_P (BB_END (e->src)))
	CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from jump at end of
	 src if it exists, and if no other successors are
	 still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
	{
	  bool has_crossing_succ = false;
	  edge e2;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e2, ei, e->src->succs)
	    {
	      has_crossing_succ |= (e2->flags & EDGE_CROSSING);
	      if (has_crossing_succ)
		break;
	    }
	  if (!has_crossing_succ)
	    CROSSING_JUMP_P (BB_END (e->src)) = 0;
	}
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
	 Note that force_nonfallthru will do any necessary partition
	 boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
	  && BB_PARTITION (bb) != BB_PARTITION (e->dest)
	  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	force_nonfallthru (e);
      else
	fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do that at
   the expense of adding new instructions or reordering basic blocks.

   Function can be also called with edge destination equivalent to the TARGET.
   Then it should try the simplifications and do nothing if none is possible.

   Return edge representing the branch if transformation succeeded.  Return
   NULL on failure.
   We still return NULL in case E was already destined for TARGET and we
   didn't manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
	      || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
	{
	  rtx_insn *footer_tail = BB_FOOTER (bb);

	  while (NEXT_INSN (footer_tail))
	    footer_tail = NEXT_INSN (footer_tail);
	  if (!BARRIER_P (footer_tail))
	    {
	      SET_NEXT_INSN (footer_tail) = insn;
	      SET_PREV_INSN (insn) = footer_tail;
	    }
	}
      else
	BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In the case the last instruction is a conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect the fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
				  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
	{
	  int prob = XINT (note, 0);

	  b->probability = prob;
	  /* Update this to use GCOV_COMPUTE_SCALE.  */
	  b->count = e->count * prob / REG_BR_PROB_BASE;
	  e->probability -= e->probability;
	  e->count -= b->count;
	  if (e->probability < 0)
	    e->probability = 0;
	  if (e->count < 0)
	    e->count = 0;
	}
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - fallthru edge to the same block as abnormal
	 edge.
	 We can't redirect abnormal edge, but we still can split the fallthru
	 one and create separate abnormal edge to original destination.
	 This allows bb-reorder to make such edge non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  /* We can't redirect the entry block.  Create an empty block
	     at the start of the function which we use to add the new
	     jump.  */
	  edge tmp;
	  edge_iterator ei;
	  bool found = false;

	  basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
					       ENTRY_BLOCK_PTR_FOR_FN (cfun));

	  /* Change the existing edge's source to be the new block, and add
	     a new edge from the entry block to the new block.  */
	  e->src = bb;
	  for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
	       (tmp = ei_safe_edge (ei)); )
	    {
	      if (tmp == e)
		{
		  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
		  found = true;
		  break;
		}
	      else
		ei_next (&ei);
	    }

	  gcc_assert (found);

	  vec_safe_push (bb->succs, e);
	  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
				 EDGE_FALLTHRU);
	}
    }

  /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
	{
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
	    {
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
	      XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
	      adjust_jump_target = true;
	    }
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
	    asm_goto_edge = true;
	}
      if (adjust_jump_target)
	{
	  rtx_insn *insn = BB_END (e->src);
	  rtx note;
	  rtx_insn *old_label = BB_HEAD (e->dest);
	  rtx_insn *new_label = BB_HEAD (target);

	  if (JUMP_LABEL (insn) == old_label)
	    {
	      JUMP_LABEL (insn) = new_label;
	      note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	      if (note)
		remove_note (insn, note);
	    }
	  else
	    {
	      note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	      if (note)
		remove_note (insn, note);
	      if (JUMP_LABEL (insn) != new_label
		  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
		add_reg_note (insn, REG_LABEL_TARGET, new_label);
	    }
	  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
		 != NULL_RTX)
	    XEXP (note, 0) = new_label;
	}
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      gcov_type count = e->count;
      int probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
	 by searching forward from there.  Otherwise start searching
	 forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
	new_head = table;
      else
	new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;
      jump_block->frequency = EDGE_FREQUENCY (e);

      /* Make sure new block ends up in correct hot/cold section.  */

      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;
      new_edge->count = count;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = REG_BR_PROB_BASE;

      /* If e->src was previously region crossing, it no longer is
	 and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If asm goto has any label refs to target's label,
	 add also edge from asm goto bb to target.  */
      if (asm_goto_edge)
	{
	  new_edge->probability /= 2;
	  new_edge->count /= 2;
	  jump_block->count /= 2;
	  jump_block->frequency /= 2;
	  new_edge = make_edge (new_edge->src, target,
				e->flags & ~EDGE_FALLTHRU);
	  new_edge->probability = probability - probability / 2;
	  new_edge->count = count - count / 2;
	}

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
	emit_jump_insn_after_setloc (targetm.gen_return (),
				     BB_END (jump_block), loc);
      else
	{
	  gcc_assert (jump_label == simple_return_rtx);
	  emit_jump_insn_after_setloc (targetm.gen_simple_return (),
				       BB_END (jump_block), loc);
	}
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (targetm.gen_jump (label),
				   BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be fallthru edge.  Emit needed jump instruction
   (and possibly create new basic block) to make edge non-fallthru.
   Return newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect edge even at the expense of creating new jump insn or
   basic block.  Return new basic block if created, NULL otherwise.
   Conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
	  || single_succ_p (b)))
    {
      rtx label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
	{
	  /* The label is likely mentioned in some instruction before
	     the tablejump and might not be DCEd, so turn it into
	     a note instead and move before the tablejump that is going to
	     be deleted.  */
	  const char *name = LABEL_NAME (label);
	  PUT_CODE (label, NOTE);
	  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (label) = name;
	  rtx_insn *lab = safe_as_a <rtx_insn *> (label);
	  reorder_insns (lab, lab, PREV_INSN (q));
	  delete_insn (table);
	}

      /* If this was a conditional jump, we need to also delete
	 the insn that set cc0.  */
      if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
	q = PREV_INSN (q);

      q = PREV_INSN (q);
    }

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
\f
/* Should move basic block BB after basic block AFTER.  NIY.  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
		      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
	return bb;
    }
  /* Return bb before the exit block.  */
  return bb->prev_bb;
}
1837
1838 /* Split a (typically critical) edge. Return the new block.
1839 The edge must not be abnormal.
1840
1841 ??? The code generally expects to be called on critical edges.
1842 The case of a block ending in an unconditional jump to a
1843 block with multiple predecessors is not handled optimally. */
1844
1845 static basic_block
1846 rtl_split_edge (edge edge_in)
1847 {
1848 basic_block bb, new_bb;
1849 rtx_insn *before;
1850
1851 /* Abnormal edges cannot be split. */
1852 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1853
1854 /* We are going to place the new block in front of edge destination.
1855 Avoid existence of fallthru predecessors. */
1856 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1857 {
1858 edge e = find_fallthru_edge (edge_in->dest->preds);
1859
1860 if (e)
1861 force_nonfallthru (e);
1862 }
1863
1864 /* Create the basic block note. */
1865 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1866 before = BB_HEAD (edge_in->dest);
1867 else
1868 before = NULL;
1869
1870 /* If this is a fall through edge to the exit block, the blocks might
1871 not be adjacent, and the right place is after the source. */
1872 if ((edge_in->flags & EDGE_FALLTHRU)
1873 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1874 {
1875 before = NEXT_INSN (BB_END (edge_in->src));
1876 bb = create_basic_block (before, NULL, edge_in->src);
1877 BB_COPY_PARTITION (bb, edge_in->src);
1878 }
1879 else
1880 {
1881 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1882 {
1883 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1884 BB_COPY_PARTITION (bb, edge_in->dest);
1885 }
1886 else
1887 {
1888 basic_block after = edge_in->dest->prev_bb;
1889 /* If this is post-bb reordering, and the edge crosses a partition
1890 boundary, the new block needs to be inserted in the bb chain
1891 at the end of the src partition (since we put the new bb into
1892 that partition, see below). Otherwise we may end up creating
1893 an extra partition crossing in the chain, which is illegal.
1894 It can't go after the src, because src may have a fall-through
1895 to a different block. */
1896 if (crtl->bb_reorder_complete
1897 && (edge_in->flags & EDGE_CROSSING))
1898 {
1899 after = last_bb_in_partition (edge_in->src);
1900 before = get_last_bb_insn (after);
1901 /* The instruction following the last bb in the partition should
1902 be a barrier, since that bb cannot end in a fall-through. */
1903 gcc_checking_assert (BARRIER_P (before));
1904 before = NEXT_INSN (before);
1905 }
1906 bb = create_basic_block (before, NULL, after);
1907 /* Put the split bb into the src partition, to avoid creating
1908 a situation where a cold bb dominates a hot bb, in the case
1909 where src is cold and dest is hot. The src will dominate
1910 the new bb (whereas it might not have dominated dest). */
1911 BB_COPY_PARTITION (bb, edge_in->src);
1912 }
1913 }
1914
1915 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1916
1917 /* Can't allow a region crossing edge to be fallthrough. */
1918 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1919 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1920 {
1921 new_bb = force_nonfallthru (single_succ_edge (bb));
1922 gcc_assert (!new_bb);
1923 }
1924
1925 /* For non-fallthru edges, we must adjust the predecessor's
1926 jump instruction to target our new block. */
1927 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1928 {
1929 edge redirected = redirect_edge_and_branch (edge_in, bb);
1930 gcc_assert (redirected);
1931 }
1932 else
1933 {
1934 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1935 {
1936 /* For asm goto even splitting of fallthru edge might
1937 need insn patching, as other labels might point to the
1938 old label. */
1939 rtx_insn *last = BB_END (edge_in->src);
1940 if (last
1941 && JUMP_P (last)
1942 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1943 && extract_asm_operands (PATTERN (last)) != NULL_RTX
1944 && patch_jump_insn (last, before, bb))
1945 df_set_bb_dirty (edge_in->src);
1946 }
1947 redirect_edge_succ (edge_in, bb);
1948 }
1949
1950 return bb;
1951 }
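/* An illustrative picture (sketch only): splitting the edge A->B
   yields

       A ---> N ---> B

   where N is the returned empty block. N gets a single fallthru
   successor edge to B (forced non-fallthru if the partitions differ);
   for a non-fallthru A->N edge, A's jump is retargeted via
   redirect_edge_and_branch, while a fallthru edge is simply
   redirected. */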
1952
1953 /* Queue instructions for insertion on an edge between two basic blocks.
1954 The new instructions and basic blocks (if any) will not appear in the
1955 CFG until commit_edge_insertions is called. */
1956
1957 void
1958 insert_insn_on_edge (rtx pattern, edge e)
1959 {
1960 /* We cannot insert instructions on an abnormal critical edge.
1961 It will be easier to find the culprit if we die now. */
1962 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1963
1964 if (e->insns.r == NULL_RTX)
1965 start_sequence ();
1966 else
1967 push_to_sequence (e->insns.r);
1968
1969 emit_insn (pattern);
1970
1971 e->insns.r = get_insns ();
1972 end_sequence ();
1973 }
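/* A minimal usage sketch (illustrative only; DEST and SRC stand for
   hypothetical registers): a pass that wants a register copy to happen
   on edge E, typically a critical one, would do

       insert_insn_on_edge (gen_move_insn (DEST, SRC), e);
       ...
       commit_edge_insertions ();

   Repeated calls for the same edge accumulate into a single sequence,
   since pending insns stay on e->insns.r until they are committed. */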
1974
1975 /* Update the CFG for the instructions queued on edge E. */
1976
1977 void
1978 commit_one_edge_insertion (edge e)
1979 {
1980 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1981 basic_block bb;
1982
1983 /* Pull the insns off the edge now since the edge might go away. */
1984 insns = e->insns.r;
1985 e->insns.r = NULL;
1986
1987 /* Figure out where to put these insns. If the destination has
1988 one predecessor, insert there. Except for the exit block. */
1989 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1990 {
1991 bb = e->dest;
1992
1993 /* Get the location correct wrt a code label, and "nice" wrt
1994 a basic block note, and before everything else. */
1995 tmp = BB_HEAD (bb);
1996 if (LABEL_P (tmp))
1997 tmp = NEXT_INSN (tmp);
1998 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1999 tmp = NEXT_INSN (tmp);
2000 if (tmp == BB_HEAD (bb))
2001 before = tmp;
2002 else if (tmp)
2003 after = PREV_INSN (tmp);
2004 else
2005 after = get_last_insn ();
2006 }
2007
2008 /* If the source has one successor and the edge is not abnormal,
2009 insert there. Except for the entry block.
2010 Don't do this if the predecessor ends in a jump other than an
2011 unconditional simple jump. E.g. for an asm goto that points all
2012 its labels at the fallthru basic block, we can't insert instructions
2013 before the asm goto, as the asm goto can have various side effects,
2014 and we can't emit instructions after the asm goto, as it must end
2015 the basic block. */
2016 else if ((e->flags & EDGE_ABNORMAL) == 0
2017 && single_succ_p (e->src)
2018 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2019 && (!JUMP_P (BB_END (e->src))
2020 || simplejump_p (BB_END (e->src))))
2021 {
2022 bb = e->src;
2023
2024 /* It is possible to have a non-simple jump here. Consider a target
2025 where some forms of unconditional jumps clobber a register. This
2026 happens on the fr30 for example.
2027
2028 We know this block has a single successor, so we can just emit
2029 the queued insns before the jump. */
2030 if (JUMP_P (BB_END (bb)))
2031 before = BB_END (bb);
2032 else
2033 {
2034 /* We'd better be fallthru, or we've lost track of what's what. */
2035 gcc_assert (e->flags & EDGE_FALLTHRU);
2036
2037 after = BB_END (bb);
2038 }
2039 }
2040
2041 /* Otherwise we must split the edge. */
2042 else
2043 {
2044 bb = split_edge (e);
2045
2046 /* If E crossed a partition boundary, we needed to make bb end in
2047 a region-crossing jump, even though it was originally fallthru. */
2048 if (JUMP_P (BB_END (bb)))
2049 before = BB_END (bb);
2050 else
2051 after = BB_END (bb);
2052 }
2053
2054 /* Now that we've found the spot, do the insertion. */
2055 if (before)
2056 {
2057 emit_insn_before_noloc (insns, before, bb);
2058 last = prev_nonnote_insn (before);
2059 }
2060 else
2061 last = emit_insn_after_noloc (insns, after, bb);
2062
2063 if (returnjump_p (last))
2064 {
2065 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2066 This is not currently a problem because this only happens
2067 for the (single) epilogue, which already has a fallthru edge
2068 to EXIT. */
2069
2070 e = single_succ_edge (bb);
2071 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2072 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2073
2074 e->flags &= ~EDGE_FALLTHRU;
2075 emit_barrier_after (last);
2076
2077 if (before)
2078 delete_insn (before);
2079 }
2080 else
2081 gcc_assert (!JUMP_P (last));
2082 }
2083
2084 /* Update the CFG for all queued instructions. */
2085
2086 void
2087 commit_edge_insertions (void)
2088 {
2089 basic_block bb;
2090
2091 /* Optimization passes that invoke this routine can cause hot blocks
2092 previously reached by both hot and cold blocks to become dominated only
2093 by cold blocks. This will cause the verification below to fail,
2094 and lead to now-cold code in the hot section. In some cases this
2095 may only be visible after newly unreachable blocks are deleted,
2096 which will be done by fixup_partitions. */
2097 fixup_partitions ();
2098
2099 #ifdef ENABLE_CHECKING
2100 verify_flow_info ();
2101 #endif
2102
2103 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2104 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2105 {
2106 edge e;
2107 edge_iterator ei;
2108
2109 FOR_EACH_EDGE (e, ei, bb->succs)
2110 if (e->insns.r)
2111 commit_one_edge_insertion (e);
2112 }
2113 }
2114 \f
2115
2116 /* Print out RTL-specific basic block information (live information
2117 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2118 documented in dumpfile.h. */
2119
2120 static void
2121 rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
2122 {
2123 rtx_insn *insn;
2124 rtx_insn *last;
2125 char *s_indent;
2126
2127 s_indent = (char *) alloca ((size_t) indent + 1);
2128 memset (s_indent, ' ', (size_t) indent);
2129 s_indent[indent] = '\0';
2130
2131 if (df && (flags & TDF_DETAILS))
2132 {
2133 df_dump_top (bb, outf);
2134 putc ('\n', outf);
2135 }
2136
2137 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2138 for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
2139 insn = NEXT_INSN (insn))
2140 {
2141 if (flags & TDF_DETAILS)
2142 df_dump_insn_top (insn, outf);
2143 if (! (flags & TDF_SLIM))
2144 print_rtl_single (outf, insn);
2145 else
2146 dump_insn_slim (outf, insn);
2147 if (flags & TDF_DETAILS)
2148 df_dump_insn_bottom (insn, outf);
2149 }
2150
2151 if (df && (flags & TDF_DETAILS))
2152 {
2153 df_dump_bottom (bb, outf);
2154 putc ('\n', outf);
2155 }
2156
2157 }
2158 \f
2159 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2160 for the start of each basic block. FLAGS are the TDF_* masks documented
2161 in dumpfile.h. */
2162
2163 void
2164 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, int flags)
2165 {
2166 const rtx_insn *tmp_rtx;
2167 if (rtx_first == 0)
2168 fprintf (outf, "(nil)\n");
2169 else
2170 {
2171 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2172 int max_uid = get_max_uid ();
2173 basic_block *start = XCNEWVEC (basic_block, max_uid);
2174 basic_block *end = XCNEWVEC (basic_block, max_uid);
2175 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2176 basic_block bb;
2177
2178 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2179 insns, but the CFG is not maintained so the basic block info
2180 is not reliable. Therefore it's omitted from the dumps. */
2181 if (! (cfun->curr_properties & PROP_cfg))
2182 flags &= ~TDF_BLOCKS;
2183
2184 if (df)
2185 df_dump_start (outf);
2186
2187 if (flags & TDF_BLOCKS)
2188 {
2189 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2190 {
2191 rtx_insn *x;
2192
2193 start[INSN_UID (BB_HEAD (bb))] = bb;
2194 end[INSN_UID (BB_END (bb))] = bb;
2195 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2196 {
2197 enum bb_state state = IN_MULTIPLE_BB;
2198
2199 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2200 state = IN_ONE_BB;
2201 in_bb_p[INSN_UID (x)] = state;
2202
2203 if (x == BB_END (bb))
2204 break;
2205 }
2206 }
2207 }
2208
2209 for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
2210 {
2211 if (flags & TDF_BLOCKS)
2212 {
2213 bb = start[INSN_UID (tmp_rtx)];
2214 if (bb != NULL)
2215 {
2216 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
2217 if (df && (flags & TDF_DETAILS))
2218 df_dump_top (bb, outf);
2219 }
2220
2221 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2222 && !NOTE_P (tmp_rtx)
2223 && !BARRIER_P (tmp_rtx))
2224 fprintf (outf, ";; Insn is not within a basic block\n");
2225 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2226 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2227 }
2228
2229 if (flags & TDF_DETAILS)
2230 df_dump_insn_top (tmp_rtx, outf);
2231 if (! (flags & TDF_SLIM))
2232 print_rtl_single (outf, tmp_rtx);
2233 else
2234 dump_insn_slim (outf, tmp_rtx);
2235 if (flags & TDF_DETAILS)
2236 df_dump_insn_bottom (tmp_rtx, outf);
2237
2238 if (flags & TDF_BLOCKS)
2239 {
2240 bb = end[INSN_UID (tmp_rtx)];
2241 if (bb != NULL)
2242 {
2243 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
2244 if (df && (flags & TDF_DETAILS))
2245 df_dump_bottom (bb, outf);
2246 putc ('\n', outf);
2247 }
2248 }
2249 }
2250
2251 free (start);
2252 free (end);
2253 free (in_bb_p);
2254 }
2255 }
2256 \f
2257 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2258
2259 void
2260 update_br_prob_note (basic_block bb)
2261 {
2262 rtx note;
2263 if (!JUMP_P (BB_END (bb)))
2264 return;
2265 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2266 if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
2267 return;
2268 XINT (note, 0) = BRANCH_EDGE (bb)->probability;
2269 }
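/* Note that both REG_BR_PROB notes and edge probabilities are stored
   scaled to REG_BR_PROB_BASE (10000), which is what makes the direct
   comparison and assignment above valid. */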
2270
2271 /* Get the last insn associated with block BB (that includes barriers and
2272 tablejumps after BB). */
2273 rtx_insn *
2274 get_last_bb_insn (basic_block bb)
2275 {
2276 rtx_jump_table_data *table;
2277 rtx_insn *tmp;
2278 rtx_insn *end = BB_END (bb);
2279
2280 /* Include any jump table following the basic block. */
2281 if (tablejump_p (end, NULL, &table))
2282 end = table;
2283
2284 /* Include any barriers that may follow the basic block. */
2285 tmp = next_nonnote_insn_bb (end);
2286 while (tmp && BARRIER_P (tmp))
2287 {
2288 end = tmp;
2289 tmp = next_nonnote_insn_bb (end);
2290 }
2291
2292 return end;
2293 }
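/* E.g. (illustrative), for a block ending in a tablejump the stream
   may look like

       (jump_insn ...)        <- BB_END (bb)
       (code_label ...)
       (jump_table_data ...)
       (barrier)

   and get_last_bb_insn returns the barrier. */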
2294
2295 /* Sanity check partition hotness to ensure that basic blocks in
2296 the cold partition don't dominate basic blocks in the hot partition.
2297 If FLAG_ONLY is true, report violations as errors. Otherwise
2298 re-mark the dominated blocks as cold, since this is run after
2299 cfg optimizations that may make hot blocks previously reached
2300 by both hot and cold blocks now only reachable along cold paths. */
2301
2302 static vec<basic_block>
2303 find_partition_fixes (bool flag_only)
2304 {
2305 basic_block bb;
2306 vec<basic_block> bbs_in_cold_partition = vNULL;
2307 vec<basic_block> bbs_to_fix = vNULL;
2308
2309 /* Callers check this. */
2310 gcc_checking_assert (crtl->has_bb_partition);
2311
2312 FOR_EACH_BB_FN (bb, cfun)
2313 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
2314 bbs_in_cold_partition.safe_push (bb);
2315
2316 if (bbs_in_cold_partition.is_empty ())
2317 return vNULL;
2318
2319 bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
2320
2321 if (dom_calculated_here)
2322 calculate_dominance_info (CDI_DOMINATORS);
2323
2324 while (! bbs_in_cold_partition.is_empty ())
2325 {
2326 bb = bbs_in_cold_partition.pop ();
2327 /* Any blocks dominated by a block in the cold section
2328 must also be cold. */
2329 basic_block son;
2330 for (son = first_dom_son (CDI_DOMINATORS, bb);
2331 son;
2332 son = next_dom_son (CDI_DOMINATORS, son))
2333 {
2334 /* If son is not yet cold, then mark it cold here and
2335 enqueue it for further processing. */
2336 if (BB_PARTITION (son) != BB_COLD_PARTITION)
2337 {
2338 if (flag_only)
2339 error ("non-cold basic block %d dominated "
2340 "by a block in the cold partition (%d)", son->index, bb->index);
2341 else
2342 BB_SET_PARTITION (son, BB_COLD_PARTITION);
2343 bbs_to_fix.safe_push (son);
2344 bbs_in_cold_partition.safe_push (son);
2345 }
2346 }
2347 }
2348
2349 if (dom_calculated_here)
2350 free_dominance_info (CDI_DOMINATORS);
2351
2352 return bbs_to_fix;
2353 }
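/* The loop above is a worklist propagation over the dominator tree:
   once a block is known to be cold, every son in the CDI_DOMINATORS
   tree must become cold as well, so freshly re-marked blocks are
   pushed back onto the worklist until a fixed point is reached. */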
2354
2355 /* Perform cleanup on the hot/cold bb partitioning after optimization
2356 passes that modify the cfg. */
2357
2358 void
2359 fixup_partitions (void)
2360 {
2361 basic_block bb;
2362
2363 if (!crtl->has_bb_partition)
2364 return;
2365
2366 /* Delete any blocks that became unreachable and weren't
2367 already cleaned up, for example during edge forwarding
2368 and convert_jumps_to_returns. This will expose more
2369 opportunities for fixing the partition boundaries here.
2370 Also, the calculation of the dominance graph during verification
2371 will assert if there are unreachable nodes. */
2372 delete_unreachable_blocks ();
2373
2374 /* If there are partitions, do a sanity check on them: A basic block in
2375 a cold partition cannot dominate a basic block in a hot partition.
2376 Fix up any that now violate this requirement, as a result of edge
2377 forwarding and unreachable block deletion. */
2378 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2379
2380 /* Do the partition fixup after all necessary blocks have been converted to
2381 cold, so that we only update the region crossings the minimum number of
2382 places, which can require forcing edges to be non fallthru. */
2383 while (! bbs_to_fix.is_empty ())
2384 {
2385 bb = bbs_to_fix.pop ();
2386 fixup_new_cold_bb (bb);
2387 }
2388 }
2389
2390 /* Verify, in the basic block chain, that there is at most one switch
2391 between hot/cold partitions. This condition will not be true until
2392 after reorder_basic_blocks is called. */
2393
2394 static int
2395 verify_hot_cold_block_grouping (void)
2396 {
2397 basic_block bb;
2398 int err = 0;
2399 bool switched_sections = false;
2400 int current_partition = BB_UNPARTITIONED;
2401
2402 /* Even after bb reordering is complete, we go into cfglayout mode
2403 again (in compgoto). Don't run this check until we are back in
2404 linearized RTL, when any layout fixes will have been committed. */
2405 if (!crtl->bb_reorder_complete
2406 || current_ir_type () != IR_RTL_CFGRTL)
2407 return err;
2408
2409 FOR_EACH_BB_FN (bb, cfun)
2410 {
2411 if (current_partition != BB_UNPARTITIONED
2412 && BB_PARTITION (bb) != current_partition)
2413 {
2414 if (switched_sections)
2415 {
2416 error ("multiple hot/cold transitions found (bb %i)",
2417 bb->index);
2418 err = 1;
2419 }
2420 else
2421 switched_sections = true;
2422
2423 if (!crtl->has_bb_partition)
2424 error ("partition found but function partition flag not set");
2425 }
2426 current_partition = BB_PARTITION (bb);
2427 }
2428
2429 return err;
2430 }
2431 \f
2432
2433 /* Perform several checks on the edges out of each block, such as
2434 the consistency of the branch probabilities, the correctness
2435 of hot/cold partition crossing edges, and the number of expected
2436 successor edges. Also verify that the dominance relationship
2437 between hot/cold blocks is sane. */
2438
2439 static int
2440 rtl_verify_edges (void)
2441 {
2442 int err = 0;
2443 basic_block bb;
2444
2445 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2446 {
2447 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2448 int n_eh = 0, n_abnormal = 0;
2449 edge e, fallthru = NULL;
2450 edge_iterator ei;
2451 rtx note;
2452 bool has_crossing_edge = false;
2453
2454 if (JUMP_P (BB_END (bb))
2455 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2456 && EDGE_COUNT (bb->succs) >= 2
2457 && any_condjump_p (BB_END (bb)))
2458 {
2459 if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
2460 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2461 {
2462 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2463 XINT (note, 0), BRANCH_EDGE (bb)->probability);
2464 err = 1;
2465 }
2466 }
2467
2468 FOR_EACH_EDGE (e, ei, bb->succs)
2469 {
2470 bool is_crossing;
2471
2472 if (e->flags & EDGE_FALLTHRU)
2473 n_fallthru++, fallthru = e;
2474
2475 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2476 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2477 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2478 has_crossing_edge |= is_crossing;
2479 if (e->flags & EDGE_CROSSING)
2480 {
2481 if (!is_crossing)
2482 {
2483 error ("EDGE_CROSSING incorrectly set across same section");
2484 err = 1;
2485 }
2486 if (e->flags & EDGE_FALLTHRU)
2487 {
2488 error ("fallthru edge crosses section boundary in bb %i",
2489 e->src->index);
2490 err = 1;
2491 }
2492 if (e->flags & EDGE_EH)
2493 {
2494 error ("EH edge crosses section boundary in bb %i",
2495 e->src->index);
2496 err = 1;
2497 }
2498 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2499 {
2500 error ("No region crossing jump at section boundary in bb %i",
2501 bb->index);
2502 err = 1;
2503 }
2504 }
2505 else if (is_crossing)
2506 {
2507 error ("EDGE_CROSSING missing across section boundary");
2508 err = 1;
2509 }
2510
2511 if ((e->flags & ~(EDGE_DFS_BACK
2512 | EDGE_CAN_FALLTHRU
2513 | EDGE_IRREDUCIBLE_LOOP
2514 | EDGE_LOOP_EXIT
2515 | EDGE_CROSSING
2516 | EDGE_PRESERVE)) == 0)
2517 n_branch++;
2518
2519 if (e->flags & EDGE_ABNORMAL_CALL)
2520 n_abnormal_call++;
2521
2522 if (e->flags & EDGE_SIBCALL)
2523 n_sibcall++;
2524
2525 if (e->flags & EDGE_EH)
2526 n_eh++;
2527
2528 if (e->flags & EDGE_ABNORMAL)
2529 n_abnormal++;
2530 }
2531
2532 if (!has_crossing_edge
2533 && JUMP_P (BB_END (bb))
2534 && CROSSING_JUMP_P (BB_END (bb)))
2535 {
2536 print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
2537 error ("Region crossing jump across same section in bb %i",
2538 bb->index);
2539 err = 1;
2540 }
2541
2542 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2543 {
2544 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2545 err = 1;
2546 }
2547 if (n_eh > 1)
2548 {
2549 error ("too many exception handling edges in bb %i", bb->index);
2550 err = 1;
2551 }
2552 if (n_branch
2553 && (!JUMP_P (BB_END (bb))
2554 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2555 || any_condjump_p (BB_END (bb))))))
2556 {
2557 error ("too many outgoing branch edges from bb %i", bb->index);
2558 err = 1;
2559 }
2560 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2561 {
2562 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2563 err = 1;
2564 }
2565 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2566 {
2567 error ("wrong number of branch edges after unconditional jump"
2568 " in bb %i", bb->index);
2569 err = 1;
2570 }
2571 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2572 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2573 {
2574 error ("wrong amount of branch edges after conditional jump"
2575 " in bb %i", bb->index);
2576 err = 1;
2577 }
2578 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2579 {
2580 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2581 err = 1;
2582 }
2583 if (n_sibcall && !CALL_P (BB_END (bb)))
2584 {
2585 error ("sibcall edges for non-call insn in bb %i", bb->index);
2586 err = 1;
2587 }
2588 if (n_abnormal > n_eh
2589 && !(CALL_P (BB_END (bb))
2590 && n_abnormal == n_abnormal_call + n_sibcall)
2591 && (!JUMP_P (BB_END (bb))
2592 || any_condjump_p (BB_END (bb))
2593 || any_uncondjump_p (BB_END (bb))))
2594 {
2595 error ("abnormal edges for no purpose in bb %i", bb->index);
2596 err = 1;
2597 }
2598 }
2599
2600 /* If there are partitions, do a sanity check on them: A basic block in
2601 a cold partition cannot dominate a basic block in a hot partition. */
2602 if (crtl->has_bb_partition && !err)
2603 {
2604 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2605 err = !bbs_to_fix.is_empty ();
2606 }
2607
2608 /* Clean up. */
2609 return err;
2610 }
2611
2612 /* Checks on the instructions within blocks. Currently checks that each
2613 block starts with a basic block note, and that basic block notes and
2614 control flow jumps are not found in the middle of the block. */
2615
2616 static int
2617 rtl_verify_bb_insns (void)
2618 {
2619 rtx_insn *x;
2620 int err = 0;
2621 basic_block bb;
2622
2623 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2624 {
2625 /* Now check the header of the basic
2626 block. It ought to contain an optional CODE_LABEL followed
2627 by NOTE_BASIC_BLOCK. */
2628 x = BB_HEAD (bb);
2629 if (LABEL_P (x))
2630 {
2631 if (BB_END (bb) == x)
2632 {
2633 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2634 bb->index);
2635 err = 1;
2636 }
2637
2638 x = NEXT_INSN (x);
2639 }
2640
2641 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2642 {
2643 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2644 bb->index);
2645 err = 1;
2646 }
2647
2648 if (BB_END (bb) == x)
2649 /* Do checks for empty blocks here. */
2650 ;
2651 else
2652 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2653 {
2654 if (NOTE_INSN_BASIC_BLOCK_P (x))
2655 {
2656 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2657 INSN_UID (x), bb->index);
2658 err = 1;
2659 }
2660
2661 if (x == BB_END (bb))
2662 break;
2663
2664 if (control_flow_insn_p (x))
2665 {
2666 error ("in basic block %d:", bb->index);
2667 fatal_insn ("flow control insn inside a basic block", x);
2668 }
2669 }
2670 }
2671
2672 /* Clean up. */
2673 return err;
2674 }
2675
2676 /* Verify that block pointers for instructions in basic blocks, headers and
2677 footers are set appropriately. */
2678
2679 static int
2680 rtl_verify_bb_pointers (void)
2681 {
2682 int err = 0;
2683 basic_block bb;
2684
2685 /* Check the general integrity of the basic blocks. */
2686 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2687 {
2688 rtx_insn *insn;
2689
2690 if (!(bb->flags & BB_RTL))
2691 {
2692 error ("BB_RTL flag not set for block %d", bb->index);
2693 err = 1;
2694 }
2695
2696 FOR_BB_INSNS (bb, insn)
2697 if (BLOCK_FOR_INSN (insn) != bb)
2698 {
2699 error ("insn %d basic block pointer is %d, should be %d",
2700 INSN_UID (insn),
2701 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2702 bb->index);
2703 err = 1;
2704 }
2705
2706 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2707 if (!BARRIER_P (insn)
2708 && BLOCK_FOR_INSN (insn) != NULL)
2709 {
2710 error ("insn %d in header of bb %d has non-NULL basic block",
2711 INSN_UID (insn), bb->index);
2712 err = 1;
2713 }
2714 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2715 if (!BARRIER_P (insn)
2716 && BLOCK_FOR_INSN (insn) != NULL)
2717 {
2718 error ("insn %d in footer of bb %d has non-NULL basic block",
2719 INSN_UID (insn), bb->index);
2720 err = 1;
2721 }
2722 }
2723
2724 /* Clean up. */
2725 return err;
2726 }
2727
2728 /* Verify the CFG and RTL consistency common for both underlying RTL and
2729 cfglayout RTL.
2730
2731 Currently it does the following checks:
2732
2733 - overlapping of basic blocks
2734 - insns with wrong BLOCK_FOR_INSN pointers
2735 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2736 - tails of basic blocks (ensure that boundary is necessary)
2737 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2738 and NOTE_INSN_BASIC_BLOCK
2739 - verify that no fall_thru edge crosses hot/cold partition boundaries
2740 - verify that there are no pending RTL branch predictions
2741 - verify that hot blocks are not dominated by cold blocks
2742
2743 In the future it can be extended to check a lot of other stuff as well
2744 (reachability of basic blocks, life information, etc. etc.). */
2745
2746 static int
2747 rtl_verify_flow_info_1 (void)
2748 {
2749 int err = 0;
2750
2751 err |= rtl_verify_bb_pointers ();
2752
2753 err |= rtl_verify_bb_insns ();
2754
2755 err |= rtl_verify_edges ();
2756
2757 return err;
2758 }
2759
2760 /* Walk the instruction chain and verify that bb head/end pointers
2761 are correct, and that instructions are in exactly one bb and have
2762 correct block pointers. */
2763
2764 static int
2765 rtl_verify_bb_insn_chain (void)
2766 {
2767 basic_block bb;
2768 int err = 0;
2769 rtx_insn *x;
2770 rtx_insn *last_head = get_last_insn ();
2771 basic_block *bb_info;
2772 const int max_uid = get_max_uid ();
2773
2774 bb_info = XCNEWVEC (basic_block, max_uid);
2775
2776 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2777 {
2778 rtx_insn *head = BB_HEAD (bb);
2779 rtx_insn *end = BB_END (bb);
2780
2781 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2782 {
2783 /* Verify the end of the basic block is in the INSN chain. */
2784 if (x == end)
2785 break;
2786
2787 /* And that the code outside of basic blocks has a NULL bb field. */
2788 if (!BARRIER_P (x)
2789 && BLOCK_FOR_INSN (x) != NULL)
2790 {
2791 error ("insn %d outside of basic blocks has non-NULL bb field",
2792 INSN_UID (x));
2793 err = 1;
2794 }
2795 }
2796
2797 if (!x)
2798 {
2799 error ("end insn %d for block %d not found in the insn stream",
2800 INSN_UID (end), bb->index);
2801 err = 1;
2802 }
2803
2804 /* Work backwards from the end to the head of the basic block
2805 to verify the head is in the RTL chain. */
2806 for (; x != NULL_RTX; x = PREV_INSN (x))
2807 {
2808 /* While walking over the insn chain, verify insns appear
2809 in only one basic block. */
2810 if (bb_info[INSN_UID (x)] != NULL)
2811 {
2812 error ("insn %d is in multiple basic blocks (%d and %d)",
2813 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2814 err = 1;
2815 }
2816
2817 bb_info[INSN_UID (x)] = bb;
2818
2819 if (x == head)
2820 break;
2821 }
2822 if (!x)
2823 {
2824 error ("head insn %d for block %d not found in the insn stream",
2825 INSN_UID (head), bb->index);
2826 err = 1;
2827 }
2828
2829 last_head = PREV_INSN (x);
2830 }
2831
2832 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2833 {
2834 /* Check that the code before the first basic block has a NULL
2835 bb field. */
2836 if (!BARRIER_P (x)
2837 && BLOCK_FOR_INSN (x) != NULL)
2838 {
2839 error ("insn %d outside of basic blocks has non-NULL bb field",
2840 INSN_UID (x));
2841 err = 1;
2842 }
2843 }
2844 free (bb_info);
2845
2846 return err;
2847 }
2848
2849 /* Verify that fallthru edges point to adjacent blocks in layout order and
2850 that barriers exist after non-fallthru blocks. */
2851
2852 static int
2853 rtl_verify_fallthru (void)
2854 {
2855 basic_block bb;
2856 int err = 0;
2857
2858 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2859 {
2860 edge e;
2861
2862 e = find_fallthru_edge (bb->succs);
2863 if (!e)
2864 {
2865 rtx_insn *insn;
2866
2867 /* Ensure that a barrier exists after a BB with no fallthru edges. */
2868 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2869 {
2870 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2871 {
2872 error ("missing barrier after block %i", bb->index);
2873 err = 1;
2874 break;
2875 }
2876 if (BARRIER_P (insn))
2877 break;
2878 }
2879 }
2880 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2881 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2882 {
2883 rtx_insn *insn;
2884
2885 if (e->src->next_bb != e->dest)
2886 {
2887 error
2888 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2889 e->src->index, e->dest->index);
2890 err = 1;
2891 }
2892 else
2893 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2894 insn = NEXT_INSN (insn))
2895 if (BARRIER_P (insn) || INSN_P (insn))
2896 {
2897 error ("verify_flow_info: Incorrect fallthru %i->%i",
2898 e->src->index, e->dest->index);
2899 fatal_insn ("wrong insn in the fallthru edge", insn);
2900 err = 1;
2901 }
2902 }
2903 }
2904
2905 return err;
2906 }
2907
2908 /* Verify that blocks are laid out in consecutive order. While walking the
2909 instructions, verify that all expected instructions are inside the basic
2910 blocks, and that all returns are followed by barriers. */
2911
2912 static int
2913 rtl_verify_bb_layout (void)
2914 {
2915 basic_block bb;
2916 int err = 0;
2917 rtx_insn *x;
2918 int num_bb_notes;
2919 rtx_insn * const rtx_first = get_insns ();
2920 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2921
2922 num_bb_notes = 0;
2923 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2924
2925 for (x = rtx_first; x; x = NEXT_INSN (x))
2926 {
2927 if (NOTE_INSN_BASIC_BLOCK_P (x))
2928 {
2929 bb = NOTE_BASIC_BLOCK (x);
2930
2931 num_bb_notes++;
2932 if (bb != last_bb_seen->next_bb)
2933 internal_error ("basic blocks not laid down consecutively");
2934
2935 curr_bb = last_bb_seen = bb;
2936 }
2937
2938 if (!curr_bb)
2939 {
2940 switch (GET_CODE (x))
2941 {
2942 case BARRIER:
2943 case NOTE:
2944 break;
2945
2946 case CODE_LABEL:
2947 /* An ADDR_VEC is placed outside any basic block. */
2948 if (NEXT_INSN (x)
2949 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2950 x = NEXT_INSN (x);
2951
2952 /* But in any case, non-deletable labels can appear anywhere. */
2953 break;
2954
2955 default:
2956 fatal_insn ("insn outside basic block", x);
2957 }
2958 }
2959
2960 if (JUMP_P (x)
2961 && returnjump_p (x) && ! condjump_p (x)
2962 && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
2963 fatal_insn ("return not followed by barrier", x);
2964
2965 if (curr_bb && x == BB_END (curr_bb))
2966 curr_bb = NULL;
2967 }
2968
2969 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2970 internal_error
2971 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2972 num_bb_notes, n_basic_blocks_for_fn (cfun));
2973
2974 return err;
2975 }
2976
2977 /* Verify the CFG and RTL consistency common for both underlying RTL and
2978 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2979
2980 Currently it does the following checks:
2981 - all checks of rtl_verify_flow_info_1
2982 - test head/end pointers
2983 - check that blocks are laid out in consecutive order
2984 - check that all insns are in the basic blocks
2985 (except the switch handling code, barriers and notes)
2986 - check that all returns are followed by barriers
2987 - check that all fallthru edges point to the adjacent blocks
2988 - verify that there is a single hot/cold partition boundary after bbro */
2989
2990 static int
2991 rtl_verify_flow_info (void)
2992 {
2993 int err = 0;
2994
2995 err |= rtl_verify_flow_info_1 ();
2996
2997 err |= rtl_verify_bb_insn_chain ();
2998
2999 err |= rtl_verify_fallthru ();
3000
3001 err |= rtl_verify_bb_layout ();
3002
3003 err |= verify_hot_cold_block_grouping ();
3004
3005 return err;
3006 }
3007 \f
3008 /* Assume that the preceding pass has possibly eliminated jump instructions
3009 or converted the unconditional jumps. Eliminate the corresponding edges
3010 from the CFG. Return true if any edges are eliminated. */
3011
3012 bool
3013 purge_dead_edges (basic_block bb)
3014 {
3015 edge e;
3016 rtx_insn *insn = BB_END (bb);
3017 rtx note;
3018 bool purged = false;
3019 bool found;
3020 edge_iterator ei;
3021
3022 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3023 do
3024 insn = PREV_INSN (insn);
3025 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3026
3027 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3028 if (NONJUMP_INSN_P (insn)
3029 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3030 {
3031 rtx eqnote;
3032
3033 if (! may_trap_p (PATTERN (insn))
3034 || ((eqnote = find_reg_equal_equiv_note (insn))
3035 && ! may_trap_p (XEXP (eqnote, 0))))
3036 remove_note (insn, note);
3037 }
3038
3039 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3040 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3041 {
3042 bool remove = false;
3043
3044 /* There are three types of edges we need to handle correctly here: EH
3045 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3046 latter can appear when nonlocal gotos are used. */
3047 if (e->flags & EDGE_ABNORMAL_CALL)
3048 {
3049 if (!CALL_P (insn))
3050 remove = true;
3051 else if (can_nonlocal_goto (insn))
3052 ;
3053 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3054 ;
3055 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3056 ;
3057 else
3058 remove = true;
3059 }
3060 else if (e->flags & EDGE_EH)
3061 remove = !can_throw_internal (insn);
3062
3063 if (remove)
3064 {
3065 remove_edge (e);
3066 df_set_bb_dirty (bb);
3067 purged = true;
3068 }
3069 else
3070 ei_next (&ei);
3071 }
3072
3073 if (JUMP_P (insn))
3074 {
3075 rtx note;
3076 edge b,f;
3077 edge_iterator ei;
3078
3079 /* We care only about conditional jumps and simplejumps. */
3080 if (!any_condjump_p (insn)
3081 && !returnjump_p (insn)
3082 && !simplejump_p (insn))
3083 return purged;
3084
3085 /* Branch probability/prediction notes are defined only for
3086 condjumps. We've possibly turned the condjump into a simplejump. */
3087 if (simplejump_p (insn))
3088 {
3089 note = find_reg_note (insn, REG_BR_PROB, NULL);
3090 if (note)
3091 remove_note (insn, note);
3092 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3093 remove_note (insn, note);
3094 }
3095
3096 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3097 {
3098 /* Don't let abnormal flags leak from computed jumps turned
3099 into simplejumps. */
3100
3101 e->flags &= ~EDGE_ABNORMAL;
3102
3103 /* See if this edge is one we should keep. */
3104 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3105 /* A conditional jump can fall through into the next
3106 block, so we should keep the edge. */
3107 {
3108 ei_next (&ei);
3109 continue;
3110 }
3111 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3112 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3113 /* If the destination block is the target of the jump,
3114 keep the edge. */
3115 {
3116 ei_next (&ei);
3117 continue;
3118 }
3119 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3120 && returnjump_p (insn))
3121 /* If the destination block is the exit block, and this
3122 instruction is a return, then keep the edge. */
3123 {
3124 ei_next (&ei);
3125 continue;
3126 }
3127 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3128 /* Keep the edges that correspond to exceptions thrown by
3129 this instruction and rematerialize the EDGE_ABNORMAL
3130 flag we just cleared above. */
3131 {
3132 e->flags |= EDGE_ABNORMAL;
3133 ei_next (&ei);
3134 continue;
3135 }
3136
3137 /* We do not need this edge. */
3138 df_set_bb_dirty (bb);
3139 purged = true;
3140 remove_edge (e);
3141 }
3142
3143 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3144 return purged;
3145
3146 if (dump_file)
3147 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3148
3149 if (!optimize)
3150 return purged;
3151
3152 /* Redistribute probabilities. */
3153 if (single_succ_p (bb))
3154 {
3155 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3156 single_succ_edge (bb)->count = bb->count;
3157 }
3158 else
3159 {
3160 note = find_reg_note (insn, REG_BR_PROB, NULL);
3161 if (!note)
3162 return purged;
3163
3164 b = BRANCH_EDGE (bb);
3165 f = FALLTHRU_EDGE (bb);
3166 b->probability = XINT (note, 0);
3167 f->probability = REG_BR_PROB_BASE - b->probability;
3168 /* Update these to use GCOV_COMPUTE_SCALE. */
3169 b->count = bb->count * b->probability / REG_BR_PROB_BASE;
3170 f->count = bb->count * f->probability / REG_BR_PROB_BASE;
3171 }
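/* Worked example (illustrative): with bb->count == 1000 and a
   REG_BR_PROB note of 9000, the branch edge gets probability 9000
   and count 1000 * 9000 / 10000 == 900, while the fallthru edge
   gets probability 1000 and count 100 (REG_BR_PROB_BASE being
   10000). */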
3172
3173 return purged;
3174 }
3175 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3176 {
3177 /* First, there should not be any EH or ABCALL edges resulting
3178 from non-local gotos and the like. If there were, we shouldn't
3179 have created the sibcall in the first place. Second, there
3180 should of course never have been a fallthru edge. */
3181 gcc_assert (single_succ_p (bb));
3182 gcc_assert (single_succ_edge (bb)->flags
3183 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3184
3185 return false;
3186 }
3187
3188 /* If we don't see a jump insn, we don't know exactly why the block would
3189 have been broken at this point. Look for a simple, non-fallthru edge,
3190 as these are only created by conditional branches. If we find such an
3191 edge we know that there used to be a jump here and can then safely
3192 remove all non-fallthru edges. */
3193 found = false;
3194 FOR_EACH_EDGE (e, ei, bb->succs)
3195 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3196 {
3197 found = true;
3198 break;
3199 }
3200
3201 if (!found)
3202 return purged;
3203
3204 /* Remove all but the fake and fallthru edges. The fake edge may be
3205 the only successor for this block in the case of noreturn
3206 calls. */
3207 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3208 {
3209 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3210 {
3211 df_set_bb_dirty (bb);
3212 remove_edge (e);
3213 purged = true;
3214 }
3215 else
3216 ei_next (&ei);
3217 }
3218
3219 gcc_assert (single_succ_p (bb));
3220
3221 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3222 single_succ_edge (bb)->count = bb->count;
3223
3224 if (dump_file)
3225 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3226 bb->index);
3227 return purged;
3228 }
3229
3230 /* Search all basic blocks for potentially dead edges and purge them. Return
3231 true if some edge has been eliminated. */
3232
3233 bool
3234 purge_all_dead_edges (void)
3235 {
3236 bool purged = false;
3237 basic_block bb;
3238
3239 FOR_EACH_BB_FN (bb, cfun)
3240 {
3241 bool purged_here = purge_dead_edges (bb);
3242
3243 purged |= purged_here;
3244 }
3245
3246 return purged;
3247 }
3248
3249 /* This is used by a few passes that emit some instructions after abnormal
3250 calls, moving the basic block's end, while they in fact do want to emit
3251 them on the fallthru edge. Look for abnormal call edges, search backward
3252 for the call in the block, and insert the instructions on the edge instead.
3253
3254 Similarly, handle instructions throwing exceptions internally.
3255
3256 Return true when instructions have been found and inserted on edges. */
3257
3258 bool
3259 fixup_abnormal_edges (void)
3260 {
3261 bool inserted = false;
3262 basic_block bb;
3263
3264 FOR_EACH_BB_FN (bb, cfun)
3265 {
3266 edge e;
3267 edge_iterator ei;
3268
3269 /* Look for cases we are interested in - calls or instructions causing
3270 exceptions. */
3271 FOR_EACH_EDGE (e, ei, bb->succs)
3272 if ((e->flags & EDGE_ABNORMAL_CALL)
3273 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3274 == (EDGE_ABNORMAL | EDGE_EH)))
3275 break;
3276
3277 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3278 {
3279 rtx_insn *insn;
3280
3281 /* Get past the new insns generated. Allow notes, as the insns
3282 may already be deleted. */
3283 insn = BB_END (bb);
3284 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3285 && !can_throw_internal (insn)
3286 && insn != BB_HEAD (bb))
3287 insn = PREV_INSN (insn);
3288
3289 if (CALL_P (insn) || can_throw_internal (insn))
3290 {
3291 rtx_insn *stop, *next;
3292
3293 e = find_fallthru_edge (bb->succs);
3294
3295 stop = NEXT_INSN (BB_END (bb));
3296 BB_END (bb) = insn;
3297
3298 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3299 {
3300 next = NEXT_INSN (insn);
3301 if (INSN_P (insn))
3302 {
3303 delete_insn (insn);
3304
3305 /* Sometimes there's still the return value USE.
3306 If it's placed after a trapping call (i.e. that
3307 call is the last insn anyway), we have no fallthru
3308 edge. Simply delete this use and don't try to insert
3309 on the non-existent edge. */
3310 if (GET_CODE (PATTERN (insn)) != USE)
3311 {
3312 /* We're not deleting it, we're moving it. */
3313 insn->set_undeleted ();
3314 SET_PREV_INSN (insn) = NULL_RTX;
3315 SET_NEXT_INSN (insn) = NULL_RTX;
3316
3317 insert_insn_on_edge (insn, e);
3318 inserted = true;
3319 }
3320 }
3321 else if (!BARRIER_P (insn))
3322 set_block_for_insn (insn, NULL);
3323 }
3324 }
3325
3326 /* It may be that we don't find any trapping insn. In this
3327 case we discovered quite late that the insn that had been
3328 marked as can_throw_internal in fact couldn't trap at all.
3329 So we should in fact delete the EH edges out of the block. */
3330 else
3331 purge_dead_edges (bb);
3332 }
3333 }
3334
3335 return inserted;
3336 }
3337 \f
3338 /* Cut the insns from FIRST to LAST out of the insns stream. */
3339
3340 rtx_insn *
3341 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3342 {
3343 rtx_insn *prevfirst = PREV_INSN (first);
3344 rtx_insn *nextlast = NEXT_INSN (last);
3345
3346 SET_PREV_INSN (first) = NULL;
3347 SET_NEXT_INSN (last) = NULL;
3348 if (prevfirst)
3349 SET_NEXT_INSN (prevfirst) = nextlast;
3350 if (nextlast)
3351 SET_PREV_INSN (nextlast) = prevfirst;
3352 else
3353 set_last_insn (prevfirst);
3354 if (!prevfirst)
3355 set_first_insn (nextlast);
3356 return first;
3357 }
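/* E.g. (illustrative): unlinking B..C from the stream A-B-...-C-D
   splices A directly to D (updating the global first/last insn
   pointers when B or C sat at an end) and returns B, whose detached
   sub-chain now has NULL PREV_INSN/NEXT_INSN at its ends. */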
3358 \f
3359 /* Skip over inter-block insns occurring after BB which are typically
3360 associated with BB (e.g., barriers). If there are any such insns,
3361 we return the last one. Otherwise, we return the end of BB. */
3362
3363 static rtx_insn *
3364 skip_insns_after_block (basic_block bb)
3365 {
3366 rtx_insn *insn, *last_insn, *next_head, *prev;
3367
3368 next_head = NULL;
3369 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3370 next_head = BB_HEAD (bb->next_bb);
3371
3372 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3373 {
3374 if (insn == next_head)
3375 break;
3376
3377 switch (GET_CODE (insn))
3378 {
3379 case BARRIER:
3380 last_insn = insn;
3381 continue;
3382
3383 case NOTE:
3384 switch (NOTE_KIND (insn))
3385 {
3386 case NOTE_INSN_BLOCK_END:
3387 gcc_unreachable ();
3388 continue;
3389 default:
3390 continue;
3392 }
3393 break;
3394
3395 case CODE_LABEL:
3396 if (NEXT_INSN (insn)
3397 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3398 {
3399 insn = NEXT_INSN (insn);
3400 last_insn = insn;
3401 continue;
3402 }
3403 break;
3404
3405 default:
3406 break;
3407 }
3408
3409 break;
3410 }
3411
3412 /* It is possible to hit a contradictory sequence. For instance:
3413
3414 jump_insn
3415 NOTE_INSN_BLOCK_BEG
3416 barrier
3417
3418 where the barrier belongs to the jump_insn, but the note does not. This
3419 can be created by removing the basic block originally following
3420 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3421
3422 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3423 {
3424 prev = PREV_INSN (insn);
3425 if (NOTE_P (insn))
3426 switch (NOTE_KIND (insn))
3427 {
3428 case NOTE_INSN_BLOCK_END:
3429 gcc_unreachable ();
3430 break;
3431 case NOTE_INSN_DELETED:
3432 case NOTE_INSN_DELETED_LABEL:
3433 case NOTE_INSN_DELETED_DEBUG_LABEL:
3434 continue;
3435 default:
3436 reorder_insns (insn, insn, last_insn);
3437 }
3438 }
3439
3440 return last_insn;
3441 }
3442
3443 /* Locate or create a label for a given basic block. */
3444
3445 static rtx_insn *
3446 label_for_bb (basic_block bb)
3447 {
3448 rtx_insn *label = BB_HEAD (bb);
3449
3450 if (!LABEL_P (label))
3451 {
3452 if (dump_file)
3453 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3454
3455 label = block_label (bb);
3456 }
3457
3458 return label;
3459 }
3460
3461 /* Locate the effective beginning and end of the insn chain for each
3462 block, as defined by skip_insns_after_block above. */
3463
3464 static void
3465 record_effective_endpoints (void)
3466 {
3467 rtx_insn *next_insn;
3468 basic_block bb;
3469 rtx_insn *insn;
3470
3471 for (insn = get_insns ();
3472 insn
3473 && NOTE_P (insn)
3474 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3475 insn = NEXT_INSN (insn))
3476 continue;
3477 /* No basic blocks at all? */
3478 gcc_assert (insn);
3479
3480 if (PREV_INSN (insn))
3481 cfg_layout_function_header =
3482 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3483 else
3484 cfg_layout_function_header = NULL;
3485
3486 next_insn = get_insns ();
3487 FOR_EACH_BB_FN (bb, cfun)
3488 {
3489 rtx_insn *end;
3490
3491 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3492 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3493 PREV_INSN (BB_HEAD (bb)));
3494 end = skip_insns_after_block (bb);
3495 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3496 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3497 next_insn = NEXT_INSN (BB_END (bb));
3498 }
3499
3500 cfg_layout_function_footer = next_insn;
3501 if (cfg_layout_function_footer)
3502 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3503 }
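/* An illustrative picture (not from the original sources): for the
   stream

       insns before the first bb head -> cfg_layout_function_header
       bb 2: label, bb note, insns
       barrier                        -> BB_FOOTER (bb 2)
       bb 3: label, bb note, insns
       insns after the last bb        -> cfg_layout_function_footer

   the inter-block insns are detached from the main chain and hung off
   the header/footer fields, to be spliced back in later by
   fixup_reorder_chain. */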
3504 \f
3505 namespace {
3506
3507 const pass_data pass_data_into_cfg_layout_mode =
3508 {
3509 RTL_PASS, /* type */
3510 "into_cfglayout", /* name */
3511 OPTGROUP_NONE, /* optinfo_flags */
3512 TV_CFG, /* tv_id */
3513 0, /* properties_required */
3514 PROP_cfglayout, /* properties_provided */
3515 0, /* properties_destroyed */
3516 0, /* todo_flags_start */
3517 0, /* todo_flags_finish */
3518 };
3519
3520 class pass_into_cfg_layout_mode : public rtl_opt_pass
3521 {
3522 public:
3523 pass_into_cfg_layout_mode (gcc::context *ctxt)
3524 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3525 {}
3526
3527 /* opt_pass methods: */
3528 virtual unsigned int execute (function *)
3529 {
3530 cfg_layout_initialize (0);
3531 return 0;
3532 }
3533
3534 }; // class pass_into_cfg_layout_mode
3535
3536 } // anon namespace
3537
3538 rtl_opt_pass *
3539 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3540 {
3541 return new pass_into_cfg_layout_mode (ctxt);
3542 }
3543
3544 namespace {
3545
3546 const pass_data pass_data_outof_cfg_layout_mode =
3547 {
3548 RTL_PASS, /* type */
3549 "outof_cfglayout", /* name */
3550 OPTGROUP_NONE, /* optinfo_flags */
3551 TV_CFG, /* tv_id */
3552 0, /* properties_required */
3553 0, /* properties_provided */
3554 PROP_cfglayout, /* properties_destroyed */
3555 0, /* todo_flags_start */
3556 0, /* todo_flags_finish */
3557 };
3558
3559 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3560 {
3561 public:
3562 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3563 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3564 {}
3565
3566 /* opt_pass methods: */
3567 virtual unsigned int execute (function *);
3568
3569 }; // class pass_outof_cfg_layout_mode
3570
3571 unsigned int
3572 pass_outof_cfg_layout_mode::execute (function *fun)
3573 {
3574 basic_block bb;
3575
3576 FOR_EACH_BB_FN (bb, fun)
3577 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3578 bb->aux = bb->next_bb;
3579
3580 cfg_layout_finalize ();
3581
3582 return 0;
3583 }
3584
3585 } // anon namespace
3586
3587 rtl_opt_pass *
3588 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3589 {
3590 return new pass_outof_cfg_layout_mode (ctxt);
3591 }
3592 \f
3593
3594 /* Link the basic blocks in the correct order, compacting the basic
3595 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3596 function also clears the basic block header and footer fields.
3597
3598 This function is usually called after a pass (e.g. tracer) finishes
3599 some transformations while in cfglayout mode. The required sequence
3600 of the basic blocks is in a linked list along the bb->aux field.
3601 This function re-links the basic block prev_bb and next_bb pointers
3602 accordingly, and it compacts and renumbers the blocks.
3603
3604 FIXME: This currently works only for RTL, but the only RTL-specific
3605 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3606 to GIMPLE a long time ago, but it doesn't relink the basic block
3607 chain. It could do that (to give better initial RTL) if this function
3608 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3609
3610 void
3611 relink_block_chain (bool stay_in_cfglayout_mode)
3612 {
3613 basic_block bb, prev_bb;
3614 int index;
3615
3616 /* Maybe dump the re-ordered sequence. */
3617 if (dump_file)
3618 {
3619 fprintf (dump_file, "Reordered sequence:\n");
3620 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3621 NUM_FIXED_BLOCKS;
3622 bb;
3623 bb = (basic_block) bb->aux, index++)
3624 {
3625 fprintf (dump_file, " %i ", index);
3626 if (get_bb_original (bb))
3627 fprintf (dump_file, "duplicate of %i ",
3628 get_bb_original (bb)->index);
3629 else if (forwarder_block_p (bb)
3630 && !LABEL_P (BB_HEAD (bb)))
3631 fprintf (dump_file, "compensation ");
3632 else
3633 fprintf (dump_file, "bb %i ", bb->index);
3634 fprintf (dump_file, " [%i]\n", bb->frequency);
3635 }
3636 }
3637
3638 /* Now reorder the blocks. */
3639 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3640 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3641 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3642 {
3643 bb->prev_bb = prev_bb;
3644 prev_bb->next_bb = bb;
3645 }
3646 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3647 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3648
3649 /* Then, clean up the aux fields. */
3650 FOR_ALL_BB_FN (bb, cfun)
3651 {
3652 bb->aux = NULL;
3653 if (!stay_in_cfglayout_mode)
3654 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3655 }
3656
3657 /* Maybe reset the original copy tables; they are not valid anymore
3658 when we renumber the basic blocks in compact_blocks. If we are
3659 going out of cfglayout mode, don't re-allocate the tables. */
3660 free_original_copy_tables ();
3661 if (stay_in_cfglayout_mode)
3662 initialize_original_copy_tables ();
3663
3664 /* Finally, put basic_block_info in the new order. */
3665 compact_blocks ();
3666 }
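/* Typical usage (sketch): a pass in cfglayout mode records the desired
   block order through the bb->aux chain and then reaches this
   function, e.g.

       FOR_EACH_BB_FN (bb, cfun)
         bb->aux = <the block that should follow bb>;

   pass_outof_cfg_layout_mode::execute above sets up the chain this way
   before calling cfg_layout_finalize, which relinks the chain via
   fixup_reorder_chain below. */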
3667 \f
3668
3669 /* Given a reorder chain, rearrange the code to match. */
3670
3671 static void
3672 fixup_reorder_chain (void)
3673 {
3674 basic_block bb;
3675 rtx_insn *insn = NULL;
3676
3677 if (cfg_layout_function_header)
3678 {
3679 set_first_insn (cfg_layout_function_header);
3680 insn = cfg_layout_function_header;
3681 while (NEXT_INSN (insn))
3682 insn = NEXT_INSN (insn);
3683 }
3684
3685 /* First do the bulk reordering -- rechain the blocks without regard to
3686 the needed changes to jumps and labels. */
3687
3688 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3689 bb->aux)
3690 {
3691 if (BB_HEADER (bb))
3692 {
3693 if (insn)
3694 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3695 else
3696 set_first_insn (BB_HEADER (bb));
3697 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3698 insn = BB_HEADER (bb);
3699 while (NEXT_INSN (insn))
3700 insn = NEXT_INSN (insn);
3701 }
3702 if (insn)
3703 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3704 else
3705 set_first_insn (BB_HEAD (bb));
3706 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3707 insn = BB_END (bb);
3708 if (BB_FOOTER (bb))
3709 {
3710 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3711 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3712 while (NEXT_INSN (insn))
3713 insn = NEXT_INSN (insn);
3714 }
3715 }
3716
3717 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3718 if (cfg_layout_function_footer)
3719 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3720
3721 while (NEXT_INSN (insn))
3722 insn = NEXT_INSN (insn);
3723
3724 set_last_insn (insn);
3725 #ifdef ENABLE_CHECKING
3726 verify_insn_chain ();
3727 #endif
3728
3729 /* Now add jumps and labels as needed to match the blocks' new
3730 outgoing edges. */
3731
3732 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3733 bb->aux)
3734 {
3735 edge e_fall, e_taken, e;
3736 rtx_insn *bb_end_insn;
3737 rtx ret_label = NULL_RTX;
3738 basic_block nb;
3739 edge_iterator ei;
3740
3741 if (EDGE_COUNT (bb->succs) == 0)
3742 continue;
3743
3744 /* Find the old fallthru edge, and another non-EH edge for
3745 a taken jump. */
3746 e_taken = e_fall = NULL;
3747
3748 FOR_EACH_EDGE (e, ei, bb->succs)
3749 if (e->flags & EDGE_FALLTHRU)
3750 e_fall = e;
3751 else if (! (e->flags & EDGE_EH))
3752 e_taken = e;
3753
3754 bb_end_insn = BB_END (bb);
3755 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3756 {
3757 ret_label = JUMP_LABEL (bb_end_jump);
3758 if (any_condjump_p (bb_end_jump))
3759 {
3760 /* This might happen if the conditional jump has side
3761 effects and could therefore not be optimized away.
3762 Make the basic block end with a barrier in order
3763 to prevent rtl_verify_flow_info from complaining. */
3764 if (!e_fall)
3765 {
3766 gcc_assert (!onlyjump_p (bb_end_jump)
3767 || returnjump_p (bb_end_jump)
3768 || (e_taken->flags & EDGE_CROSSING));
3769 emit_barrier_after (bb_end_jump);
3770 continue;
3771 }
3772
3773 /* If the old fallthru is still next, nothing to do. */
3774 if (bb->aux == e_fall->dest
3775 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3776 continue;
3777
3778 /* The degenerate case of a conditional jump jumping to the next
3779 instruction can happen for jumps with side effects. We need
3780 to construct a forwarder block and this will be done just
3781 fine by force_nonfallthru below. */
3782 if (!e_taken)
3783 ;
3784
3785 /* There is another special case: if *neither* block is next,
3786 such as happens at the very end of a function, then we'll
3787 need to add a new unconditional jump. Choose the taken
3788 edge based on known or assumed probability. */
3789 else if (bb->aux != e_taken->dest)
3790 {
3791 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3792
3793 if (note
3794 && XINT (note, 0) < REG_BR_PROB_BASE / 2
3795 && invert_jump (bb_end_jump,
3796 (e_fall->dest
3797 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3798 ? NULL_RTX
3799 : label_for_bb (e_fall->dest)), 0))
3800 {
3801 e_fall->flags &= ~EDGE_FALLTHRU;
3802 gcc_checking_assert (could_fall_through
3803 (e_taken->src, e_taken->dest));
3804 e_taken->flags |= EDGE_FALLTHRU;
3805 update_br_prob_note (bb);
3806 e = e_fall, e_fall = e_taken, e_taken = e;
3807 }
3808 }
3809
3810 /* If the "jumping" edge is a crossing edge, and the fall
3811 through edge is non-crossing, leave things as they are. */
3812 else if ((e_taken->flags & EDGE_CROSSING)
3813 && !(e_fall->flags & EDGE_CROSSING))
3814 continue;
3815
3816 /* Otherwise we can try to invert the jump. This will
3817 basically never fail, however, keep up the pretense. */
3818 else if (invert_jump (bb_end_jump,
3819 (e_fall->dest
3820 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3821 ? NULL_RTX
3822 : label_for_bb (e_fall->dest)), 0))
3823 {
3824 e_fall->flags &= ~EDGE_FALLTHRU;
3825 gcc_checking_assert (could_fall_through
3826 (e_taken->src, e_taken->dest));
3827 e_taken->flags |= EDGE_FALLTHRU;
3828 update_br_prob_note (bb);
3829 if (LABEL_NUSES (ret_label) == 0
3830 && single_pred_p (e_taken->dest))
3831 delete_insn (ret_label);
3832 continue;
3833 }
3834 }
3835 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3836 {
3837 /* If the old fallthru is still next or if
3838 asm goto doesn't have a fallthru (e.g. when followed by
3839 __builtin_unreachable ()), nothing to do. */
3840 if (! e_fall
3841 || bb->aux == e_fall->dest
3842 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3843 continue;
3844
3845 /* Otherwise we'll have to use the fallthru fixup below. */
3846 }
3847 else
3848 {
3849 /* Otherwise we have some return, switch or computed
3850 jump. In the 99% case, there should not have been a
3851 fallthru edge. */
3852 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3853 continue;
3854 }
3855 }
3856 else
3857 {
3858 /* No fallthru implies a noreturn function with EH edges, or
3859 something similarly bizarre. In any case, we don't need to
3860 do anything. */
3861 if (! e_fall)
3862 continue;
3863
3864 /* If the fallthru block is still next, nothing to do. */
3865 if (bb->aux == e_fall->dest)
3866 continue;
3867
3868 /* A fallthru to exit block. */
3869 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3870 continue;
3871 }
3872
3873 /* We got here if we need to add a new jump insn.
3874 Note force_nonfallthru can delete E_FALL and thus we have to
3875 save E_FALL->src prior to the call to force_nonfallthru. */
3876 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3877 if (nb)
3878 {
3879 nb->aux = bb->aux;
3880 bb->aux = nb;
3881 /* Don't process this new block. */
3882 bb = nb;
3883 }
3884 }
3885
3886 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3887
3888 /* Annoying special case - jump around dead jumptables left in the code. */
3889 FOR_EACH_BB_FN (bb, cfun)
3890 {
3891 edge e = find_fallthru_edge (bb->succs);
3892
3893 if (e && !can_fallthru (e->src, e->dest))
3894 force_nonfallthru (e);
3895 }
3896
3897 /* Ensure that each edge's goto_locus is represented by at least one
3898 instruction with that locus in the RTL. */
3899 if (!optimize)
3900 FOR_EACH_BB_FN (bb, cfun)
3901 {
3902 edge e;
3903 edge_iterator ei;
3904
3905 FOR_EACH_EDGE (e, ei, bb->succs)
3906 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3907 && !(e->flags & EDGE_ABNORMAL))
3908 {
3909 edge e2;
3910 edge_iterator ei2;
3911 basic_block dest, nb;
3912 rtx_insn *end;
3913
3914 insn = BB_END (e->src);
3915 end = PREV_INSN (BB_HEAD (e->src));
3916 while (insn != end
3917 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3918 insn = PREV_INSN (insn);
3919 if (insn != end
3920 && INSN_LOCATION (insn) == e->goto_locus)
3921 continue;
3922 if (simplejump_p (BB_END (e->src))
3923 && !INSN_HAS_LOCATION (BB_END (e->src)))
3924 {
3925 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3926 continue;
3927 }
3928 dest = e->dest;
3929 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3930 {
3931 /* Non-fallthru edges to the exit block cannot be split. */
3932 if (!(e->flags & EDGE_FALLTHRU))
3933 continue;
3934 }
3935 else
3936 {
3937 insn = BB_HEAD (dest);
3938 end = NEXT_INSN (BB_END (dest));
3939 while (insn != end && !NONDEBUG_INSN_P (insn))
3940 insn = NEXT_INSN (insn);
3941 if (insn != end && INSN_HAS_LOCATION (insn)
3942 && INSN_LOCATION (insn) == e->goto_locus)
3943 continue;
3944 }
3945 nb = split_edge (e);
3946 if (!INSN_P (BB_END (nb)))
3947 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3948 nb);
3949 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3950
3951 /* If there are other incoming edges to the destination block
3952 with the same goto locus, redirect them to the new block as
3953 well; this can prevent other such blocks from being created
3954 in subsequent iterations of the loop. */
3955 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3956 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3957 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3958 && e->goto_locus == e2->goto_locus)
3959 redirect_edge_and_branch (e2, nb);
3960 else
3961 ei_next (&ei2);
3962 }
3963 }
3964 }
3965 \f
3966 /* Perform sanity checks on the insn chain.
3967 1. Check that next/prev pointers are consistent in both the forward and
3968 reverse direction.
3969 2. Count insns in chain, going both directions, and check if equal.
3970 3. Check that get_last_insn () returns the actual end of chain. */
3971
3972 DEBUG_FUNCTION void
3973 verify_insn_chain (void)
3974 {
3975 rtx_insn *x, *prevx, *nextx;
3976 int insn_cnt1, insn_cnt2;
3977
3978 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3979 x != 0;
3980 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3981 gcc_assert (PREV_INSN (x) == prevx);
3982
3983 gcc_assert (prevx == get_last_insn ());
3984
3985 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3986 x != 0;
3987 nextx = x, insn_cnt2++, x = PREV_INSN (x))
3988 gcc_assert (NEXT_INSN (x) == nextx);
3989
3990 gcc_assert (insn_cnt1 == insn_cnt2);
3991 }
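/* Illustrative sketch (not compiled; "node" and "chain_consistent_p"
   are hypothetical): the property verify_insn_chain enforces is just
   that the chain is a consistent doubly-linked list, i.e. for every
   element X, PREV_INSN (NEXT_INSN (X)) == X and vice versa.  */
#if 0
struct node { struct node *prev, *next; };

/* Return 1 iff the list starting at HEAD is consistently linked.  */
static int
chain_consistent_p (struct node *head)
{
  struct node *prev = 0, *x;
  for (x = head; x; prev = x, x = x->next)
    if (x->prev != prev)
      return 0;
  return 1;
}
#endif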
3992 \f
3993 /* If we have assembler epilogues, the block falling through to exit must
3994 be the last one in the reordered chain when we reach final. Ensure
3995 that this condition is met. */
3996 static void
3997 fixup_fallthru_exit_predecessor (void)
3998 {
3999 edge e;
4000 basic_block bb = NULL;
4001
4002 /* This transformation is not valid before reload, because we might
4003 separate a call from the instruction that copies the return
4004 value. */
4005 gcc_assert (reload_completed);
4006
4007 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4008 if (e)
4009 bb = e->src;
4010
4011 if (bb && bb->aux)
4012 {
4013 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4014
4015 /* If the very first block is the one with the fall-through exit
4016 edge, we have to split that block. */
4017 if (c == bb)
4018 {
4019 bb = split_block_after_labels (bb)->dest;
4020 bb->aux = c->aux;
4021 c->aux = bb;
4022 BB_FOOTER (bb) = BB_FOOTER (c);
4023 BB_FOOTER (c) = NULL;
4024 }
4025
4026 while (c->aux != bb)
4027 c = (basic_block) c->aux;
4028
4029 c->aux = bb->aux;
4030 while (c->aux)
4031 c = (basic_block) c->aux;
4032
4033 c->aux = bb;
4034 bb->aux = NULL;
4035 }
4036 }
4037
4038 /* If the exit block has more than one fallthru predecessor, force it
4039 to have only one. */
4040
4041 static void
4042 force_one_exit_fallthru (void)
4043 {
4044 edge e, predecessor = NULL;
4045 bool more = false;
4046 edge_iterator ei;
4047 basic_block forwarder, bb;
4048
4049 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4050 if (e->flags & EDGE_FALLTHRU)
4051 {
4052 if (predecessor == NULL)
4053 predecessor = e;
4054 else
4055 {
4056 more = true;
4057 break;
4058 }
4059 }
4060
4061 if (!more)
4062 return;
4063
4064 /* Exit has several fallthru predecessors. Create a forwarder block for
4065 them. */
4066 forwarder = split_edge (predecessor);
4067 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4068 (e = ei_safe_edge (ei)); )
4069 {
4070 if (e->src == forwarder
4071 || !(e->flags & EDGE_FALLTHRU))
4072 ei_next (&ei);
4073 else
4074 redirect_edge_and_branch_force (e, forwarder);
4075 }
4076
4077 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4078 exit block. */
4079 FOR_EACH_BB_FN (bb, cfun)
4080 {
4081 if (bb->aux == NULL && bb != forwarder)
4082 {
4083 bb->aux = forwarder;
4084 break;
4085 }
4086 }
4087 }
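/* Schematically, the transformation above reroutes all fallthru
   predecessors P1..P3 of EXIT through one forwarder block F:

	P1 \                P1 \
	P2 --> EXIT   =>    P2 --> F --> EXIT
	P3 /                P3 /

   F is created by splitting one of the fallthru edges; the remaining
   fallthru predecessors are then redirected to F by
   redirect_edge_and_branch_force, and F is chained (via aux) right
   before the exit block.  */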
4088 \f
4089 /* Return true if it is possible to duplicate the basic block BB. */
4090
4091 static bool
4092 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4093 {
4094 /* Do not attempt to duplicate tablejumps, as we need to unshare
4095 the dispatch table. This is difficult to do, as the instructions
4096 computing the jump destination may be hoisted outside the basic block. */
4097 if (tablejump_p (BB_END (bb), NULL, NULL))
4098 return false;
4099
4100 /* Do not duplicate blocks containing insns that can't be copied. */
4101 if (targetm.cannot_copy_insn_p)
4102 {
4103 rtx_insn *insn = BB_HEAD (bb);
4104 while (1)
4105 {
4106 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4107 return false;
4108 if (insn == BB_END (bb))
4109 break;
4110 insn = NEXT_INSN (insn);
4111 }
4112 }
4113
4114 return true;
4115 }
4116
4117 rtx_insn *
4118 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4119 {
4120 rtx_insn *insn, *next, *copy;
4121 rtx_note *last;
4122
4123 /* Avoid updating the boundaries of the previous basic block. The
4124 note will be removed from the insn stream during fixup. */
4125 last = emit_note (NOTE_INSN_DELETED);
4126
4127 /* Create the copy at the end of the insn chain. The chain will
4128 be reordered later. */
4129 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4130 {
4131 switch (GET_CODE (insn))
4132 {
4133 case DEBUG_INSN:
4134 /* Don't duplicate label debug insns. */
4135 if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4136 break;
4137 /* FALLTHRU */
4138 case INSN:
4139 case CALL_INSN:
4140 case JUMP_INSN:
4141 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4142 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4143 && ANY_RETURN_P (JUMP_LABEL (insn)))
4144 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4145 maybe_copy_prologue_epilogue_insn (insn, copy);
4146 break;
4147
4148 case JUMP_TABLE_DATA:
4149 /* Avoid copying dispatch tables. We never duplicate
4150 tablejumps, so this can only be hit if the table was
4151 moved far away from the original jump.
4152 Also avoid copying the following barrier, if any
4153 (and any debug insns in between). */
4154 for (next = NEXT_INSN (insn);
4155 next != NEXT_INSN (to);
4156 next = NEXT_INSN (next))
4157 if (!DEBUG_INSN_P (next))
4158 break;
4159 if (next != NEXT_INSN (to) && BARRIER_P (next))
4160 insn = next;
4161 break;
4162
4163 case CODE_LABEL:
4164 break;
4165
4166 case BARRIER:
4167 emit_barrier ();
4168 break;
4169
4170 case NOTE:
4171 switch (NOTE_KIND (insn))
4172 {
4173 /* If the prologue is empty and the function contains a label
4174 in its first BB, we may want to copy the block. */
4175 case NOTE_INSN_PROLOGUE_END:
4176
4177 case NOTE_INSN_DELETED:
4178 case NOTE_INSN_DELETED_LABEL:
4179 case NOTE_INSN_DELETED_DEBUG_LABEL:
4180 /* No problem to strip these. */
4181 case NOTE_INSN_FUNCTION_BEG:
4182 /* There is always just a single entry to the function. */
4183 case NOTE_INSN_BASIC_BLOCK:
4184 /* We should only switch text sections once. */
4185 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4186 break;
4187
4188 case NOTE_INSN_EPILOGUE_BEG:
4189 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4190 emit_note_copy (as_a <rtx_note *> (insn));
4191 break;
4192
4193 default:
4194 /* All other notes should have already been eliminated. */
4195 gcc_unreachable ();
4196 }
4197 break;
4198 default:
4199 gcc_unreachable ();
4200 }
4201 }
4202 insn = NEXT_INSN (last);
4203 delete_insn (last);
4204 return insn;
4205 }
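/* Usage note: duplicate_insn_chain returns the first insn of the new
   copy, and the copy extends to get_last_insn (); the caller below
   (cfg_layout_duplicate_bb) relies on exactly that pairing.  */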
4206
4207 /* Create a duplicate of the basic block BB. */
4208
4209 static basic_block
4210 cfg_layout_duplicate_bb (basic_block bb)
4211 {
4212 rtx_insn *insn;
4213 basic_block new_bb;
4214
4215 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4216 new_bb = create_basic_block (insn,
4217 insn ? get_last_insn () : NULL,
4218 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4219
4220 BB_COPY_PARTITION (new_bb, bb);
4221 if (BB_HEADER (bb))
4222 {
4223 insn = BB_HEADER (bb);
4224 while (NEXT_INSN (insn))
4225 insn = NEXT_INSN (insn);
4226 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4227 if (insn)
4228 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4229 }
4230
4231 if (BB_FOOTER (bb))
4232 {
4233 insn = BB_FOOTER (bb);
4234 while (NEXT_INSN (insn))
4235 insn = NEXT_INSN (insn);
4236 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4237 if (insn)
4238 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4239 }
4240
4241 return new_bb;
4242 }
4243
4244 \f
4245 /* Main entry point to this module - initialize the data structures for
4246 CFG layout changes.
4247
4248 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4249
4250 void
4251 cfg_layout_initialize (unsigned int flags)
4252 {
4253 rtx_insn_list *x;
4254 basic_block bb;
4255
4256 /* Once bb partitioning is complete, cfg layout mode should not be
4257 re-entered. Entering cfg layout mode may require fixups: for
4258 example, edge forwarding performed while optimizing the cfg
4259 layout might move a block from the hot to the cold section,
4260 which would create an illegal partitioning unless some manual
4261 fixup were performed. */
4262 gcc_assert (!(crtl->bb_reorder_complete
4263 && flag_reorder_blocks_and_partition));
4264
4265 initialize_original_copy_tables ();
4266
4267 cfg_layout_rtl_register_cfg_hooks ();
4268
4269 record_effective_endpoints ();
4270
4271 /* Make sure that the targets of non-local gotos are marked. */
4272 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4273 {
4274 bb = BLOCK_FOR_INSN (x->insn ());
4275 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4276 }
4277
4278 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4279 }
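/* Usage sketch (illustrative only; example_reorder_pass is
   hypothetical): a transformation that wants to rearrange the block
   chain typically brackets its work between cfg_layout_initialize and
   cfg_layout_finalize, rewriting the bb->aux pointers in between.  */
#if 0
static void
example_reorder_pass (void)
{
  cfg_layout_initialize (0);
  /* ...permute the block order by rewriting the bb->aux chain;
     fixup_reorder_chain materializes the new order when
     cfg_layout_finalize is called...  */
  cfg_layout_finalize ();
}
#endif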
4280
4281 /* Splits superblocks. */
4282 void
4283 break_superblocks (void)
4284 {
4285 sbitmap superblocks;
4286 bool need = false;
4287 basic_block bb;
4288
4289 superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
4290 bitmap_clear (superblocks);
4291
4292 FOR_EACH_BB_FN (bb, cfun)
4293 if (bb->flags & BB_SUPERBLOCK)
4294 {
4295 bb->flags &= ~BB_SUPERBLOCK;
4296 bitmap_set_bit (superblocks, bb->index);
4297 need = true;
4298 }
4299
4300 if (need)
4301 {
4302 rebuild_jump_labels (get_insns ());
4303 find_many_sub_basic_blocks (superblocks);
4304 }
4305
4306 free (superblocks);
4307 }
4308
4309 /* Finalize the changes: reorder the insn list as specified by the aux
4310 pointers, insert compensation code, and rebuild the scope forest. */
4311
4312 void
4313 cfg_layout_finalize (void)
4314 {
4315 #ifdef ENABLE_CHECKING
4316 verify_flow_info ();
4317 #endif
4318 force_one_exit_fallthru ();
4319 rtl_register_cfg_hooks ();
4320 if (reload_completed && !targetm.have_epilogue ())
4321 fixup_fallthru_exit_predecessor ();
4322 fixup_reorder_chain ();
4323
4324 rebuild_jump_labels (get_insns ());
4325 delete_dead_jumptables ();
4326
4327 #ifdef ENABLE_CHECKING
4328 verify_insn_chain ();
4329 verify_flow_info ();
4330 #endif
4331 }
4332
4333
4334 /* Same as split_block but update cfg_layout structures. */
4335
4336 static basic_block
4337 cfg_layout_split_block (basic_block bb, void *insnp)
4338 {
4339 rtx insn = (rtx) insnp;
4340 basic_block new_bb = rtl_split_block (bb, insn);
4341
4342 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4343 BB_FOOTER (bb) = NULL;
4344
4345 return new_bb;
4346 }
4347
4348 /* Redirect edge E to DEST. */
4349 static edge
4350 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4351 {
4352 basic_block src = e->src;
4353 edge ret;
4354
4355 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4356 return NULL;
4357
4358 if (e->dest == dest)
4359 return e;
4360
4361 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4362 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4363 {
4364 df_set_bb_dirty (src);
4365 return ret;
4366 }
4367
4368 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4369 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4370 {
4371 if (dump_file)
4372 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4373 e->src->index, dest->index);
4374
4375 df_set_bb_dirty (e->src);
4376 redirect_edge_succ (e, dest);
4377 return e;
4378 }
4379
4380 /* Redirect_edge_and_branch may decide to turn a branch into a fallthru
4381 edge if the basic blocks appear to be in sequence. Avoid this
4382 transformation. */
4383
4384 if (e->flags & EDGE_FALLTHRU)
4385 {
4386 /* Redirect any branch edges unified with the fallthru one. */
4387 if (JUMP_P (BB_END (src))
4388 && label_is_jump_target_p (BB_HEAD (e->dest),
4389 BB_END (src)))
4390 {
4391 edge redirected;
4392
4393 if (dump_file)
4394 fprintf (dump_file, "Fallthru edge unified with branch "
4395 "%i->%i redirected to %i\n",
4396 e->src->index, e->dest->index, dest->index);
4397 e->flags &= ~EDGE_FALLTHRU;
4398 redirected = redirect_branch_edge (e, dest);
4399 gcc_assert (redirected);
4400 redirected->flags |= EDGE_FALLTHRU;
4401 df_set_bb_dirty (redirected->src);
4402 return redirected;
4403 }
4404 /* If we are redirecting the fallthru edge to the branch edge
4405 of a conditional jump, remove the jump. */
4406 if (EDGE_COUNT (src->succs) == 2)
4407 {
4408 /* Find the edge that is different from E. */
4409 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4410
4411 if (s->dest == dest
4412 && any_condjump_p (BB_END (src))
4413 && onlyjump_p (BB_END (src)))
4414 delete_insn (BB_END (src));
4415 }
4416 if (dump_file)
4417 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4418 e->src->index, e->dest->index, dest->index);
4419 ret = redirect_edge_succ_nodup (e, dest);
4420 }
4421 else
4422 ret = redirect_branch_edge (e, dest);
4423
4424 /* We don't want simplejumps in the insn stream during cfglayout. */
4425 gcc_assert (!simplejump_p (BB_END (src)));
4426
4427 df_set_bb_dirty (src);
4428 return ret;
4429 }
4430
4431 /* Simple wrapper, as we can always redirect fallthru edges. */
4432 static basic_block
4433 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4434 {
4435 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4436
4437 gcc_assert (redirected);
4438 return NULL;
4439 }
4440
4441 /* Same as delete_basic_block but update cfg_layout structures. */
4442
4443 static void
4444 cfg_layout_delete_block (basic_block bb)
4445 {
4446 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remains;
4447 rtx_insn **to;
4448
4449 if (BB_HEADER (bb))
4450 {
4451 next = BB_HEAD (bb);
4452 if (prev)
4453 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4454 else
4455 set_first_insn (BB_HEADER (bb));
4456 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4457 insn = BB_HEADER (bb);
4458 while (NEXT_INSN (insn))
4459 insn = NEXT_INSN (insn);
4460 SET_NEXT_INSN (insn) = next;
4461 SET_PREV_INSN (next) = insn;
4462 }
4463 next = NEXT_INSN (BB_END (bb));
4464 if (BB_FOOTER (bb))
4465 {
4466 insn = BB_FOOTER (bb);
4467 while (insn)
4468 {
4469 if (BARRIER_P (insn))
4470 {
4471 if (PREV_INSN (insn))
4472 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4473 else
4474 BB_FOOTER (bb) = NEXT_INSN (insn);
4475 if (NEXT_INSN (insn))
4476 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4477 }
4478 if (LABEL_P (insn))
4479 break;
4480 insn = NEXT_INSN (insn);
4481 }
4482 if (BB_FOOTER (bb))
4483 {
4484 insn = BB_END (bb);
4485 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4486 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4487 while (NEXT_INSN (insn))
4488 insn = NEXT_INSN (insn);
4489 SET_NEXT_INSN (insn) = next;
4490 if (next)
4491 SET_PREV_INSN (next) = insn;
4492 else
4493 set_last_insn (insn);
4494 }
4495 }
4496 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4497 to = &BB_HEADER (bb->next_bb);
4498 else
4499 to = &cfg_layout_function_footer;
4500
4501 rtl_delete_block (bb);
4502
4503 if (prev)
4504 prev = NEXT_INSN (prev);
4505 else
4506 prev = get_insns ();
4507 if (next)
4508 next = PREV_INSN (next);
4509 else
4510 next = get_last_insn ();
4511
4512 if (next && NEXT_INSN (next) != prev)
4513 {
4514 remains = unlink_insn_chain (prev, next);
4515 insn = remains;
4516 while (NEXT_INSN (insn))
4517 insn = NEXT_INSN (insn);
4518 SET_NEXT_INSN (insn) = *to;
4519 if (*to)
4520 SET_PREV_INSN (*to) = insn;
4521 *to = remains;
4522 }
4523 }
4524
4525 /* Return true when blocks A and B can be safely merged. */
4526
4527 static bool
4528 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4529 {
4530 /* If we are partitioning hot/cold basic blocks, we don't want to
4531 mess up unconditional or indirect jumps that cross between hot
4532 and cold sections.
4533
4534 Basic block partitioning may result in some jumps that appear to
4535 be optimizable (or blocks that appear to be mergeable), but which really
4536 must be left untouched (they are required to make it safely across
4537 partition boundaries). See the comments at the top of
4538 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4539
4540 if (BB_PARTITION (a) != BB_PARTITION (b))
4541 return false;
4542
4543 /* Protect the loop latches. */
4544 if (current_loops && b->loop_father->latch == b)
4545 return false;
4546
4547 /* If we would end up moving B's instructions, make sure it doesn't fall
4548 through into the exit block, since we cannot recover from a fallthrough
4549 edge into the exit block occurring in the middle of a function. */
4550 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4551 {
4552 edge e = find_fallthru_edge (b->succs);
4553 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4554 return false;
4555 }
4556
4557 /* There must be exactly one edge in between the blocks. */
4558 return (single_succ_p (a)
4559 && single_succ (a) == b
4560 && single_pred_p (b)
4561 && a != b
4562 /* Must be simple edge. */
4563 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4564 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4565 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4566 /* If the jump insn has side effects, we can't kill the edge.
4567 When not optimizing, try_redirect_by_replacing_jump will
4568 not allow us to redirect an edge by replacing a table jump. */
4569 && (!JUMP_P (BB_END (a))
4570 || ((!optimize || reload_completed)
4571 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4572 }
4573
4574 /* Merge block A and B. The blocks must be mergeable. */
4575
4576 static void
4577 cfg_layout_merge_blocks (basic_block a, basic_block b)
4578 {
4579 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4580 rtx_insn *insn;
4581
4582 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4583
4584 if (dump_file)
4585 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4586 a->index);
4587
4588 /* If there was a CODE_LABEL beginning B, delete it. */
4589 if (LABEL_P (BB_HEAD (b)))
4590 {
4591 delete_insn (BB_HEAD (b));
4592 }
4593
4594 /* We should have a fallthru edge in A; otherwise we can do a dummy
4595 redirection to get it cleaned up. */
4596 if (JUMP_P (BB_END (a)))
4597 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4598 gcc_assert (!JUMP_P (BB_END (a)));
4599
4600 /* When not optimizing and the edge is the only place in RTL which holds
4601 some unique locus, emit a nop with that locus in between. */
4602 if (!optimize)
4603 emit_nop_for_unique_locus_between (a, b);
4604
4605 /* Move things from b->footer after a->footer. */
4606 if (BB_FOOTER (b))
4607 {
4608 if (!BB_FOOTER (a))
4609 BB_FOOTER (a) = BB_FOOTER (b);
4610 else
4611 {
4612 rtx_insn *last = BB_FOOTER (a);
4613
4614 while (NEXT_INSN (last))
4615 last = NEXT_INSN (last);
4616 SET_NEXT_INSN (last) = BB_FOOTER (b);
4617 SET_PREV_INSN (BB_FOOTER (b)) = last;
4618 }
4619 BB_FOOTER (b) = NULL;
4620 }
4621
4622 /* Move things from b->header before a->footer.
4623 Note that this may include dead tablejump data, but we don't clean
4624 those up until we go out of cfglayout mode. */
4625 if (BB_HEADER (b))
4626 {
4627 if (! BB_FOOTER (a))
4628 BB_FOOTER (a) = BB_HEADER (b);
4629 else
4630 {
4631 rtx_insn *last = BB_HEADER (b);
4632
4633 while (NEXT_INSN (last))
4634 last = NEXT_INSN (last);
4635 SET_NEXT_INSN (last) = BB_FOOTER (a);
4636 SET_PREV_INSN (BB_FOOTER (a)) = last;
4637 BB_FOOTER (a) = BB_HEADER (b);
4638 }
4639 BB_HEADER (b) = NULL;
4640 }
4641
4642 /* If the basic blocks are not adjacent, move B's insns after A's end. */
4643 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4644 {
4645 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4646
4647 emit_insn_after_noloc (insn, BB_END (a), a);
4648 }
4649 /* Otherwise just re-associate the instructions. */
4650 else
4651 {
4652 insn = BB_HEAD (b);
4653 BB_END (a) = BB_END (b);
4654 }
4655
4656 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4657 We need to call it explicitly. */
4658 update_bb_for_insn_chain (insn, BB_END (b), a);
4659
4660 /* Skip possible DELETED_LABEL insn. */
4661 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4662 insn = NEXT_INSN (insn);
4663 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4664 BB_HEAD (b) = BB_END (b) = NULL;
4665 delete_insn (insn);
4666
4667 df_bb_delete (b->index);
4668
4669 /* If B was a forwarder block, propagate the locus on the edge. */
4670 if (forwarder_p
4671 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4672 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4673
4674 if (dump_file)
4675 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4676 }
4677
4678 /* Split edge E. */
4679
4680 static basic_block
4681 cfg_layout_split_edge (edge e)
4682 {
4683 basic_block new_bb =
4684 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4685 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4686 NULL_RTX, e->src);
4687
4688 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4689 BB_COPY_PARTITION (new_bb, e->src);
4690 else
4691 BB_COPY_PARTITION (new_bb, e->dest);
4692 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4693 redirect_edge_and_branch_force (e, new_bb);
4694
4695 return new_bb;
4696 }
4697
4698 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4699
4700 static void
4701 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4702 {
4703 }
4704
4705 /* Return true if BB contains only labels or non-executable
4706 instructions. */
4707
4708 static bool
4709 rtl_block_empty_p (basic_block bb)
4710 {
4711 rtx_insn *insn;
4712
4713 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4714 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4715 return true;
4716
4717 FOR_BB_INSNS (bb, insn)
4718 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4719 return false;
4720
4721 return true;
4722 }
4723
4724 /* Split a basic block if it ends with a conditional branch and if
4725 the other part of the block is not empty. */
4726
4727 static basic_block
4728 rtl_split_block_before_cond_jump (basic_block bb)
4729 {
4730 rtx_insn *insn;
4731 rtx_insn *split_point = NULL;
4732 rtx_insn *last = NULL;
4733 bool found_code = false;
4734
4735 FOR_BB_INSNS (bb, insn)
4736 {
4737 if (any_condjump_p (insn))
4738 split_point = last;
4739 else if (NONDEBUG_INSN_P (insn))
4740 found_code = true;
4741 last = insn;
4742 }
4743
4744 /* Split only if we found both real code and a split point. */
4745 if (found_code && split_point)
4746 return split_block (bb, split_point)->dest;
4747 else
4748 return NULL;
4749 }
4750
4751 /* Return 1 if BB ends with a call, possibly followed by some
4752 instructions that must stay with the call, 0 otherwise. */
4753
4754 static bool
4755 rtl_block_ends_with_call_p (basic_block bb)
4756 {
4757 rtx_insn *insn = BB_END (bb);
4758
4759 while (!CALL_P (insn)
4760 && insn != BB_HEAD (bb)
4761 && (keep_with_call_p (insn)
4762 || NOTE_P (insn)
4763 || DEBUG_INSN_P (insn)))
4764 insn = PREV_INSN (insn);
4765 return (CALL_P (insn));
4766 }
4767
4768 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4769
4770 static bool
4771 rtl_block_ends_with_condjump_p (const_basic_block bb)
4772 {
4773 return any_condjump_p (BB_END (bb));
4774 }
4775
4776 /* Return true if we need to add fake edge to exit.
4777 Helper function for rtl_flow_call_edges_add. */
4778
4779 static bool
4780 need_fake_edge_p (const rtx_insn *insn)
4781 {
4782 if (!INSN_P (insn))
4783 return false;
4784
4785 if ((CALL_P (insn)
4786 && !SIBLING_CALL_P (insn)
4787 && !find_reg_note (insn, REG_NORETURN, NULL)
4788 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4789 return true;
4790
4791 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4792 && MEM_VOLATILE_P (PATTERN (insn)))
4793 || (GET_CODE (PATTERN (insn)) == PARALLEL
4794 && asm_noperands (insn) != -1
4795 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4796 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4797 }
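/* Example (illustrative): given

     extern void g (void);
     int x;
     void f (void) { g (); x = 1; }

   g may call exit or longjmp, so entering the block containing the
   call does not imply that the store to x executes.  A fake edge from
   that block to the exit block models this possibility.  */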
4798
4799 /* Add fake edges to the function exit for any non-constant and non-noreturn
4800 calls and volatile inline assembly in the bitmap of blocks specified by
4801 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks
4802 that were split.
4803
4804 The goal is to expose cases in which entering a basic block does not imply
4805 that all subsequent instructions must be executed. */
4806
4807 static int
4808 rtl_flow_call_edges_add (sbitmap blocks)
4809 {
4810 int i;
4811 int blocks_split = 0;
4812 int last_bb = last_basic_block_for_fn (cfun);
4813 bool check_last_block = false;
4814
4815 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4816 return 0;
4817
4818 if (! blocks)
4819 check_last_block = true;
4820 else
4821 check_last_block = bitmap_bit_p (blocks,
4822 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4823
4824 /* In the last basic block, before epilogue generation, there will be
4825 a fallthru edge to EXIT. Special care is required if the last insn
4826 of the last basic block is a call because make_edge folds duplicate
4827 edges, which would result in the fallthru edge also being marked
4828 fake, which would result in the fallthru edge being removed by
4829 remove_fake_edges, which would result in an invalid CFG.
4830
4831 Moreover, we can't elide the outgoing fake edge, since the block
4832 profiler needs to take this into account in order to solve the minimal
4833 spanning tree in the case that the call doesn't return.
4834
4835 Handle this by adding a dummy instruction in a new last basic block. */
4836 if (check_last_block)
4837 {
4838 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4839 rtx_insn *insn = BB_END (bb);
4840
4841 /* Back up past insns that must be kept in the same block as a call. */
4842 while (insn != BB_HEAD (bb)
4843 && keep_with_call_p (insn))
4844 insn = PREV_INSN (insn);
4845
4846 if (need_fake_edge_p (insn))
4847 {
4848 edge e;
4849
4850 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4851 if (e)
4852 {
4853 insert_insn_on_edge (gen_use (const0_rtx), e);
4854 commit_edge_insertions ();
4855 }
4856 }
4857 }
4858
4859 /* Now add fake edges to the function exit for any non-constant
4860 calls, since there is no way that we can determine whether they
4861 will return or not... */
4862
4863 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4864 {
4865 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4866 rtx_insn *insn;
4867 rtx_insn *prev_insn;
4868
4869 if (!bb)
4870 continue;
4871
4872 if (blocks && !bitmap_bit_p (blocks, i))
4873 continue;
4874
4875 for (insn = BB_END (bb); ; insn = prev_insn)
4876 {
4877 prev_insn = PREV_INSN (insn);
4878 if (need_fake_edge_p (insn))
4879 {
4880 edge e;
4881 rtx_insn *split_at_insn = insn;
4882
4883 /* Don't split the block between a call and an insn that should
4884 remain in the same block as the call. */
4885 if (CALL_P (insn))
4886 while (split_at_insn != BB_END (bb)
4887 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4888 split_at_insn = NEXT_INSN (split_at_insn);
4889
4890 /* The handling above of the final block before the epilogue
4891 should be enough to verify that there is no edge to the exit
4892 block in CFG already. Calling make_edge in such case would
4893 cause us to mark that edge as fake and remove it later. */
4894
4895 #ifdef ENABLE_CHECKING
4896 if (split_at_insn == BB_END (bb))
4897 {
4898 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4899 gcc_assert (e == NULL);
4900 }
4901 #endif
4902
4903 /* Note that the following may create a new basic block
4904 and renumber the existing basic blocks. */
4905 if (split_at_insn != BB_END (bb))
4906 {
4907 e = split_block (bb, split_at_insn);
4908 if (e)
4909 blocks_split++;
4910 }
4911
4912 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4913 }
4914
4915 if (insn == BB_HEAD (bb))
4916 break;
4917 }
4918 }
4919
4920 if (blocks_split)
4921 verify_flow_info ();
4922
4923 return blocks_split;
4924 }
4925
4926 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4927 the conditional branch target, and SECOND_HEAD should be the fall-thru;
4928 there is no need to handle the fall-thru here, since the loop versioning
4929 code handles it. SECOND_HEAD exists because it is needed for the
4930 corresponding condition hook on trees, and hooks must share one type. */
4931 static void
4932 rtl_lv_add_condition_to_bb (basic_block first_head,
4933 basic_block second_head ATTRIBUTE_UNUSED,
4934 basic_block cond_bb, void *comp_rtx)
4935 {
4936 rtx_code_label *label;
4937 rtx_insn *seq, *jump;
4938 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4939 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4940 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4941 machine_mode mode;
4942
4943
4944 label = block_label (first_head);
4945 mode = GET_MODE (op0);
4946 if (mode == VOIDmode)
4947 mode = GET_MODE (op1);
4948
4949 start_sequence ();
4950 op0 = force_operand (op0, NULL_RTX);
4951 op1 = force_operand (op1, NULL_RTX);
4952 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label, -1);
4953 jump = get_last_insn ();
4954 JUMP_LABEL (jump) = label;
4955 LABEL_NUSES (label)++;
4956 seq = get_insns ();
4957 end_sequence ();
4958
4959 /* Add the new cond, in the new head. */
4960 emit_insn_after (seq, BB_END (cond_bb));
4961 }
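/* Illustrative example: if COMP_RTX is (lt (reg:SI a) (reg:SI b)),
   the sequence emitted at the end of COND_BB is the RTL equivalent
   of "if (a < b) goto <label of FIRST_HEAD>;", with the fall-thru
   path leading to SECOND_HEAD.  */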
4962
4963
4964 /* Given a block B with a conditional branch at its end, store the
4965 branch edge in BRANCH_EDGE and the fall-thru edge in
4966 FALLTHRU_EDGE respectively. */
4967 static void
4968 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4969 edge *fallthru_edge)
4970 {
4971 edge e = EDGE_SUCC (b, 0);
4972
4973 if (e->flags & EDGE_FALLTHRU)
4974 {
4975 *fallthru_edge = e;
4976 *branch_edge = EDGE_SUCC (b, 1);
4977 }
4978 else
4979 {
4980 *branch_edge = e;
4981 *fallthru_edge = EDGE_SUCC (b, 1);
4982 }
4983 }
4984
4985 void
4986 init_rtl_bb_info (basic_block bb)
4987 {
4988 gcc_assert (!bb->il.x.rtl);
4989 bb->il.x.head_ = NULL;
4990 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
4991 }
4992
4993 /* Returns true if it is possible to remove edge E by redirecting
4994 it to the destination of the other edge from E->src. */
4995
4996 static bool
4997 rtl_can_remove_branch_p (const_edge e)
4998 {
4999 const_basic_block src = e->src;
5000 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5001 const rtx_insn *insn = BB_END (src);
5002 rtx set;
5003
5004 /* The conditions are taken from try_redirect_by_replacing_jump. */
5005 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5006 return false;
5007
5008 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5009 return false;
5010
5011 if (BB_PARTITION (src) != BB_PARTITION (target))
5012 return false;
5013
5014 if (!onlyjump_p (insn)
5015 || tablejump_p (insn, NULL, NULL))
5016 return false;
5017
5018 set = single_set (insn);
5019 if (!set || side_effects_p (set))
5020 return false;
5021
5022 return true;
5023 }
5024
5025 static basic_block
5026 rtl_duplicate_bb (basic_block bb)
5027 {
5028 bb = cfg_layout_duplicate_bb (bb);
5029 bb->aux = NULL;
5030 return bb;
5031 }
5032
5033 /* Do book-keeping of basic block BB for the profile consistency checker.
5034 If AFTER_PASS is 0, do pre-pass accounting; if AFTER_PASS is 1,
5035 do post-pass accounting. Store the counts in RECORD. */
5036 static void
5037 rtl_account_profile_record (basic_block bb, int after_pass,
5038 struct profile_record *record)
5039 {
5040 rtx_insn *insn;
5041 FOR_BB_INSNS (bb, insn)
5042 if (INSN_P (insn))
5043 {
5044 record->size[after_pass]
5045 += insn_rtx_cost (PATTERN (insn), false);
5046 if (profile_status_for_fn (cfun) == PROFILE_READ)
5047 record->time[after_pass]
5048 += insn_rtx_cost (PATTERN (insn), true) * bb->count;
5049 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5050 record->time[after_pass]
5051 += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
5052 }
5053 }
5054
5055 /* Implementation of CFG manipulation for linearized RTL. */
5056 struct cfg_hooks rtl_cfg_hooks = {
5057 "rtl",
5058 rtl_verify_flow_info,
5059 rtl_dump_bb,
5060 rtl_dump_bb_for_graph,
5061 rtl_create_basic_block,
5062 rtl_redirect_edge_and_branch,
5063 rtl_redirect_edge_and_branch_force,
5064 rtl_can_remove_branch_p,
5065 rtl_delete_block,
5066 rtl_split_block,
5067 rtl_move_block_after,
5068 rtl_can_merge_blocks, /* can_merge_blocks_p */
5069 rtl_merge_blocks,
5070 rtl_predict_edge,
5071 rtl_predicted_by_p,
5072 cfg_layout_can_duplicate_bb_p,
5073 rtl_duplicate_bb,
5074 rtl_split_edge,
5075 rtl_make_forwarder_block,
5076 rtl_tidy_fallthru_edge,
5077 rtl_force_nonfallthru,
5078 rtl_block_ends_with_call_p,
5079 rtl_block_ends_with_condjump_p,
5080 rtl_flow_call_edges_add,
5081 NULL, /* execute_on_growing_pred */
5082 NULL, /* execute_on_shrinking_pred */
5083 NULL, /* duplicate loop for trees */
5084 NULL, /* lv_add_condition_to_bb */
5085 NULL, /* lv_adjust_loop_header_phi*/
5086 NULL, /* extract_cond_bb_edges */
5087 NULL, /* flush_pending_stmts */
5088 rtl_block_empty_p, /* block_empty_p */
5089 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5090 rtl_account_profile_record,
5091 };
5092
5093 /* Implementation of CFG manipulation for cfg layout RTL, where
5094 basic blocks connected via fallthru edges do not have to be adjacent.
5095 This representation will hopefully become the default one in a future
5096 version of the compiler. */
5097
5098 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5099 "cfglayout mode",
5100 rtl_verify_flow_info_1,
5101 rtl_dump_bb,
5102 rtl_dump_bb_for_graph,
5103 cfg_layout_create_basic_block,
5104 cfg_layout_redirect_edge_and_branch,
5105 cfg_layout_redirect_edge_and_branch_force,
5106 rtl_can_remove_branch_p,
5107 cfg_layout_delete_block,
5108 cfg_layout_split_block,
5109 rtl_move_block_after,
5110 cfg_layout_can_merge_blocks_p,
5111 cfg_layout_merge_blocks,
5112 rtl_predict_edge,
5113 rtl_predicted_by_p,
5114 cfg_layout_can_duplicate_bb_p,
5115 cfg_layout_duplicate_bb,
5116 cfg_layout_split_edge,
5117 rtl_make_forwarder_block,
5118 NULL, /* tidy_fallthru_edge */
5119 rtl_force_nonfallthru,
5120 rtl_block_ends_with_call_p,
5121 rtl_block_ends_with_condjump_p,
5122 rtl_flow_call_edges_add,
5123 NULL, /* execute_on_growing_pred */
5124 NULL, /* execute_on_shrinking_pred */
5125 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5126 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5127 NULL, /* lv_adjust_loop_header_phi*/
5128 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5129 NULL, /* flush_pending_stmts */
5130 rtl_block_empty_p, /* block_empty_p */
5131 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5132 rtl_account_profile_record,
5133 };
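/* Passes switch between the two hook tables with
   rtl_register_cfg_hooks and cfg_layout_rtl_register_cfg_hooks (see
   cfghooks.c); cfg_layout_initialize and cfg_layout_finalize above
   perform this switching automatically.  */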
5134
5135 #include "gt-cfgrtl.h"