1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and analyze it
21 that are aware of the RTL intermediate language.
22
23 Available functionality:
24 - Basic CFG/RTL manipulation API documented in cfghooks.h
25 - CFG-aware instruction chain manipulation
26 delete_insn, delete_insn_chain
27 - Edge splitting and committing to edges
28 insert_insn_on_edge, commit_edge_insertions
29 - CFG updating after insn simplification
30 purge_dead_edges, purge_all_dead_edges
31 - CFG fixing after coarse manipulation
32 fixup_abnormal_edges
33
34    Functions not intended for generic use:
35     - Infrastructure to quickly determine the basic block for an insn
36 	 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn
37 - Edge redirection with updating and optimizing of insn chain
38 block_label, tidy_fallthru_edge, force_nonfallthru */
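/* Illustrative usage sketches (editorial addition, not part of the original
   sources): after a pass simplifies or deletes the control-flow insn ending
   some block BB, the CFG is typically brought back in sync with

	purge_dead_edges (bb);

   or, after bulk changes across the whole function,

	purge_all_dead_edges ();

   Deferred insertion of code on an edge E uses

	insert_insn_on_edge (seq, e);
	commit_edge_insertions ();

   where BB, E and SEQ are placeholders for the caller's block, edge and
   insn sequence.  Further sketches appear next to the individual
   functions below.  */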
39 \f
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "emit-rtl.h"
51 #include "cfgrtl.h"
52 #include "cfganal.h"
53 #include "cfgbuild.h"
54 #include "cfgcleanup.h"
55 #include "bb-reorder.h"
56 #include "rtl-error.h"
57 #include "insn-attr.h"
58 #include "dojump.h"
59 #include "expr.h"
60 #include "cfgloop.h"
61 #include "tree-pass.h"
62 #include "print-rtl.h"
63
64 /* Holds the interesting leading and trailing notes for the function.
65 Only applicable if the CFG is in cfglayout mode. */
66 static GTY(()) rtx_insn *cfg_layout_function_footer;
67 static GTY(()) rtx_insn *cfg_layout_function_header;
68
69 static rtx_insn *skip_insns_after_block (basic_block);
70 static void record_effective_endpoints (void);
71 static void fixup_reorder_chain (void);
72
73 void verify_insn_chain (void);
74 static void fixup_fallthru_exit_predecessor (void);
75 static int can_delete_note_p (const rtx_note *);
76 static int can_delete_label_p (const rtx_code_label *);
77 static basic_block rtl_split_edge (edge);
78 static bool rtl_move_block_after (basic_block, basic_block);
79 static int rtl_verify_flow_info (void);
80 static basic_block cfg_layout_split_block (basic_block, void *);
81 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
82 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
83 static void cfg_layout_delete_block (basic_block);
84 static void rtl_delete_block (basic_block);
85 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
86 static edge rtl_redirect_edge_and_branch (edge, basic_block);
87 static basic_block rtl_split_block (basic_block, void *);
88 static void rtl_dump_bb (FILE *, basic_block, int, int);
89 static int rtl_verify_flow_info_1 (void);
90 static void rtl_make_forwarder_block (edge);
91 \f
92 /* Return true if NOTE is not one of the ones that must be kept paired,
93 so that we may simply delete it. */
94
95 static int
96 can_delete_note_p (const rtx_note *note)
97 {
98 switch (NOTE_KIND (note))
99 {
100 case NOTE_INSN_DELETED:
101 case NOTE_INSN_BASIC_BLOCK:
102 case NOTE_INSN_EPILOGUE_BEG:
103 return true;
104
105 default:
106 return false;
107 }
108 }
109
110 /* True if a given label can be deleted. */
111
112 static int
113 can_delete_label_p (const rtx_code_label *label)
114 {
115 return (!LABEL_PRESERVE_P (label)
116 /* User declared labels must be preserved. */
117 && LABEL_NAME (label) == 0
118 && !in_insn_list_p (forced_labels, label));
119 }
120
121 /* Delete INSN by patching it out. */
122
123 void
124 delete_insn (rtx uncast_insn)
125 {
126 rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
127 rtx note;
128 bool really_delete = true;
129
130 if (LABEL_P (insn))
131 {
132 /* Some labels can't be directly removed from the INSN chain, as they
133 	 might be referenced via variables, the constant pool, etc.
134 Convert them to the special NOTE_INSN_DELETED_LABEL note. */
135 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
136 {
137 const char *name = LABEL_NAME (insn);
138 basic_block bb = BLOCK_FOR_INSN (insn);
139 rtx_insn *bb_note = NEXT_INSN (insn);
140
141 really_delete = false;
142 PUT_CODE (insn, NOTE);
143 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
144 NOTE_DELETED_LABEL_NAME (insn) = name;
145
146 /* If the note following the label starts a basic block, and the
147 label is a member of the same basic block, interchange the two. */
148 if (bb_note != NULL_RTX
149 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
150 && bb != NULL
151 && bb == BLOCK_FOR_INSN (bb_note))
152 {
153 reorder_insns_nobb (insn, insn, bb_note);
154 BB_HEAD (bb) = bb_note;
155 if (BB_END (bb) == bb_note)
156 BB_END (bb) = insn;
157 }
158 }
159
160 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
161 }
162
163 if (really_delete)
164 {
165 /* If this insn has already been deleted, something is very wrong. */
166 gcc_assert (!insn->deleted ());
167 if (INSN_P (insn))
168 df_insn_delete (insn);
169 remove_insn (insn);
170 insn->set_deleted ();
171 }
172
173 /* If deleting a jump, decrement the use count of the label. Deleting
174 the label itself should happen in the normal course of block merging. */
175 if (JUMP_P (insn))
176 {
177 if (JUMP_LABEL (insn)
178 && LABEL_P (JUMP_LABEL (insn)))
179 LABEL_NUSES (JUMP_LABEL (insn))--;
180
181 /* If there are more targets, remove them too. */
182 while ((note
183 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
184 && LABEL_P (XEXP (note, 0)))
185 {
186 LABEL_NUSES (XEXP (note, 0))--;
187 remove_note (insn, note);
188 }
189 }
190
191 /* Also if deleting any insn that references a label as an operand. */
192 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
193 && LABEL_P (XEXP (note, 0)))
194 {
195 LABEL_NUSES (XEXP (note, 0))--;
196 remove_note (insn, note);
197 }
198
199 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
200 {
201 rtvec vec = table->get_labels ();
202 int len = GET_NUM_ELEM (vec);
203 int i;
204
205 for (i = 0; i < len; i++)
206 {
207 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
208
209 /* When deleting code in bulk (e.g. removing many unreachable
210 blocks) we can delete a label that's a target of the vector
211 before deleting the vector itself. */
212 if (!NOTE_P (label))
213 LABEL_NUSES (label)--;
214 }
215 }
216 }
217
218 /* Like delete_insn but also purge dead edges from BB. */
219
220 void
221 delete_insn_and_edges (rtx_insn *insn)
222 {
223 bool purge = false;
224
225 if (INSN_P (insn)
226 && BLOCK_FOR_INSN (insn)
227 && BB_END (BLOCK_FOR_INSN (insn)) == insn)
228 purge = true;
229 delete_insn (insn);
230 if (purge)
231 purge_dead_edges (BLOCK_FOR_INSN (insn));
232 }
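/* Illustrative sketch (editorial addition, not part of the original file):
   a caller that wants to drop a jump it has proven dead can let
   delete_insn_and_edges purge the outgoing edges the jump implied.  The
   guard used here is only an example of a "safe to delete" test.  */
#if 0
static void
example_delete_trailing_jump (basic_block bb)
{
  rtx_insn *last = BB_END (bb);

  /* Only a simple unconditional jump with no side effects.  */
  if (JUMP_P (last) && onlyjump_p (last) && simplejump_p (last))
    delete_insn_and_edges (last);
}
#endif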
233
234 /* Unlink a chain of insns between START and FINISH, leaving notes
235    that must be paired.  If CLEAR_BB is true, we set the bb field to NULL
236    for insns that cannot be removed. */
237
238 void
239 delete_insn_chain (rtx start, rtx finish, bool clear_bb)
240 {
241 rtx_insn *prev, *current;
242
243 /* Unchain the insns one by one. It would be quicker to delete all of these
244 with a single unchaining, rather than one at a time, but we need to keep
245    the NOTEs. */
246 current = safe_as_a <rtx_insn *> (finish);
247 while (1)
248 {
249 prev = PREV_INSN (current);
250 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
251 ;
252 else
253 delete_insn (current);
254
255 if (clear_bb && !current->deleted ())
256 set_block_for_insn (current, NULL);
257
258 if (current == start)
259 break;
260 current = prev;
261 }
262 }
263 \f
264 /* Create a new basic block consisting of the instructions between HEAD and END
265    inclusive.  This function is designed to allow fast BB construction - it
266    reuses the note and basic block struct in BB_NOTE, if any, does not grow
267    the BASIC_BLOCK chain, and should be used directly only by CFG construction
268    code.  END can be NULL to create a new empty basic block before HEAD.
269    Both END and HEAD can be NULL to create a basic block at the end of the
270    insn chain.  AFTER is the basic block the new block should be put after. */
271
272 basic_block
273 create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
274 basic_block after)
275 {
276 basic_block bb;
277
278 if (bb_note
279 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
280 && bb->aux == NULL)
281 {
282 /* If we found an existing note, thread it back onto the chain. */
283
284 rtx_insn *after;
285
286 if (LABEL_P (head))
287 after = head;
288 else
289 {
290 after = PREV_INSN (head);
291 head = bb_note;
292 }
293
294 if (after != bb_note && NEXT_INSN (after) != bb_note)
295 reorder_insns_nobb (bb_note, bb_note, after);
296 }
297 else
298 {
299 /* Otherwise we must create a note and a basic block structure. */
300
301 bb = alloc_block ();
302
303 init_rtl_bb_info (bb);
304 if (!head && !end)
305 head = end = bb_note
306 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
307 else if (LABEL_P (head) && end)
308 {
309 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
310 if (head == end)
311 end = bb_note;
312 }
313 else
314 {
315 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
316 head = bb_note;
317 if (!end)
318 end = head;
319 }
320
321 NOTE_BASIC_BLOCK (bb_note) = bb;
322 }
323
324 /* Always include the bb note in the block. */
325 if (NEXT_INSN (end) == bb_note)
326 end = bb_note;
327
328 BB_HEAD (bb) = head;
329 BB_END (bb) = end;
330 bb->index = last_basic_block_for_fn (cfun)++;
331 bb->flags = BB_NEW | BB_RTL;
332 link_block (bb, after);
333 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
334 df_bb_refs_record (bb->index, false);
335 update_bb_for_insn (bb);
336 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
337
338 /* Tag the block so that we know it has been used when considering
339 other basic block notes. */
340 bb->aux = bb;
341
342 return bb;
343 }
344
345 /* Create a new basic block consisting of the instructions between HEAD and
346    END and place it in the BB chain after block AFTER.  END can be NULL to
347    create a new empty basic block before HEAD.  Both END and HEAD can be
348    NULL to create a basic block at the end of the insn chain. */
349
350 static basic_block
351 rtl_create_basic_block (void *headp, void *endp, basic_block after)
352 {
353 rtx_insn *head = (rtx_insn *) headp;
354 rtx_insn *end = (rtx_insn *) endp;
355 basic_block bb;
356
357 /* Grow the basic block array if needed. */
358 if ((size_t) last_basic_block_for_fn (cfun)
359 >= basic_block_info_for_fn (cfun)->length ())
360 {
361 size_t new_size =
362 (last_basic_block_for_fn (cfun)
363 + (last_basic_block_for_fn (cfun) + 3) / 4);
364 vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
365 }
366
367 n_basic_blocks_for_fn (cfun)++;
368
369 bb = create_basic_block_structure (head, end, NULL, after);
370 bb->aux = NULL;
371 return bb;
372 }
373
374 static basic_block
375 cfg_layout_create_basic_block (void *head, void *end, basic_block after)
376 {
377 basic_block newbb = rtl_create_basic_block (head, end, after);
378
379 return newbb;
380 }
381 \f
382 /* Delete the insns in a (non-live) block. We physically delete every
383    non-deleted-note insn, and update the flow graph appropriately. */
386
387 /* ??? Preserving all such notes strikes me as wrong. It would be nice
388 to post-process the stream to remove empty blocks, loops, ranges, etc. */
389
390 static void
391 rtl_delete_block (basic_block b)
392 {
393 rtx_insn *insn, *end;
394
395 /* If the head of this block is a CODE_LABEL, then it might be the
396 label for an exception handler which can't be reached. We need
397 to remove the label from the exception_handler_label list. */
398 insn = BB_HEAD (b);
399
400 end = get_last_bb_insn (b);
401
402 /* Selectively delete the entire chain. */
403 BB_HEAD (b) = NULL;
404 delete_insn_chain (insn, end, true);
405
406
407 if (dump_file)
408 fprintf (dump_file, "deleting block %d\n", b->index);
409 df_bb_delete (b->index);
410 }
411 \f
412 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
413
414 void
415 compute_bb_for_insn (void)
416 {
417 basic_block bb;
418
419 FOR_EACH_BB_FN (bb, cfun)
420 {
421 rtx_insn *end = BB_END (bb);
422 rtx_insn *insn;
423
424 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
425 {
426 BLOCK_FOR_INSN (insn) = bb;
427 if (insn == end)
428 break;
429 }
430 }
431 }
432
433 /* Release the basic_block_for_insn array. */
434
435 unsigned int
436 free_bb_for_insn (void)
437 {
438 rtx_insn *insn;
439 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
440 if (!BARRIER_P (insn))
441 BLOCK_FOR_INSN (insn) = NULL;
442 return 0;
443 }
444
445 namespace {
446
447 const pass_data pass_data_free_cfg =
448 {
449 RTL_PASS, /* type */
450 "*free_cfg", /* name */
451 OPTGROUP_NONE, /* optinfo_flags */
452 TV_NONE, /* tv_id */
453 0, /* properties_required */
454 0, /* properties_provided */
455 PROP_cfg, /* properties_destroyed */
456 0, /* todo_flags_start */
457 0, /* todo_flags_finish */
458 };
459
460 class pass_free_cfg : public rtl_opt_pass
461 {
462 public:
463 pass_free_cfg (gcc::context *ctxt)
464 : rtl_opt_pass (pass_data_free_cfg, ctxt)
465 {}
466
467 /* opt_pass methods: */
468 virtual unsigned int execute (function *);
469
470 }; // class pass_free_cfg
471
472 unsigned int
473 pass_free_cfg::execute (function *)
474 {
475 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
476 valid at that point so it would be too late to call df_analyze. */
477 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
478 {
479 df_note_add_problem ();
480 df_analyze ();
481 }
482
483 if (crtl->has_bb_partition)
484 insert_section_boundary_note ();
485
486 free_bb_for_insn ();
487 return 0;
488 }
489
490 } // anon namespace
491
492 rtl_opt_pass *
493 make_pass_free_cfg (gcc::context *ctxt)
494 {
495 return new pass_free_cfg (ctxt);
496 }
497
498 /* Return the insn after which to emit code at the entry of the function. */
499 rtx_insn *
500 entry_of_function (void)
501 {
502 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
503 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
504 }
505
506 /* Emit INSN at the entry point of the function, ensuring that it is only
507 executed once per function. */
508 void
509 emit_insn_at_entry (rtx insn)
510 {
511 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
512 edge e = ei_safe_edge (ei);
513 gcc_assert (e->flags & EDGE_FALLTHRU);
514
515 insert_insn_on_edge (insn, e);
516 commit_edge_insertions ();
517 }
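/* Illustrative sketch (editorial addition, not part of the original file):
   emitting one initialization insn that executes exactly once on function
   entry.  The pseudo and the SET built here are hypothetical; any
   recognizable pattern will do.  Only meaningful before reload, since a
   new pseudo register is created.  */
#if 0
static void
example_clear_pseudo_at_entry (void)
{
  rtx tmp = gen_reg_rtx (SImode);

  /* Queue the insn on the entry fallthru edge and commit it.  */
  emit_insn_at_entry (gen_rtx_SET (tmp, const0_rtx));
}
#endif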
518
519 /* Update BLOCK_FOR_INSN of insns between BEGIN and END
520 (or BARRIER if found) and notify df of the bb change.
521 The insn chain range is inclusive
522    (i.e. both BEGIN and END will be updated). */
523
524 static void
525 update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
526 {
527 rtx_insn *insn;
528
529 end = NEXT_INSN (end);
530 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
531 if (!BARRIER_P (insn))
532 df_insn_change_bb (insn, bb);
533 }
534
535 /* Update BLOCK_FOR_INSN of insns in BB to BB,
536 and notify df of the change. */
537
538 void
539 update_bb_for_insn (basic_block bb)
540 {
541 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
542 }
543
544 \f
545 /* Like active_insn_p, except keep the return value clobber around
546 even after reload. */
547
548 static bool
549 flow_active_insn_p (const rtx_insn *insn)
550 {
551 if (active_insn_p (insn))
552 return true;
553
554 /* A clobber of the function return value exists for buggy
555 programs that fail to return a value. Its effect is to
556 keep the return value from being live across the entire
557 function. If we allow it to be skipped, we introduce the
558 possibility for register lifetime confusion. */
559 if (GET_CODE (PATTERN (insn)) == CLOBBER
560 && REG_P (XEXP (PATTERN (insn), 0))
561 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
562 return true;
563
564 return false;
565 }
566
567 /* Return true if the block has no effect and only forwards control flow to
568 its single destination. */
569
570 bool
571 contains_no_active_insn_p (const_basic_block bb)
572 {
573 rtx_insn *insn;
574
575 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
576 || !single_succ_p (bb))
577 return false;
578
579 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
580 if (INSN_P (insn) && flow_active_insn_p (insn))
581 return false;
582
583 return (!INSN_P (insn)
584 || (JUMP_P (insn) && simplejump_p (insn))
585 || !flow_active_insn_p (insn));
586 }
587
588 /* Likewise, but protect loop latches, headers and preheaders. */
589 /* FIXME: Make this a cfg hook. */
590
591 bool
592 forwarder_block_p (const_basic_block bb)
593 {
594 if (!contains_no_active_insn_p (bb))
595 return false;
596
597 /* Protect loop latches, headers and preheaders. */
598 if (current_loops)
599 {
600 basic_block dest;
601 if (bb->loop_father->header == bb)
602 return false;
603 dest = EDGE_SUCC (bb, 0)->dest;
604 if (dest->loop_father->header == dest)
605 return false;
606 }
607
608 return true;
609 }
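/* Illustrative sketch (editorial addition, not part of the original file):
   scanning the current function for forwarder blocks.  Such blocks are
   candidates for removal by cfgcleanup; here they are merely reported.  */
#if 0
static void
example_report_forwarders (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    if (forwarder_block_p (bb) && dump_file)
      fprintf (dump_file, "block %d only forwards to block %d\n",
	       bb->index, single_succ (bb)->index);
}
#endif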
610
611 /* Return nonzero if we can reach target from src by falling through. */
612 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
613
614 bool
615 can_fallthru (basic_block src, basic_block target)
616 {
617 rtx_insn *insn = BB_END (src);
618 rtx_insn *insn2;
619 edge e;
620 edge_iterator ei;
621
622 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
623 return true;
624 if (src->next_bb != target)
625 return false;
626
627 /* ??? Later we may add code to move jump tables offline. */
628 if (tablejump_p (insn, NULL, NULL))
629 return false;
630
631 FOR_EACH_EDGE (e, ei, src->succs)
632 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
633 && e->flags & EDGE_FALLTHRU)
634 return false;
635
636 insn2 = BB_HEAD (target);
637 if (!active_insn_p (insn2))
638 insn2 = next_active_insn (insn2);
639
640 return next_active_insn (insn) == insn2;
641 }
642
643 /* Return nonzero if we could reach target from src by falling through,
644 if the target was made adjacent. If we already have a fall-through
645 edge to the exit block, we can't do that. */
646 static bool
647 could_fall_through (basic_block src, basic_block target)
648 {
649 edge e;
650 edge_iterator ei;
651
652 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
653 return true;
654 FOR_EACH_EDGE (e, ei, src->succs)
655 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
656 && e->flags & EDGE_FALLTHRU)
657       return false;
658 return true;
659 }
660 \f
661 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
662 rtx_note *
663 bb_note (basic_block bb)
664 {
665 rtx_insn *note;
666
667 note = BB_HEAD (bb);
668 if (LABEL_P (note))
669 note = NEXT_INSN (note);
670
671 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
672 return as_a <rtx_note *> (note);
673 }
674
675 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
676 note associated with the BLOCK. */
677
678 static rtx_insn *
679 first_insn_after_basic_block_note (basic_block block)
680 {
681 rtx_insn *insn;
682
683 /* Get the first instruction in the block. */
684 insn = BB_HEAD (block);
685
686 if (insn == NULL_RTX)
687 return NULL;
688 if (LABEL_P (insn))
689 insn = NEXT_INSN (insn);
690 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
691
692 return NEXT_INSN (insn);
693 }
694
695 /* Create a new basic block just after basic block BB by splitting
696    everything after the specified instruction INSNP. */
697
698 static basic_block
699 rtl_split_block (basic_block bb, void *insnp)
700 {
701 basic_block new_bb;
702 rtx_insn *insn = (rtx_insn *) insnp;
703 edge e;
704 edge_iterator ei;
705
706 if (!insn)
707 {
708 insn = first_insn_after_basic_block_note (bb);
709
710 if (insn)
711 {
712 rtx_insn *next = insn;
713
714 insn = PREV_INSN (insn);
715
716 /* If the block contains only debug insns, insn would have
717 been NULL in a non-debug compilation, and then we'd end
718 up emitting a DELETED note. For -fcompare-debug
719 stability, emit the note too. */
720 if (insn != BB_END (bb)
721 && DEBUG_INSN_P (next)
722 && DEBUG_INSN_P (BB_END (bb)))
723 {
724 while (next != BB_END (bb) && DEBUG_INSN_P (next))
725 next = NEXT_INSN (next);
726
727 if (next == BB_END (bb))
728 emit_note_after (NOTE_INSN_DELETED, next);
729 }
730 }
731 else
732 insn = get_last_insn ();
733 }
734
735 /* We should probably check the type of the insn so that we do not create
736    an inconsistent CFG.  It is checked in verify_flow_info anyway, so do not
737    bother. */
738 if (insn == BB_END (bb))
739 emit_note_after (NOTE_INSN_DELETED, insn);
740
741 /* Create the new basic block. */
742 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
743 BB_COPY_PARTITION (new_bb, bb);
744 BB_END (bb) = insn;
745
746 /* Redirect the outgoing edges. */
747 new_bb->succs = bb->succs;
748 bb->succs = NULL;
749 FOR_EACH_EDGE (e, ei, new_bb->succs)
750 e->src = new_bb;
751
752 /* The new block starts off being dirty. */
753 df_set_bb_dirty (bb);
754 return new_bb;
755 }
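/* Illustrative sketch (editorial addition, not part of the original file):
   passes normally split a block through the generic split_block wrapper
   declared in cfghooks.h, which dispatches to rtl_split_block above when
   the CFG is in RTL form.  The returned edge is assumed to run from BB to
   the newly created block.  */
#if 0
static basic_block
example_split_after (basic_block bb, rtx_insn *insn)
{
  edge fallthru = split_block (bb, insn);

  /* The new block holds everything that followed INSN.  */
  return fallthru->dest;
}
#endif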
756
757 /* Return true if the single edge between blocks A and B is the only place
758 in RTL which holds some unique locus. */
759
760 static bool
761 unique_locus_on_edge_between_p (basic_block a, basic_block b)
762 {
763 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
764 rtx_insn *insn, *end;
765
766 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
767 return false;
768
769 /* First scan block A backward. */
770 insn = BB_END (a);
771 end = PREV_INSN (BB_HEAD (a));
772 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
773 insn = PREV_INSN (insn);
774
775 if (insn != end && INSN_LOCATION (insn) == goto_locus)
776 return false;
777
778 /* Then scan block B forward. */
779 insn = BB_HEAD (b);
780 if (insn)
781 {
782 end = NEXT_INSN (BB_END (b));
783 while (insn != end && !NONDEBUG_INSN_P (insn))
784 insn = NEXT_INSN (insn);
785
786 if (insn != end && INSN_HAS_LOCATION (insn)
787 && INSN_LOCATION (insn) == goto_locus)
788 return false;
789 }
790
791 return true;
792 }
793
794 /* If the single edge between blocks A and B is the only place in RTL which
795 holds some unique locus, emit a nop with that locus between the blocks. */
796
797 static void
798 emit_nop_for_unique_locus_between (basic_block a, basic_block b)
799 {
800 if (!unique_locus_on_edge_between_p (a, b))
801 return;
802
803 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
804 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
805 }
806
807 /* Blocks A and B are to be merged into a single block A. The insns
808 are already contiguous. */
809
810 static void
811 rtl_merge_blocks (basic_block a, basic_block b)
812 {
813 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
814 rtx_insn *del_first = NULL, *del_last = NULL;
815 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
816 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
817 int b_empty = 0;
818
819 if (dump_file)
820 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
821 a->index);
822
823 while (DEBUG_INSN_P (b_end))
824 b_end = PREV_INSN (b_debug_start = b_end);
825
826 /* If there was a CODE_LABEL beginning B, delete it. */
827 if (LABEL_P (b_head))
828 {
829 /* Detect basic blocks with nothing but a label. This can happen
830 in particular at the end of a function. */
831 if (b_head == b_end)
832 b_empty = 1;
833
834 del_first = del_last = b_head;
835 b_head = NEXT_INSN (b_head);
836 }
837
838 /* Delete the basic block note and handle blocks containing just that
839 note. */
840 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
841 {
842 if (b_head == b_end)
843 b_empty = 1;
844 if (! del_last)
845 del_first = b_head;
846
847 del_last = b_head;
848 b_head = NEXT_INSN (b_head);
849 }
850
851 /* If there was a jump out of A, delete it. */
852 if (JUMP_P (a_end))
853 {
854 rtx_insn *prev;
855
856 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
857 if (!NOTE_P (prev)
858 || NOTE_INSN_BASIC_BLOCK_P (prev)
859 || prev == BB_HEAD (a))
860 break;
861
862 del_first = a_end;
863
864 /* If this was a conditional jump, we need to also delete
865 the insn that set cc0. */
866 if (HAVE_cc0 && only_sets_cc0_p (prev))
867 {
868 rtx_insn *tmp = prev;
869
870 prev = prev_nonnote_insn (prev);
871 if (!prev)
872 prev = BB_HEAD (a);
873 del_first = tmp;
874 }
875
876 a_end = PREV_INSN (del_first);
877 }
878 else if (BARRIER_P (NEXT_INSN (a_end)))
879 del_first = NEXT_INSN (a_end);
880
881 /* Delete everything marked above as well as crap that might be
882 hanging out between the two blocks. */
883 BB_END (a) = a_end;
884 BB_HEAD (b) = b_empty ? NULL : b_head;
885 delete_insn_chain (del_first, del_last, true);
886
887 /* When not optimizing and the edge is the only place in RTL which holds
888 some unique locus, emit a nop with that locus in between. */
889 if (!optimize)
890 {
891 emit_nop_for_unique_locus_between (a, b);
892 a_end = BB_END (a);
893 }
894
895 /* Reassociate the insns of B with A. */
896 if (!b_empty)
897 {
898 update_bb_for_insn_chain (a_end, b_debug_end, a);
899
900 BB_END (a) = b_debug_end;
901 BB_HEAD (b) = NULL;
902 }
903 else if (b_end != b_debug_end)
904 {
905 /* Move any deleted labels and other notes between the end of A
906 and the debug insns that make up B after the debug insns,
907 bringing the debug insns into A while keeping the notes after
908 the end of A. */
909 if (NEXT_INSN (a_end) != b_debug_start)
910 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
911 b_debug_end);
912 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
913 BB_END (a) = b_debug_end;
914 }
915
916 df_bb_delete (b->index);
917
918 /* If B was a forwarder block, propagate the locus on the edge. */
919 if (forwarder_p
920 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
921 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
922
923 if (dump_file)
924 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
925 }
926
927
928 /* Return true when blocks A and B can be merged. */
929
930 static bool
931 rtl_can_merge_blocks (basic_block a, basic_block b)
932 {
933 /* If we are partitioning hot/cold basic blocks, we don't want to
934 mess up unconditional or indirect jumps that cross between hot
935 and cold sections.
936
937 Basic block partitioning may result in some jumps that appear to
938 be optimizable (or blocks that appear to be mergeable), but which really
939 must be left untouched (they are required to make it safely across
940 partition boundaries). See the comments at the top of
941 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
942
943 if (BB_PARTITION (a) != BB_PARTITION (b))
944 return false;
945
946 /* Protect the loop latches. */
947 if (current_loops && b->loop_father->latch == b)
948 return false;
949
950 /* There must be exactly one edge in between the blocks. */
951 return (single_succ_p (a)
952 && single_succ (a) == b
953 && single_pred_p (b)
954 && a != b
955 /* Must be simple edge. */
956 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
957 && a->next_bb == b
958 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
959 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
960 /* If the jump insn has side effects,
961 we can't kill the edge. */
962 && (!JUMP_P (BB_END (a))
963 || (reload_completed
964 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
965 }
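/* Illustrative sketch (editorial addition, not part of the original file):
   the usual way a pass merges two adjacent blocks is through the cfghooks
   wrappers, which consult rtl_can_merge_blocks above before handing the
   work to rtl_merge_blocks.  */
#if 0
static void
example_try_merge (basic_block a, basic_block b)
{
  if (can_merge_blocks_p (a, b))
    /* B's insns are absorbed into A and B is removed from the CFG.  */
    merge_blocks (a, b);
}
#endif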
966 \f
967 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
968 exist. */
969
970 rtx_code_label *
971 block_label (basic_block block)
972 {
973 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
974 return NULL;
975
976 if (!LABEL_P (BB_HEAD (block)))
977 {
978 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
979 }
980
981 return as_a <rtx_code_label *> (BB_HEAD (block));
982 }
983
984 /* Attempt to perform edge redirection by replacing a possibly complex jump
985    instruction with an unconditional jump, or by removing the jump completely.
986    This applies only if all edges now point to the same block.  The parameters
987    and return values are equivalent to redirect_edge_and_branch. */
988
989 edge
990 try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
991 {
992 basic_block src = e->src;
993 rtx_insn *insn = BB_END (src), *kill_from;
994 rtx set;
995 int fallthru = 0;
996
997 /* If we are partitioning hot/cold basic blocks, we don't want to
998 mess up unconditional or indirect jumps that cross between hot
999 and cold sections.
1000
1001 Basic block partitioning may result in some jumps that appear to
1002 be optimizable (or blocks that appear to be mergeable), but which really
1003 must be left untouched (they are required to make it safely across
1004 partition boundaries). See the comments at the top of
1005 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1006
1007 if (BB_PARTITION (src) != BB_PARTITION (target))
1008 return NULL;
1009
1010 /* We can replace or remove a complex jump only when we have exactly
1011 two edges. Also, if we have exactly one outgoing edge, we can
1012 redirect that. */
1013 if (EDGE_COUNT (src->succs) >= 3
1014 /* Verify that all targets will be TARGET. Specifically, the
1015 edge that is not E must also go to TARGET. */
1016 || (EDGE_COUNT (src->succs) == 2
1017 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1018 return NULL;
1019
1020 if (!onlyjump_p (insn))
1021 return NULL;
1022 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1023 return NULL;
1024
1025 /* Avoid removing branch with side effects. */
1026 set = single_set (insn);
1027 if (!set || side_effects_p (set))
1028 return NULL;
1029
1030 /* In case we zap a conditional jump, we'll need to kill
1031 the cc0 setter too. */
1032 kill_from = insn;
1033 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1034 && only_sets_cc0_p (PREV_INSN (insn)))
1035 kill_from = PREV_INSN (insn);
1036
1037 /* See if we can create the fallthru edge. */
1038 if (in_cfglayout || can_fallthru (src, target))
1039 {
1040 if (dump_file)
1041 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1042 fallthru = 1;
1043
1044 /* Selectively unlink whole insn chain. */
1045 if (in_cfglayout)
1046 {
1047 rtx_insn *insn = BB_FOOTER (src);
1048
1049 delete_insn_chain (kill_from, BB_END (src), false);
1050
1051 /* Remove barriers but keep jumptables. */
1052 while (insn)
1053 {
1054 if (BARRIER_P (insn))
1055 {
1056 if (PREV_INSN (insn))
1057 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1058 else
1059 BB_FOOTER (src) = NEXT_INSN (insn);
1060 if (NEXT_INSN (insn))
1061 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1062 }
1063 if (LABEL_P (insn))
1064 break;
1065 insn = NEXT_INSN (insn);
1066 }
1067 }
1068 else
1069 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1070 false);
1071 }
1072
1073 /* If this already is simplejump, redirect it. */
1074 else if (simplejump_p (insn))
1075 {
1076 if (e->dest == target)
1077 return NULL;
1078 if (dump_file)
1079 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1080 INSN_UID (insn), e->dest->index, target->index);
1081 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1082 block_label (target), 0))
1083 {
1084 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1085 return NULL;
1086 }
1087 }
1088
1089 /* Cannot do anything for target exit block. */
1090 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1091 return NULL;
1092
1093 /* Or replace possibly complicated jump insn by simple jump insn. */
1094 else
1095 {
1096 rtx_code_label *target_label = block_label (target);
1097 rtx_insn *barrier;
1098 rtx label;
1099 rtx_jump_table_data *table;
1100
1101 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1102 JUMP_LABEL (BB_END (src)) = target_label;
1103 LABEL_NUSES (target_label)++;
1104 if (dump_file)
1105 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1106 INSN_UID (insn), INSN_UID (BB_END (src)));
1107
1108
1109 delete_insn_chain (kill_from, insn, false);
1110
1111 /* Recognize a tablejump that we are converting to a
1112 simple jump and remove its associated CODE_LABEL
1113 and ADDR_VEC or ADDR_DIFF_VEC. */
1114 if (tablejump_p (insn, &label, &table))
1115 delete_insn_chain (label, table, false);
1116
1117 barrier = next_nonnote_insn (BB_END (src));
1118 if (!barrier || !BARRIER_P (barrier))
1119 emit_barrier_after (BB_END (src));
1120 else
1121 {
1122 if (barrier != NEXT_INSN (BB_END (src)))
1123 {
1124 /* Move the jump before the barrier so that the notes
1125    which originally were or were created before the jump table are
1126    inside the basic block. */
1127 rtx_insn *new_insn = BB_END (src);
1128
1129 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1130 PREV_INSN (barrier), src);
1131
1132 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1133 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1134
1135 SET_NEXT_INSN (new_insn) = barrier;
1136 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1137
1138 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1139 SET_PREV_INSN (barrier) = new_insn;
1140 }
1141 }
1142 }
1143
1144 /* Keep only one edge out and set proper flags. */
1145 if (!single_succ_p (src))
1146 remove_edge (e);
1147 gcc_assert (single_succ_p (src));
1148
1149 e = single_succ_edge (src);
1150 if (fallthru)
1151 e->flags = EDGE_FALLTHRU;
1152 else
1153 e->flags = 0;
1154
1155 e->probability = REG_BR_PROB_BASE;
1156 e->count = src->count;
1157
1158 if (e->dest != target)
1159 redirect_edge_succ (e, target);
1160 return e;
1161 }
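/* Illustrative sketch (editorial addition, not part of the original file):
   when every outgoing edge of BB already leads to DEST, the possibly
   conditional jump ending BB can be replaced by a simple jump (or removed
   in favour of a fallthru) with try_redirect_by_replacing_jump.  */
#if 0
static void
example_collapse_jump (basic_block bb, basic_block dest)
{
  edge e = EDGE_SUCC (bb, 0);

  if (try_redirect_by_replacing_jump (e, dest, false))
    df_set_bb_dirty (bb);
}
#endif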
1162
1163 /* Subroutine of redirect_branch_edge that tries to patch the jump
1164    instruction INSN so that it reaches block NEW_BB.  Do this
1165    only when it originally reached the block containing OLD_LABEL.  Return
1166    true if this worked or the original target wasn't OLD_LABEL; return false
1167    if the redirection doesn't work. */
1168
1169 static bool
1170 patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1171 {
1172 rtx_jump_table_data *table;
1173 rtx tmp;
1174 /* Recognize a tablejump and adjust all matching cases. */
1175 if (tablejump_p (insn, NULL, &table))
1176 {
1177 rtvec vec;
1178 int j;
1179 rtx_code_label *new_label = block_label (new_bb);
1180
1181 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1182 return false;
1183 vec = table->get_labels ();
1184
1185 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1186 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1187 {
1188 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1189 --LABEL_NUSES (old_label);
1190 ++LABEL_NUSES (new_label);
1191 }
1192
1193 /* Handle casesi dispatch insns. */
1194 if ((tmp = single_set (insn)) != NULL
1195 && SET_DEST (tmp) == pc_rtx
1196 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
1197 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
1198 && LABEL_REF_LABEL (XEXP (SET_SRC (tmp), 2)) == old_label)
1199 {
1200 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1201 new_label);
1202 --LABEL_NUSES (old_label);
1203 ++LABEL_NUSES (new_label);
1204 }
1205 }
1206 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1207 {
1208 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1209 rtx note;
1210
1211 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1212 return false;
1213 rtx_code_label *new_label = block_label (new_bb);
1214
1215 for (i = 0; i < n; ++i)
1216 {
1217 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1218 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1219 if (XEXP (old_ref, 0) == old_label)
1220 {
1221 ASM_OPERANDS_LABEL (tmp, i)
1222 = gen_rtx_LABEL_REF (Pmode, new_label);
1223 --LABEL_NUSES (old_label);
1224 ++LABEL_NUSES (new_label);
1225 }
1226 }
1227
1228 if (JUMP_LABEL (insn) == old_label)
1229 {
1230 JUMP_LABEL (insn) = new_label;
1231 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1232 if (note)
1233 remove_note (insn, note);
1234 }
1235 else
1236 {
1237 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1238 if (note)
1239 remove_note (insn, note);
1240 if (JUMP_LABEL (insn) != new_label
1241 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1242 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1243 }
1244 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1245 != NULL_RTX)
1246 XEXP (note, 0) = new_label;
1247 }
1248 else
1249 {
1250 /* ??? We may play games with moving the named labels from
1251    one basic block to the other in case only one computed_jump is
1252    available. */
1253 if (computed_jump_p (insn)
1254 /* A return instruction can't be redirected. */
1255 || returnjump_p (insn))
1256 return false;
1257
1258 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1259 {
1260 /* If the insn doesn't go where we think, we're confused. */
1261 gcc_assert (JUMP_LABEL (insn) == old_label);
1262
1263 /* If the substitution doesn't succeed, die. This can happen
1264 if the back end emitted unrecognizable instructions or if
1265 target is exit block on some arches. */
1266 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1267 block_label (new_bb), 0))
1268 {
1269 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
1270 return false;
1271 }
1272 }
1273 }
1274 return true;
1275 }
1276
1277
1278 /* Redirect the edge representing a branch of an (un)conditional jump or
1279    tablejump.  Return NULL on failure. */
1280 static edge
1281 redirect_branch_edge (edge e, basic_block target)
1282 {
1283 rtx_insn *old_label = BB_HEAD (e->dest);
1284 basic_block src = e->src;
1285 rtx_insn *insn = BB_END (src);
1286
1287 /* We can only redirect non-fallthru edges of jump insn. */
1288 if (e->flags & EDGE_FALLTHRU)
1289 return NULL;
1290 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1291 return NULL;
1292
1293 if (!currently_expanding_to_rtl)
1294 {
1295 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1296 return NULL;
1297 }
1298 else
1299 /* When expanding this BB might actually contain multiple
1300 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1301 Redirect all of those that match our label. */
1302 FOR_BB_INSNS (src, insn)
1303 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1304 old_label, target))
1305 return NULL;
1306
1307 if (dump_file)
1308 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1309 e->src->index, e->dest->index, target->index);
1310
1311 if (e->dest != target)
1312 e = redirect_edge_succ_nodup (e, target);
1313
1314 return e;
1315 }
1316
1317 /* Called when edge E has been redirected to a new destination,
1318 in order to update the region crossing flag on the edge and
1319 jump. */
1320
1321 static void
1322 fixup_partition_crossing (edge e)
1323 {
1324 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1325 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1326 return;
1327 /* If we redirected an existing edge, it may already be marked
1328 crossing, even though the new src is missing a reg crossing note.
1329 But make sure reg crossing note doesn't already exist before
1330 inserting. */
1331 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1332 {
1333 e->flags |= EDGE_CROSSING;
1334 if (JUMP_P (BB_END (e->src))
1335 && !CROSSING_JUMP_P (BB_END (e->src)))
1336 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1337 }
1338 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1339 {
1340 e->flags &= ~EDGE_CROSSING;
1341 /* Remove the section crossing note from jump at end of
1342 src if it exists, and if no other successors are
1343 still crossing. */
1344 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1345 {
1346 bool has_crossing_succ = false;
1347 edge e2;
1348 edge_iterator ei;
1349 FOR_EACH_EDGE (e2, ei, e->src->succs)
1350 {
1351 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1352 if (has_crossing_succ)
1353 break;
1354 }
1355 if (!has_crossing_succ)
1356 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1357 }
1358 }
1359 }
1360
1361 /* Called when block BB has been reassigned to the cold partition,
1362 because it is now dominated by another cold block,
1363 to ensure that the region crossing attributes are updated. */
1364
1365 static void
1366 fixup_new_cold_bb (basic_block bb)
1367 {
1368 edge e;
1369 edge_iterator ei;
1370
1371 /* This is called when a hot bb is found to now be dominated
1372 by a cold bb and therefore needs to become cold. Therefore,
1373 its preds will no longer be region crossing. Any non-dominating
1374 preds that were previously hot would also have become cold
1375 in the caller for the same region. Any preds that were previously
1376 region-crossing will be adjusted in fixup_partition_crossing. */
1377 FOR_EACH_EDGE (e, ei, bb->preds)
1378 {
1379 fixup_partition_crossing (e);
1380 }
1381
1382 /* Possibly need to make bb's successor edges region crossing,
1383 or remove stale region crossing. */
1384 FOR_EACH_EDGE (e, ei, bb->succs)
1385 {
1386 /* We can't have fall-through edges across partition boundaries.
1387 Note that force_nonfallthru will do any necessary partition
1388 boundary fixup by calling fixup_partition_crossing itself. */
1389 if ((e->flags & EDGE_FALLTHRU)
1390 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1391 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1392 force_nonfallthru (e);
1393 else
1394 fixup_partition_crossing (e);
1395 }
1396 }
1397
1398 /* Attempt to change code to redirect edge E to TARGET.  Don't do that at
1399    the expense of adding new instructions or reordering basic blocks.
1400 
1401    The function can also be called with the edge destination equal to TARGET.
1402    Then it should try the simplifications and do nothing if none is possible.
1403 
1404    Return the edge representing the branch if the transformation succeeded.
1405    Return NULL on failure.
1406    We still return NULL in case E already pointed to TARGET and we didn't
1407    manage to simplify the instruction stream. */
1408
1409 static edge
1410 rtl_redirect_edge_and_branch (edge e, basic_block target)
1411 {
1412 edge ret;
1413 basic_block src = e->src;
1414 basic_block dest = e->dest;
1415
1416 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1417 return NULL;
1418
1419 if (dest == target)
1420 return e;
1421
1422 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1423 {
1424 df_set_bb_dirty (src);
1425 fixup_partition_crossing (ret);
1426 return ret;
1427 }
1428
1429 ret = redirect_branch_edge (e, target);
1430 if (!ret)
1431 return NULL;
1432
1433 df_set_bb_dirty (src);
1434 fixup_partition_crossing (ret);
1435 return ret;
1436 }
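/* Illustrative sketch (editorial addition, not part of the original file):
   the common redirection idiom seen in callers.  Try the cheap redirection
   first; if it fails, force it at the cost of a new jump and possibly a new
   block, mirroring rtl_redirect_edge_and_branch_force further below.  */
#if 0
static void
example_redirect (edge e, basic_block target)
{
  if (!redirect_edge_and_branch (e, target))
    redirect_edge_and_branch_force (e, target);
}
#endif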
1437
1438 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1439
1440 void
1441 emit_barrier_after_bb (basic_block bb)
1442 {
1443 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1444 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1445 || current_ir_type () == IR_RTL_CFGLAYOUT);
1446 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1447 {
1448 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1449
1450 if (BB_FOOTER (bb))
1451 {
1452 rtx_insn *footer_tail = BB_FOOTER (bb);
1453
1454 while (NEXT_INSN (footer_tail))
1455 footer_tail = NEXT_INSN (footer_tail);
1456 if (!BARRIER_P (footer_tail))
1457 {
1458 SET_NEXT_INSN (footer_tail) = insn;
1459 SET_PREV_INSN (insn) = footer_tail;
1460 }
1461 }
1462 else
1463 BB_FOOTER (bb) = insn;
1464 }
1465 }
1466
1467 /* Like force_nonfallthru below, but additionally performs redirection.
1468    Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
1469    when redirecting to the EXIT_BLOCK; it is either ret_rtx or
1470    simple_return_rtx, indicating which kind of returnjump to create.
1471    It should be NULL otherwise. */
1472
1473 basic_block
1474 force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1475 {
1476 basic_block jump_block, new_bb = NULL, src = e->src;
1477 rtx note;
1478 edge new_edge;
1479 int abnormal_edge_flags = 0;
1480 bool asm_goto_edge = false;
1481 int loc;
1482
1483 /* In case the last instruction is a conditional jump to the next
1484 instruction, first redirect the jump itself and then continue
1485 by creating a basic block afterwards to redirect fallthru edge. */
1486 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1487 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1488 && any_condjump_p (BB_END (e->src))
1489 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1490 {
1491 rtx note;
1492 edge b = unchecked_make_edge (e->src, target, 0);
1493 bool redirected;
1494
1495 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1496 block_label (target), 0);
1497 gcc_assert (redirected);
1498
1499 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1500 if (note)
1501 {
1502 int prob = XINT (note, 0);
1503
1504 b->probability = prob;
1505 /* Update this to use GCOV_COMPUTE_SCALE. */
1506 b->count = e->count * prob / REG_BR_PROB_BASE;
1507 e->probability -= e->probability;
1508 e->count -= b->count;
1509 if (e->probability < 0)
1510 e->probability = 0;
1511 if (e->count < 0)
1512 e->count = 0;
1513 }
1514 }
1515
1516 if (e->flags & EDGE_ABNORMAL)
1517 {
1518 /* Irritating special case - fallthru edge to the same block as abnormal
1519 edge.
1520 We can't redirect abnormal edge, but we still can split the fallthru
1521 one and create separate abnormal edge to original destination.
1522 This allows bb-reorder to make such edge non-fallthru. */
1523 gcc_assert (e->dest == target);
1524 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1525 e->flags &= EDGE_FALLTHRU;
1526 }
1527 else
1528 {
1529 gcc_assert (e->flags & EDGE_FALLTHRU);
1530 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1531 {
1532 /* We can't redirect the entry block. Create an empty block
1533 at the start of the function which we use to add the new
1534 jump. */
1535 edge tmp;
1536 edge_iterator ei;
1537 bool found = false;
1538
1539 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1540 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1541
1542 /* Change the existing edge's source to be the new block, and add
1543 a new edge from the entry block to the new block. */
1544 e->src = bb;
1545 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1546 (tmp = ei_safe_edge (ei)); )
1547 {
1548 if (tmp == e)
1549 {
1550 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1551 found = true;
1552 break;
1553 }
1554 else
1555 ei_next (&ei);
1556 }
1557
1558 gcc_assert (found);
1559
1560 vec_safe_push (bb->succs, e);
1561 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1562 EDGE_FALLTHRU);
1563 }
1564 }
1565
1566 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1567 don't point to the target or fallthru label. */
1568 if (JUMP_P (BB_END (e->src))
1569 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1570 && (e->flags & EDGE_FALLTHRU)
1571 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1572 {
1573 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1574 bool adjust_jump_target = false;
1575
1576 for (i = 0; i < n; ++i)
1577 {
1578 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1579 {
1580 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1581 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1582 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1583 adjust_jump_target = true;
1584 }
1585 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1586 asm_goto_edge = true;
1587 }
1588 if (adjust_jump_target)
1589 {
1590 rtx_insn *insn = BB_END (e->src);
1591 rtx note;
1592 rtx_insn *old_label = BB_HEAD (e->dest);
1593 rtx_insn *new_label = BB_HEAD (target);
1594
1595 if (JUMP_LABEL (insn) == old_label)
1596 {
1597 JUMP_LABEL (insn) = new_label;
1598 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1599 if (note)
1600 remove_note (insn, note);
1601 }
1602 else
1603 {
1604 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1605 if (note)
1606 remove_note (insn, note);
1607 if (JUMP_LABEL (insn) != new_label
1608 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1609 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1610 }
1611 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1612 != NULL_RTX)
1613 XEXP (note, 0) = new_label;
1614 }
1615 }
1616
1617 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1618 {
1619 rtx_insn *new_head;
1620 gcov_type count = e->count;
1621 int probability = e->probability;
1622 /* Create the new structures. */
1623
1624 /* If the old block ended with a tablejump, skip its table
1625 by searching forward from there. Otherwise start searching
1626 forward from the last instruction of the old block. */
1627 rtx_jump_table_data *table;
1628 if (tablejump_p (BB_END (e->src), NULL, &table))
1629 new_head = table;
1630 else
1631 new_head = BB_END (e->src);
1632 new_head = NEXT_INSN (new_head);
1633
1634 jump_block = create_basic_block (new_head, NULL, e->src);
1635 jump_block->count = count;
1636 jump_block->frequency = EDGE_FREQUENCY (e);
1637
1638 /* Make sure new block ends up in correct hot/cold section. */
1639
1640 BB_COPY_PARTITION (jump_block, e->src);
1641
1642 /* Wire edge in. */
1643 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1644 new_edge->probability = probability;
1645 new_edge->count = count;
1646
1647 /* Redirect old edge. */
1648 redirect_edge_pred (e, jump_block);
1649 e->probability = REG_BR_PROB_BASE;
1650
1651 /* If e->src was previously region crossing, it no longer is
1652 and the reg crossing note should be removed. */
1653 fixup_partition_crossing (new_edge);
1654
1655 /* If asm goto has any label refs to target's label,
1656 add also edge from asm goto bb to target. */
1657 if (asm_goto_edge)
1658 {
1659 new_edge->probability /= 2;
1660 new_edge->count /= 2;
1661 jump_block->count /= 2;
1662 jump_block->frequency /= 2;
1663 new_edge = make_edge (new_edge->src, target,
1664 e->flags & ~EDGE_FALLTHRU);
1665 new_edge->probability = probability - probability / 2;
1666 new_edge->count = count - count / 2;
1667 }
1668
1669 new_bb = jump_block;
1670 }
1671 else
1672 jump_block = e->src;
1673
1674 loc = e->goto_locus;
1675 e->flags &= ~EDGE_FALLTHRU;
1676 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1677 {
1678 if (jump_label == ret_rtx)
1679 emit_jump_insn_after_setloc (targetm.gen_return (),
1680 BB_END (jump_block), loc);
1681 else
1682 {
1683 gcc_assert (jump_label == simple_return_rtx);
1684 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1685 BB_END (jump_block), loc);
1686 }
1687 set_return_jump_label (BB_END (jump_block));
1688 }
1689 else
1690 {
1691 rtx_code_label *label = block_label (target);
1692 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1693 BB_END (jump_block), loc);
1694 JUMP_LABEL (BB_END (jump_block)) = label;
1695 LABEL_NUSES (label)++;
1696 }
1697
1698 /* We might be in cfg layout mode, and if so, the following routine will
1699 insert the barrier correctly. */
1700 emit_barrier_after_bb (jump_block);
1701 redirect_edge_succ_nodup (e, target);
1702
1703 if (abnormal_edge_flags)
1704 make_edge (src, target, abnormal_edge_flags);
1705
1706 df_mark_solutions_dirty ();
1707 fixup_partition_crossing (e);
1708 return new_bb;
1709 }
1710
1711 /* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
1712    (and possibly create a new basic block) to make the edge non-fallthru.
1713    Return the newly created BB, or NULL if none was created. */
1714
1715 static basic_block
1716 rtl_force_nonfallthru (edge e)
1717 {
1718 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1719 }
1720
1721 /* Redirect edge even at the expense of creating new jump insn or
1722 basic block. Return new basic block if created, NULL otherwise.
1723 Conversion must be possible. */
1724
1725 static basic_block
1726 rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1727 {
1728 if (redirect_edge_and_branch (e, target)
1729 || e->dest == target)
1730 return NULL;
1731
1732 /* In case the edge redirection failed, try to force it to be non-fallthru
1733 and redirect newly created simplejump. */
1734 df_set_bb_dirty (e->src);
1735 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1736 }
1737
1738 /* The given edge should potentially be a fallthru edge. If that is in
1739 fact true, delete the jump and barriers that are in the way. */
1740
1741 static void
1742 rtl_tidy_fallthru_edge (edge e)
1743 {
1744 rtx_insn *q;
1745 basic_block b = e->src, c = b->next_bb;
1746
1747 /* ??? In a late-running flow pass, other folks may have deleted basic
1748 blocks by nopping out blocks, leaving multiple BARRIERs between here
1749 and the target label. They ought to be chastised and fixed.
1750
1751 We can also wind up with a sequence of undeletable labels between
1752 one block and the next.
1753
1754 So search through a sequence of barriers, labels, and notes for
1755 the head of block C and assert that we really do fall through. */
1756
1757 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1758 if (INSN_P (q))
1759 return;
1760
1761 /* Remove what will soon cease being the jump insn from the source block.
1762 If block B consisted only of this single jump, turn it into a deleted
1763 note. */
1764 q = BB_END (b);
1765 if (JUMP_P (q)
1766 && onlyjump_p (q)
1767 && (any_uncondjump_p (q)
1768 || single_succ_p (b)))
1769 {
1770 rtx label;
1771 rtx_jump_table_data *table;
1772
1773 if (tablejump_p (q, &label, &table))
1774 {
1775 /* The label is likely mentioned in some instruction before
1776 the tablejump and might not be DCEd, so turn it into
1777 a note instead and move before the tablejump that is going to
1778 be deleted. */
1779 const char *name = LABEL_NAME (label);
1780 PUT_CODE (label, NOTE);
1781 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1782 NOTE_DELETED_LABEL_NAME (label) = name;
1783 rtx_insn *lab = safe_as_a <rtx_insn *> (label);
1784 reorder_insns (lab, lab, PREV_INSN (q));
1785 delete_insn (table);
1786 }
1787
1788 /* If this was a conditional jump, we need to also delete
1789 the insn that set cc0. */
1790 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1791 q = PREV_INSN (q);
1792
1793 q = PREV_INSN (q);
1794 }
1795
1796 /* Selectively unlink the sequence. */
1797 if (q != PREV_INSN (BB_HEAD (c)))
1798 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1799
1800 e->flags |= EDGE_FALLTHRU;
1801 }
1802 \f
1803 /* Should move basic block BB after basic block AFTER. NIY. */
1804
1805 static bool
1806 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1807 basic_block after ATTRIBUTE_UNUSED)
1808 {
1809 return false;
1810 }
1811
1812 /* Locate the last bb in the same partition as START_BB. */
1813
1814 static basic_block
1815 last_bb_in_partition (basic_block start_bb)
1816 {
1817 basic_block bb;
1818 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1819 {
1820 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1821 return bb;
1822 }
1823 /* Return bb before the exit block. */
1824 return bb->prev_bb;
1825 }
1826
1827 /* Split a (typically critical) edge. Return the new block.
1828 The edge must not be abnormal.
1829
1830 ??? The code generally expects to be called on critical edges.
1831 The case of a block ending in an unconditional jump to a
1832 block with multiple predecessors is not handled optimally. */
1833
1834 static basic_block
1835 rtl_split_edge (edge edge_in)
1836 {
1837 basic_block bb, new_bb;
1838 rtx_insn *before;
1839
1840 /* Abnormal edges cannot be split. */
1841 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1842
1843 /* We are going to place the new block in front of edge destination.
1844 Avoid existence of fallthru predecessors. */
1845 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1846 {
1847 edge e = find_fallthru_edge (edge_in->dest->preds);
1848
1849 if (e)
1850 force_nonfallthru (e);
1851 }
1852
1853 /* Create the basic block note. */
1854 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1855 before = BB_HEAD (edge_in->dest);
1856 else
1857 before = NULL;
1858
1859 /* If this is a fall through edge to the exit block, the blocks might be
1860 not adjacent, and the right place is after the source. */
1861 if ((edge_in->flags & EDGE_FALLTHRU)
1862 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1863 {
1864 before = NEXT_INSN (BB_END (edge_in->src));
1865 bb = create_basic_block (before, NULL, edge_in->src);
1866 BB_COPY_PARTITION (bb, edge_in->src);
1867 }
1868 else
1869 {
1870 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1871 {
1872 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1873 BB_COPY_PARTITION (bb, edge_in->dest);
1874 }
1875 else
1876 {
1877 basic_block after = edge_in->dest->prev_bb;
1878 /* If this is post-bb reordering, and the edge crosses a partition
1879 boundary, the new block needs to be inserted in the bb chain
1880 at the end of the src partition (since we put the new bb into
1881 that partition, see below). Otherwise we may end up creating
1882 an extra partition crossing in the chain, which is illegal.
1883 It can't go after the src, because src may have a fall-through
1884 to a different block. */
1885 if (crtl->bb_reorder_complete
1886 && (edge_in->flags & EDGE_CROSSING))
1887 {
1888 after = last_bb_in_partition (edge_in->src);
1889 before = get_last_bb_insn (after);
1890 /* The instruction following the last bb in partition should
1891 be a barrier, since it cannot end in a fall-through. */
1892 gcc_checking_assert (BARRIER_P (before));
1893 before = NEXT_INSN (before);
1894 }
1895 bb = create_basic_block (before, NULL, after);
1896 /* Put the split bb into the src partition, to avoid creating
1897 a situation where a cold bb dominates a hot bb, in the case
1898 where src is cold and dest is hot. The src will dominate
1899 the new bb (whereas it might not have dominated dest). */
1900 BB_COPY_PARTITION (bb, edge_in->src);
1901 }
1902 }
1903
1904 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1905
1906 /* Can't allow a region crossing edge to be fallthrough. */
1907 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1908 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1909 {
1910 new_bb = force_nonfallthru (single_succ_edge (bb));
1911 gcc_assert (!new_bb);
1912 }
1913
1914 /* For non-fallthru edges, we must adjust the predecessor's
1915 jump instruction to target our new block. */
1916 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1917 {
1918 edge redirected = redirect_edge_and_branch (edge_in, bb);
1919 gcc_assert (redirected);
1920 }
1921 else
1922 {
1923 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1924 {
1925 /* For an asm goto even splitting of a fallthru edge might
1926 need insn patching, as other labels might point to the
1927 old label. */
1928 rtx_insn *last = BB_END (edge_in->src);
1929 if (last
1930 && JUMP_P (last)
1931 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1932 && extract_asm_operands (PATTERN (last)) != NULL_RTX
1933 && patch_jump_insn (last, before, bb))
1934 df_set_bb_dirty (edge_in->src);
1935 }
1936 redirect_edge_succ (edge_in, bb);
1937 }
1938
1939 return bb;
1940 }
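
/* Illustrative usage sketch: callers normally reach rtl_split_edge through
   the generic cfg hook rather than by calling it directly, e.g.

       basic_block new_bb = split_edge (e);

   which dispatches here while the function is in RTL form.  The returned
   block has the edge's original destination as its single successor.  */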
1941
1942 /* Queue instructions for insertion on an edge between two basic blocks.
1943 The new instructions and basic blocks (if any) will not appear in the
1944 CFG until commit_edge_insertions is called. */
1945
1946 void
1947 insert_insn_on_edge (rtx pattern, edge e)
1948 {
1949 /* We cannot insert instructions on an abnormal critical edge.
1950 It will be easier to find the culprit if we die now. */
1951 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1952
1953 if (e->insns.r == NULL_RTX)
1954 start_sequence ();
1955 else
1956 push_to_sequence (e->insns.r);
1957
1958 emit_insn (pattern);
1959
1960 e->insns.r = get_insns ();
1961 end_sequence ();
1962 }
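
/* A minimal usage sketch (illustrative only; REG is a hypothetical
   stand-in for whatever pattern the caller wants to emit):

       insert_insn_on_edge (gen_rtx_CLOBBER (VOIDmode, reg), e);
       ...
       commit_edge_insertions ();

   Nothing appears in the insn stream until commit_edge_insertions is
   called; it splits edges as needed and emits the queued sequences.  */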
1963
1964 /* Update the CFG for the instructions queued on edge E. */
1965
1966 void
1967 commit_one_edge_insertion (edge e)
1968 {
1969 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1970 basic_block bb;
1971
1972 /* Pull the insns off the edge now since the edge might go away. */
1973 insns = e->insns.r;
1974 e->insns.r = NULL;
1975
1976 /* Figure out where to put these insns. If the destination has
1977 one predecessor, insert there. Except for the exit block. */
1978 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1979 {
1980 bb = e->dest;
1981
1982 /* Get the location correct wrt a code label, and "nice" wrt
1983 a basic block note, and before everything else. */
1984 tmp = BB_HEAD (bb);
1985 if (LABEL_P (tmp))
1986 tmp = NEXT_INSN (tmp);
1987 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1988 tmp = NEXT_INSN (tmp);
1989 if (tmp == BB_HEAD (bb))
1990 before = tmp;
1991 else if (tmp)
1992 after = PREV_INSN (tmp);
1993 else
1994 after = get_last_insn ();
1995 }
1996
1997 /* If the source has one successor and the edge is not abnormal,
1998 insert there. Except for the entry block.
1999 Don't do this if the predecessor ends in a jump other than an
2000 unconditional simple jump. E.g. for an asm goto that points all
2001 its labels at the fallthru basic block, we can't insert instructions
2002 before the asm goto, as the asm goto can have various side effects,
2003 and we can't emit instructions after the asm goto, as it must end
2004 the basic block. */
2005 else if ((e->flags & EDGE_ABNORMAL) == 0
2006 && single_succ_p (e->src)
2007 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2008 && (!JUMP_P (BB_END (e->src))
2009 || simplejump_p (BB_END (e->src))))
2010 {
2011 bb = e->src;
2012
2013 /* It is possible to have a non-simple jump here. Consider a target
2014 where some forms of unconditional jumps clobber a register. This
2015 happens on the fr30 for example.
2016
2017 We know this block has a single successor, so we can just emit
2018 the queued insns before the jump. */
2019 if (JUMP_P (BB_END (bb)))
2020 before = BB_END (bb);
2021 else
2022 {
2023 /* We'd better be fallthru, or we've lost track of what's what. */
2024 gcc_assert (e->flags & EDGE_FALLTHRU);
2025
2026 after = BB_END (bb);
2027 }
2028 }
2029
2030 /* Otherwise we must split the edge. */
2031 else
2032 {
2033 bb = split_edge (e);
2034
2035 /* If E crossed a partition boundary, we needed to make bb end in
2036 a region-crossing jump, even though it was originally fallthru. */
2037 if (JUMP_P (BB_END (bb)))
2038 before = BB_END (bb);
2039 else
2040 after = BB_END (bb);
2041 }
2042
2043 /* Now that we've found the spot, do the insertion. */
2044 if (before)
2045 {
2046 emit_insn_before_noloc (insns, before, bb);
2047 last = prev_nonnote_insn (before);
2048 }
2049 else
2050 last = emit_insn_after_noloc (insns, after, bb);
2051
2052 if (returnjump_p (last))
2053 {
2054 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2055 This is not currently a problem because this only happens
2056 for the (single) epilogue, which already has a fallthru edge
2057 to EXIT. */
2058
2059 e = single_succ_edge (bb);
2060 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2061 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2062
2063 e->flags &= ~EDGE_FALLTHRU;
2064 emit_barrier_after (last);
2065
2066 if (before)
2067 delete_insn (before);
2068 }
2069 else
2070 gcc_assert (!JUMP_P (last));
2071 }
2072
2073 /* Update the CFG for all queued instructions. */
2074
2075 void
2076 commit_edge_insertions (void)
2077 {
2078 basic_block bb;
2079
2080 /* Optimization passes that invoke this routine can cause hot blocks
2081 previously reached by both hot and cold blocks to become dominated only
2082 by cold blocks. This will cause the verification below to fail,
2083 and lead to now-cold code in the hot section. In some cases this
2084 may only be visible after newly unreachable blocks are deleted,
2085 which will be done by fixup_partitions. */
2086 fixup_partitions ();
2087
2088 checking_verify_flow_info ();
2089
2090 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2091 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2092 {
2093 edge e;
2094 edge_iterator ei;
2095
2096 FOR_EACH_EDGE (e, ei, bb->succs)
2097 if (e->insns.r)
2098 commit_one_edge_insertion (e);
2099 }
2100 }
2101 \f
2102
2103 /* Print out RTL-specific basic block information (live information
2104 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2105 documented in dumpfile.h. */
2106
2107 static void
2108 rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
2109 {
2110 rtx_insn *insn;
2111 rtx_insn *last;
2112 char *s_indent;
2113
2114 s_indent = (char *) alloca ((size_t) indent + 1);
2115 memset (s_indent, ' ', (size_t) indent);
2116 s_indent[indent] = '\0';
2117
2118 if (df && (flags & TDF_DETAILS))
2119 {
2120 df_dump_top (bb, outf);
2121 putc ('\n', outf);
2122 }
2123
2124 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2125 for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
2126 insn = NEXT_INSN (insn))
2127 {
2128 if (flags & TDF_DETAILS)
2129 df_dump_insn_top (insn, outf);
2130 if (! (flags & TDF_SLIM))
2131 print_rtl_single (outf, insn);
2132 else
2133 dump_insn_slim (outf, insn);
2134 if (flags & TDF_DETAILS)
2135 df_dump_insn_bottom (insn, outf);
2136 }
2137
2138 if (df && (flags & TDF_DETAILS))
2139 {
2140 df_dump_bottom (bb, outf);
2141 putc ('\n', outf);
2142 }
2143
2144 }
2145 \f
2146 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2147 for the start of each basic block. FLAGS are the TDF_* masks documented
2148 in dumpfile.h. */
2149
2150 void
2151 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, int flags)
2152 {
2153 const rtx_insn *tmp_rtx;
2154 if (rtx_first == 0)
2155 fprintf (outf, "(nil)\n");
2156 else
2157 {
2158 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2159 int max_uid = get_max_uid ();
2160 basic_block *start = XCNEWVEC (basic_block, max_uid);
2161 basic_block *end = XCNEWVEC (basic_block, max_uid);
2162 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2163 basic_block bb;
2164
2165 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2166 insns, but the CFG is not maintained so the basic block info
2167 is not reliable. Therefore it's omitted from the dumps. */
2168 if (! (cfun->curr_properties & PROP_cfg))
2169 flags &= ~TDF_BLOCKS;
2170
2171 if (df)
2172 df_dump_start (outf);
2173
2174 if (flags & TDF_BLOCKS)
2175 {
2176 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2177 {
2178 rtx_insn *x;
2179
2180 start[INSN_UID (BB_HEAD (bb))] = bb;
2181 end[INSN_UID (BB_END (bb))] = bb;
2182 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2183 {
2184 enum bb_state state = IN_MULTIPLE_BB;
2185
2186 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2187 state = IN_ONE_BB;
2188 in_bb_p[INSN_UID (x)] = state;
2189
2190 if (x == BB_END (bb))
2191 break;
2192 }
2193 }
2194 }
2195
2196 for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
2197 {
2198 if (flags & TDF_BLOCKS)
2199 {
2200 bb = start[INSN_UID (tmp_rtx)];
2201 if (bb != NULL)
2202 {
2203 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
2204 if (df && (flags & TDF_DETAILS))
2205 df_dump_top (bb, outf);
2206 }
2207
2208 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2209 && !NOTE_P (tmp_rtx)
2210 && !BARRIER_P (tmp_rtx))
2211 fprintf (outf, ";; Insn is not within a basic block\n");
2212 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2213 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2214 }
2215
2216 if (flags & TDF_DETAILS)
2217 df_dump_insn_top (tmp_rtx, outf);
2218 if (! (flags & TDF_SLIM))
2219 print_rtl_single (outf, tmp_rtx);
2220 else
2221 dump_insn_slim (outf, tmp_rtx);
2222 if (flags & TDF_DETAILS)
2223 df_dump_insn_bottom (tmp_rtx, outf);
2224
2225 if (flags & TDF_BLOCKS)
2226 {
2227 bb = end[INSN_UID (tmp_rtx)];
2228 if (bb != NULL)
2229 {
2230 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
2231 if (df && (flags & TDF_DETAILS))
2232 df_dump_bottom (bb, outf);
2233 putc ('\n', outf);
2234 }
2235 }
2236 }
2237
2238 free (start);
2239 free (end);
2240 free (in_bb_p);
2241 }
2242 }
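
/* A typical debugging call (the same form is used by rtl_verify_edges
   below when it finds a bogus region-crossing jump):

       print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
*/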
2243 \f
2244 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2245
2246 void
2247 update_br_prob_note (basic_block bb)
2248 {
2249 rtx note;
2250 if (!JUMP_P (BB_END (bb)))
2251 return;
2252 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2253 if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
2254 return;
2255 XINT (note, 0) = BRANCH_EDGE (bb)->probability;
2256 }
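
/* For reference, the note stores the taken probability of the branch edge
   scaled by REG_BR_PROB_BASE.  For example (illustrative numbers), after

       update_br_prob_note (bb);

   a value of XINT (note, 0) == 9000 with REG_BR_PROB_BASE == 10000 means
   the branch edge is predicted taken 90% of the time.  */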
2257
2258 /* Get the last insn associated with block BB (that includes barriers and
2259 tablejumps after BB). */
2260 rtx_insn *
2261 get_last_bb_insn (basic_block bb)
2262 {
2263 rtx_jump_table_data *table;
2264 rtx_insn *tmp;
2265 rtx_insn *end = BB_END (bb);
2266
2267 /* Include any jump table following the basic block. */
2268 if (tablejump_p (end, NULL, &table))
2269 end = table;
2270
2271 /* Include any barriers that may follow the basic block. */
2272 tmp = next_nonnote_insn_bb (end);
2273 while (tmp && BARRIER_P (tmp))
2274 {
2275 end = tmp;
2276 tmp = next_nonnote_insn_bb (end);
2277 }
2278
2279 return end;
2280 }
2281
2282 /* Sanity check partition hotness to ensure that basic blocks in
2283   the cold partition don't dominate basic blocks in the hot partition.
2284 If FLAG_ONLY is true, report violations as errors. Otherwise
2285 re-mark the dominated blocks as cold, since this is run after
2286 cfg optimizations that may make hot blocks previously reached
2287 by both hot and cold blocks now only reachable along cold paths. */
2288
2289 static vec<basic_block>
2290 find_partition_fixes (bool flag_only)
2291 {
2292 basic_block bb;
2293 vec<basic_block> bbs_in_cold_partition = vNULL;
2294 vec<basic_block> bbs_to_fix = vNULL;
2295
2296 /* Callers check this. */
2297 gcc_checking_assert (crtl->has_bb_partition);
2298
2299 FOR_EACH_BB_FN (bb, cfun)
2300 if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
2301 bbs_in_cold_partition.safe_push (bb);
2302
2303 if (bbs_in_cold_partition.is_empty ())
2304 return vNULL;
2305
2306 bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
2307
2308 if (dom_calculated_here)
2309 calculate_dominance_info (CDI_DOMINATORS);
2310
2311 while (! bbs_in_cold_partition.is_empty ())
2312 {
2313 bb = bbs_in_cold_partition.pop ();
2314 /* Any blocks dominated by a block in the cold section
2315 must also be cold. */
2316 basic_block son;
2317 for (son = first_dom_son (CDI_DOMINATORS, bb);
2318 son;
2319 son = next_dom_son (CDI_DOMINATORS, son))
2320 {
2321 /* If son is not yet cold, then mark it cold here and
2322 enqueue it for further processing. */
2323 if ((BB_PARTITION (son) != BB_COLD_PARTITION))
2324 {
2325 if (flag_only)
2326 error ("non-cold basic block %d dominated "
2327 "by a block in the cold partition (%d)", son->index, bb->index);
2328 else
2329 BB_SET_PARTITION (son, BB_COLD_PARTITION);
2330 bbs_to_fix.safe_push (son);
2331 bbs_in_cold_partition.safe_push (son);
2332 }
2333 }
2334 }
2335
2336 if (dom_calculated_here)
2337 free_dominance_info (CDI_DOMINATORS);
2338
2339 return bbs_to_fix;
2340 }
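
/* A small worked illustration of the invariant being restored here
   (hypothetical block numbers): if bb 7 is cold and is the immediate
   dominator of bb 8, every path to bb 8 passes through cold code; with
   FLAG_ONLY false bb 8 is therefore re-marked BB_COLD_PARTITION and pushed
   back on the worklist so its own dominated blocks are processed in turn,
   and with FLAG_ONLY true the same situation is reported as an error.  */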
2341
2342 /* Perform cleanup on the hot/cold bb partitioning after optimization
2343 passes that modify the cfg. */
2344
2345 void
2346 fixup_partitions (void)
2347 {
2348 basic_block bb;
2349
2350 if (!crtl->has_bb_partition)
2351 return;
2352
2353 /* Delete any blocks that became unreachable and weren't
2354 already cleaned up, for example during edge forwarding
2355 and convert_jumps_to_returns. This will expose more
2356 opportunities for fixing the partition boundaries here.
2357 Also, the calculation of the dominance graph during verification
2358 will assert if there are unreachable nodes. */
2359 delete_unreachable_blocks ();
2360
2361 /* If there are partitions, do a sanity check on them: A basic block in
2362   a cold partition cannot dominate a basic block in a hot partition.
2363 Fixup any that now violate this requirement, as a result of edge
2364 forwarding and unreachable block deletion.  */
2365 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2366
2367 /* Do the partition fixup after all necessary blocks have been converted to
2368 cold, so that we only update the region crossings in the minimum number of
2369 places, which can require forcing edges to be non-fallthru. */
2370 while (! bbs_to_fix.is_empty ())
2371 {
2372 bb = bbs_to_fix.pop ();
2373 fixup_new_cold_bb (bb);
2374 }
2375 }
2376
2377 /* Verify, in the basic block chain, that there is at most one switch
2378 between hot/cold partitions. This condition will not be true until
2379 after reorder_basic_blocks is called. */
2380
2381 static int
2382 verify_hot_cold_block_grouping (void)
2383 {
2384 basic_block bb;
2385 int err = 0;
2386 bool switched_sections = false;
2387 int current_partition = BB_UNPARTITIONED;
2388
2389 /* Even after bb reordering is complete, we go into cfglayout mode
2390 again (in compgoto). Ensure we don't call this until we are back
2391 in linearized RTL, when any layout fixes will have been committed. */
2392 if (!crtl->bb_reorder_complete
2393 || current_ir_type () != IR_RTL_CFGRTL)
2394 return err;
2395
2396 FOR_EACH_BB_FN (bb, cfun)
2397 {
2398 if (current_partition != BB_UNPARTITIONED
2399 && BB_PARTITION (bb) != current_partition)
2400 {
2401 if (switched_sections)
2402 {
2403 error ("multiple hot/cold transitions found (bb %i)",
2404 bb->index);
2405 err = 1;
2406 }
2407 else
2408 switched_sections = true;
2409
2410 if (!crtl->has_bb_partition)
2411 error ("partition found but function partition flag not set");
2412 }
2413 current_partition = BB_PARTITION (bb);
2414 }
2415
2416 return err;
2417 }
2418 \f
2419
2420 /* Perform several checks on the edges out of each block, such as
2421 the consistency of the branch probabilities, the correctness
2422 of hot/cold partition crossing edges, and the number of expected
2423 successor edges. Also verify that the dominance relationship
2424 between hot/cold blocks is sane. */
2425
2426 static int
2427 rtl_verify_edges (void)
2428 {
2429 int err = 0;
2430 basic_block bb;
2431
2432 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2433 {
2434 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2435 int n_eh = 0, n_abnormal = 0;
2436 edge e, fallthru = NULL;
2437 edge_iterator ei;
2438 rtx note;
2439 bool has_crossing_edge = false;
2440
2441 if (JUMP_P (BB_END (bb))
2442 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2443 && EDGE_COUNT (bb->succs) >= 2
2444 && any_condjump_p (BB_END (bb)))
2445 {
2446 if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
2447 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2448 {
2449 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2450 XINT (note, 0), BRANCH_EDGE (bb)->probability);
2451 err = 1;
2452 }
2453 }
2454
2455 FOR_EACH_EDGE (e, ei, bb->succs)
2456 {
2457 bool is_crossing;
2458
2459 if (e->flags & EDGE_FALLTHRU)
2460 n_fallthru++, fallthru = e;
2461
2462 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2463 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2464 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2465 has_crossing_edge |= is_crossing;
2466 if (e->flags & EDGE_CROSSING)
2467 {
2468 if (!is_crossing)
2469 {
2470 error ("EDGE_CROSSING incorrectly set across same section");
2471 err = 1;
2472 }
2473 if (e->flags & EDGE_FALLTHRU)
2474 {
2475 error ("fallthru edge crosses section boundary in bb %i",
2476 e->src->index);
2477 err = 1;
2478 }
2479 if (e->flags & EDGE_EH)
2480 {
2481 error ("EH edge crosses section boundary in bb %i",
2482 e->src->index);
2483 err = 1;
2484 }
2485 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2486 {
2487 error ("No region crossing jump at section boundary in bb %i",
2488 bb->index);
2489 err = 1;
2490 }
2491 }
2492 else if (is_crossing)
2493 {
2494 error ("EDGE_CROSSING missing across section boundary");
2495 err = 1;
2496 }
2497
2498 if ((e->flags & ~(EDGE_DFS_BACK
2499 | EDGE_CAN_FALLTHRU
2500 | EDGE_IRREDUCIBLE_LOOP
2501 | EDGE_LOOP_EXIT
2502 | EDGE_CROSSING
2503 | EDGE_PRESERVE)) == 0)
2504 n_branch++;
2505
2506 if (e->flags & EDGE_ABNORMAL_CALL)
2507 n_abnormal_call++;
2508
2509 if (e->flags & EDGE_SIBCALL)
2510 n_sibcall++;
2511
2512 if (e->flags & EDGE_EH)
2513 n_eh++;
2514
2515 if (e->flags & EDGE_ABNORMAL)
2516 n_abnormal++;
2517 }
2518
2519 if (!has_crossing_edge
2520 && JUMP_P (BB_END (bb))
2521 && CROSSING_JUMP_P (BB_END (bb)))
2522 {
2523 print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
2524 error ("Region crossing jump across same section in bb %i",
2525 bb->index);
2526 err = 1;
2527 }
2528
2529 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2530 {
2531 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2532 err = 1;
2533 }
2534 if (n_eh > 1)
2535 {
2536 error ("too many exception handling edges in bb %i", bb->index);
2537 err = 1;
2538 }
2539 if (n_branch
2540 && (!JUMP_P (BB_END (bb))
2541 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2542 || any_condjump_p (BB_END (bb))))))
2543 {
2544 error ("too many outgoing branch edges from bb %i", bb->index);
2545 err = 1;
2546 }
2547 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2548 {
2549 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2550 err = 1;
2551 }
2552 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2553 {
2554 error ("wrong number of branch edges after unconditional jump"
2555 " in bb %i", bb->index);
2556 err = 1;
2557 }
2558 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2559 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2560 {
2561 error ("wrong amount of branch edges after conditional jump"
2562 " in bb %i", bb->index);
2563 err = 1;
2564 }
2565 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2566 {
2567 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2568 err = 1;
2569 }
2570 if (n_sibcall && !CALL_P (BB_END (bb)))
2571 {
2572 error ("sibcall edges for non-call insn in bb %i", bb->index);
2573 err = 1;
2574 }
2575 if (n_abnormal > n_eh
2576 && !(CALL_P (BB_END (bb))
2577 && n_abnormal == n_abnormal_call + n_sibcall)
2578 && (!JUMP_P (BB_END (bb))
2579 || any_condjump_p (BB_END (bb))
2580 || any_uncondjump_p (BB_END (bb))))
2581 {
2582 error ("abnormal edges for no purpose in bb %i", bb->index);
2583 err = 1;
2584 }
2585 }
2586
2587 /* If there are partitions, do a sanity check on them: A basic block in
2588   a cold partition cannot dominate a basic block in a hot partition.  */
2589 if (crtl->has_bb_partition && !err)
2590 {
2591 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2592 err = !bbs_to_fix.is_empty ();
2593 }
2594
2595 /* Clean up. */
2596 return err;
2597 }
2598
2599 /* Checks on the instructions within blocks. Currently checks that each
2600 block starts with a basic block note, and that basic block notes and
2601 control flow jumps are not found in the middle of the block. */
2602
2603 static int
2604 rtl_verify_bb_insns (void)
2605 {
2606 rtx_insn *x;
2607 int err = 0;
2608 basic_block bb;
2609
2610 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2611 {
2612 /* Now check the header of the basic
2613 block. It ought to contain an optional CODE_LABEL followed
2614 by a NOTE_BASIC_BLOCK. */
2615 x = BB_HEAD (bb);
2616 if (LABEL_P (x))
2617 {
2618 if (BB_END (bb) == x)
2619 {
2620 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2621 bb->index);
2622 err = 1;
2623 }
2624
2625 x = NEXT_INSN (x);
2626 }
2627
2628 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2629 {
2630 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2631 bb->index);
2632 err = 1;
2633 }
2634
2635 if (BB_END (bb) == x)
2636 /* Do checks for empty blocks here. */
2637 ;
2638 else
2639 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2640 {
2641 if (NOTE_INSN_BASIC_BLOCK_P (x))
2642 {
2643 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2644 INSN_UID (x), bb->index);
2645 err = 1;
2646 }
2647
2648 if (x == BB_END (bb))
2649 break;
2650
2651 if (control_flow_insn_p (x))
2652 {
2653 error ("in basic block %d:", bb->index);
2654 fatal_insn ("flow control insn inside a basic block", x);
2655 }
2656 }
2657 }
2658
2659 /* Clean up. */
2660 return err;
2661 }
2662
2663 /* Verify that block pointers for instructions in basic blocks, headers and
2664 footers are set appropriately. */
2665
2666 static int
2667 rtl_verify_bb_pointers (void)
2668 {
2669 int err = 0;
2670 basic_block bb;
2671
2672 /* Check the general integrity of the basic blocks. */
2673 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2674 {
2675 rtx_insn *insn;
2676
2677 if (!(bb->flags & BB_RTL))
2678 {
2679 error ("BB_RTL flag not set for block %d", bb->index);
2680 err = 1;
2681 }
2682
2683 FOR_BB_INSNS (bb, insn)
2684 if (BLOCK_FOR_INSN (insn) != bb)
2685 {
2686 error ("insn %d basic block pointer is %d, should be %d",
2687 INSN_UID (insn),
2688 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2689 bb->index);
2690 err = 1;
2691 }
2692
2693 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2694 if (!BARRIER_P (insn)
2695 && BLOCK_FOR_INSN (insn) != NULL)
2696 {
2697 error ("insn %d in header of bb %d has non-NULL basic block",
2698 INSN_UID (insn), bb->index);
2699 err = 1;
2700 }
2701 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2702 if (!BARRIER_P (insn)
2703 && BLOCK_FOR_INSN (insn) != NULL)
2704 {
2705 error ("insn %d in footer of bb %d has non-NULL basic block",
2706 INSN_UID (insn), bb->index);
2707 err = 1;
2708 }
2709 }
2710
2711 /* Clean up. */
2712 return err;
2713 }
2714
2715 /* Verify the CFG and RTL consistency common for both underlying RTL and
2716 cfglayout RTL.
2717
2718 Currently it does the following checks:
2719
2720 - overlapping of basic blocks
2721 - insns with wrong BLOCK_FOR_INSN pointers
2722 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2723 - tails of basic blocks (ensure that boundary is necessary)
2724 - scans the body of the basic block for JUMP_INSN, CODE_LABEL
2725 and NOTE_INSN_BASIC_BLOCK
2726 - verify that no fall_thru edge crosses hot/cold partition boundaries
2727 - verify that there are no pending RTL branch predictions
2728 - verify that hot blocks are not dominated by cold blocks
2729
2730 In the future it can be extended to check a lot of other stuff as well
2731 (reachability of basic blocks, life information, etc. etc.). */
2732
2733 static int
2734 rtl_verify_flow_info_1 (void)
2735 {
2736 int err = 0;
2737
2738 err |= rtl_verify_bb_pointers ();
2739
2740 err |= rtl_verify_bb_insns ();
2741
2742 err |= rtl_verify_edges ();
2743
2744 return err;
2745 }
2746
2747 /* Walk the instruction chain and verify that bb head/end pointers
2748 are correct, and that instructions are in exactly one bb and have
2749 correct block pointers. */
2750
2751 static int
2752 rtl_verify_bb_insn_chain (void)
2753 {
2754 basic_block bb;
2755 int err = 0;
2756 rtx_insn *x;
2757 rtx_insn *last_head = get_last_insn ();
2758 basic_block *bb_info;
2759 const int max_uid = get_max_uid ();
2760
2761 bb_info = XCNEWVEC (basic_block, max_uid);
2762
2763 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2764 {
2765 rtx_insn *head = BB_HEAD (bb);
2766 rtx_insn *end = BB_END (bb);
2767
2768 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2769 {
2770 /* Verify the end of the basic block is in the INSN chain. */
2771 if (x == end)
2772 break;
2773
2774 /* And that the code outside of basic blocks has NULL bb field. */
2775 if (!BARRIER_P (x)
2776 && BLOCK_FOR_INSN (x) != NULL)
2777 {
2778 error ("insn %d outside of basic blocks has non-NULL bb field",
2779 INSN_UID (x));
2780 err = 1;
2781 }
2782 }
2783
2784 if (!x)
2785 {
2786 error ("end insn %d for block %d not found in the insn stream",
2787 INSN_UID (end), bb->index);
2788 err = 1;
2789 }
2790
2791 /* Work backwards from the end to the head of the basic block
2792 to verify the head is in the RTL chain. */
2793 for (; x != NULL_RTX; x = PREV_INSN (x))
2794 {
2795 /* While walking over the insn chain, verify insns appear
2796 in only one basic block. */
2797 if (bb_info[INSN_UID (x)] != NULL)
2798 {
2799 error ("insn %d is in multiple basic blocks (%d and %d)",
2800 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2801 err = 1;
2802 }
2803
2804 bb_info[INSN_UID (x)] = bb;
2805
2806 if (x == head)
2807 break;
2808 }
2809 if (!x)
2810 {
2811 error ("head insn %d for block %d not found in the insn stream",
2812 INSN_UID (head), bb->index);
2813 err = 1;
2814 }
2815
2816 last_head = PREV_INSN (x);
2817 }
2818
2819 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2820 {
2821 /* Check that the code before the first basic block has NULL
2822 bb field. */
2823 if (!BARRIER_P (x)
2824 && BLOCK_FOR_INSN (x) != NULL)
2825 {
2826 error ("insn %d outside of basic blocks has non-NULL bb field",
2827 INSN_UID (x));
2828 err = 1;
2829 }
2830 }
2831 free (bb_info);
2832
2833 return err;
2834 }
2835
2836 /* Verify that fallthru edges point to adjacent blocks in layout order and
2837 that barriers exist after non-fallthru blocks. */
2838
2839 static int
2840 rtl_verify_fallthru (void)
2841 {
2842 basic_block bb;
2843 int err = 0;
2844
2845 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2846 {
2847 edge e;
2848
2849 e = find_fallthru_edge (bb->succs);
2850 if (!e)
2851 {
2852 rtx_insn *insn;
2853
2854 /* Ensure the existence of a barrier after a BB with no fallthru edges. */
2855 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2856 {
2857 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2858 {
2859 error ("missing barrier after block %i", bb->index);
2860 err = 1;
2861 break;
2862 }
2863 if (BARRIER_P (insn))
2864 break;
2865 }
2866 }
2867 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2868 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2869 {
2870 rtx_insn *insn;
2871
2872 if (e->src->next_bb != e->dest)
2873 {
2874 error
2875 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2876 e->src->index, e->dest->index);
2877 err = 1;
2878 }
2879 else
2880 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2881 insn = NEXT_INSN (insn))
2882 if (BARRIER_P (insn) || INSN_P (insn))
2883 {
2884 error ("verify_flow_info: Incorrect fallthru %i->%i",
2885 e->src->index, e->dest->index);
2886 fatal_insn ("wrong insn in the fallthru edge", insn);
2887 err = 1;
2888 }
2889 }
2890 }
2891
2892 return err;
2893 }
2894
2895 /* Verify that blocks are laid out in consecutive order. While walking the
2896 instructions, verify that all expected instructions are inside the basic
2897 blocks, and that all returns are followed by barriers. */
2898
2899 static int
2900 rtl_verify_bb_layout (void)
2901 {
2902 basic_block bb;
2903 int err = 0;
2904 rtx_insn *x;
2905 int num_bb_notes;
2906 rtx_insn * const rtx_first = get_insns ();
2907 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2908
2909 num_bb_notes = 0;
2910 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2911
2912 for (x = rtx_first; x; x = NEXT_INSN (x))
2913 {
2914 if (NOTE_INSN_BASIC_BLOCK_P (x))
2915 {
2916 bb = NOTE_BASIC_BLOCK (x);
2917
2918 num_bb_notes++;
2919 if (bb != last_bb_seen->next_bb)
2920 internal_error ("basic blocks not laid down consecutively");
2921
2922 curr_bb = last_bb_seen = bb;
2923 }
2924
2925 if (!curr_bb)
2926 {
2927 switch (GET_CODE (x))
2928 {
2929 case BARRIER:
2930 case NOTE:
2931 break;
2932
2933 case CODE_LABEL:
2934 /* An ADDR_VEC is placed outside any basic block. */
2935 if (NEXT_INSN (x)
2936 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2937 x = NEXT_INSN (x);
2938
2939 /* But in any case, non-deletable labels can appear anywhere. */
2940 break;
2941
2942 default:
2943 fatal_insn ("insn outside basic block", x);
2944 }
2945 }
2946
2947 if (JUMP_P (x)
2948 && returnjump_p (x) && ! condjump_p (x)
2949 && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
2950 fatal_insn ("return not followed by barrier", x);
2951
2952 if (curr_bb && x == BB_END (curr_bb))
2953 curr_bb = NULL;
2954 }
2955
2956 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2957 internal_error
2958 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2959 num_bb_notes, n_basic_blocks_for_fn (cfun));
2960
2961 return err;
2962 }
2963
2964 /* Verify the CFG and RTL consistency common for both underlying RTL and
2965 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2966
2967 Currently it does the following checks:
2968 - all checks of rtl_verify_flow_info_1
2969 - test head/end pointers
2970 - check that blocks are laid out in consecutive order
2971 - check that all insns are in the basic blocks
2972 (except the switch handling code, barriers and notes)
2973 - check that all returns are followed by barriers
2974 - check that all fallthru edges point to the adjacent blocks
2975 - verify that there is a single hot/cold partition boundary after bbro */
2976
2977 static int
2978 rtl_verify_flow_info (void)
2979 {
2980 int err = 0;
2981
2982 err |= rtl_verify_flow_info_1 ();
2983
2984 err |= rtl_verify_bb_insn_chain ();
2985
2986 err |= rtl_verify_fallthru ();
2987
2988 err |= rtl_verify_bb_layout ();
2989
2990 err |= verify_hot_cold_block_grouping ();
2991
2992 return err;
2993 }
2994 \f
2995 /* Assume that the preceding pass has possibly eliminated jump instructions
2996 or converted the unconditional jumps. Eliminate the now-dead edges from the CFG.
2997 Return true if any edges are eliminated. */
2998
2999 bool
3000 purge_dead_edges (basic_block bb)
3001 {
3002 edge e;
3003 rtx_insn *insn = BB_END (bb);
3004 rtx note;
3005 bool purged = false;
3006 bool found;
3007 edge_iterator ei;
3008
3009 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3010 do
3011 insn = PREV_INSN (insn);
3012 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3013
3014 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3015 if (NONJUMP_INSN_P (insn)
3016 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3017 {
3018 rtx eqnote;
3019
3020 if (! may_trap_p (PATTERN (insn))
3021 || ((eqnote = find_reg_equal_equiv_note (insn))
3022 && ! may_trap_p (XEXP (eqnote, 0))))
3023 remove_note (insn, note);
3024 }
3025
3026 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3027 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3028 {
3029 bool remove = false;
3030
3031 /* There are three types of edges we need to handle correctly here: EH
3032 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3033 latter can appear when nonlocal gotos are used. */
3034 if (e->flags & EDGE_ABNORMAL_CALL)
3035 {
3036 if (!CALL_P (insn))
3037 remove = true;
3038 else if (can_nonlocal_goto (insn))
3039 ;
3040 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3041 ;
3042 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3043 ;
3044 else
3045 remove = true;
3046 }
3047 else if (e->flags & EDGE_EH)
3048 remove = !can_throw_internal (insn);
3049
3050 if (remove)
3051 {
3052 remove_edge (e);
3053 df_set_bb_dirty (bb);
3054 purged = true;
3055 }
3056 else
3057 ei_next (&ei);
3058 }
3059
3060 if (JUMP_P (insn))
3061 {
3062 rtx note;
3063 edge b,f;
3064 edge_iterator ei;
3065
3066 /* We only care about conditional jumps and simplejumps. */
3067 if (!any_condjump_p (insn)
3068 && !returnjump_p (insn)
3069 && !simplejump_p (insn))
3070 return purged;
3071
3072 /* Branch probability/prediction notes are defined only for
3073 condjumps. We've possibly turned the condjump into a simplejump. */
3074 if (simplejump_p (insn))
3075 {
3076 note = find_reg_note (insn, REG_BR_PROB, NULL);
3077 if (note)
3078 remove_note (insn, note);
3079 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3080 remove_note (insn, note);
3081 }
3082
3083 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3084 {
3085 /* Don't let abnormal flags leak from computed jumps turned
3086 into simplejumps. */
3087
3088 e->flags &= ~EDGE_ABNORMAL;
3089
3090 /* See if this edge is one we should keep. */
3091 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3092 /* A conditional jump can fall through into the next
3093 block, so we should keep the edge. */
3094 {
3095 ei_next (&ei);
3096 continue;
3097 }
3098 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3099 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3100 /* If the destination block is the target of the jump,
3101 keep the edge. */
3102 {
3103 ei_next (&ei);
3104 continue;
3105 }
3106 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3107 && returnjump_p (insn))
3108 /* If the destination block is the exit block, and this
3109 instruction is a return, then keep the edge. */
3110 {
3111 ei_next (&ei);
3112 continue;
3113 }
3114 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3115 /* Keep the edges that correspond to exceptions thrown by
3116 this instruction and rematerialize the EDGE_ABNORMAL
3117 flag we just cleared above. */
3118 {
3119 e->flags |= EDGE_ABNORMAL;
3120 ei_next (&ei);
3121 continue;
3122 }
3123
3124 /* We do not need this edge. */
3125 df_set_bb_dirty (bb);
3126 purged = true;
3127 remove_edge (e);
3128 }
3129
3130 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3131 return purged;
3132
3133 if (dump_file)
3134 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3135
3136 if (!optimize)
3137 return purged;
3138
3139 /* Redistribute probabilities. */
3140 if (single_succ_p (bb))
3141 {
3142 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3143 single_succ_edge (bb)->count = bb->count;
3144 }
3145 else
3146 {
3147 note = find_reg_note (insn, REG_BR_PROB, NULL);
3148 if (!note)
3149 return purged;
3150
3151 b = BRANCH_EDGE (bb);
3152 f = FALLTHRU_EDGE (bb);
3153 b->probability = XINT (note, 0);
3154 f->probability = REG_BR_PROB_BASE - b->probability;
3155 /* Update these to use GCOV_COMPUTE_SCALE. */
3156 b->count = bb->count * b->probability / REG_BR_PROB_BASE;
3157 f->count = bb->count * f->probability / REG_BR_PROB_BASE;
3158 }
3159
3160 return purged;
3161 }
3162 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3163 {
3164 /* First, there should not be any EH or ABCALL edges resulting
3165 from non-local gotos and the like. If there were, we shouldn't
3166 have created the sibcall in the first place. Second, there
3167 should of course never have been a fallthru edge. */
3168 gcc_assert (single_succ_p (bb));
3169 gcc_assert (single_succ_edge (bb)->flags
3170 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3171
3172 return 0;
3173 }
3174
3175 /* If we don't see a jump insn, we don't know exactly why the block would
3176 have been broken at this point. Look for a simple, non-fallthru edge,
3177 as these are only created by conditional branches. If we find such an
3178 edge we know that there used to be a jump here and can then safely
3179 remove all non-fallthru edges. */
3180 found = false;
3181 FOR_EACH_EDGE (e, ei, bb->succs)
3182 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3183 {
3184 found = true;
3185 break;
3186 }
3187
3188 if (!found)
3189 return purged;
3190
3191 /* Remove all but the fake and fallthru edges. The fake edge may be
3192 the only successor for this block in the case of noreturn
3193 calls. */
3194 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3195 {
3196 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3197 {
3198 df_set_bb_dirty (bb);
3199 remove_edge (e);
3200 purged = true;
3201 }
3202 else
3203 ei_next (&ei);
3204 }
3205
3206 gcc_assert (single_succ_p (bb));
3207
3208 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3209 single_succ_edge (bb)->count = bb->count;
3210
3211 if (dump_file)
3212 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3213 bb->index);
3214 return purged;
3215 }
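
/* Typical use (illustrative sketch): a pass that has just simplified or
   deleted the control flow insn at the end of BB follows up with

       changed |= purge_dead_edges (bb);

   where CHANGED is a hypothetical flag the pass keeps, so that the
   successor edges again match the instruction stream before the CFG is
   next verified.  */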
3216
3217 /* Search all basic blocks for potentially dead edges and purge them. Return
3218 true if some edge has been eliminated. */
3219
3220 bool
3221 purge_all_dead_edges (void)
3222 {
3223 int purged = false;
3224 basic_block bb;
3225
3226 FOR_EACH_BB_FN (bb, cfun)
3227 {
3228 bool purged_here = purge_dead_edges (bb);
3229
3230 purged |= purged_here;
3231 }
3232
3233 return purged;
3234 }
3235
3236 /* This is used by a few passes that emit some instructions after abnormal
3237 calls, moving the basic block's end, while they in fact do want to emit
3238 them on the fallthru edge. Look for abnormal call edges, search backward
3239 for the call in the block, and insert the instructions on the edge instead.
3240
3241 Similarly, handle instructions throwing exceptions internally.
3242
3243 Return true when instructions have been found and inserted on edges. */
3244
3245 bool
3246 fixup_abnormal_edges (void)
3247 {
3248 bool inserted = false;
3249 basic_block bb;
3250
3251 FOR_EACH_BB_FN (bb, cfun)
3252 {
3253 edge e;
3254 edge_iterator ei;
3255
3256 /* Look for cases we are interested in - calls or instructions causing
3257 exceptions. */
3258 FOR_EACH_EDGE (e, ei, bb->succs)
3259 if ((e->flags & EDGE_ABNORMAL_CALL)
3260 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3261 == (EDGE_ABNORMAL | EDGE_EH)))
3262 break;
3263
3264 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3265 {
3266 rtx_insn *insn;
3267
3268 /* Get past the new insns generated. Allow notes, as the insns
3269 may already be deleted. */
3270 insn = BB_END (bb);
3271 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3272 && !can_throw_internal (insn)
3273 && insn != BB_HEAD (bb))
3274 insn = PREV_INSN (insn);
3275
3276 if (CALL_P (insn) || can_throw_internal (insn))
3277 {
3278 rtx_insn *stop, *next;
3279
3280 e = find_fallthru_edge (bb->succs);
3281
3282 stop = NEXT_INSN (BB_END (bb));
3283 BB_END (bb) = insn;
3284
3285 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3286 {
3287 next = NEXT_INSN (insn);
3288 if (INSN_P (insn))
3289 {
3290 delete_insn (insn);
3291
3292 /* Sometimes there's still the return value USE.
3293 If it's placed after a trapping call (i.e. that
3294 call is the last insn anyway), we have no fallthru
3295 edge. Simply delete this use and don't try to insert
3296 on the non-existent edge. */
3297 if (GET_CODE (PATTERN (insn)) != USE)
3298 {
3299 /* We're not deleting it, we're moving it. */
3300 insn->set_undeleted ();
3301 SET_PREV_INSN (insn) = NULL_RTX;
3302 SET_NEXT_INSN (insn) = NULL_RTX;
3303
3304 insert_insn_on_edge (insn, e);
3305 inserted = true;
3306 }
3307 }
3308 else if (!BARRIER_P (insn))
3309 set_block_for_insn (insn, NULL);
3310 }
3311 }
3312
3313 /* It may be that we don't find any trapping insn. In this
3314 case we discovered quite late that the insn that had been
3315 marked as can_throw_internal in fact couldn't trap at all.
3316 So we should in fact delete the EH edges out of the block. */
3317 else
3318 purge_dead_edges (bb);
3319 }
3320 }
3321
3322 return inserted;
3323 }
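
/* Because the moved instructions are only queued on the fallthru edges,
   the caller is expected to materialize them afterwards, along the lines
   of (illustrative sketch of the expected calling pattern):

       if (fixup_abnormal_edges ())
         commit_edge_insertions ();
*/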
3324 \f
3325 /* Cut the insns from FIRST to LAST out of the insns stream. */
3326
3327 rtx_insn *
3328 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3329 {
3330 rtx_insn *prevfirst = PREV_INSN (first);
3331 rtx_insn *nextlast = NEXT_INSN (last);
3332
3333 SET_PREV_INSN (first) = NULL;
3334 SET_NEXT_INSN (last) = NULL;
3335 if (prevfirst)
3336 SET_NEXT_INSN (prevfirst) = nextlast;
3337 if (nextlast)
3338 SET_PREV_INSN (nextlast) = prevfirst;
3339 else
3340 set_last_insn (prevfirst);
3341 if (!prevfirst)
3342 set_first_insn (nextlast);
3343 return first;
3344 }
3345 \f
3346 /* Skip over inter-block insns occurring after BB which are typically
3347 associated with BB (e.g., barriers). If there are any such insns,
3348 we return the last one. Otherwise, we return the end of BB. */
3349
3350 static rtx_insn *
3351 skip_insns_after_block (basic_block bb)
3352 {
3353 rtx_insn *insn, *last_insn, *next_head, *prev;
3354
3355 next_head = NULL;
3356 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3357 next_head = BB_HEAD (bb->next_bb);
3358
3359 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3360 {
3361 if (insn == next_head)
3362 break;
3363
3364 switch (GET_CODE (insn))
3365 {
3366 case BARRIER:
3367 last_insn = insn;
3368 continue;
3369
3370 case NOTE:
3371 switch (NOTE_KIND (insn))
3372 {
3373 case NOTE_INSN_BLOCK_END:
3374 gcc_unreachable ();
3375 continue;
3376 default:
3377 continue;
3378 break;
3379 }
3380 break;
3381
3382 case CODE_LABEL:
3383 if (NEXT_INSN (insn)
3384 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3385 {
3386 insn = NEXT_INSN (insn);
3387 last_insn = insn;
3388 continue;
3389 }
3390 break;
3391
3392 default:
3393 break;
3394 }
3395
3396 break;
3397 }
3398
3399 /* It is possible to hit a contradictory sequence. For instance:
3400
3401 jump_insn
3402 NOTE_INSN_BLOCK_BEG
3403 barrier
3404
3405 Where the barrier belongs to jump_insn, but the note does not. This can be
3406 created by removing the basic block originally following
3407 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3408
3409 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3410 {
3411 prev = PREV_INSN (insn);
3412 if (NOTE_P (insn))
3413 switch (NOTE_KIND (insn))
3414 {
3415 case NOTE_INSN_BLOCK_END:
3416 gcc_unreachable ();
3417 break;
3418 case NOTE_INSN_DELETED:
3419 case NOTE_INSN_DELETED_LABEL:
3420 case NOTE_INSN_DELETED_DEBUG_LABEL:
3421 continue;
3422 default:
3423 reorder_insns (insn, insn, last_insn);
3424 }
3425 }
3426
3427 return last_insn;
3428 }
3429
3430 /* Locate or create a label for a given basic block. */
3431
3432 static rtx_insn *
3433 label_for_bb (basic_block bb)
3434 {
3435 rtx_insn *label = BB_HEAD (bb);
3436
3437 if (!LABEL_P (label))
3438 {
3439 if (dump_file)
3440 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3441
3442 label = block_label (bb);
3443 }
3444
3445 return label;
3446 }
3447
3448 /* Locate the effective beginning and end of the insn chain for each
3449 block, as defined by skip_insns_after_block above. */
3450
3451 static void
3452 record_effective_endpoints (void)
3453 {
3454 rtx_insn *next_insn;
3455 basic_block bb;
3456 rtx_insn *insn;
3457
3458 for (insn = get_insns ();
3459 insn
3460 && NOTE_P (insn)
3461 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3462 insn = NEXT_INSN (insn))
3463 continue;
3464 /* No basic blocks at all? */
3465 gcc_assert (insn);
3466
3467 if (PREV_INSN (insn))
3468 cfg_layout_function_header =
3469 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3470 else
3471 cfg_layout_function_header = NULL;
3472
3473 next_insn = get_insns ();
3474 FOR_EACH_BB_FN (bb, cfun)
3475 {
3476 rtx_insn *end;
3477
3478 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3479 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3480 PREV_INSN (BB_HEAD (bb)));
3481 end = skip_insns_after_block (bb);
3482 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3483 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3484 next_insn = NEXT_INSN (BB_END (bb));
3485 }
3486
3487 cfg_layout_function_footer = next_insn;
3488 if (cfg_layout_function_footer)
3489 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3490 }
3491 \f
3492 namespace {
3493
3494 const pass_data pass_data_into_cfg_layout_mode =
3495 {
3496 RTL_PASS, /* type */
3497 "into_cfglayout", /* name */
3498 OPTGROUP_NONE, /* optinfo_flags */
3499 TV_CFG, /* tv_id */
3500 0, /* properties_required */
3501 PROP_cfglayout, /* properties_provided */
3502 0, /* properties_destroyed */
3503 0, /* todo_flags_start */
3504 0, /* todo_flags_finish */
3505 };
3506
3507 class pass_into_cfg_layout_mode : public rtl_opt_pass
3508 {
3509 public:
3510 pass_into_cfg_layout_mode (gcc::context *ctxt)
3511 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3512 {}
3513
3514 /* opt_pass methods: */
3515 virtual unsigned int execute (function *)
3516 {
3517 cfg_layout_initialize (0);
3518 return 0;
3519 }
3520
3521 }; // class pass_into_cfg_layout_mode
3522
3523 } // anon namespace
3524
3525 rtl_opt_pass *
3526 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3527 {
3528 return new pass_into_cfg_layout_mode (ctxt);
3529 }
3530
3531 namespace {
3532
3533 const pass_data pass_data_outof_cfg_layout_mode =
3534 {
3535 RTL_PASS, /* type */
3536 "outof_cfglayout", /* name */
3537 OPTGROUP_NONE, /* optinfo_flags */
3538 TV_CFG, /* tv_id */
3539 0, /* properties_required */
3540 0, /* properties_provided */
3541 PROP_cfglayout, /* properties_destroyed */
3542 0, /* todo_flags_start */
3543 0, /* todo_flags_finish */
3544 };
3545
3546 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3547 {
3548 public:
3549 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3550 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3551 {}
3552
3553 /* opt_pass methods: */
3554 virtual unsigned int execute (function *);
3555
3556 }; // class pass_outof_cfg_layout_mode
3557
3558 unsigned int
3559 pass_outof_cfg_layout_mode::execute (function *fun)
3560 {
3561 basic_block bb;
3562
3563 FOR_EACH_BB_FN (bb, fun)
3564 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3565 bb->aux = bb->next_bb;
3566
3567 cfg_layout_finalize ();
3568
3569 return 0;
3570 }
3571
3572 } // anon namespace
3573
3574 rtl_opt_pass *
3575 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3576 {
3577 return new pass_outof_cfg_layout_mode (ctxt);
3578 }
3579 \f
3580
3581 /* Link the basic blocks in the correct order, compacting the basic
3582 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3583 function also clears the basic block header and footer fields.
3584
3585 This function is usually called after a pass (e.g. tracer) finishes
3586 some transformations while in cfglayout mode. The required sequence
3587 of the basic blocks is in a linked list along the bb->aux field.
3588 This function re-links the basic block prev_bb and next_bb pointers
3589 accordingly, and it compacts and renumbers the blocks.
3590
3591 FIXME: This currently works only for RTL, but the only RTL-specific
3592 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3593 to GIMPLE a long time ago, but it doesn't relink the basic block
3594 chain. It could do that (to give better initial RTL) if this function
3595 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3596
3597 void
3598 relink_block_chain (bool stay_in_cfglayout_mode)
3599 {
3600 basic_block bb, prev_bb;
3601 int index;
3602
3603 /* Maybe dump the re-ordered sequence. */
3604 if (dump_file)
3605 {
3606 fprintf (dump_file, "Reordered sequence:\n");
3607 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3608 NUM_FIXED_BLOCKS;
3609 bb;
3610 bb = (basic_block) bb->aux, index++)
3611 {
3612 fprintf (dump_file, " %i ", index);
3613 if (get_bb_original (bb))
3614 fprintf (dump_file, "duplicate of %i ",
3615 get_bb_original (bb)->index);
3616 else if (forwarder_block_p (bb)
3617 && !LABEL_P (BB_HEAD (bb)))
3618 fprintf (dump_file, "compensation ");
3619 else
3620 fprintf (dump_file, "bb %i ", bb->index);
3621 fprintf (dump_file, " [%i]\n", bb->frequency);
3622 }
3623 }
3624
3625 /* Now reorder the blocks. */
3626 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3627 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3628 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3629 {
3630 bb->prev_bb = prev_bb;
3631 prev_bb->next_bb = bb;
3632 }
3633 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3634 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3635
3636 /* Then, clean up the aux fields. */
3637 FOR_ALL_BB_FN (bb, cfun)
3638 {
3639 bb->aux = NULL;
3640 if (!stay_in_cfglayout_mode)
3641 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3642 }
3643
3644 /* Maybe reset the original copy tables; they are not valid anymore
3645 when we renumber the basic blocks in compact_blocks. If we are
3646 going out of cfglayout mode, don't re-allocate the tables. */
3647 free_original_copy_tables ();
3648 if (stay_in_cfglayout_mode)
3649 initialize_original_copy_tables ();
3650
3651 /* Finally, put basic_block_info in the new order. */
3652 compact_blocks ();
3653 }
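
/* Illustrative sketch of the expected input: the desired block order is
   threaded through the aux fields before the call, e.g. to keep the
   current order while staying in cfglayout mode one could do

       FOR_EACH_BB_FN (bb, cfun)
         if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
           bb->aux = bb->next_bb;
       relink_block_chain (true);

   (compare pass_outof_cfg_layout_mode::execute above, which builds the
   same chain before calling cfg_layout_finalize).  */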
3654 \f
3655
3656 /* Given a reorder chain, rearrange the code to match. */
3657
3658 static void
3659 fixup_reorder_chain (void)
3660 {
3661 basic_block bb;
3662 rtx_insn *insn = NULL;
3663
3664 if (cfg_layout_function_header)
3665 {
3666 set_first_insn (cfg_layout_function_header);
3667 insn = cfg_layout_function_header;
3668 while (NEXT_INSN (insn))
3669 insn = NEXT_INSN (insn);
3670 }
3671
3672 /* First do the bulk reordering -- rechain the blocks without regard to
3673 the needed changes to jumps and labels. */
3674
3675 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3676 bb->aux)
3677 {
3678 if (BB_HEADER (bb))
3679 {
3680 if (insn)
3681 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3682 else
3683 set_first_insn (BB_HEADER (bb));
3684 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3685 insn = BB_HEADER (bb);
3686 while (NEXT_INSN (insn))
3687 insn = NEXT_INSN (insn);
3688 }
3689 if (insn)
3690 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3691 else
3692 set_first_insn (BB_HEAD (bb));
3693 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3694 insn = BB_END (bb);
3695 if (BB_FOOTER (bb))
3696 {
3697 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3698 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3699 while (NEXT_INSN (insn))
3700 insn = NEXT_INSN (insn);
3701 }
3702 }
3703
3704 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3705 if (cfg_layout_function_footer)
3706 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3707
3708 while (NEXT_INSN (insn))
3709 insn = NEXT_INSN (insn);
3710
3711 set_last_insn (insn);
3712 if (flag_checking)
3713 verify_insn_chain ();
3714
3715 /* Now add jumps and labels as needed to match the blocks' new
3716 outgoing edges. */
3717
3718 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3719 bb->aux)
3720 {
3721 edge e_fall, e_taken, e;
3722 rtx_insn *bb_end_insn;
3723 rtx ret_label = NULL_RTX;
3724 basic_block nb;
3725 edge_iterator ei;
3726
3727 if (EDGE_COUNT (bb->succs) == 0)
3728 continue;
3729
3730 /* Find the old fallthru edge, and another non-EH edge for
3731 a taken jump. */
3732 e_taken = e_fall = NULL;
3733
3734 FOR_EACH_EDGE (e, ei, bb->succs)
3735 if (e->flags & EDGE_FALLTHRU)
3736 e_fall = e;
3737 else if (! (e->flags & EDGE_EH))
3738 e_taken = e;
3739
3740 bb_end_insn = BB_END (bb);
3741 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3742 {
3743 ret_label = JUMP_LABEL (bb_end_jump);
3744 if (any_condjump_p (bb_end_jump))
3745 {
3746 /* This might happen if the conditional jump has side
3747 effects and could therefore not be optimized away.
3748 Make the basic block end with a barrier in order
3749 to prevent rtl_verify_flow_info from complaining. */
3750 if (!e_fall)
3751 {
3752 gcc_assert (!onlyjump_p (bb_end_jump)
3753 || returnjump_p (bb_end_jump)
3754 || (e_taken->flags & EDGE_CROSSING));
3755 emit_barrier_after (bb_end_jump);
3756 continue;
3757 }
3758
3759 /* If the old fallthru is still next, nothing to do. */
3760 if (bb->aux == e_fall->dest
3761 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3762 continue;
3763
3764 /* The degenerate case of a conditional jump jumping to the next
3765 instruction can happen for jumps with side effects. We need
3766 to construct a forwarder block and this will be done just
3767 fine by force_nonfallthru below. */
3768 if (!e_taken)
3769 ;
3770
3771 /* There is another special case: if *neither* block is next,
3772 such as happens at the very end of a function, then we'll
3773 need to add a new unconditional jump. Choose the taken
3774 edge based on known or assumed probability. */
3775 else if (bb->aux != e_taken->dest)
3776 {
3777 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3778
3779 if (note
3780 && XINT (note, 0) < REG_BR_PROB_BASE / 2
3781 && invert_jump (bb_end_jump,
3782 (e_fall->dest
3783 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3784 ? NULL_RTX
3785 : label_for_bb (e_fall->dest)), 0))
3786 {
3787 e_fall->flags &= ~EDGE_FALLTHRU;
3788 gcc_checking_assert (could_fall_through
3789 (e_taken->src, e_taken->dest));
3790 e_taken->flags |= EDGE_FALLTHRU;
3791 update_br_prob_note (bb);
3792 e = e_fall, e_fall = e_taken, e_taken = e;
3793 }
3794 }
3795
3796 /* If the "jumping" edge is a crossing edge, and the fall
3797 through edge is non-crossing, leave things as they are. */
3798 else if ((e_taken->flags & EDGE_CROSSING)
3799 && !(e_fall->flags & EDGE_CROSSING))
3800 continue;
3801
3802 /* Otherwise we can try to invert the jump. This will
3803 basically never fail, however, keep up the pretense. */
3804 else if (invert_jump (bb_end_jump,
3805 (e_fall->dest
3806 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3807 ? NULL_RTX
3808 : label_for_bb (e_fall->dest)), 0))
3809 {
3810 e_fall->flags &= ~EDGE_FALLTHRU;
3811 gcc_checking_assert (could_fall_through
3812 (e_taken->src, e_taken->dest));
3813 e_taken->flags |= EDGE_FALLTHRU;
3814 update_br_prob_note (bb);
3815 if (LABEL_NUSES (ret_label) == 0
3816 && single_pred_p (e_taken->dest))
3817 delete_insn (ret_label);
3818 continue;
3819 }
3820 }
3821 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3822 {
3823 /* If the old fallthru is still next or if
3824 the asm goto doesn't have a fallthru (e.g. when followed by
3825 __builtin_unreachable ()), nothing to do. */
3826 if (! e_fall
3827 || bb->aux == e_fall->dest
3828 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3829 continue;
3830
3831 /* Otherwise we'll have to use the fallthru fixup below. */
3832 }
3833 else
3834 {
3835 /* Otherwise we have some return, switch or computed
3836 jump. In the 99% case, there should not have been a
3837 fallthru edge. */
3838 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3839 continue;
3840 }
3841 }
3842 else
3843 {
3844 /* No fallthru implies a noreturn function with EH edges, or
3845 something similarly bizarre. In any case, we don't need to
3846 do anything. */
3847 if (! e_fall)
3848 continue;
3849
3850 /* If the fallthru block is still next, nothing to do. */
3851 if (bb->aux == e_fall->dest)
3852 continue;
3853
3854 /* A fallthru to exit block. */
3855 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3856 continue;
3857 }
3858
3859 /* We got here if we need to add a new jump insn.
3860 Note force_nonfallthru can delete E_FALL and thus we have to
3861 save E_FALL->src prior to the call to force_nonfallthru. */
3862 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3863 if (nb)
3864 {
3865 nb->aux = bb->aux;
3866 bb->aux = nb;
3867 /* Don't process this new block. */
3868 bb = nb;
3869 }
3870 }
3871
3872 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3873
3874 /* Annoying special case - jump around dead jumptables left in the code. */
3875 FOR_EACH_BB_FN (bb, cfun)
3876 {
3877 edge e = find_fallthru_edge (bb->succs);
3878
3879 if (e && !can_fallthru (e->src, e->dest))
3880 force_nonfallthru (e);
3881 }
3882
3883 /* Ensure that each edge's goto_locus is represented by at least one
3884 instruction with that locus in the RTL. */
3885 if (!optimize)
3886 FOR_EACH_BB_FN (bb, cfun)
3887 {
3888 edge e;
3889 edge_iterator ei;
3890
3891 FOR_EACH_EDGE (e, ei, bb->succs)
3892 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3893 && !(e->flags & EDGE_ABNORMAL))
3894 {
3895 edge e2;
3896 edge_iterator ei2;
3897 basic_block dest, nb;
3898 rtx_insn *end;
3899
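/* Search backwards from the end of the source block for the last insn
   that carries a location; if it already has the edge's goto_locus,
   there is nothing to do.  */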
3900 insn = BB_END (e->src);
3901 end = PREV_INSN (BB_HEAD (e->src));
3902 while (insn != end
3903 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3904 insn = PREV_INSN (insn);
3905 if (insn != end
3906 && INSN_LOCATION (insn) == e->goto_locus)
3907 continue;
3908 if (simplejump_p (BB_END (e->src))
3909 && !INSN_HAS_LOCATION (BB_END (e->src)))
3910 {
3911 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3912 continue;
3913 }
3914 dest = e->dest;
3915 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3916 {
3917 /* Non-fallthru edges to the exit block cannot be split. */
3918 if (!(e->flags & EDGE_FALLTHRU))
3919 continue;
3920 }
3921 else
3922 {
3923 insn = BB_HEAD (dest);
3924 end = NEXT_INSN (BB_END (dest));
3925 while (insn != end && !NONDEBUG_INSN_P (insn))
3926 insn = NEXT_INSN (insn);
3927 if (insn != end && INSN_HAS_LOCATION (insn)
3928 && INSN_LOCATION (insn) == e->goto_locus)
3929 continue;
3930 }
3931 nb = split_edge (e);
3932 if (!INSN_P (BB_END (nb)))
3933 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3934 nb);
3935 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3936
3937 /* If there are other incoming edges to the destination block
3938 with the same goto locus, redirect them to the new block as
3939 well; this can prevent other such blocks from being created
3940 in subsequent iterations of the loop. */
3941 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3942 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3943 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3944 && e->goto_locus == e2->goto_locus)
3945 redirect_edge_and_branch (e2, nb);
3946 else
3947 ei_next (&ei2);
3948 }
3949 }
3950 }
3951 \f
3952 /* Perform sanity checks on the insn chain.
3953 1. Check that next/prev pointers are consistent in both the forward and
3954 reverse direction.
3955 2. Count insns in chain, going both directions, and check if equal.
3956 3. Check that get_last_insn () returns the actual end of chain. */
3957
3958 DEBUG_FUNCTION void
3959 verify_insn_chain (void)
3960 {
3961 rtx_insn *x, *prevx, *nextx;
3962 int insn_cnt1, insn_cnt2;
3963
3964 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3965 x != 0;
3966 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3967 gcc_assert (PREV_INSN (x) == prevx);
3968
3969 gcc_assert (prevx == get_last_insn ());
3970
3971 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3972 x != 0;
3973 nextx = x, insn_cnt2++, x = PREV_INSN (x))
3974 gcc_assert (NEXT_INSN (x) == nextx);
3975
3976 gcc_assert (insn_cnt1 == insn_cnt2);
3977 }
3978 \f
3979 /* If we have assembler epilogues, the block falling through to exit must
3980 be the last one in the reordered chain when we reach final. Ensure
3981 that this condition is met. */
3982 static void
3983 fixup_fallthru_exit_predecessor (void)
3984 {
3985 edge e;
3986 basic_block bb = NULL;
3987
3988 /* This transformation is not valid before reload, because we might
3989 separate a call from the instruction that copies the return
3990 value. */
3991 gcc_assert (reload_completed);
3992
3993 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
3994 if (e)
3995 bb = e->src;
3996
3997 if (bb && bb->aux)
3998 {
3999 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4000
4001 /* If the very first block is the one with the fall-through exit
4002 edge, we have to split that block. */
4003 if (c == bb)
4004 {
4005 bb = split_block_after_labels (bb)->dest;
4006 bb->aux = c->aux;
4007 c->aux = bb;
4008 BB_FOOTER (bb) = BB_FOOTER (c);
4009 BB_FOOTER (c) = NULL;
4010 }
4011
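/* Unlink BB from its current position in the ->aux chain and append it
   at the end, so that it immediately precedes the exit block.  */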
4012 while (c->aux != bb)
4013 c = (basic_block) c->aux;
4014
4015 c->aux = bb->aux;
4016 while (c->aux)
4017 c = (basic_block) c->aux;
4018
4019 c->aux = bb;
4020 bb->aux = NULL;
4021 }
4022 }
4023
4024 /* If the exit block has more than one fallthru predecessor, force it to
4025 have only one. */
4026
4027 static void
4028 force_one_exit_fallthru (void)
4029 {
4030 edge e, predecessor = NULL;
4031 bool more = false;
4032 edge_iterator ei;
4033 basic_block forwarder, bb;
4034
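/* Find the first fallthru predecessor of the exit block and check
   whether there is more than one.  */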
4035 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4036 if (e->flags & EDGE_FALLTHRU)
4037 {
4038 if (predecessor == NULL)
4039 predecessor = e;
4040 else
4041 {
4042 more = true;
4043 break;
4044 }
4045 }
4046
4047 if (!more)
4048 return;
4049
4050 /* Exit has several fallthru predecessors. Create a forwarder block for
4051 them. */
4052 forwarder = split_edge (predecessor);
4053 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4054 (e = ei_safe_edge (ei)); )
4055 {
4056 if (e->src == forwarder
4057 || !(e->flags & EDGE_FALLTHRU))
4058 ei_next (&ei);
4059 else
4060 redirect_edge_and_branch_force (e, forwarder);
4061 }
4062
4063 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4064 exit block. */
4065 FOR_EACH_BB_FN (bb, cfun)
4066 {
4067 if (bb->aux == NULL && bb != forwarder)
4068 {
4069 bb->aux = forwarder;
4070 break;
4071 }
4072 }
4073 }
4074 \f
4075 /* Return true if it is possible to duplicate basic block BB. */
4076
4077 static bool
4078 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4079 {
4080 /* Do not attempt to duplicate tablejumps, as we need to unshare
4081 the dispatch table. This is difficult to do, as the instructions
4082 computing jump destination may be hoisted outside the basic block. */
4083 if (tablejump_p (BB_END (bb), NULL, NULL))
4084 return false;
4085
4086 /* Do not duplicate blocks containing insns that can't be copied. */
4087 if (targetm.cannot_copy_insn_p)
4088 {
4089 rtx_insn *insn = BB_HEAD (bb);
4090 while (1)
4091 {
4092 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4093 return false;
4094 if (insn == BB_END (bb))
4095 break;
4096 insn = NEXT_INSN (insn);
4097 }
4098 }
4099
4100 return true;
4101 }
4102
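/* Duplicate the insns from FROM to TO, inclusive, at the end of the current
   insn chain.  Return the first insn of the copy, or NULL if nothing was
   copied.  */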
4103 rtx_insn *
4104 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4105 {
4106 rtx_insn *insn, *next, *copy;
4107 rtx_note *last;
4108
4109 /* Avoid updating the boundaries of the previous basic block. The
4110 note will get removed from the insn stream in the fixup. */
4111 last = emit_note (NOTE_INSN_DELETED);
4112
4113 /* Create copy at the end of INSN chain. The chain will
4114 be reordered later. */
4115 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4116 {
4117 switch (GET_CODE (insn))
4118 {
4119 case DEBUG_INSN:
4120 /* Don't duplicate label debug insns. */
4121 if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4122 break;
4123 /* FALLTHRU */
4124 case INSN:
4125 case CALL_INSN:
4126 case JUMP_INSN:
4127 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4128 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4129 && ANY_RETURN_P (JUMP_LABEL (insn)))
4130 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4131 maybe_copy_prologue_epilogue_insn (insn, copy);
4132 break;
4133
4134 case JUMP_TABLE_DATA:
4135 /* Avoid copying dispatch tables. We never duplicate
4136 tablejumps, so this can only happen if the table got
4137 moved far from the original jump.
4138 Avoid copying the following barrier as well, if any
4139 (and any debug insns in between). */
4140 for (next = NEXT_INSN (insn);
4141 next != NEXT_INSN (to);
4142 next = NEXT_INSN (next))
4143 if (!DEBUG_INSN_P (next))
4144 break;
4145 if (next != NEXT_INSN (to) && BARRIER_P (next))
4146 insn = next;
4147 break;
4148
4149 case CODE_LABEL:
4150 break;
4151
4152 case BARRIER:
4153 emit_barrier ();
4154 break;
4155
4156 case NOTE:
4157 switch (NOTE_KIND (insn))
4158 {
4159 /* In case the prologue is empty and the function contains a label
4160 in its first BB, we may want to copy the block. */
4161 case NOTE_INSN_PROLOGUE_END:
4162
4163 case NOTE_INSN_DELETED:
4164 case NOTE_INSN_DELETED_LABEL:
4165 case NOTE_INSN_DELETED_DEBUG_LABEL:
4166 /* These are safe to strip. */
4167 case NOTE_INSN_FUNCTION_BEG:
4168 /* There is always just a single entry to the function. */
4169 case NOTE_INSN_BASIC_BLOCK:
4170 /* We should only switch text sections once. */
4171 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4172 break;
4173
4174 case NOTE_INSN_EPILOGUE_BEG:
4175 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4176 emit_note_copy (as_a <rtx_note *> (insn));
4177 break;
4178
4179 default:
4180 /* All other notes should have already been eliminated. */
4181 gcc_unreachable ();
4182 }
4183 break;
4184 default:
4185 gcc_unreachable ();
4186 }
4187 }
4188 insn = NEXT_INSN (last);
4189 delete_insn (last);
4190 return insn;
4191 }
4192
4193 /* Create a duplicate of the basic block BB. */
4194
4195 static basic_block
4196 cfg_layout_duplicate_bb (basic_block bb)
4197 {
4198 rtx_insn *insn;
4199 basic_block new_bb;
4200
4201 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4202 new_bb = create_basic_block (insn,
4203 insn ? get_last_insn () : NULL,
4204 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4205
4206 BB_COPY_PARTITION (new_bb, bb);
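/* Duplicate the block's header and footer insn chains, if any: the copies
   are emitted at the end of the insn stream and then unlinked into the new
   block's header and footer.  */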
4207 if (BB_HEADER (bb))
4208 {
4209 insn = BB_HEADER (bb);
4210 while (NEXT_INSN (insn))
4211 insn = NEXT_INSN (insn);
4212 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4213 if (insn)
4214 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4215 }
4216
4217 if (BB_FOOTER (bb))
4218 {
4219 insn = BB_FOOTER (bb);
4220 while (NEXT_INSN (insn))
4221 insn = NEXT_INSN (insn);
4222 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4223 if (insn)
4224 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4225 }
4226
4227 return new_bb;
4228 }
4229
4230 \f
4231 /* Main entry point to this module - initialize the data structures for
4232 CFG layout changes. The loop structures, if present, are kept up to date.
4233
4234 FLAGS is a set of additional flags to pass to cleanup_cfg (). */
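/* A typical use looks roughly like this (a sketch; the flags and the
   transformations performed depend on the pass):

     cfg_layout_initialize (0);
     ... transform the CFG while in cfglayout mode ...
     cfg_layout_finalize ();  */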
4235
4236 void
4237 cfg_layout_initialize (unsigned int flags)
4238 {
4239 rtx_insn_list *x;
4240 basic_block bb;
4241
4242 /* Once bb partitioning is complete, cfg layout mode should not be
4243 re-entered. Entering cfg layout mode may require fixups. For
4244 example, edge forwarding performed while optimizing the cfg
4245 layout could require moving a block from the hot to the cold
4246 section, which would create an illegal partitioning unless some
4247 manual fixup was performed. */
4248 gcc_assert (!(crtl->bb_reorder_complete
4249 && flag_reorder_blocks_and_partition));
4250
4251 initialize_original_copy_tables ();
4252
4253 cfg_layout_rtl_register_cfg_hooks ();
4254
4255 record_effective_endpoints ();
4256
4257 /* Make sure that the targets of non local gotos are marked. */
4258 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4259 {
4260 bb = BLOCK_FOR_INSN (x->insn ());
4261 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4262 }
4263
4264 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4265 }
4266
4267 /* Splits superblocks. */
4268 void
4269 break_superblocks (void)
4270 {
4271 sbitmap superblocks;
4272 bool need = false;
4273 basic_block bb;
4274
4275 superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
4276 bitmap_clear (superblocks);
4277
4278 FOR_EACH_BB_FN (bb, cfun)
4279 if (bb->flags & BB_SUPERBLOCK)
4280 {
4281 bb->flags &= ~BB_SUPERBLOCK;
4282 bitmap_set_bit (superblocks, bb->index);
4283 need = true;
4284 }
4285
4286 if (need)
4287 {
4288 rebuild_jump_labels (get_insns ());
4289 find_many_sub_basic_blocks (superblocks);
4290 }
4291
4292 free (superblocks);
4293 }
4294
4295 /* Finalize the changes: reorder insn list according to the sequence specified
4296 by aux pointers, enter compensation code, rebuild scope forest. */
4297
4298 void
4299 cfg_layout_finalize (void)
4300 {
4301 checking_verify_flow_info ();
4302 free_dominance_info (CDI_DOMINATORS);
4303 force_one_exit_fallthru ();
4304 rtl_register_cfg_hooks ();
4305 if (reload_completed && !targetm.have_epilogue ())
4306 fixup_fallthru_exit_predecessor ();
4307 fixup_reorder_chain ();
4308
4309 rebuild_jump_labels (get_insns ());
4310 delete_dead_jumptables ();
4311
4312 if (flag_checking)
4313 verify_insn_chain ();
4314 checking_verify_flow_info ();
4315 }
4316
4317
4318 /* Same as split_block but update cfg_layout structures. */
4319
4320 static basic_block
4321 cfg_layout_split_block (basic_block bb, void *insnp)
4322 {
4323 rtx insn = (rtx) insnp;
4324 basic_block new_bb = rtl_split_block (bb, insn);
4325
4326 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4327 BB_FOOTER (bb) = NULL;
4328
4329 return new_bb;
4330 }
4331
4332 /* Redirect edge E to DEST. */
4333 static edge
4334 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4335 {
4336 basic_block src = e->src;
4337 edge ret;
4338
4339 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4340 return NULL;
4341
4342 if (e->dest == dest)
4343 return e;
4344
4345 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4346 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4347 {
4348 df_set_bb_dirty (src);
4349 return ret;
4350 }
4351
4352 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4353 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4354 {
4355 if (dump_file)
4356 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4357 e->src->index, dest->index);
4358
4359 df_set_bb_dirty (e->src);
4360 redirect_edge_succ (e, dest);
4361 return e;
4362 }
4363
4364 /* redirect_edge_and_branch may decide to turn the branch into a fallthru
4365 edge when the basic blocks appear to be in sequence. Avoid this
4366 transformation. */
4367
4368 if (e->flags & EDGE_FALLTHRU)
4369 {
4370 /* Redirect any branch edges unified with the fallthru one. */
4371 if (JUMP_P (BB_END (src))
4372 && label_is_jump_target_p (BB_HEAD (e->dest),
4373 BB_END (src)))
4374 {
4375 edge redirected;
4376
4377 if (dump_file)
4378 fprintf (dump_file, "Fallthru edge unified with branch "
4379 "%i->%i redirected to %i\n",
4380 e->src->index, e->dest->index, dest->index);
4381 e->flags &= ~EDGE_FALLTHRU;
4382 redirected = redirect_branch_edge (e, dest);
4383 gcc_assert (redirected);
4384 redirected->flags |= EDGE_FALLTHRU;
4385 df_set_bb_dirty (redirected->src);
4386 return redirected;
4387 }
4388 /* If we are redirecting the fallthru edge to the destination of the branch
4389 edge of a conditional jump, the jump becomes redundant; remove it. */
4390 if (EDGE_COUNT (src->succs) == 2)
4391 {
4392 /* Find the edge that is different from E. */
4393 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4394
4395 if (s->dest == dest
4396 && any_condjump_p (BB_END (src))
4397 && onlyjump_p (BB_END (src)))
4398 delete_insn (BB_END (src));
4399 }
4400 if (dump_file)
4401 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4402 e->src->index, e->dest->index, dest->index);
4403 ret = redirect_edge_succ_nodup (e, dest);
4404 }
4405 else
4406 ret = redirect_branch_edge (e, dest);
4407
4408 /* We don't want simplejumps in the insn stream during cfglayout. */
4409 gcc_assert (!simplejump_p (BB_END (src)));
4410
4411 df_set_bb_dirty (src);
4412 return ret;
4413 }
4414
4415 /* Simple wrapper, as we can always redirect fallthru edges. */
4416 static basic_block
4417 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4418 {
4419 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4420
4421 gcc_assert (redirected);
4422 return NULL;
4423 }
4424
4425 /* Same as delete_basic_block but update cfg_layout structures. */
4426
4427 static void
4428 cfg_layout_delete_block (basic_block bb)
4429 {
4430 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4431 rtx_insn **to;
4432
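/* Splice the block's header insns back into the insn stream just before
   the block's first insn.  */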
4433 if (BB_HEADER (bb))
4434 {
4435 next = BB_HEAD (bb);
4436 if (prev)
4437 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4438 else
4439 set_first_insn (BB_HEADER (bb));
4440 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4441 insn = BB_HEADER (bb);
4442 while (NEXT_INSN (insn))
4443 insn = NEXT_INSN (insn);
4444 SET_NEXT_INSN (insn) = next;
4445 SET_PREV_INSN (next) = insn;
4446 }
4447 next = NEXT_INSN (BB_END (bb));
4448 if (BB_FOOTER (bb))
4449 {
4450 insn = BB_FOOTER (bb);
4451 while (insn)
4452 {
4453 if (BARRIER_P (insn))
4454 {
4455 if (PREV_INSN (insn))
4456 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4457 else
4458 BB_FOOTER (bb) = NEXT_INSN (insn);
4459 if (NEXT_INSN (insn))
4460 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4461 }
4462 if (LABEL_P (insn))
4463 break;
4464 insn = NEXT_INSN (insn);
4465 }
4466 if (BB_FOOTER (bb))
4467 {
4468 insn = BB_END (bb);
4469 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4470 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4471 while (NEXT_INSN (insn))
4472 insn = NEXT_INSN (insn);
4473 SET_NEXT_INSN (insn) = next;
4474 if (next)
4475 SET_PREV_INSN (next) = insn;
4476 else
4477 set_last_insn (insn);
4478 }
4479 }
4480 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4481 to = &BB_HEADER (bb->next_bb);
4482 else
4483 to = &cfg_layout_function_footer;
4484
4485 rtl_delete_block (bb);
4486
4487 if (prev)
4488 prev = NEXT_INSN (prev);
4489 else
4490 prev = get_insns ();
4491 if (next)
4492 next = PREV_INSN (next);
4493 else
4494 next = get_last_insn ();
4495
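/* Any insns left over around the deleted block (notes spliced in from the
   header and footer above) become the header of the next block, or the
   function footer if BB was the last block.  */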
4496 if (next && NEXT_INSN (next) != prev)
4497 {
4498 remaints = unlink_insn_chain (prev, next);
4499 insn = remaints;
4500 while (NEXT_INSN (insn))
4501 insn = NEXT_INSN (insn);
4502 SET_NEXT_INSN (insn) = *to;
4503 if (*to)
4504 SET_PREV_INSN (*to) = insn;
4505 *to = remaints;
4506 }
4507 }
4508
4509 /* Return true when blocks A and B can be safely merged. */
4510
4511 static bool
4512 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4513 {
4514 /* If we are partitioning hot/cold basic blocks, we don't want to
4515 mess up unconditional or indirect jumps that cross between hot
4516 and cold sections.
4517
4518 Basic block partitioning may result in some jumps that appear to
4519 be optimizable (or blocks that appear to be mergeable), but which really
4520 must be left untouched (they are required to make it safely across
4521 partition boundaries). See the comments at the top of
4522 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4523
4524 if (BB_PARTITION (a) != BB_PARTITION (b))
4525 return false;
4526
4527 /* Protect the loop latches. */
4528 if (current_loops && b->loop_father->latch == b)
4529 return false;
4530
4531 /* If we would end up moving B's instructions, make sure it doesn't fall
4532 through into the exit block, since we cannot recover from a fallthrough
4533 edge into the exit block occurring in the middle of a function. */
4534 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4535 {
4536 edge e = find_fallthru_edge (b->succs);
4537 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4538 return false;
4539 }
4540
4541 /* There must be exactly one edge in between the blocks. */
4542 return (single_succ_p (a)
4543 && single_succ (a) == b
4544 && single_pred_p (b)
4545 && a != b
4546 /* Must be simple edge. */
4547 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4548 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4549 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4550 /* If the jump insn has side effects, we can't kill the edge.
4551 When not optimizing, try_redirect_by_replacing_jump will
4552 not allow us to redirect an edge by replacing a table jump. */
4553 && (!JUMP_P (BB_END (a))
4554 || ((!optimize || reload_completed)
4555 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4556 }
4557
4558 /* Merge block A and B. The blocks must be mergeable. */
4559
4560 static void
4561 cfg_layout_merge_blocks (basic_block a, basic_block b)
4562 {
4563 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4564 rtx_insn *insn;
4565
4566 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4567
4568 if (dump_file)
4569 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4570 a->index);
4571
4572 /* If there was a CODE_LABEL beginning B, delete it. */
4573 if (LABEL_P (BB_HEAD (b)))
4574 {
4575 delete_insn (BB_HEAD (b));
4576 }
4577
4578 /* We should have a fallthru edge in A, or we can do a dummy redirection to
4579 get it cleaned up. */
4580 if (JUMP_P (BB_END (a)))
4581 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4582 gcc_assert (!JUMP_P (BB_END (a)));
4583
4584 /* When not optimizing and the edge is the only place in RTL which holds
4585 some unique locus, emit a nop with that locus in between. */
4586 if (!optimize)
4587 emit_nop_for_unique_locus_between (a, b);
4588
4589 /* Move things from b->footer after a->footer. */
4590 if (BB_FOOTER (b))
4591 {
4592 if (!BB_FOOTER (a))
4593 BB_FOOTER (a) = BB_FOOTER (b);
4594 else
4595 {
4596 rtx_insn *last = BB_FOOTER (a);
4597
4598 while (NEXT_INSN (last))
4599 last = NEXT_INSN (last);
4600 SET_NEXT_INSN (last) = BB_FOOTER (b);
4601 SET_PREV_INSN (BB_FOOTER (b)) = last;
4602 }
4603 BB_FOOTER (b) = NULL;
4604 }
4605
4606 /* Move things from b->header before a->footer.
4607 Note that this may include dead tablejump data, but we don't clean
4608 those up until we go out of cfglayout mode. */
4609 if (BB_HEADER (b))
4610 {
4611 if (! BB_FOOTER (a))
4612 BB_FOOTER (a) = BB_HEADER (b);
4613 else
4614 {
4615 rtx_insn *last = BB_HEADER (b);
4616
4617 while (NEXT_INSN (last))
4618 last = NEXT_INSN (last);
4619 SET_NEXT_INSN (last) = BB_FOOTER (a);
4620 SET_PREV_INSN (BB_FOOTER (a)) = last;
4621 BB_FOOTER (a) = BB_HEADER (b);
4622 }
4623 BB_HEADER (b) = NULL;
4624 }
4625
4626 /* If the basic blocks are not adjacent, move B's insns after the end of A. */
4627 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4628 {
4629 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4630
4631 emit_insn_after_noloc (insn, BB_END (a), a);
4632 }
4633 /* Otherwise just re-associate the instructions. */
4634 else
4635 {
4636 insn = BB_HEAD (b);
4637 BB_END (a) = BB_END (b);
4638 }
4639
4640 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4641 We need to call it explicitly. */
4642 update_bb_for_insn_chain (insn, BB_END (b), a);
4643
4644 /* Skip possible DELETED_LABEL insn. */
4645 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4646 insn = NEXT_INSN (insn);
4647 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4648 BB_HEAD (b) = BB_END (b) = NULL;
4649 delete_insn (insn);
4650
4651 df_bb_delete (b->index);
4652
4653 /* If B was a forwarder block, propagate the locus on the edge. */
4654 if (forwarder_p
4655 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4656 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4657
4658 if (dump_file)
4659 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4660 }
4661
4662 /* Split edge E. */
4663
4664 static basic_block
4665 cfg_layout_split_edge (edge e)
4666 {
4667 basic_block new_bb =
4668 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4669 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4670 NULL_RTX, e->src);
4671
4672 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4673 BB_COPY_PARTITION (new_bb, e->src);
4674 else
4675 BB_COPY_PARTITION (new_bb, e->dest);
4676 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4677 redirect_edge_and_branch_force (e, new_bb);
4678
4679 return new_bb;
4680 }
4681
4682 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4683
4684 static void
4685 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4686 {
4687 }
4688
4689 /* Return true if BB contains only labels or non-executable
4690 instructions. */
4691
4692 static bool
4693 rtl_block_empty_p (basic_block bb)
4694 {
4695 rtx_insn *insn;
4696
4697 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4698 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4699 return true;
4700
4701 FOR_BB_INSNS (bb, insn)
4702 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4703 return false;
4704
4705 return true;
4706 }
4707
4708 /* Split a basic block if it ends with a conditional branch and if
4709 the other part of the block is not empty. */
4710
4711 static basic_block
4712 rtl_split_block_before_cond_jump (basic_block bb)
4713 {
4714 rtx_insn *insn;
4715 rtx_insn *split_point = NULL;
4716 rtx_insn *last = NULL;
4717 bool found_code = false;
4718
4719 FOR_BB_INSNS (bb, insn)
4720 {
4721 if (any_condjump_p (insn))
4722 split_point = last;
4723 else if (NONDEBUG_INSN_P (insn))
4724 found_code = true;
4725 last = insn;
4726 }
4727
4728 /* Only split if the block contains both a conditional jump and some other real insn. */
4729 if (found_code && split_point)
4730 return split_block (bb, split_point)->dest;
4731 else
4732 return NULL;
4733 }
4734
4735 /* Return 1 if BB ends with a call, possibly followed by some
4736 instructions that must stay with the call, 0 otherwise. */
4737
4738 static bool
4739 rtl_block_ends_with_call_p (basic_block bb)
4740 {
4741 rtx_insn *insn = BB_END (bb);
4742
4743 while (!CALL_P (insn)
4744 && insn != BB_HEAD (bb)
4745 && (keep_with_call_p (insn)
4746 || NOTE_P (insn)
4747 || DEBUG_INSN_P (insn)))
4748 insn = PREV_INSN (insn);
4749 return (CALL_P (insn));
4750 }
4751
4752 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4753
4754 static bool
4755 rtl_block_ends_with_condjump_p (const_basic_block bb)
4756 {
4757 return any_condjump_p (BB_END (bb));
4758 }
4759
4760 /* Return true if we need to add fake edge to exit.
4761 Helper function for rtl_flow_call_edges_add. */
4762
4763 static bool
4764 need_fake_edge_p (const rtx_insn *insn)
4765 {
4766 if (!INSN_P (insn))
4767 return false;
4768
4769 if ((CALL_P (insn)
4770 && !SIBLING_CALL_P (insn)
4771 && !find_reg_note (insn, REG_NORETURN, NULL)
4772 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4773 return true;
4774
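/* Volatile asm statements, in any of their RTL forms, also need a fake
   edge to the exit block.  */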
4775 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4776 && MEM_VOLATILE_P (PATTERN (insn)))
4777 || (GET_CODE (PATTERN (insn)) == PARALLEL
4778 && asm_noperands (insn) != -1
4779 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4780 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4781 }
4782
4783 /* Add fake edges to the function exit for any non-constant, non-noreturn
4784 calls and volatile inline assembly in the bitmap of blocks specified by
4785 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks
4786 that were split.
4787
4788 The goal is to expose cases in which entering a basic block does not imply
4789 that all subsequent instructions must be executed. */
4790
4791 static int
4792 rtl_flow_call_edges_add (sbitmap blocks)
4793 {
4794 int i;
4795 int blocks_split = 0;
4796 int last_bb = last_basic_block_for_fn (cfun);
4797 bool check_last_block = false;
4798
4799 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4800 return 0;
4801
4802 if (! blocks)
4803 check_last_block = true;
4804 else
4805 check_last_block = bitmap_bit_p (blocks,
4806 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4807
4808 /* In the last basic block, before epilogue generation, there will be
4809 a fallthru edge to EXIT. Special care is required if the last insn
4810 of the last basic block is a call because make_edge folds duplicate
4811 edges, which would result in the fallthru edge also being marked
4812 fake, which would result in the fallthru edge being removed by
4813 remove_fake_edges, which would result in an invalid CFG.
4814
4815 Moreover, we can't elide the outgoing fake edge, since the block
4816 profiler needs to take this into account in order to solve the minimal
4817 spanning tree in the case that the call doesn't return.
4818
4819 Handle this by adding a dummy instruction in a new last basic block. */
4820 if (check_last_block)
4821 {
4822 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4823 rtx_insn *insn = BB_END (bb);
4824
4825 /* Back up past insns that must be kept in the same block as a call. */
4826 while (insn != BB_HEAD (bb)
4827 && keep_with_call_p (insn))
4828 insn = PREV_INSN (insn);
4829
4830 if (need_fake_edge_p (insn))
4831 {
4832 edge e;
4833
4834 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4835 if (e)
4836 {
4837 insert_insn_on_edge (gen_use (const0_rtx), e);
4838 commit_edge_insertions ();
4839 }
4840 }
4841 }
4842
4843 /* Now add fake edges to the function exit for any non constant
4844 calls since there is no way that we can determine if they will
4845 return or not... */
4846
4847 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4848 {
4849 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4850 rtx_insn *insn;
4851 rtx_insn *prev_insn;
4852
4853 if (!bb)
4854 continue;
4855
4856 if (blocks && !bitmap_bit_p (blocks, i))
4857 continue;
4858
4859 for (insn = BB_END (bb); ; insn = prev_insn)
4860 {
4861 prev_insn = PREV_INSN (insn);
4862 if (need_fake_edge_p (insn))
4863 {
4864 edge e;
4865 rtx_insn *split_at_insn = insn;
4866
4867 /* Don't split the block between a call and an insn that should
4868 remain in the same block as the call. */
4869 if (CALL_P (insn))
4870 while (split_at_insn != BB_END (bb)
4871 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4872 split_at_insn = NEXT_INSN (split_at_insn);
4873
4874 /* The handling above of the final block before the epilogue
4875 should be enough to verify that there is no edge to the exit
4876 block in CFG already. Calling make_edge in such case would
4877 cause us to mark that edge as fake and remove it later. */
4878
4879 if (flag_checking && split_at_insn == BB_END (bb))
4880 {
4881 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4882 gcc_assert (e == NULL);
4883 }
4884
4885 /* Note that the following may create a new basic block
4886 and renumber the existing basic blocks. */
4887 if (split_at_insn != BB_END (bb))
4888 {
4889 e = split_block (bb, split_at_insn);
4890 if (e)
4891 blocks_split++;
4892 }
4893
4894 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4895 }
4896
4897 if (insn == BB_HEAD (bb))
4898 break;
4899 }
4900 }
4901
4902 if (blocks_split)
4903 verify_flow_info ();
4904
4905 return blocks_split;
4906 }
4907
4908 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4909 the conditional branch target, SECOND_HEAD should be the fall-thru;
4910 there is no need to handle the fall-thru here, as the loop versioning
4911 code handles it. SECOND_HEAD is needed for the condition on trees,
4912 and this should be of the same type since it is a hook. */
4913 static void
4914 rtl_lv_add_condition_to_bb (basic_block first_head ,
4915 basic_block second_head ATTRIBUTE_UNUSED,
4916 basic_block cond_bb, void *comp_rtx)
4917 {
4918 rtx_code_label *label;
4919 rtx_insn *seq, *jump;
4920 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4921 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4922 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4923 machine_mode mode;
4924
4925
4926 label = block_label (first_head);
4927 mode = GET_MODE (op0);
4928 if (mode == VOIDmode)
4929 mode = GET_MODE (op1);
4930
4931 start_sequence ();
4932 op0 = force_operand (op0, NULL_RTX);
4933 op1 = force_operand (op1, NULL_RTX);
4934 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label, -1);
4935 jump = get_last_insn ();
4936 JUMP_LABEL (jump) = label;
4937 LABEL_NUSES (label)++;
4938 seq = get_insns ();
4939 end_sequence ();
4940
4941 /* Emit the new condition sequence at the end of COND_BB. */
4942 emit_insn_after (seq, BB_END (cond_bb));
4943 }
4944
4945
4946 /* Given a block B with a conditional branch at its end, store the
4947 branch edge and the fall-thru edge in BRANCH_EDGE and FALLTHRU_EDGE
4948 respectively. */
4949 static void
4950 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4951 edge *fallthru_edge)
4952 {
4953 edge e = EDGE_SUCC (b, 0);
4954
4955 if (e->flags & EDGE_FALLTHRU)
4956 {
4957 *fallthru_edge = e;
4958 *branch_edge = EDGE_SUCC (b, 1);
4959 }
4960 else
4961 {
4962 *branch_edge = e;
4963 *fallthru_edge = EDGE_SUCC (b, 1);
4964 }
4965 }
4966
4967 void
4968 init_rtl_bb_info (basic_block bb)
4969 {
4970 gcc_assert (!bb->il.x.rtl);
4971 bb->il.x.head_ = NULL;
4972 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
4973 }
4974
4975 /* Returns true if it is possible to remove edge E by redirecting
4976 it to the destination of the other edge from E->src. */
4977
4978 static bool
4979 rtl_can_remove_branch_p (const_edge e)
4980 {
4981 const_basic_block src = e->src;
4982 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
4983 const rtx_insn *insn = BB_END (src);
4984 rtx set;
4985
4986 /* The conditions are taken from try_redirect_by_replacing_jump. */
4987 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
4988 return false;
4989
4990 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4991 return false;
4992
4993 if (BB_PARTITION (src) != BB_PARTITION (target))
4994 return false;
4995
4996 if (!onlyjump_p (insn)
4997 || tablejump_p (insn, NULL, NULL))
4998 return false;
4999
5000 set = single_set (insn);
5001 if (!set || side_effects_p (set))
5002 return false;
5003
5004 return true;
5005 }
5006
5007 static basic_block
5008 rtl_duplicate_bb (basic_block bb)
5009 {
5010 bb = cfg_layout_duplicate_bb (bb);
5011 bb->aux = NULL;
5012 return bb;
5013 }
5014
5015 /* Do book-keeping of basic block BB for the profile consistency checker.
5016 If AFTER_PASS is 0, do pre-pass accounting, or if AFTER_PASS is 1
5017 then do post-pass accounting. Store the counts in RECORD. */
5018 static void
5019 rtl_account_profile_record (basic_block bb, int after_pass,
5020 struct profile_record *record)
5021 {
5022 rtx_insn *insn;
5023 FOR_BB_INSNS (bb, insn)
5024 if (INSN_P (insn))
5025 {
5026 record->size[after_pass]
5027 += insn_rtx_cost (PATTERN (insn), false);
5028 if (profile_status_for_fn (cfun) == PROFILE_READ)
5029 record->time[after_pass]
5030 += insn_rtx_cost (PATTERN (insn), true) * bb->count;
5031 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5032 record->time[after_pass]
5033 += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
5034 }
5035 }
5036
5037 /* Implementation of CFG manipulation for linearized RTL. */
5038 struct cfg_hooks rtl_cfg_hooks = {
5039 "rtl",
5040 rtl_verify_flow_info,
5041 rtl_dump_bb,
5042 rtl_dump_bb_for_graph,
5043 rtl_create_basic_block,
5044 rtl_redirect_edge_and_branch,
5045 rtl_redirect_edge_and_branch_force,
5046 rtl_can_remove_branch_p,
5047 rtl_delete_block,
5048 rtl_split_block,
5049 rtl_move_block_after,
5050 rtl_can_merge_blocks, /* can_merge_blocks_p */
5051 rtl_merge_blocks,
5052 rtl_predict_edge,
5053 rtl_predicted_by_p,
5054 cfg_layout_can_duplicate_bb_p,
5055 rtl_duplicate_bb,
5056 rtl_split_edge,
5057 rtl_make_forwarder_block,
5058 rtl_tidy_fallthru_edge,
5059 rtl_force_nonfallthru,
5060 rtl_block_ends_with_call_p,
5061 rtl_block_ends_with_condjump_p,
5062 rtl_flow_call_edges_add,
5063 NULL, /* execute_on_growing_pred */
5064 NULL, /* execute_on_shrinking_pred */
5065 NULL, /* duplicate loop for trees */
5066 NULL, /* lv_add_condition_to_bb */
5067 NULL, /* lv_adjust_loop_header_phi*/
5068 NULL, /* extract_cond_bb_edges */
5069 NULL, /* flush_pending_stmts */
5070 rtl_block_empty_p, /* block_empty_p */
5071 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5072 rtl_account_profile_record,
5073 };
5074
5075 /* Implementation of CFG manipulation for cfg layout RTL, where
5076 basic blocks connected via fallthru edges do not have to be adjacent.
5077 This representation will hopefully become the default one in a future
5078 version of the compiler. */
5079
5080 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5081 "cfglayout mode",
5082 rtl_verify_flow_info_1,
5083 rtl_dump_bb,
5084 rtl_dump_bb_for_graph,
5085 cfg_layout_create_basic_block,
5086 cfg_layout_redirect_edge_and_branch,
5087 cfg_layout_redirect_edge_and_branch_force,
5088 rtl_can_remove_branch_p,
5089 cfg_layout_delete_block,
5090 cfg_layout_split_block,
5091 rtl_move_block_after,
5092 cfg_layout_can_merge_blocks_p,
5093 cfg_layout_merge_blocks,
5094 rtl_predict_edge,
5095 rtl_predicted_by_p,
5096 cfg_layout_can_duplicate_bb_p,
5097 cfg_layout_duplicate_bb,
5098 cfg_layout_split_edge,
5099 rtl_make_forwarder_block,
5100 NULL, /* tidy_fallthru_edge */
5101 rtl_force_nonfallthru,
5102 rtl_block_ends_with_call_p,
5103 rtl_block_ends_with_condjump_p,
5104 rtl_flow_call_edges_add,
5105 NULL, /* execute_on_growing_pred */
5106 NULL, /* execute_on_shrinking_pred */
5107 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5108 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5109 NULL, /* lv_adjust_loop_header_phi*/
5110 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5111 NULL, /* flush_pending_stmts */
5112 rtl_block_empty_p, /* block_empty_p */
5113 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5114 rtl_account_profile_record,
5115 };
5116
5117 #include "gt-cfgrtl.h"