/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
         delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
         insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
         purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
         fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to determine quickly the basic block for an insn
         compute_bb_for_insn, update_bb_for_insn, set_block_for_insn
     - Edge redirection with updating and optimizing of the insn chain
         block_label, tidy_fallthru_edge, force_nonfallthru  */
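
/* A minimal usage sketch of the edge-insertion API listed above, kept
   under "#if 0" because it is illustrative only and not part of this
   file; "gen_dummy_insn" is a hypothetical stand-in for whatever insn
   sequence a pass wants to materialize on the edge.  */
#if 0
static void
example_insert_on_edge (edge e)
{
  start_sequence ();
  emit_insn (gen_dummy_insn ());	/* hypothetical pattern */
  rtx_insn *seq = get_insns ();
  end_sequence ();
  insert_insn_on_edge (seq, e);
  /* Nothing appears in the insn stream until the commit below, which
     may split edges and create new basic blocks as needed.  */
  commit_edge_insertions ();
}
#endif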
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "predict.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "bb-reorder.h"
#include "regs.h"
#include "flags.h"
#include "except.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "obstack.h"
#include "insn-attr.h"
#include "insn-config.h"
#include "rtl.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "target.h"
#include "common/common-target.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "df.h"

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const rtx_note *);
static int can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, int);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static int
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static int
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
          /* User declared labels must be preserved.  */
          && LABEL_NAME (label) == 0
          && !in_insn_list_p (forced_labels, label));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx uncast_insn)
{
  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
         might be referenced via variables, the constant pool etc.
         Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
        {
          const char *name = LABEL_NAME (insn);
          basic_block bb = BLOCK_FOR_INSN (insn);
          rtx_insn *bb_note = NEXT_INSN (insn);

          really_delete = false;
          PUT_CODE (insn, NOTE);
          NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
          NOTE_DELETED_LABEL_NAME (insn) = name;

          /* If the note following the label starts a basic block, and the
             label is a member of the same basic block, interchange the two.  */
          if (bb_note != NULL_RTX
              && NOTE_INSN_BASIC_BLOCK_P (bb_note)
              && bb != NULL
              && bb == BLOCK_FOR_INSN (bb_note))
            {
              reorder_insns_nobb (insn, insn, bb_note);
              BB_HEAD (bb) = bb_note;
              if (BB_END (bb) == bb_note)
                BB_END (bb) = insn;
            }
        }

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
        df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
          && LABEL_P (JUMP_LABEL (insn)))
        LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
              = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
             && LABEL_P (XEXP (note, 0)))
        {
          LABEL_NUSES (XEXP (note, 0))--;
          remove_note (insn, note);
        }
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
         && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
        {
          rtx label = XEXP (RTVEC_ELT (vec, i), 0);

          /* When deleting code in bulk (e.g. removing many unreachable
             blocks) we can delete a label that's a target of the vector
             before deleting the vector itself.  */
          if (!NOTE_P (label))
            LABEL_NUSES (label)--;
        }
    }
}

/* Like delete_insn but also purge dead edges from BB.  */

void
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (INSN_P (insn)
      && BLOCK_FOR_INSN (insn)
      && BB_END (BLOCK_FOR_INSN (insn)) == insn)
    purge = true;
  delete_insn (insn);
  if (purge)
    purge_dead_edges (BLOCK_FOR_INSN (insn));
}
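
/* Illustrative only (hence "#if 0"): deleting a block-ending jump with
   delete_insn_and_edges lets purge_dead_edges drop the successor edges
   that the jump kept alive.  A sketch, assuming the caller already
   knows the jump is redundant.  */
#if 0
static void
example_delete_trailing_jump (basic_block bb)
{
  rtx_insn *end = BB_END (bb);
  if (JUMP_P (end))
    delete_insn_and_edges (end);
}
#endif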

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to
   NULL for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx finish, bool clear_bb)
{
  rtx_insn *prev, *current;

  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  current = safe_as_a <rtx_insn *> (finish);
  while (1)
    {
      prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
        ;
      else
        delete_insn (current);

      if (clear_bb && !current->deleted ())
        set_block_for_insn (current, NULL);

      if (current == start)
        break;
      current = prev;
    }
}
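
/* A sketch (illustrative only) of clearing out a block's body while
   keeping its head label and bb note; paired notes inside the range
   survive because delete_insn_chain skips whatever can_delete_note_p
   rejects.  */
#if 0
static void
example_clear_block_body (basic_block bb)
{
  rtx_insn *first = NEXT_INSN (bb_note (bb));
  if (first && first != NEXT_INSN (BB_END (bb)))
    delete_insn_chain (first, BB_END (bb), false);
}
#endif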
\f
/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction -
   it reuses the note and basic block struct in BB_NOTE, if any, and does not
   grow the BASIC_BLOCK chain; it should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at the
   end of the INSN chain.  AFTER is the basic block the new block should be
   placed after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
                              basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
        after = head;
      else
        {
          after = PREV_INSN (head);
          head = bb_note;
        }

      if (after != bb_note && NEXT_INSN (after) != bb_note)
        reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
        head = end = bb_note
          = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
        {
          bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
          if (head == end)
            end = bb_note;
        }
      else
        {
          bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
          head = bb_note;
          if (!end)
            end = head;
        }

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD and
   END and place it in the BB chain after block AFTER.  END can be NULL to
   create a new empty basic block before HEAD.  Both END and HEAD can be
   NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    {
      size_t new_size =
        (last_basic_block_for_fn (cfun)
         + (last_basic_block_for_fn (cfun) + 3) / 4);
      vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
    }

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}
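
/* Sketch: clients normally reach rtl_create_basic_block through the
   create_basic_block CFG hook rather than calling it directly.
   Illustrative only; HEAD and END delimit an already-emitted insn
   range, and passing NULL for both appends an empty block as
   documented above.  */
#if 0
static basic_block
example_make_block (rtx_insn *head, rtx_insn *end, basic_block after)
{
  return create_basic_block (head, end, after);
}
#endif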

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}
\f
/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);


  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}
\f
/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
        {
          BLOCK_FOR_INSN (insn) = bb;
          if (insn == end)
            break;
        }
    }
}

/* Release the basic_block_for_insn array.  */

unsigned int
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
  return 0;
}

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
#ifdef DELAY_SLOTS
  /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }
#endif

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the insn to emit code after when we want to emit code at the
   entry of the function.  */
rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
          BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */
void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}
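
/* Sketch of emit_insn_at_entry: an instrumentation-style pass could emit
   a single probe on the entry edge.  Illustrative only; "gen_probe_insn"
   is a hypothetical pattern generator, not a real backend pattern.  */
#if 0
static void
example_instrument_entry (void)
{
  emit_insn_at_entry (gen_probe_insn ());	/* hypothetical */
}
#endif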

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

\f
/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
        return false;
    }

  return true;
}
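
/* Illustrative sketch only: a forwarder block can typically be bypassed
   by redirecting each predecessor edge to the forwarder's single
   successor.  The iterator is not advanced on success because a
   successful redirection removes the edge from bb->preds; real callers
   would also update profile counts.  */
#if 0
static void
example_bypass_forwarder (basic_block bb)
{
  if (!forwarder_block_p (bb))
    return;
  basic_block dest = single_succ (bb);
  edge e;
  edge_iterator ei;
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    if (!redirect_edge_and_branch (e, dest))
      ei_next (&ei);
}
#endif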

/* Return nonzero if we can reach target from src by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return nonzero if we could reach target from src by falling through,
   if the target was made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}
\f
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Creates a new basic block just after basic block BB by splitting off
   everything after the specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
        {
          rtx_insn *next = insn;

          insn = PREV_INSN (insn);

          /* If the block contains only debug insns, insn would have
             been NULL in a non-debug compilation, and then we'd end
             up emitting a DELETED note.  For -fcompare-debug
             stability, emit the note too.  */
          if (insn != BB_END (bb)
              && DEBUG_INSN_P (next)
              && DEBUG_INSN_P (BB_END (bb)))
            {
              while (next != BB_END (bb) && DEBUG_INSN_P (next))
                next = NEXT_INSN (next);

              if (next == BB_END (bb))
                emit_note_after (NOTE_INSN_DELETED, next);
            }
        }
      else
        insn = get_last_insn ();
    }

  /* We probably should check the type of the insn so that we do not create
     an inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}
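
/* Sketch: splitting is normally requested through the split_block CFG
   hook; the new block inherits BB's successors while BB is left with a
   single fallthru edge to it.  Illustrative only.  */
#if 0
static basic_block
example_split_after (basic_block bb, rtx_insn *insn)
{
  edge e = split_block (bb, insn);	/* returns the new fallthru edge */
  return e->dest;			/* the newly created block */
}
#endif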

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && INSN_LOCATION (insn) == goto_locus)
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
        insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
          && INSN_LOCATION (insn) == goto_locus)
        return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
  int b_empty = 0;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
             a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
         in particular at the end of a function.  */
      if (b_head == b_end)
        b_empty = 1;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
        b_empty = 1;
      if (! del_last)
        del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
        if (!NOTE_P (prev)
            || NOTE_INSN_BASIC_BLOCK_P (prev)
            || prev == BB_HEAD (a))
          break;

      del_first = a_end;

      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (HAVE_cc0 && only_sets_cc0_p (prev))
        {
          rtx_insn *tmp = prev;

          prev = prev_nonnote_insn (prev);
          if (!prev)
            prev = BB_HEAD (a);
          del_first = tmp;
        }

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* When not optimizing and the edge is the only place in RTL which holds
     some unique locus, emit a nop with that locus in between.  */
  if (!optimize)
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
         and the debug insns that make up B after the debug insns,
         bringing the debug insns into A while keeping the notes after
         the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
        reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
                            b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  /* If B was a forwarder block, propagate the locus on the edge.  */
  if (forwarder_p
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}


/* Return true when block A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
          && single_succ (a) == b
          && single_pred_p (b)
          && a != b
          /* Must be simple edge.  */
          && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
          && a->next_bb == b
          && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
          && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
          /* If the jump insn has side effects,
             we can't kill the edge.  */
          && (!JUMP_P (BB_END (a))
              || (reload_completed
                  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
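
/* Sketch of the usual guarded-merge pattern built on the predicates
   above; can_merge_blocks_p and merge_blocks are the cfghooks wrappers
   that dispatch to rtl_can_merge_blocks and rtl_merge_blocks in cfgrtl
   mode.  Illustrative only.  */
#if 0
static bool
example_try_merge (basic_block a)
{
  basic_block b = a->next_bb;
  if (b != EXIT_BLOCK_PTR_FOR_FN (cfun) && can_merge_blocks_p (a, b))
    {
      merge_blocks (a, b);
      return true;
    }
  return false;
}
#endif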
\f
/* Return the label in the head of basic block BLOCK.  Create one if it doesn't
   exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    {
      BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
    }

  return as_a <rtx_code_label *> (BB_HEAD (block));
}

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction by an unconditional jump or removing the jump completely.
   This can apply only if all edges now point to the same block.  The
   parameters and return values are equivalent to redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src), *kill_from;
  rtx set;
  int fallthru = 0;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
         edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
          && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
      && only_sets_cc0_p (PREV_INSN (insn)))
    kill_from = PREV_INSN (insn);

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
        fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink whole insn chain.  */
      if (in_cfglayout)
        {
          rtx_insn *insn = BB_FOOTER (src);

          delete_insn_chain (kill_from, BB_END (src), false);

          /* Remove barriers but keep jumptables.  */
          while (insn)
            {
              if (BARRIER_P (insn))
                {
                  if (PREV_INSN (insn))
                    SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
                  else
                    BB_FOOTER (src) = NEXT_INSN (insn);
                  if (NEXT_INSN (insn))
                    SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
                }
              if (LABEL_P (insn))
                break;
              insn = NEXT_INSN (insn);
            }
        }
      else
        delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
                           false);
    }

  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
        return NULL;
      if (dump_file)
        fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
                 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
                          block_label (target), 0))
        {
          gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
          return NULL;
        }
    }

  /* Cannot do anything for target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
        fprintf (dump_file, "Replacing insn %i by jump %i\n",
                 INSN_UID (insn), INSN_UID (BB_END (src)));


      delete_insn_chain (kill_from, insn, false);

      /* Recognize a tablejump that we are converting to a
         simple jump and remove its associated CODE_LABEL
         and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
        delete_insn_chain (label, table, false);

      barrier = next_nonnote_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
        emit_barrier_after (BB_END (src));
      else
        {
          if (barrier != NEXT_INSN (BB_END (src)))
            {
              /* Move the jump before barrier so that the notes
                 which originally were or were created before jump table are
                 inside the basic block.  */
              rtx_insn *new_insn = BB_END (src);

              update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
                                        PREV_INSN (barrier), src);

              SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
              SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

              SET_NEXT_INSN (new_insn) = barrier;
              SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

              SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
              SET_PREV_INSN (barrier) = new_insn;
            }
        }
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this
   only when it originally jumped to OLD_LABEL.  Return true if this
   worked or the original target wasn't OLD_LABEL; return false if
   redirection doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
        return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
        if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
          {
            RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
            --LABEL_NUSES (old_label);
            ++LABEL_NUSES (new_label);
          }

      /* Handle casesi dispatch insns.  */
      if ((tmp = single_set (insn)) != NULL
          && SET_DEST (tmp) == pc_rtx
          && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
          && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
          && LABEL_REF_LABEL (XEXP (SET_SRC (tmp), 2)) == old_label)
        {
          XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
                                                       new_label);
          --LABEL_NUSES (old_label);
          ++LABEL_NUSES (new_label);
        }
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
        return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
        {
          rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
          gcc_assert (GET_CODE (old_ref) == LABEL_REF);
          if (XEXP (old_ref, 0) == old_label)
            {
              ASM_OPERANDS_LABEL (tmp, i)
                = gen_rtx_LABEL_REF (Pmode, new_label);
              --LABEL_NUSES (old_label);
              ++LABEL_NUSES (new_label);
            }
        }

      if (JUMP_LABEL (insn) == old_label)
        {
          JUMP_LABEL (insn) = new_label;
          note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
          if (note)
            remove_note (insn, note);
        }
      else
        {
          note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
          if (note)
            remove_note (insn, note);
          if (JUMP_LABEL (insn) != new_label
              && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
            add_reg_note (insn, REG_LABEL_TARGET, new_label);
        }
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
             != NULL_RTX)
        XEXP (note, 0) = new_label;
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
         one basic block to the other in case only one computed_jump is
         available.  */
      if (computed_jump_p (insn)
          /* A return instruction can't be redirected.  */
          || returnjump_p (insn))
        return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
        {
          /* If the insn doesn't go where we think, we're confused.  */
          gcc_assert (JUMP_LABEL (insn) == old_label);

          /* If the substitution doesn't succeed, die.  This can happen
             if the back end emitted unrecognizable instructions or if
             target is exit block on some arches.  */
          if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
                              block_label (new_bb), 0))
            {
              gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
              return false;
            }
        }
    }
  return true;
}


/* Redirect the edge representing a branch of an (un)conditional jump or
   tablejump; return NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
        return NULL;
    }
  else
    /* When expanding this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
                                             old_label, target))
        return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
             e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
      == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src))
          && !CROSSING_JUMP_P (BB_END (e->src)))
        CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from jump at end of
         src if it exists, and if no other successors are
         still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
        {
          bool has_crossing_succ = false;
          edge e2;
          edge_iterator ei;
          FOR_EACH_EDGE (e2, ei, e->src->succs)
            {
              has_crossing_succ |= (e2->flags & EDGE_CROSSING);
              if (has_crossing_succ)
                break;
            }
          if (!has_crossing_succ)
            CROSSING_JUMP_P (BB_END (e->src)) = 0;
        }
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
         Note that force_nonfallthru will do any necessary partition
         boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
          && BB_PARTITION (bb) != BB_PARTITION (e->dest)
          && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
        force_nonfallthru (e);
      else
        fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do so at
   the expense of adding new instructions or reordering basic blocks.

   The function can also be called with the edge destination equal to
   TARGET.  It should then try the simplifications and do nothing if
   none is possible.

   Return the edge representing the branch if the transformation
   succeeded.  Return NULL on failure.
   We still return NULL if E already pointed to TARGET and we did not
   manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}
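
/* Sketch of the caller-side contract: try the cheap redirection through
   the redirect_edge_and_branch hook first, and fall back to the force
   variant, which may create a new jump block.  Illustrative only.  */
#if 0
static void
example_redirect (edge e, basic_block target)
{
  if (!redirect_edge_and_branch (e, target) && e->dest != target)
    redirect_edge_and_branch_force (e, target);
}
#endif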

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
              || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
        {
          rtx_insn *footer_tail = BB_FOOTER (bb);

          while (NEXT_INSN (footer_tail))
            footer_tail = NEXT_INSN (footer_tail);
          if (!BARRIER_P (footer_tail))
            {
              SET_NEXT_INSN (footer_tail) = insn;
              SET_PREV_INSN (insn) = footer_tail;
            }
        }
      else
        BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In case the last instruction is a conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect the fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
                                  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
        {
          int prob = XINT (note, 0);

          b->probability = prob;
          /* Update this to use GCOV_COMPUTE_SCALE.  */
          b->count = e->count * prob / REG_BR_PROB_BASE;
          e->probability -= e->probability;
          e->count -= b->count;
          if (e->probability < 0)
            e->probability = 0;
          if (e->count < 0)
            e->count = 0;
        }
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - fallthru edge to the same block as abnormal
         edge.
         We can't redirect abnormal edge, but we still can split the fallthru
         one and create separate abnormal edge to original destination.
         This allows bb-reorder to make such edge non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        {
          /* We can't redirect the entry block.  Create an empty block
             at the start of the function which we use to add the new
             jump.  */
          edge tmp;
          edge_iterator ei;
          bool found = false;

          basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
                                               ENTRY_BLOCK_PTR_FOR_FN (cfun));

          /* Change the existing edge's source to be the new block, and add
             a new edge from the entry block to the new block.  */
          e->src = bb;
          for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
               (tmp = ei_safe_edge (ei)); )
            {
              if (tmp == e)
                {
                  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
                  found = true;
                  break;
                }
              else
                ei_next (&ei);
            }

          gcc_assert (found);

          vec_safe_push (bb->succs, e);
          make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
                                 EDGE_FALLTHRU);
        }
    }

  /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
        {
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
            {
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
              XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
              adjust_jump_target = true;
            }
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
            asm_goto_edge = true;
        }
      if (adjust_jump_target)
        {
          rtx_insn *insn = BB_END (e->src);
          rtx note;
          rtx_insn *old_label = BB_HEAD (e->dest);
          rtx_insn *new_label = BB_HEAD (target);

          if (JUMP_LABEL (insn) == old_label)
            {
              JUMP_LABEL (insn) = new_label;
              note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
              if (note)
                remove_note (insn, note);
            }
          else
            {
              note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
              if (note)
                remove_note (insn, note);
              if (JUMP_LABEL (insn) != new_label
                  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
                add_reg_note (insn, REG_LABEL_TARGET, new_label);
            }
          while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
                 != NULL_RTX)
            XEXP (note, 0) = new_label;
        }
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      gcov_type count = e->count;
      int probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
         by searching forward from there.  Otherwise start searching
         forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
        new_head = table;
      else
        new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;
      jump_block->frequency = EDGE_FREQUENCY (e);

      /* Make sure new block ends up in correct hot/cold section.  */

      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;
      new_edge->count = count;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = REG_BR_PROB_BASE;

      /* If e->src was previously region crossing, it no longer is
         and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If asm goto has any label refs to target's label,
         add also edge from asm goto bb to target.  */
      if (asm_goto_edge)
        {
          new_edge->probability /= 2;
          new_edge->count /= 2;
          jump_block->count /= 2;
          jump_block->frequency /= 2;
          new_edge = make_edge (new_edge->src, target,
                                e->flags & ~EDGE_FALLTHRU);
          new_edge->probability = probability - probability / 2;
          new_edge->count = count - count / 2;
        }

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
        emit_jump_insn_after_setloc (targetm.gen_return (),
                                     BB_END (jump_block), loc);
      else
        {
          gcc_assert (jump_label == simple_return_rtx);
          emit_jump_insn_after_setloc (targetm.gen_simple_return (),
                                       BB_END (jump_block), loc);
        }
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (gen_jump (label), BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
   (and possibly create a new basic block) to make the edge non-fallthru.
   Return the newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect edge even at the expense of creating a new jump insn or
   basic block.  Return the new basic block if created, NULL otherwise.
   Conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect the newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
          || single_succ_p (b)))
    {
      rtx label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
        {
          /* The label is likely mentioned in some instruction before
             the tablejump and might not be DCEd, so turn it into
             a note instead and move before the tablejump that is going to
             be deleted.  */
          const char *name = LABEL_NAME (label);
          PUT_CODE (label, NOTE);
          NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
          NOTE_DELETED_LABEL_NAME (label) = name;
          rtx_insn *lab = safe_as_a <rtx_insn *> (label);
          reorder_insns (lab, lab, PREV_INSN (q));
          delete_insn (table);
        }

      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
        q = PREV_INSN (q);

      q = PREV_INSN (q);
    }

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
\f
/* Should move basic block BB after basic block AFTER.  Not implemented
   yet.  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
                      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
        return bb;
    }
  /* Return bb before the exit block.  */
  return bb->prev_bb;
}
1844
1845 /* Split a (typically critical) edge. Return the new block.
1846 The edge must not be abnormal.
1847
1848 ??? The code generally expects to be called on critical edges.
1849 The case of a block ending in an unconditional jump to a
1850 block with multiple predecessors is not handled optimally. */
1851
1852 static basic_block
1853 rtl_split_edge (edge edge_in)
1854 {
1855 basic_block bb, new_bb;
1856 rtx_insn *before;
1857
1858 /* Abnormal edges cannot be split. */
1859 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1860
1861 /* We are going to place the new block in front of edge destination.
1862 Avoid existence of fallthru predecessors. */
1863 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1864 {
1865 edge e = find_fallthru_edge (edge_in->dest->preds);
1866
1867 if (e)
1868 force_nonfallthru (e);
1869 }
1870
1871 /* Create the basic block note. */
1872 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1873 before = BB_HEAD (edge_in->dest);
1874 else
1875 before = NULL;
1876
1877 /* If this is a fall through edge to the exit block, the blocks might
1878 not be adjacent, and the right place is after the source. */
1879 if ((edge_in->flags & EDGE_FALLTHRU)
1880 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1881 {
1882 before = NEXT_INSN (BB_END (edge_in->src));
1883 bb = create_basic_block (before, NULL, edge_in->src);
1884 BB_COPY_PARTITION (bb, edge_in->src);
1885 }
1886 else
1887 {
1888 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1889 {
1890 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1891 BB_COPY_PARTITION (bb, edge_in->dest);
1892 }
1893 else
1894 {
1895 basic_block after = edge_in->dest->prev_bb;
1896 /* If this is post-bb reordering, and the edge crosses a partition
1897 boundary, the new block needs to be inserted in the bb chain
1898 at the end of the src partition (since we put the new bb into
1899 that partition, see below). Otherwise we may end up creating
1900 an extra partition crossing in the chain, which is illegal.
1901 It can't go after the src, because src may have a fall-through
1902 to a different block. */
1903 if (crtl->bb_reorder_complete
1904 && (edge_in->flags & EDGE_CROSSING))
1905 {
1906 after = last_bb_in_partition (edge_in->src);
1907 before = get_last_bb_insn (after);
1908 /* The instruction following the last bb in partition should
1909 be a barrier, since it cannot end in a fall-through. */
1910 gcc_checking_assert (BARRIER_P (before));
1911 before = NEXT_INSN (before);
1912 }
1913 bb = create_basic_block (before, NULL, after);
1914 /* Put the split bb into the src partition, to avoid creating
1915 a situation where a cold bb dominates a hot bb, in the case
1916 where src is cold and dest is hot. The src will dominate
1917 the new bb (whereas it might not have dominated dest). */
1918 BB_COPY_PARTITION (bb, edge_in->src);
1919 }
1920 }
1921
1922 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1923
1924 /* Can't allow a region crossing edge to be fallthrough. */
1925 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1926 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1927 {
1928 new_bb = force_nonfallthru (single_succ_edge (bb));
1929 gcc_assert (!new_bb);
1930 }
1931
1932 /* For non-fallthru edges, we must adjust the predecessor's
1933 jump instruction to target our new block. */
1934 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1935 {
1936 edge redirected = redirect_edge_and_branch (edge_in, bb);
1937 gcc_assert (redirected);
1938 }
1939 else
1940 {
1941 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1942 {
1943 /* For asm goto even splitting of fallthru edge might
1944 need insn patching, as other labels might point to the
1945 old label. */
1946 rtx_insn *last = BB_END (edge_in->src);
1947 if (last
1948 && JUMP_P (last)
1949 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1950 && extract_asm_operands (PATTERN (last)) != NULL_RTX
1951 && patch_jump_insn (last, before, bb))
1952 df_set_bb_dirty (edge_in->src);
1953 }
1954 redirect_edge_succ (edge_in, bb);
1955 }
1956
1957 return bb;
1958 }
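/* A minimal usage sketch (hypothetical caller): passes normally reach
   this routine through the cfghooks wrapper rather than directly, e.g.

       basic_block new_bb = split_edge (e);

   after which NEW_BB holds only a basic block note and falls through
   to the original destination of E. */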
1959
1960 /* Queue instructions for insertion on an edge between two basic blocks.
1961 The new instructions and basic blocks (if any) will not appear in the
1962 CFG until commit_edge_insertions is called. */
1963
1964 void
1965 insert_insn_on_edge (rtx pattern, edge e)
1966 {
1967 /* We cannot insert instructions on an abnormal critical edge.
1968 It will be easier to find the culprit if we die now. */
1969 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1970
1971 if (e->insns.r == NULL_RTX)
1972 start_sequence ();
1973 else
1974 push_to_sequence (e->insns.r);
1975
1976 emit_insn (pattern);
1977
1978 e->insns.r = get_insns ();
1979 end_sequence ();
1980 }
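/* A minimal sketch of the intended idiom (hypothetical pass code; REG
   is assumed to be some pseudo the pass owns):

       start_sequence ();
       emit_move_insn (reg, const0_rtx);
       rtx_insn *seq = get_insns ();
       end_sequence ();
       insert_insn_on_edge (seq, e);
       ...
       commit_edge_insertions ();

   Nothing appears in the insn stream until commit_edge_insertions is
   called, which may split edges and create new basic blocks. */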
1981
1982 /* Update the CFG for the instructions queued on edge E. */
1983
1984 void
1985 commit_one_edge_insertion (edge e)
1986 {
1987 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1988 basic_block bb;
1989
1990 /* Pull the insns off the edge now since the edge might go away. */
1991 insns = e->insns.r;
1992 e->insns.r = NULL;
1993
1994 /* Figure out where to put these insns. If the destination has
1995 one predecessor, insert there. Except for the exit block. */
1996 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1997 {
1998 bb = e->dest;
1999
2000 /* Get the location correct wrt a code label, and "nice" wrt
2001 a basic block note, and before everything else. */
2002 tmp = BB_HEAD (bb);
2003 if (LABEL_P (tmp))
2004 tmp = NEXT_INSN (tmp);
2005 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2006 tmp = NEXT_INSN (tmp);
2007 if (tmp == BB_HEAD (bb))
2008 before = tmp;
2009 else if (tmp)
2010 after = PREV_INSN (tmp);
2011 else
2012 after = get_last_insn ();
2013 }
2014
2015 /* If the source has one successor and the edge is not abnormal,
2016 insert there. Except for the entry block.
2017 Don't do this if the predecessor ends in a jump other than an
2018 unconditional simple jump. E.g. for an asm goto that points all
2019 its labels at the fallthru basic block, we can't insert instructions
2020 before the asm goto, as the asm goto can have various side effects,
2021 and we can't emit instructions after the asm goto, as it must end
2022 the basic block. */
2023 else if ((e->flags & EDGE_ABNORMAL) == 0
2024 && single_succ_p (e->src)
2025 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2026 && (!JUMP_P (BB_END (e->src))
2027 || simplejump_p (BB_END (e->src))))
2028 {
2029 bb = e->src;
2030
2031 /* It is possible to have a non-simple jump here. Consider a target
2032 where some forms of unconditional jumps clobber a register. This
2033 happens on the fr30 for example.
2034
2035 We know this block has a single successor, so we can just emit
2036 the queued insns before the jump. */
2037 if (JUMP_P (BB_END (bb)))
2038 before = BB_END (bb);
2039 else
2040 {
2041 /* We'd better be fallthru, or we've lost track of what's what. */
2042 gcc_assert (e->flags & EDGE_FALLTHRU);
2043
2044 after = BB_END (bb);
2045 }
2046 }
2047
2048 /* Otherwise we must split the edge. */
2049 else
2050 {
2051 bb = split_edge (e);
2052
2053 /* If E crossed a partition boundary, we needed to make bb end in
2054 a region-crossing jump, even though it was originally fallthru. */
2055 if (JUMP_P (BB_END (bb)))
2056 before = BB_END (bb);
2057 else
2058 after = BB_END (bb);
2059 }
2060
2061 /* Now that we've found the spot, do the insertion. */
2062 if (before)
2063 {
2064 emit_insn_before_noloc (insns, before, bb);
2065 last = prev_nonnote_insn (before);
2066 }
2067 else
2068 last = emit_insn_after_noloc (insns, after, bb);
2069
2070 if (returnjump_p (last))
2071 {
2072 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2073 This is not currently a problem because this only happens
2074 for the (single) epilogue, which already has a fallthru edge
2075 to EXIT. */
2076
2077 e = single_succ_edge (bb);
2078 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2079 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2080
2081 e->flags &= ~EDGE_FALLTHRU;
2082 emit_barrier_after (last);
2083
2084 if (before)
2085 delete_insn (before);
2086 }
2087 else
2088 gcc_assert (!JUMP_P (last));
2089 }
2090
2091 /* Update the CFG for all queued instructions. */
2092
2093 void
2094 commit_edge_insertions (void)
2095 {
2096 basic_block bb;
2097
2098 /* Optimization passes that invoke this routine can cause hot blocks
2099 previously reached by both hot and cold blocks to become dominated only
2100 by cold blocks. This will cause the verification below to fail,
2101 and leave now-cold code in the hot section. In some cases this
2102 may only be visible after newly unreachable blocks are deleted,
2103 which will be done by fixup_partitions. */
2104 fixup_partitions ();
2105
2106 #ifdef ENABLE_CHECKING
2107 verify_flow_info ();
2108 #endif
2109
2110 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2111 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2112 {
2113 edge e;
2114 edge_iterator ei;
2115
2116 FOR_EACH_EDGE (e, ei, bb->succs)
2117 if (e->insns.r)
2118 commit_one_edge_insertion (e);
2119 }
2120 }
2121 \f
2122
2123 /* Print out RTL-specific basic block information (live information
2124 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2125 documented in dumpfile.h. */
2126
2127 static void
2128 rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
2129 {
2130 rtx_insn *insn;
2131 rtx_insn *last;
2132 char *s_indent;
2133
2134 s_indent = (char *) alloca ((size_t) indent + 1);
2135 memset (s_indent, ' ', (size_t) indent);
2136 s_indent[indent] = '\0';
2137
2138 if (df && (flags & TDF_DETAILS))
2139 {
2140 df_dump_top (bb, outf);
2141 putc ('\n', outf);
2142 }
2143
2144 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2145 for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
2146 insn = NEXT_INSN (insn))
2147 {
2148 if (flags & TDF_DETAILS)
2149 df_dump_insn_top (insn, outf);
2150 if (! (flags & TDF_SLIM))
2151 print_rtl_single (outf, insn);
2152 else
2153 dump_insn_slim (outf, insn);
2154 if (flags & TDF_DETAILS)
2155 df_dump_insn_bottom (insn, outf);
2156 }
2157
2158 if (df && (flags & TDF_DETAILS))
2159 {
2160 df_dump_bottom (bb, outf);
2161 putc ('\n', outf);
2162 }
2163
2164 }
2165 \f
2166 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2167 for the start of each basic block. FLAGS are the TDF_* masks documented
2168 in dumpfile.h. */
2169
2170 void
2171 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, int flags)
2172 {
2173 const rtx_insn *tmp_rtx;
2174 if (rtx_first == 0)
2175 fprintf (outf, "(nil)\n");
2176 else
2177 {
2178 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2179 int max_uid = get_max_uid ();
2180 basic_block *start = XCNEWVEC (basic_block, max_uid);
2181 basic_block *end = XCNEWVEC (basic_block, max_uid);
2182 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2183 basic_block bb;
2184
2185 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2186 insns, but the CFG is not maintained so the basic block info
2187 is not reliable. Therefore it's omitted from the dumps. */
2188 if (! (cfun->curr_properties & PROP_cfg))
2189 flags &= ~TDF_BLOCKS;
2190
2191 if (df)
2192 df_dump_start (outf);
2193
2194 if (flags & TDF_BLOCKS)
2195 {
2196 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2197 {
2198 rtx_insn *x;
2199
2200 start[INSN_UID (BB_HEAD (bb))] = bb;
2201 end[INSN_UID (BB_END (bb))] = bb;
2202 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2203 {
2204 enum bb_state state = IN_MULTIPLE_BB;
2205
2206 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2207 state = IN_ONE_BB;
2208 in_bb_p[INSN_UID (x)] = state;
2209
2210 if (x == BB_END (bb))
2211 break;
2212 }
2213 }
2214 }
2215
2216 for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
2217 {
2218 if (flags & TDF_BLOCKS)
2219 {
2220 bb = start[INSN_UID (tmp_rtx)];
2221 if (bb != NULL)
2222 {
2223 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
2224 if (df && (flags & TDF_DETAILS))
2225 df_dump_top (bb, outf);
2226 }
2227
2228 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2229 && !NOTE_P (tmp_rtx)
2230 && !BARRIER_P (tmp_rtx))
2231 fprintf (outf, ";; Insn is not within a basic block\n");
2232 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2233 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2234 }
2235
2236 if (flags & TDF_DETAILS)
2237 df_dump_insn_top (tmp_rtx, outf);
2238 if (! (flags & TDF_SLIM))
2239 print_rtl_single (outf, tmp_rtx);
2240 else
2241 dump_insn_slim (outf, tmp_rtx);
2242 if (flags & TDF_DETAILS)
2243 df_dump_insn_bottom (tmp_rtx, outf);
2244
2245 if (flags & TDF_BLOCKS)
2246 {
2247 bb = end[INSN_UID (tmp_rtx)];
2248 if (bb != NULL)
2249 {
2250 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
2251 if (df && (flags & TDF_DETAILS))
2252 df_dump_bottom (bb, outf);
2253 putc ('\n', outf);
2254 }
2255 }
2256 }
2257
2258 free (start);
2259 free (end);
2260 free (in_bb_p);
2261 }
2262 }
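/* For example (a sketch), the whole function can be inspected from a
   debugger with

       print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_SLIM);

   which prints one slim line per insn together with the basic block
   boundaries; compare the similar diagnostic call in rtl_verify_edges
   below. */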
2263 \f
2264 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2265
2266 void
2267 update_br_prob_note (basic_block bb)
2268 {
2269 rtx note;
2270 if (!JUMP_P (BB_END (bb)))
2271 return;
2272 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2273 if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
2274 return;
2275 XINT (note, 0) = BRANCH_EDGE (bb)->probability;
2276 }
2277
2278 /* Get the last insn associated with block BB (this includes any barriers
2279 and tablejumps after BB). */
2280 rtx_insn *
2281 get_last_bb_insn (basic_block bb)
2282 {
2283 rtx_jump_table_data *table;
2284 rtx_insn *tmp;
2285 rtx_insn *end = BB_END (bb);
2286
2287 /* Include any jump table following the basic block. */
2288 if (tablejump_p (end, NULL, &table))
2289 end = table;
2290
2291 /* Include any barriers that may follow the basic block. */
2292 tmp = next_nonnote_insn_bb (end);
2293 while (tmp && BARRIER_P (tmp))
2294 {
2295 end = tmp;
2296 tmp = next_nonnote_insn_bb (end);
2297 }
2298
2299 return end;
2300 }
2301
2302 /* Sanity check partition hotness to ensure that basic blocks in
2303   the cold partition don't dominate basic blocks in the hot partition.
2304 If FLAG_ONLY is true, report violations as errors. Otherwise
2305 re-mark the dominated blocks as cold, since this is run after
2306 cfg optimizations that may make hot blocks previously reached
2307 by both hot and cold blocks now only reachable along cold paths. */
2308
2309 static vec<basic_block>
2310 find_partition_fixes (bool flag_only)
2311 {
2312 basic_block bb;
2313 vec<basic_block> bbs_in_cold_partition = vNULL;
2314 vec<basic_block> bbs_to_fix = vNULL;
2315
2316 /* Callers check this. */
2317 gcc_checking_assert (crtl->has_bb_partition);
2318
2319 FOR_EACH_BB_FN (bb, cfun)
2320 if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
2321 bbs_in_cold_partition.safe_push (bb);
2322
2323 if (bbs_in_cold_partition.is_empty ())
2324 return vNULL;
2325
2326 bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
2327
2328 if (dom_calculated_here)
2329 calculate_dominance_info (CDI_DOMINATORS);
2330
2331 while (! bbs_in_cold_partition.is_empty ())
2332 {
2333 bb = bbs_in_cold_partition.pop ();
2334 /* Any blocks dominated by a block in the cold section
2335 must also be cold. */
2336 basic_block son;
2337 for (son = first_dom_son (CDI_DOMINATORS, bb);
2338 son;
2339 son = next_dom_son (CDI_DOMINATORS, son))
2340 {
2341 /* If son is not yet cold, then mark it cold here and
2342 enqueue it for further processing. */
2343 if ((BB_PARTITION (son) != BB_COLD_PARTITION))
2344 {
2345 if (flag_only)
2346 error ("non-cold basic block %d dominated "
2347 "by a block in the cold partition (%d)", son->index, bb->index);
2348 else
2349 BB_SET_PARTITION (son, BB_COLD_PARTITION);
2350 bbs_to_fix.safe_push (son);
2351 bbs_in_cold_partition.safe_push (son);
2352 }
2353 }
2354 }
2355
2356 if (dom_calculated_here)
2357 free_dominance_info (CDI_DOMINATORS);
2358
2359 return bbs_to_fix;
2360 }
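/* A worked example (hypothetical CFG): suppose hot block H had a hot
   predecessor and a cold predecessor C, and the hot path has just been
   deleted as unreachable. C now dominates H, so the loop above either
   reports H (when FLAG_ONLY) or re-marks it cold and pushes it, so
   that blocks dominated by H are reconsidered in turn. */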
2361
2362 /* Perform cleanup on the hot/cold bb partitioning after optimization
2363 passes that modify the cfg. */
2364
2365 void
2366 fixup_partitions (void)
2367 {
2368 basic_block bb;
2369
2370 if (!crtl->has_bb_partition)
2371 return;
2372
2373 /* Delete any blocks that became unreachable and weren't
2374 already cleaned up, for example during edge forwarding
2375 and convert_jumps_to_returns. This will expose more
2376 opportunities for fixing the partition boundaries here.
2377 Also, the calculation of the dominance graph during verification
2378 will assert if there are unreachable nodes. */
2379 delete_unreachable_blocks ();
2380
2381 /* If there are partitions, do a sanity check on them: A basic block in
2382   a cold partition cannot dominate a basic block in a hot partition.
2383 Fixup any that now violate this requirement, as a result of edge
2384 forwarding and unreachable block deletion.  */
2385 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2386
2387 /* Do the partition fixup after all necessary blocks have been converted to
2388 cold, so that we only update the region crossings the minimum number of
2389 places, which can require forcing edges to be non fallthru. */
2390 while (! bbs_to_fix.is_empty ())
2391 {
2392 bb = bbs_to_fix.pop ();
2393 fixup_new_cold_bb (bb);
2394 }
2395 }
2396
2397 /* Verify, in the basic block chain, that there is at most one switch
2398 between hot/cold partitions. This condition will not be true until
2399 after reorder_basic_blocks is called. */
2400
2401 static int
2402 verify_hot_cold_block_grouping (void)
2403 {
2404 basic_block bb;
2405 int err = 0;
2406 bool switched_sections = false;
2407 int current_partition = BB_UNPARTITIONED;
2408
2409 /* Even after bb reordering is complete, we go into cfglayout mode
2410 again (in compgoto). Ensure we don't call this before going back
2411 into linearized RTL when any layout fixes would have been committed. */
2412 if (!crtl->bb_reorder_complete
2413 || current_ir_type () != IR_RTL_CFGRTL)
2414 return err;
2415
2416 FOR_EACH_BB_FN (bb, cfun)
2417 {
2418 if (current_partition != BB_UNPARTITIONED
2419 && BB_PARTITION (bb) != current_partition)
2420 {
2421 if (switched_sections)
2422 {
2423 error ("multiple hot/cold transitions found (bb %i)",
2424 bb->index);
2425 err = 1;
2426 }
2427 else
2428 switched_sections = true;
2429
2430 if (!crtl->has_bb_partition)
2431 error ("partition found but function partition flag not set");
2432 }
2433 current_partition = BB_PARTITION (bb);
2434 }
2435
2436 return err;
2437 }
2438 \f
2439
2440 /* Perform several checks on the edges out of each block, such as
2441 the consistency of the branch probabilities, the correctness
2442 of hot/cold partition crossing edges, and the number of expected
2443 successor edges. Also verify that the dominance relationship
2444 between hot/cold blocks is sane. */
2445
2446 static int
2447 rtl_verify_edges (void)
2448 {
2449 int err = 0;
2450 basic_block bb;
2451
2452 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2453 {
2454 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2455 int n_eh = 0, n_abnormal = 0;
2456 edge e, fallthru = NULL;
2457 edge_iterator ei;
2458 rtx note;
2459 bool has_crossing_edge = false;
2460
2461 if (JUMP_P (BB_END (bb))
2462 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2463 && EDGE_COUNT (bb->succs) >= 2
2464 && any_condjump_p (BB_END (bb)))
2465 {
2466 if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
2467 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2468 {
2469 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2470 XINT (note, 0), BRANCH_EDGE (bb)->probability);
2471 err = 1;
2472 }
2473 }
2474
2475 FOR_EACH_EDGE (e, ei, bb->succs)
2476 {
2477 bool is_crossing;
2478
2479 if (e->flags & EDGE_FALLTHRU)
2480 n_fallthru++, fallthru = e;
2481
2482 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2483 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2484 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2485 has_crossing_edge |= is_crossing;
2486 if (e->flags & EDGE_CROSSING)
2487 {
2488 if (!is_crossing)
2489 {
2490 error ("EDGE_CROSSING incorrectly set across same section");
2491 err = 1;
2492 }
2493 if (e->flags & EDGE_FALLTHRU)
2494 {
2495 error ("fallthru edge crosses section boundary in bb %i",
2496 e->src->index);
2497 err = 1;
2498 }
2499 if (e->flags & EDGE_EH)
2500 {
2501 error ("EH edge crosses section boundary in bb %i",
2502 e->src->index);
2503 err = 1;
2504 }
2505 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2506 {
2507 error ("No region crossing jump at section boundary in bb %i",
2508 bb->index);
2509 err = 1;
2510 }
2511 }
2512 else if (is_crossing)
2513 {
2514 error ("EDGE_CROSSING missing across section boundary");
2515 err = 1;
2516 }
2517
2518 if ((e->flags & ~(EDGE_DFS_BACK
2519 | EDGE_CAN_FALLTHRU
2520 | EDGE_IRREDUCIBLE_LOOP
2521 | EDGE_LOOP_EXIT
2522 | EDGE_CROSSING
2523 | EDGE_PRESERVE)) == 0)
2524 n_branch++;
2525
2526 if (e->flags & EDGE_ABNORMAL_CALL)
2527 n_abnormal_call++;
2528
2529 if (e->flags & EDGE_SIBCALL)
2530 n_sibcall++;
2531
2532 if (e->flags & EDGE_EH)
2533 n_eh++;
2534
2535 if (e->flags & EDGE_ABNORMAL)
2536 n_abnormal++;
2537 }
2538
2539 if (!has_crossing_edge
2540 && JUMP_P (BB_END (bb))
2541 && CROSSING_JUMP_P (BB_END (bb)))
2542 {
2543 print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
2544 error ("Region crossing jump across same section in bb %i",
2545 bb->index);
2546 err = 1;
2547 }
2548
2549 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2550 {
2551 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2552 err = 1;
2553 }
2554 if (n_eh > 1)
2555 {
2556 error ("too many exception handling edges in bb %i", bb->index);
2557 err = 1;
2558 }
2559 if (n_branch
2560 && (!JUMP_P (BB_END (bb))
2561 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2562 || any_condjump_p (BB_END (bb))))))
2563 {
2564 error ("too many outgoing branch edges from bb %i", bb->index);
2565 err = 1;
2566 }
2567 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2568 {
2569 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2570 err = 1;
2571 }
2572 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2573 {
2574 error ("wrong number of branch edges after unconditional jump"
2575 " in bb %i", bb->index);
2576 err = 1;
2577 }
2578 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2579 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2580 {
2581 error ("wrong amount of branch edges after conditional jump"
2582 " in bb %i", bb->index);
2583 err = 1;
2584 }
2585 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2586 {
2587 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2588 err = 1;
2589 }
2590 if (n_sibcall && !CALL_P (BB_END (bb)))
2591 {
2592 error ("sibcall edges for non-call insn in bb %i", bb->index);
2593 err = 1;
2594 }
2595 if (n_abnormal > n_eh
2596 && !(CALL_P (BB_END (bb))
2597 && n_abnormal == n_abnormal_call + n_sibcall)
2598 && (!JUMP_P (BB_END (bb))
2599 || any_condjump_p (BB_END (bb))
2600 || any_uncondjump_p (BB_END (bb))))
2601 {
2602 error ("abnormal edges for no purpose in bb %i", bb->index);
2603 err = 1;
2604 }
2605 }
2606
2607 /* If there are partitions, do a sanity check on them: A basic block in
2608   a cold partition cannot dominate a basic block in a hot partition.  */
2609 if (crtl->has_bb_partition && !err)
2610 {
2611 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2612 err = !bbs_to_fix.is_empty ();
2613 }
2614
2615 /* Clean up. */
2616 return err;
2617 }
2618
2619 /* Checks on the instructions within blocks. Currently checks that each
2620 block starts with a basic block note, and that basic block notes and
2621 control flow jumps are not found in the middle of the block. */
2622
2623 static int
2624 rtl_verify_bb_insns (void)
2625 {
2626 rtx_insn *x;
2627 int err = 0;
2628 basic_block bb;
2629
2630 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2631 {
2632 /* Now check the header of the basic
2633 block. It ought to contain an optional CODE_LABEL followed
2634 by NOTE_BASIC_BLOCK. */
2635 x = BB_HEAD (bb);
2636 if (LABEL_P (x))
2637 {
2638 if (BB_END (bb) == x)
2639 {
2640 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2641 bb->index);
2642 err = 1;
2643 }
2644
2645 x = NEXT_INSN (x);
2646 }
2647
2648 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2649 {
2650 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2651 bb->index);
2652 err = 1;
2653 }
2654
2655 if (BB_END (bb) == x)
2656 /* Do checks for empty blocks here. */
2657 ;
2658 else
2659 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2660 {
2661 if (NOTE_INSN_BASIC_BLOCK_P (x))
2662 {
2663 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2664 INSN_UID (x), bb->index);
2665 err = 1;
2666 }
2667
2668 if (x == BB_END (bb))
2669 break;
2670
2671 if (control_flow_insn_p (x))
2672 {
2673 error ("in basic block %d:", bb->index);
2674 fatal_insn ("flow control insn inside a basic block", x);
2675 }
2676 }
2677 }
2678
2679 /* Clean up. */
2680 return err;
2681 }
2682
2683 /* Verify that block pointers for instructions in basic blocks, headers and
2684 footers are set appropriately. */
2685
2686 static int
2687 rtl_verify_bb_pointers (void)
2688 {
2689 int err = 0;
2690 basic_block bb;
2691
2692 /* Check the general integrity of the basic blocks. */
2693 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2694 {
2695 rtx_insn *insn;
2696
2697 if (!(bb->flags & BB_RTL))
2698 {
2699 error ("BB_RTL flag not set for block %d", bb->index);
2700 err = 1;
2701 }
2702
2703 FOR_BB_INSNS (bb, insn)
2704 if (BLOCK_FOR_INSN (insn) != bb)
2705 {
2706 error ("insn %d basic block pointer is %d, should be %d",
2707 INSN_UID (insn),
2708 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2709 bb->index);
2710 err = 1;
2711 }
2712
2713 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2714 if (!BARRIER_P (insn)
2715 && BLOCK_FOR_INSN (insn) != NULL)
2716 {
2717 error ("insn %d in header of bb %d has non-NULL basic block",
2718 INSN_UID (insn), bb->index);
2719 err = 1;
2720 }
2721 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2722 if (!BARRIER_P (insn)
2723 && BLOCK_FOR_INSN (insn) != NULL)
2724 {
2725 error ("insn %d in footer of bb %d has non-NULL basic block",
2726 INSN_UID (insn), bb->index);
2727 err = 1;
2728 }
2729 }
2730
2731 /* Clean up. */
2732 return err;
2733 }
2734
2735 /* Verify the CFG and RTL consistency common for both underlying RTL and
2736 cfglayout RTL.
2737
2738 Currently it performs the following checks:
2739
2740 - overlapping of basic blocks
2741 - insns with wrong BLOCK_FOR_INSN pointers
2742 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2743 - tails of basic blocks (ensure that boundary is necessary)
2744 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2745 and NOTE_INSN_BASIC_BLOCK
2746 - verify that no fall_thru edge crosses hot/cold partition boundaries
2747 - verify that there are no pending RTL branch predictions
2748 - verify that hot blocks are not dominated by cold blocks
2749
2750 In the future it can be extended to check a lot of other stuff as well
2751 (reachability of basic blocks, life information, etc.). */
2752
2753 static int
2754 rtl_verify_flow_info_1 (void)
2755 {
2756 int err = 0;
2757
2758 err |= rtl_verify_bb_pointers ();
2759
2760 err |= rtl_verify_bb_insns ();
2761
2762 err |= rtl_verify_edges ();
2763
2764 return err;
2765 }
2766
2767 /* Walk the instruction chain and verify that bb head/end pointers
2768 are correct, and that instructions are in exactly one bb and have
2769 correct block pointers. */
2770
2771 static int
2772 rtl_verify_bb_insn_chain (void)
2773 {
2774 basic_block bb;
2775 int err = 0;
2776 rtx_insn *x;
2777 rtx_insn *last_head = get_last_insn ();
2778 basic_block *bb_info;
2779 const int max_uid = get_max_uid ();
2780
2781 bb_info = XCNEWVEC (basic_block, max_uid);
2782
2783 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2784 {
2785 rtx_insn *head = BB_HEAD (bb);
2786 rtx_insn *end = BB_END (bb);
2787
2788 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2789 {
2790 /* Verify the end of the basic block is in the INSN chain. */
2791 if (x == end)
2792 break;
2793
2794 /* And that the code outside of basic blocks has NULL bb field. */
2795 if (!BARRIER_P (x)
2796 && BLOCK_FOR_INSN (x) != NULL)
2797 {
2798 error ("insn %d outside of basic blocks has non-NULL bb field",
2799 INSN_UID (x));
2800 err = 1;
2801 }
2802 }
2803
2804 if (!x)
2805 {
2806 error ("end insn %d for block %d not found in the insn stream",
2807 INSN_UID (end), bb->index);
2808 err = 1;
2809 }
2810
2811 /* Work backwards from the end to the head of the basic block
2812 to verify the head is in the RTL chain. */
2813 for (; x != NULL_RTX; x = PREV_INSN (x))
2814 {
2815 /* While walking over the insn chain, verify insns appear
2816 in only one basic block. */
2817 if (bb_info[INSN_UID (x)] != NULL)
2818 {
2819 error ("insn %d is in multiple basic blocks (%d and %d)",
2820 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2821 err = 1;
2822 }
2823
2824 bb_info[INSN_UID (x)] = bb;
2825
2826 if (x == head)
2827 break;
2828 }
2829 if (!x)
2830 {
2831 error ("head insn %d for block %d not found in the insn stream",
2832 INSN_UID (head), bb->index);
2833 err = 1;
2834 }
2835
2836 last_head = PREV_INSN (x);
2837 }
2838
2839 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2840 {
2841 /* Check that the code before the first basic block has NULL
2842 bb field. */
2843 if (!BARRIER_P (x)
2844 && BLOCK_FOR_INSN (x) != NULL)
2845 {
2846 error ("insn %d outside of basic blocks has non-NULL bb field",
2847 INSN_UID (x));
2848 err = 1;
2849 }
2850 }
2851 free (bb_info);
2852
2853 return err;
2854 }
2855
2856 /* Verify that fallthru edges point to adjacent blocks in layout order and
2857 that barriers exist after non-fallthru blocks. */
2858
2859 static int
2860 rtl_verify_fallthru (void)
2861 {
2862 basic_block bb;
2863 int err = 0;
2864
2865 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2866 {
2867 edge e;
2868
2869 e = find_fallthru_edge (bb->succs);
2870 if (!e)
2871 {
2872 rtx_insn *insn;
2873
2874 /* Ensure existence of barrier in BB with no fallthru edges. */
2875 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2876 {
2877 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2878 {
2879 error ("missing barrier after block %i", bb->index);
2880 err = 1;
2881 break;
2882 }
2883 if (BARRIER_P (insn))
2884 break;
2885 }
2886 }
2887 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2888 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2889 {
2890 rtx_insn *insn;
2891
2892 if (e->src->next_bb != e->dest)
2893 {
2894 error
2895 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2896 e->src->index, e->dest->index);
2897 err = 1;
2898 }
2899 else
2900 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2901 insn = NEXT_INSN (insn))
2902 if (BARRIER_P (insn) || INSN_P (insn))
2903 {
2904 error ("verify_flow_info: Incorrect fallthru %i->%i",
2905 e->src->index, e->dest->index);
2906 fatal_insn ("wrong insn in the fallthru edge", insn);
2907 err = 1;
2908 }
2909 }
2910 }
2911
2912 return err;
2913 }
2914
2915 /* Verify that blocks are laid out in consecutive order. While walking the
2916 instructions, verify that all expected instructions are inside the basic
2917 blocks, and that all returns are followed by barriers. */
2918
2919 static int
2920 rtl_verify_bb_layout (void)
2921 {
2922 basic_block bb;
2923 int err = 0;
2924 rtx_insn *x;
2925 int num_bb_notes;
2926 rtx_insn * const rtx_first = get_insns ();
2927 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2928
2929 num_bb_notes = 0;
2930 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2931
2932 for (x = rtx_first; x; x = NEXT_INSN (x))
2933 {
2934 if (NOTE_INSN_BASIC_BLOCK_P (x))
2935 {
2936 bb = NOTE_BASIC_BLOCK (x);
2937
2938 num_bb_notes++;
2939 if (bb != last_bb_seen->next_bb)
2940 internal_error ("basic blocks not laid down consecutively");
2941
2942 curr_bb = last_bb_seen = bb;
2943 }
2944
2945 if (!curr_bb)
2946 {
2947 switch (GET_CODE (x))
2948 {
2949 case BARRIER:
2950 case NOTE:
2951 break;
2952
2953 case CODE_LABEL:
2954 /* An ADDR_VEC is placed outside any basic block. */
2955 if (NEXT_INSN (x)
2956 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2957 x = NEXT_INSN (x);
2958
2959 /* But in any case, non-deletable labels can appear anywhere. */
2960 break;
2961
2962 default:
2963 fatal_insn ("insn outside basic block", x);
2964 }
2965 }
2966
2967 if (JUMP_P (x)
2968 && returnjump_p (x) && ! condjump_p (x)
2969 && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
2970 fatal_insn ("return not followed by barrier", x);
2971
2972 if (curr_bb && x == BB_END (curr_bb))
2973 curr_bb = NULL;
2974 }
2975
2976 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2977 internal_error
2978 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2979 num_bb_notes, n_basic_blocks_for_fn (cfun));
2980
2981 return err;
2982 }
2983
2984 /* Verify the CFG and RTL consistency common for both underlying RTL and
2985 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2986
2987 Currently it performs the following checks:
2988 - all checks of rtl_verify_flow_info_1
2989 - test head/end pointers
2990 - check that blocks are laid out in consecutive order
2991 - check that all insns are in the basic blocks
2992 (except the switch handling code, barriers and notes)
2993 - check that all returns are followed by barriers
2994 - check that all fallthru edges point to the adjacent blocks
2995 - verify that there is a single hot/cold partition boundary after bbro */
2996
2997 static int
2998 rtl_verify_flow_info (void)
2999 {
3000 int err = 0;
3001
3002 err |= rtl_verify_flow_info_1 ();
3003
3004 err |= rtl_verify_bb_insn_chain ();
3005
3006 err |= rtl_verify_fallthru ();
3007
3008 err |= rtl_verify_bb_layout ();
3009
3010 err |= verify_hot_cold_block_grouping ();
3011
3012 return err;
3013 }
3014 \f
3015 /* Assume that the preceding pass has possibly eliminated jump instructions
3016 or converted the unconditional jumps. Eliminate the corresponding
3017 edges from the CFG. Return true if any edges are eliminated. */
3018
3019 bool
3020 purge_dead_edges (basic_block bb)
3021 {
3022 edge e;
3023 rtx_insn *insn = BB_END (bb);
3024 rtx note;
3025 bool purged = false;
3026 bool found;
3027 edge_iterator ei;
3028
3029 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3030 do
3031 insn = PREV_INSN (insn);
3032 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3033
3034 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3035 if (NONJUMP_INSN_P (insn)
3036 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3037 {
3038 rtx eqnote;
3039
3040 if (! may_trap_p (PATTERN (insn))
3041 || ((eqnote = find_reg_equal_equiv_note (insn))
3042 && ! may_trap_p (XEXP (eqnote, 0))))
3043 remove_note (insn, note);
3044 }
3045
3046 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3047 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3048 {
3049 bool remove = false;
3050
3051 /* There are three types of edges we need to handle correctly here: EH
3052 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3053 latter can appear when nonlocal gotos are used. */
3054 if (e->flags & EDGE_ABNORMAL_CALL)
3055 {
3056 if (!CALL_P (insn))
3057 remove = true;
3058 else if (can_nonlocal_goto (insn))
3059 ;
3060 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3061 ;
3062 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3063 ;
3064 else
3065 remove = true;
3066 }
3067 else if (e->flags & EDGE_EH)
3068 remove = !can_throw_internal (insn);
3069
3070 if (remove)
3071 {
3072 remove_edge (e);
3073 df_set_bb_dirty (bb);
3074 purged = true;
3075 }
3076 else
3077 ei_next (&ei);
3078 }
3079
3080 if (JUMP_P (insn))
3081 {
3082 rtx note;
3083 edge b,f;
3084 edge_iterator ei;
3085
3086 /* We care only about conditional jumps and simplejumps. */
3087 if (!any_condjump_p (insn)
3088 && !returnjump_p (insn)
3089 && !simplejump_p (insn))
3090 return purged;
3091
3092 /* Branch probability/prediction notes are defined only for
3093 condjumps. We've possibly turned a condjump into a simplejump. */
3094 if (simplejump_p (insn))
3095 {
3096 note = find_reg_note (insn, REG_BR_PROB, NULL);
3097 if (note)
3098 remove_note (insn, note);
3099 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3100 remove_note (insn, note);
3101 }
3102
3103 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3104 {
3105 /* Avoid letting abnormal flags leak from computed jumps turned
3106 into simplejumps. */
3107
3108 e->flags &= ~EDGE_ABNORMAL;
3109
3110 /* See if this edge is one we should keep. */
3111 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3112 /* A conditional jump can fall through into the next
3113 block, so we should keep the edge. */
3114 {
3115 ei_next (&ei);
3116 continue;
3117 }
3118 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3119 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3120 /* If the destination block is the target of the jump,
3121 keep the edge. */
3122 {
3123 ei_next (&ei);
3124 continue;
3125 }
3126 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3127 && returnjump_p (insn))
3128 /* If the destination block is the exit block, and this
3129 instruction is a return, then keep the edge. */
3130 {
3131 ei_next (&ei);
3132 continue;
3133 }
3134 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3135 /* Keep the edges that correspond to exceptions thrown by
3136 this instruction and rematerialize the EDGE_ABNORMAL
3137 flag we just cleared above. */
3138 {
3139 e->flags |= EDGE_ABNORMAL;
3140 ei_next (&ei);
3141 continue;
3142 }
3143
3144 /* We do not need this edge. */
3145 df_set_bb_dirty (bb);
3146 purged = true;
3147 remove_edge (e);
3148 }
3149
3150 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3151 return purged;
3152
3153 if (dump_file)
3154 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3155
3156 if (!optimize)
3157 return purged;
3158
3159 /* Redistribute probabilities. */
3160 if (single_succ_p (bb))
3161 {
3162 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3163 single_succ_edge (bb)->count = bb->count;
3164 }
3165 else
3166 {
3167 note = find_reg_note (insn, REG_BR_PROB, NULL);
3168 if (!note)
3169 return purged;
3170
3171 b = BRANCH_EDGE (bb);
3172 f = FALLTHRU_EDGE (bb);
3173 b->probability = XINT (note, 0);
3174 f->probability = REG_BR_PROB_BASE - b->probability;
3175 /* Update these to use GCOV_COMPUTE_SCALE. */
3176 b->count = bb->count * b->probability / REG_BR_PROB_BASE;
3177 f->count = bb->count * f->probability / REG_BR_PROB_BASE;
3178 }
3179
3180 return purged;
3181 }
3182 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3183 {
3184 /* First, there should not be any EH or ABCALL edges resulting
3185 from non-local gotos and the like. If there were, we shouldn't
3186 have created the sibcall in the first place. Second, there
3187 should of course never have been a fallthru edge. */
3188 gcc_assert (single_succ_p (bb));
3189 gcc_assert (single_succ_edge (bb)->flags
3190 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3191
3192 return false;
3193 }
3194
3195 /* If we don't see a jump insn, we don't know exactly why the block would
3196 have been broken at this point. Look for a simple, non-fallthru edge,
3197 as these are only created by conditional branches. If we find such an
3198 edge we know that there used to be a jump here and can then safely
3199 remove all non-fallthru edges. */
3200 found = false;
3201 FOR_EACH_EDGE (e, ei, bb->succs)
3202 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3203 {
3204 found = true;
3205 break;
3206 }
3207
3208 if (!found)
3209 return purged;
3210
3211 /* Remove all but the fake and fallthru edges. The fake edge may be
3212 the only successor for this block in the case of noreturn
3213 calls. */
3214 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3215 {
3216 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3217 {
3218 df_set_bb_dirty (bb);
3219 remove_edge (e);
3220 purged = true;
3221 }
3222 else
3223 ei_next (&ei);
3224 }
3225
3226 gcc_assert (single_succ_p (bb));
3227
3228 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3229 single_succ_edge (bb)->count = bb->count;
3230
3231 if (dump_file)
3232 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3233 bb->index);
3234 return purged;
3235 }
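/* A minimal sketch of the expected calling pattern (hypothetical pass
   code), after a jump in BB has been simplified or deleted:

       if (purge_dead_edges (bb))
         cleanup_cfg (0);

   cleanup_cfg is only one plausible follow-up; many callers simply use
   the return value as a "CFG changed" indication. */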
3236
3237 /* Search all basic blocks for potentially dead edges and purge them. Return
3238 true if some edge has been eliminated. */
3239
3240 bool
3241 purge_all_dead_edges (void)
3242 {
3243 bool purged = false;
3244 basic_block bb;
3245
3246 FOR_EACH_BB_FN (bb, cfun)
3247 {
3248 bool purged_here = purge_dead_edges (bb);
3249
3250 purged |= purged_here;
3251 }
3252
3253 return purged;
3254 }
3255
3256 /* This is used by a few passes that emit some instructions after abnormal
3257 calls, moving the basic block's end, while they in fact do want to emit
3258 them on the fallthru edge. Look for abnormal call edges, search backward
3259 for the call in the block, and insert the instructions on the edge instead.
3260
3261 Similarly, handle instructions throwing exceptions internally.
3262
3263 Return true when instructions have been found and inserted on edges. */
3264
3265 bool
3266 fixup_abnormal_edges (void)
3267 {
3268 bool inserted = false;
3269 basic_block bb;
3270
3271 FOR_EACH_BB_FN (bb, cfun)
3272 {
3273 edge e;
3274 edge_iterator ei;
3275
3276 /* Look for cases we are interested in - calls or instructions causing
3277 exceptions. */
3278 FOR_EACH_EDGE (e, ei, bb->succs)
3279 if ((e->flags & EDGE_ABNORMAL_CALL)
3280 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3281 == (EDGE_ABNORMAL | EDGE_EH)))
3282 break;
3283
3284 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3285 {
3286 rtx_insn *insn;
3287
3288 /* Get past the new insns generated. Allow notes, as the insns
3289 may be already deleted. */
3290 insn = BB_END (bb);
3291 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3292 && !can_throw_internal (insn)
3293 && insn != BB_HEAD (bb))
3294 insn = PREV_INSN (insn);
3295
3296 if (CALL_P (insn) || can_throw_internal (insn))
3297 {
3298 rtx_insn *stop, *next;
3299
3300 e = find_fallthru_edge (bb->succs);
3301
3302 stop = NEXT_INSN (BB_END (bb));
3303 BB_END (bb) = insn;
3304
3305 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3306 {
3307 next = NEXT_INSN (insn);
3308 if (INSN_P (insn))
3309 {
3310 delete_insn (insn);
3311
3312 /* Sometimes there's still the return value USE.
3313 If it's placed after a trapping call (i.e. that
3314 call is the last insn anyway), we have no fallthru
3315 edge. Simply delete this use and don't try to insert
3316 on the non-existent edge. */
3317 if (GET_CODE (PATTERN (insn)) != USE)
3318 {
3319 /* We're not deleting it, we're moving it. */
3320 insn->set_undeleted ();
3321 SET_PREV_INSN (insn) = NULL_RTX;
3322 SET_NEXT_INSN (insn) = NULL_RTX;
3323
3324 insert_insn_on_edge (insn, e);
3325 inserted = true;
3326 }
3327 }
3328 else if (!BARRIER_P (insn))
3329 set_block_for_insn (insn, NULL);
3330 }
3331 }
3332
3333 /* It may be that we don't find any trapping insn. In this
3334 case we discovered quite late that the insn that had been
3335 marked as can_throw_internal in fact couldn't trap at all.
3336 So we should in fact delete the EH edges out of the block. */
3337 else
3338 purge_dead_edges (bb);
3339 }
3340 }
3341
3342 return inserted;
3343 }
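/* The expected pairing (a sketch; passes like reload use it) is:

       if (fixup_abnormal_edges ())
         commit_edge_insertions ();

   since the moved instructions are only queued on the edges and still
   have to be committed. */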
3344 \f
3345 /* Cut the insns from FIRST to LAST out of the insns stream. */
3346
3347 rtx_insn *
3348 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3349 {
3350 rtx_insn *prevfirst = PREV_INSN (first);
3351 rtx_insn *nextlast = NEXT_INSN (last);
3352
3353 SET_PREV_INSN (first) = NULL;
3354 SET_NEXT_INSN (last) = NULL;
3355 if (prevfirst)
3356 SET_NEXT_INSN (prevfirst) = nextlast;
3357 if (nextlast)
3358 SET_PREV_INSN (nextlast) = prevfirst;
3359 else
3360 set_last_insn (prevfirst);
3361 if (!prevfirst)
3362 set_first_insn (nextlast);
3363 return first;
3364 }
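/* For example (a sketch), a block's header insns can be detached with

       rtx_insn *hdr = unlink_insn_chain (first, PREV_INSN (BB_HEAD (bb)));

   which leaves FIRST .. PREV_INSN (BB_HEAD (bb)) as a free-standing
   chain whose endpoints have NULL prev/next links; this is exactly how
   record_effective_endpoints below uses it. */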
3365 \f
3366 /* Skip over inter-block insns occurring after BB which are typically
3367 associated with BB (e.g., barriers). If there are any such insns,
3368 we return the last one. Otherwise, we return the end of BB. */
3369
3370 static rtx_insn *
3371 skip_insns_after_block (basic_block bb)
3372 {
3373 rtx_insn *insn, *last_insn, *next_head, *prev;
3374
3375 next_head = NULL;
3376 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3377 next_head = BB_HEAD (bb->next_bb);
3378
3379 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3380 {
3381 if (insn == next_head)
3382 break;
3383
3384 switch (GET_CODE (insn))
3385 {
3386 case BARRIER:
3387 last_insn = insn;
3388 continue;
3389
3390 case NOTE:
3391 switch (NOTE_KIND (insn))
3392 {
3393 case NOTE_INSN_BLOCK_END:
3394 gcc_unreachable ();
3395 continue;
3396 default:
3397 continue;
3399 }
3400 break;
3401
3402 case CODE_LABEL:
3403 if (NEXT_INSN (insn)
3404 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3405 {
3406 insn = NEXT_INSN (insn);
3407 last_insn = insn;
3408 continue;
3409 }
3410 break;
3411
3412 default:
3413 break;
3414 }
3415
3416 break;
3417 }
3418
3419 /* It is possible to hit a contradictory sequence. For instance:
3420
3421 jump_insn
3422 NOTE_INSN_BLOCK_BEG
3423 barrier
3424
3425 where the barrier belongs to the jump_insn, but the note does not. This can
3426 be created by removing the basic block originally following
3427 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3428
3429 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3430 {
3431 prev = PREV_INSN (insn);
3432 if (NOTE_P (insn))
3433 switch (NOTE_KIND (insn))
3434 {
3435 case NOTE_INSN_BLOCK_END:
3436 gcc_unreachable ();
3437 break;
3438 case NOTE_INSN_DELETED:
3439 case NOTE_INSN_DELETED_LABEL:
3440 case NOTE_INSN_DELETED_DEBUG_LABEL:
3441 continue;
3442 default:
3443 reorder_insns (insn, insn, last_insn);
3444 }
3445 }
3446
3447 return last_insn;
3448 }
3449
3450 /* Locate or create a label for a given basic block. */
3451
3452 static rtx_insn *
3453 label_for_bb (basic_block bb)
3454 {
3455 rtx_insn *label = BB_HEAD (bb);
3456
3457 if (!LABEL_P (label))
3458 {
3459 if (dump_file)
3460 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3461
3462 label = block_label (bb);
3463 }
3464
3465 return label;
3466 }
3467
3468 /* Locate the effective beginning and end of the insn chain for each
3469 block, as defined by skip_insns_after_block above. */
3470
3471 static void
3472 record_effective_endpoints (void)
3473 {
3474 rtx_insn *next_insn;
3475 basic_block bb;
3476 rtx_insn *insn;
3477
3478 for (insn = get_insns ();
3479 insn
3480 && NOTE_P (insn)
3481 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3482 insn = NEXT_INSN (insn))
3483 continue;
3484 /* No basic blocks at all? */
3485 gcc_assert (insn);
3486
3487 if (PREV_INSN (insn))
3488 cfg_layout_function_header =
3489 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3490 else
3491 cfg_layout_function_header = NULL;
3492
3493 next_insn = get_insns ();
3494 FOR_EACH_BB_FN (bb, cfun)
3495 {
3496 rtx_insn *end;
3497
3498 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3499 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3500 PREV_INSN (BB_HEAD (bb)));
3501 end = skip_insns_after_block (bb);
3502 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3503 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3504 next_insn = NEXT_INSN (BB_END (bb));
3505 }
3506
3507 cfg_layout_function_footer = next_insn;
3508 if (cfg_layout_function_footer)
3509 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3510 }
3511 \f
3512 namespace {
3513
3514 const pass_data pass_data_into_cfg_layout_mode =
3515 {
3516 RTL_PASS, /* type */
3517 "into_cfglayout", /* name */
3518 OPTGROUP_NONE, /* optinfo_flags */
3519 TV_CFG, /* tv_id */
3520 0, /* properties_required */
3521 PROP_cfglayout, /* properties_provided */
3522 0, /* properties_destroyed */
3523 0, /* todo_flags_start */
3524 0, /* todo_flags_finish */
3525 };
3526
3527 class pass_into_cfg_layout_mode : public rtl_opt_pass
3528 {
3529 public:
3530 pass_into_cfg_layout_mode (gcc::context *ctxt)
3531 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3532 {}
3533
3534 /* opt_pass methods: */
3535 virtual unsigned int execute (function *)
3536 {
3537 cfg_layout_initialize (0);
3538 return 0;
3539 }
3540
3541 }; // class pass_into_cfg_layout_mode
3542
3543 } // anon namespace
3544
3545 rtl_opt_pass *
3546 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3547 {
3548 return new pass_into_cfg_layout_mode (ctxt);
3549 }
3550
3551 namespace {
3552
3553 const pass_data pass_data_outof_cfg_layout_mode =
3554 {
3555 RTL_PASS, /* type */
3556 "outof_cfglayout", /* name */
3557 OPTGROUP_NONE, /* optinfo_flags */
3558 TV_CFG, /* tv_id */
3559 0, /* properties_required */
3560 0, /* properties_provided */
3561 PROP_cfglayout, /* properties_destroyed */
3562 0, /* todo_flags_start */
3563 0, /* todo_flags_finish */
3564 };
3565
3566 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3567 {
3568 public:
3569 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3570 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3571 {}
3572
3573 /* opt_pass methods: */
3574 virtual unsigned int execute (function *);
3575
3576 }; // class pass_outof_cfg_layout_mode
3577
3578 unsigned int
3579 pass_outof_cfg_layout_mode::execute (function *fun)
3580 {
3581 basic_block bb;
3582
3583 FOR_EACH_BB_FN (bb, fun)
3584 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3585 bb->aux = bb->next_bb;
3586
3587 cfg_layout_finalize ();
3588
3589 return 0;
3590 }
3591
3592 } // anon namespace
3593
3594 rtl_opt_pass *
3595 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3596 {
3597 return new pass_outof_cfg_layout_mode (ctxt);
3598 }
3599 \f
3600
3601 /* Link the basic blocks in the correct order, compacting the basic
3602 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3603 function also clears the basic block header and footer fields.
3604
3605 This function is usually called after a pass (e.g. tracer) finishes
3606 some transformations while in cfglayout mode. The required sequence
3607 of the basic blocks is in a linked list along the bb->aux field.
3608 This function re-links the basic block prev_bb and next_bb pointers
3609 accordingly, and it compacts and renumbers the blocks.
3610
3611 FIXME: This currently works only for RTL, but the only RTL-specific
3612 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3613 to GIMPLE a long time ago, but it doesn't relink the basic block
3614 chain. It could do that (to give better initial RTL) if this function
3615 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3616
3617 void
3618 relink_block_chain (bool stay_in_cfglayout_mode)
3619 {
3620 basic_block bb, prev_bb;
3621 int index;
3622
3623 /* Maybe dump the re-ordered sequence. */
3624 if (dump_file)
3625 {
3626 fprintf (dump_file, "Reordered sequence:\n");
3627 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3628 NUM_FIXED_BLOCKS;
3629 bb;
3630 bb = (basic_block) bb->aux, index++)
3631 {
3632 fprintf (dump_file, " %i ", index);
3633 if (get_bb_original (bb))
3634 fprintf (dump_file, "duplicate of %i ",
3635 get_bb_original (bb)->index);
3636 else if (forwarder_block_p (bb)
3637 && !LABEL_P (BB_HEAD (bb)))
3638 fprintf (dump_file, "compensation ");
3639 else
3640 fprintf (dump_file, "bb %i ", bb->index);
3641 fprintf (dump_file, " [%i]\n", bb->frequency);
3642 }
3643 }
3644
3645 /* Now reorder the blocks. */
3646 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3647 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3648 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3649 {
3650 bb->prev_bb = prev_bb;
3651 prev_bb->next_bb = bb;
3652 }
3653 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3654 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3655
3656 /* Then, clean up the aux fields. */
3657 FOR_ALL_BB_FN (bb, cfun)
3658 {
3659 bb->aux = NULL;
3660 if (!stay_in_cfglayout_mode)
3661 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3662 }
3663
3664 /* Maybe reset the original copy tables; they are not valid anymore
3665 when we renumber the basic blocks in compact_blocks. If we are
3666 going out of cfglayout mode, don't re-allocate the tables. */
3667 free_original_copy_tables ();
3668 if (stay_in_cfglayout_mode)
3669 initialize_original_copy_tables ();
3670
3671 /* Finally, put basic_block_info in the new order. */
3672 compact_blocks ();
3673 }
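/* A minimal sketch of the calling convention (hypothetical pass code):
   chain the blocks in the desired order through bb->aux, NULL-terminate
   the chain, then call this function:

       basic_block bb;
       FOR_EACH_BB_FN (bb, cfun)
         bb->aux = (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
                    ? bb->next_bb : NULL);
       relink_block_chain (true);

   pass_outof_cfg_layout_mode::execute above builds the same kind of
   identity chain before calling cfg_layout_finalize. */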
3674 \f
3675
3676 /* Given a reorder chain, rearrange the code to match. */
3677
3678 static void
3679 fixup_reorder_chain (void)
3680 {
3681 basic_block bb;
3682 rtx_insn *insn = NULL;
3683
3684 if (cfg_layout_function_header)
3685 {
3686 set_first_insn (cfg_layout_function_header);
3687 insn = cfg_layout_function_header;
3688 while (NEXT_INSN (insn))
3689 insn = NEXT_INSN (insn);
3690 }
3691
3692 /* First do the bulk reordering -- rechain the blocks without regard to
3693 the needed changes to jumps and labels. */
3694
3695 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3696 bb->aux)
3697 {
3698 if (BB_HEADER (bb))
3699 {
3700 if (insn)
3701 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3702 else
3703 set_first_insn (BB_HEADER (bb));
3704 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3705 insn = BB_HEADER (bb);
3706 while (NEXT_INSN (insn))
3707 insn = NEXT_INSN (insn);
3708 }
3709 if (insn)
3710 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3711 else
3712 set_first_insn (BB_HEAD (bb));
3713 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3714 insn = BB_END (bb);
3715 if (BB_FOOTER (bb))
3716 {
3717 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3718 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3719 while (NEXT_INSN (insn))
3720 insn = NEXT_INSN (insn);
3721 }
3722 }
3723
3724 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3725 if (cfg_layout_function_footer)
3726 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3727
3728 while (NEXT_INSN (insn))
3729 insn = NEXT_INSN (insn);
3730
3731 set_last_insn (insn);
3732 #ifdef ENABLE_CHECKING
3733 verify_insn_chain ();
3734 #endif
3735
3736 /* Now add jumps and labels as needed to match the blocks' new
3737 outgoing edges. */
3738
3739 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3740 bb->aux)
3741 {
3742 edge e_fall, e_taken, e;
3743 rtx_insn *bb_end_insn;
3744 rtx ret_label = NULL_RTX;
3745 basic_block nb;
3746 edge_iterator ei;
3747
3748 if (EDGE_COUNT (bb->succs) == 0)
3749 continue;
3750
3751 /* Find the old fallthru edge, and another non-EH edge for
3752 a taken jump. */
3753 e_taken = e_fall = NULL;
3754
3755 FOR_EACH_EDGE (e, ei, bb->succs)
3756 if (e->flags & EDGE_FALLTHRU)
3757 e_fall = e;
3758 else if (! (e->flags & EDGE_EH))
3759 e_taken = e;
3760
3761 bb_end_insn = BB_END (bb);
3762 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3763 {
3764 ret_label = JUMP_LABEL (bb_end_jump);
3765 if (any_condjump_p (bb_end_jump))
3766 {
3767 /* This might happen if the conditional jump has side
3768 effects and could therefore not be optimized away.
3769 Make the basic block end with a barrier in order
3770 to prevent rtl_verify_flow_info from complaining. */
3771 if (!e_fall)
3772 {
3773 gcc_assert (!onlyjump_p (bb_end_jump)
3774 || returnjump_p (bb_end_jump)
3775 || (e_taken->flags & EDGE_CROSSING));
3776 emit_barrier_after (bb_end_jump);
3777 continue;
3778 }
3779
3780 /* If the old fallthru is still next, nothing to do. */
3781 if (bb->aux == e_fall->dest
3782 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3783 continue;
3784
3785 /* The degenerate case of a conditional jump jumping to the next
3786 instruction can happen for jumps with side effects. We need
3787 to construct a forwarder block and this will be done just
3788 fine by force_nonfallthru below. */
3789 if (!e_taken)
3790 ;
3791
3792 /* There is another special case: if *neither* block is next,
3793 such as happens at the very end of a function, then we'll
3794 need to add a new unconditional jump. Choose the taken
3795 edge based on known or assumed probability. */
3796 else if (bb->aux != e_taken->dest)
3797 {
3798 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3799
3800 if (note
3801 && XINT (note, 0) < REG_BR_PROB_BASE / 2
3802 && invert_jump (bb_end_jump,
3803 (e_fall->dest
3804 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3805 ? NULL_RTX
3806 : label_for_bb (e_fall->dest)), 0))
3807 {
3808 e_fall->flags &= ~EDGE_FALLTHRU;
3809 gcc_checking_assert (could_fall_through
3810 (e_taken->src, e_taken->dest));
3811 e_taken->flags |= EDGE_FALLTHRU;
3812 update_br_prob_note (bb);
3813 e = e_fall, e_fall = e_taken, e_taken = e;
3814 }
3815 }
3816
3817 /* If the "jumping" edge is a crossing edge, and the fall
3818 through edge is non-crossing, leave things as they are. */
3819 else if ((e_taken->flags & EDGE_CROSSING)
3820 && !(e_fall->flags & EDGE_CROSSING))
3821 continue;
3822
3823 /* Otherwise we can try to invert the jump. This will
3824 basically never fail, however, keep up the pretense. */
3825 else if (invert_jump (bb_end_jump,
3826 (e_fall->dest
3827 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3828 ? NULL_RTX
3829 : label_for_bb (e_fall->dest)), 0))
3830 {
3831 e_fall->flags &= ~EDGE_FALLTHRU;
3832 gcc_checking_assert (could_fall_through
3833 (e_taken->src, e_taken->dest));
3834 e_taken->flags |= EDGE_FALLTHRU;
3835 update_br_prob_note (bb);
3836 if (LABEL_NUSES (ret_label) == 0
3837 && single_pred_p (e_taken->dest))
3838 delete_insn (ret_label);
3839 continue;
3840 }
3841 }
3842 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3843 {
3844 /* If the old fallthru is still next, or if the asm goto
3845 doesn't have a fallthru (e.g. when followed by
3846 __builtin_unreachable ()), there is nothing to do. */
3847 if (! e_fall
3848 || bb->aux == e_fall->dest
3849 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3850 continue;
3851
3852 /* Otherwise we'll have to use the fallthru fixup below. */
3853 }
3854 else
3855 {
3856 /* Otherwise we have some return, switch or computed
3857 jump. In the 99% case, there should not have been a
3858 fallthru edge. */
3859 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3860 continue;
3861 }
3862 }
3863 else
3864 {
3865 /* No fallthru implies a noreturn function with EH edges, or
3866 something similarly bizarre. In any case, we don't need to
3867 do anything. */
3868 if (! e_fall)
3869 continue;
3870
3871 /* If the fallthru block is still next, nothing to do. */
3872 if (bb->aux == e_fall->dest)
3873 continue;
3874
3875 /* A fallthru to exit block. */
3876 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3877 continue;
3878 }
3879
3880 /* We get here when we need to add a new jump insn.
3881 Note that force_nonfallthru can delete E_FALL; thus we have to
3882 save E_FALL->src prior to the call to force_nonfallthru. */
3883 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3884 if (nb)
3885 {
3886 nb->aux = bb->aux;
3887 bb->aux = nb;
3888 /* Don't process this new block. */
3889 bb = nb;
3890 }
3891 }
3892
3893 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3894
3895 /* Annoying special case - jump around dead jumptables left in the code. */
3896 FOR_EACH_BB_FN (bb, cfun)
3897 {
3898 edge e = find_fallthru_edge (bb->succs);
3899
3900 if (e && !can_fallthru (e->src, e->dest))
3901 force_nonfallthru (e);
3902 }
3903
3904 /* Ensure that each edge's goto_locus is represented by at least one
3905 instruction with that locus in the RTL. */
3906 if (!optimize)
3907 FOR_EACH_BB_FN (bb, cfun)
3908 {
3909 edge e;
3910 edge_iterator ei;
3911
3912 FOR_EACH_EDGE (e, ei, bb->succs)
3913 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3914 && !(e->flags & EDGE_ABNORMAL))
3915 {
3916 edge e2;
3917 edge_iterator ei2;
3918 basic_block dest, nb;
3919 rtx_insn *end;
3920
3921 insn = BB_END (e->src);
3922 end = PREV_INSN (BB_HEAD (e->src));
3923 while (insn != end
3924 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3925 insn = PREV_INSN (insn);
3926 if (insn != end
3927 && INSN_LOCATION (insn) == e->goto_locus)
3928 continue;
3929 if (simplejump_p (BB_END (e->src))
3930 && !INSN_HAS_LOCATION (BB_END (e->src)))
3931 {
3932 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3933 continue;
3934 }
3935 dest = e->dest;
3936 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3937 {
3938 /* Non-fallthru edges to the exit block cannot be split. */
3939 if (!(e->flags & EDGE_FALLTHRU))
3940 continue;
3941 }
3942 else
3943 {
3944 insn = BB_HEAD (dest);
3945 end = NEXT_INSN (BB_END (dest));
3946 while (insn != end && !NONDEBUG_INSN_P (insn))
3947 insn = NEXT_INSN (insn);
3948 if (insn != end && INSN_HAS_LOCATION (insn)
3949 && INSN_LOCATION (insn) == e->goto_locus)
3950 continue;
3951 }
3952 nb = split_edge (e);
3953 if (!INSN_P (BB_END (nb)))
3954 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3955 nb);
3956 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3957
3958 /* If there are other incoming edges to the destination block
3959 with the same goto locus, redirect them to the new block as
3960 well; this can prevent other such blocks from being created
3961 in subsequent iterations of the loop. */
3962 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3963 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3964 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3965 && e->goto_locus == e2->goto_locus)
3966 redirect_edge_and_branch (e2, nb);
3967 else
3968 ei_next (&ei2);
3969 }
3970 }
3971 }
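
/* A sketch of the aux-pointer convention consumed by fixup_reorder_chain
   above (illustrative, not a public API): each block's AUX field points
   to the block that should follow it in the final insn order, so a
   desired layout of

       ENTRY -> bb2 -> bb5 -> bb3 -> EXIT

   is encoded as bb2->aux == bb5, bb5->aux == bb3 and bb3->aux == NULL,
   and is walked with

       for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
            bb = (basic_block) bb->aux)
         ...
*/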
3972 \f
3973 /* Perform sanity checks on the insn chain.
3974 1. Check that next/prev pointers are consistent in both the forward and
3975 reverse direction.
3976 2. Count insns in chain, going both directions, and check if equal.
3977 3. Check that get_last_insn () returns the actual end of chain. */
3978
3979 DEBUG_FUNCTION void
3980 verify_insn_chain (void)
3981 {
3982 rtx_insn *x, *prevx, *nextx;
3983 int insn_cnt1, insn_cnt2;
3984
3985 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3986 x != 0;
3987 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3988 gcc_assert (PREV_INSN (x) == prevx);
3989
3990 gcc_assert (prevx == get_last_insn ());
3991
3992 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3993 x != 0;
3994 nextx = x, insn_cnt2++, x = PREV_INSN (x))
3995 gcc_assert (NEXT_INSN (x) == nextx);
3996
3997 gcc_assert (insn_cnt1 == insn_cnt2);
3998 }
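
/* Typical usage, mirroring the call sites in this file: the verifier is
   only invoked in checking-enabled builds, for example

       #ifdef ENABLE_CHECKING
       verify_insn_chain ();
       #endif

   and any inconsistency fires a gcc_assert rather than returning an
   error code.  */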
3999 \f
4000 /* If we have assembler epilogues, the block falling through to exit must
4001 be the last one in the reordered chain when we reach final. Ensure
4002 that this condition is met. */
4003 static void
4004 fixup_fallthru_exit_predecessor (void)
4005 {
4006 edge e;
4007 basic_block bb = NULL;
4008
4009 /* This transformation is not valid before reload, because we might
4010 separate a call from the instruction that copies the return
4011 value. */
4012 gcc_assert (reload_completed);
4013
4014 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4015 if (e)
4016 bb = e->src;
4017
4018 if (bb && bb->aux)
4019 {
4020 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4021
4022 /* If the very first block is the one with the fall-through exit
4023 edge, we have to split that block. */
4024 if (c == bb)
4025 {
4026 bb = split_block_after_labels (bb)->dest;
4027 bb->aux = c->aux;
4028 c->aux = bb;
4029 BB_FOOTER (bb) = BB_FOOTER (c);
4030 BB_FOOTER (c) = NULL;
4031 }
4032
4033 while (c->aux != bb)
4034 c = (basic_block) c->aux;
4035
4036 c->aux = bb->aux;
4037 while (c->aux)
4038 c = (basic_block) c->aux;
4039
4040 c->aux = bb;
4041 bb->aux = NULL;
4042 }
4043 }
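
/* An illustration of the relinking above, on a hypothetical aux chain:
   if BB is the fallthru predecessor of the exit block, then

       c0 -> bb -> c1 -> c2 -> (end)

   is rewritten to

       c0 -> c1 -> c2 -> bb -> (end)

   so that BB is emitted last and can fall through into the exit.  */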
4044
4045 /* If the exit block has more than one fallthru predecessor, force it
4046 to have just one. */
4047
4048 static void
4049 force_one_exit_fallthru (void)
4050 {
4051 edge e, predecessor = NULL;
4052 bool more = false;
4053 edge_iterator ei;
4054 basic_block forwarder, bb;
4055
4056 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4057 if (e->flags & EDGE_FALLTHRU)
4058 {
4059 if (predecessor == NULL)
4060 predecessor = e;
4061 else
4062 {
4063 more = true;
4064 break;
4065 }
4066 }
4067
4068 if (!more)
4069 return;
4070
4071 /* Exit has several fallthru predecessors. Create a forwarder block for
4072 them. */
4073 forwarder = split_edge (predecessor);
4074 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4075 (e = ei_safe_edge (ei)); )
4076 {
4077 if (e->src == forwarder
4078 || !(e->flags & EDGE_FALLTHRU))
4079 ei_next (&ei);
4080 else
4081 redirect_edge_and_branch_force (e, forwarder);
4082 }
4083
4084 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4085 exit block. */
4086 FOR_EACH_BB_FN (bb, cfun)
4087 {
4088 if (bb->aux == NULL && bb != forwarder)
4089 {
4090 bb->aux = forwarder;
4091 break;
4092 }
4093 }
4094 }
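
/* A sketch of the effect on a hypothetical CFG: with two fallthru
   predecessors of the exit block,

       bb1 (fallthru) -> EXIT              bb1 -> FORWARDER
       bb2 (fallthru) -> EXIT      ==>     bb2 -> FORWARDER (fallthru) -> EXIT

   only FORWARDER keeps a fallthru edge into the exit; the other edges
   are redirected to it with redirect_edge_and_branch_force above.  */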
4095 \f
4096 /* Return true if it is possible to duplicate the basic block BB. */
4097
4098 static bool
4099 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4100 {
4101 /* Do not attempt to duplicate tablejumps, as we need to unshare
4102 the dispatch table. This is difficult to do, as the instructions
4103 computing the jump destination may be hoisted outside the basic block. */
4104 if (tablejump_p (BB_END (bb), NULL, NULL))
4105 return false;
4106
4107 /* Do not duplicate blocks containing insns that can't be copied. */
4108 if (targetm.cannot_copy_insn_p)
4109 {
4110 rtx_insn *insn = BB_HEAD (bb);
4111 while (1)
4112 {
4113 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4114 return false;
4115 if (insn == BB_END (bb))
4116 break;
4117 insn = NEXT_INSN (insn);
4118 }
4119 }
4120
4121 return true;
4122 }
4123
4124 rtx_insn *
4125 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4126 {
4127 rtx_insn *insn, *next, *copy;
4128 rtx_note *last;
4129
4130 /* Avoid updating the boundaries of the previous basic block. The
4131 note will be removed from the insn stream in the fixup. */
4132 last = emit_note (NOTE_INSN_DELETED);
4133
4134 /* Create the copy at the end of the insn chain. The chain will
4135 be reordered later. */
4136 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4137 {
4138 switch (GET_CODE (insn))
4139 {
4140 case DEBUG_INSN:
4141 /* Don't duplicate label debug insns. */
4142 if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4143 break;
4144 /* FALLTHRU */
4145 case INSN:
4146 case CALL_INSN:
4147 case JUMP_INSN:
4148 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4149 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4150 && ANY_RETURN_P (JUMP_LABEL (insn)))
4151 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4152 maybe_copy_prologue_epilogue_insn (insn, copy);
4153 break;
4154
4155 case JUMP_TABLE_DATA:
4156 /* Avoid copying dispatch tables. We never duplicate
4157 tablejumps, so this can only happen if the table got
4158 moved far from the original jump.
4159 Also avoid copying the following barrier, if any
4160 (and any debug insns in between). */
4161 for (next = NEXT_INSN (insn);
4162 next != NEXT_INSN (to);
4163 next = NEXT_INSN (next))
4164 if (!DEBUG_INSN_P (next))
4165 break;
4166 if (next != NEXT_INSN (to) && BARRIER_P (next))
4167 insn = next;
4168 break;
4169
4170 case CODE_LABEL:
4171 break;
4172
4173 case BARRIER:
4174 emit_barrier ();
4175 break;
4176
4177 case NOTE:
4178 switch (NOTE_KIND (insn))
4179 {
4180 /* In case the prologue is empty and the function contains a label
4181 in its first BB, we may want to copy the block. */
4182 case NOTE_INSN_PROLOGUE_END:
4183
4184 case NOTE_INSN_DELETED:
4185 case NOTE_INSN_DELETED_LABEL:
4186 case NOTE_INSN_DELETED_DEBUG_LABEL:
4187 /* These are safe to strip. */
4188 case NOTE_INSN_FUNCTION_BEG:
4189 /* There is always just a single entry to the function. */
4190 case NOTE_INSN_BASIC_BLOCK:
4191 /* We should only switch text sections once. */
4192 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4193 break;
4194
4195 case NOTE_INSN_EPILOGUE_BEG:
4196 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4197 emit_note_copy (as_a <rtx_note *> (insn));
4198 break;
4199
4200 default:
4201 /* All other notes should have already been eliminated. */
4202 gcc_unreachable ();
4203 }
4204 break;
4205 default:
4206 gcc_unreachable ();
4207 }
4208 }
4209 insn = NEXT_INSN (last);
4210 delete_insn (last);
4211 return insn;
4212 }
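
/* A minimal usage sketch, matching how cfg_layout_duplicate_bb below
   consumes this function: the copy is appended at the end of the insn
   chain, so the caller collects it from the returned insn up to
   get_last_insn ():

       rtx_insn *first = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
       basic_block copy
         = create_basic_block (first, first ? get_last_insn () : NULL,
                               EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
*/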
4213
4214 /* Create a duplicate of the basic block BB. */
4215
4216 static basic_block
4217 cfg_layout_duplicate_bb (basic_block bb)
4218 {
4219 rtx_insn *insn;
4220 basic_block new_bb;
4221
4222 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4223 new_bb = create_basic_block (insn,
4224 insn ? get_last_insn () : NULL,
4225 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4226
4227 BB_COPY_PARTITION (new_bb, bb);
4228 if (BB_HEADER (bb))
4229 {
4230 insn = BB_HEADER (bb);
4231 while (NEXT_INSN (insn))
4232 insn = NEXT_INSN (insn);
4233 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4234 if (insn)
4235 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4236 }
4237
4238 if (BB_FOOTER (bb))
4239 {
4240 insn = BB_FOOTER (bb);
4241 while (NEXT_INSN (insn))
4242 insn = NEXT_INSN (insn);
4243 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4244 if (insn)
4245 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4246 }
4247
4248 return new_bb;
4249 }
4250
4251 \f
4252 /* Main entry point to this module - initialize the data structures
4253 for CFG layout changes.
4254
4255 FLAGS is a set of additional flags to pass to cleanup_cfg (). */
4256
4257 void
4258 cfg_layout_initialize (unsigned int flags)
4259 {
4260 rtx_insn_list *x;
4261 basic_block bb;
4262
4263 /* Once bb partitioning is complete, cfg layout mode should not be
4264 re-entered. Entering cfg layout mode may require fixups. For
4265 example, edge forwarding performed while optimizing the cfg
4266 layout might require moving a block from the hot to the cold
4267 section, which would create an illegal partitioning unless some
4268 manual fixup was performed. */
4269 gcc_assert (!(crtl->bb_reorder_complete
4270 && flag_reorder_blocks_and_partition));
4271
4272 initialize_original_copy_tables ();
4273
4274 cfg_layout_rtl_register_cfg_hooks ();
4275
4276 record_effective_endpoints ();
4277
4278 /* Make sure that the targets of non-local gotos are marked. */
4279 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4280 {
4281 bb = BLOCK_FOR_INSN (x->insn ());
4282 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4283 }
4284
4285 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4286 }
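
/* A typical pass brackets its layout-mode work with the
   initialize/finalize pair; a minimal sketch (pass boilerplate omitted):

       cfg_layout_initialize (0);
       ... reorder blocks and redirect edges via the cfg_layout hooks ...
       cfg_layout_finalize ();

   Between the two calls, blocks joined by fallthru edges need not be
   adjacent, so they can be rearranged freely.  */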
4287
4288 /* Splits superblocks. */
4289 void
4290 break_superblocks (void)
4291 {
4292 sbitmap superblocks;
4293 bool need = false;
4294 basic_block bb;
4295
4296 superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
4297 bitmap_clear (superblocks);
4298
4299 FOR_EACH_BB_FN (bb, cfun)
4300 if (bb->flags & BB_SUPERBLOCK)
4301 {
4302 bb->flags &= ~BB_SUPERBLOCK;
4303 bitmap_set_bit (superblocks, bb->index);
4304 need = true;
4305 }
4306
4307 if (need)
4308 {
4309 rebuild_jump_labels (get_insns ());
4310 find_many_sub_basic_blocks (superblocks);
4311 }
4312
4313 free (superblocks);
4314 }
4315
4316 /* Finalize the changes: reorder the insn list according to the sequence
4317 specified by aux pointers, insert compensation code, rebuild the scope forest. */
4318
4319 void
4320 cfg_layout_finalize (void)
4321 {
4322 #ifdef ENABLE_CHECKING
4323 verify_flow_info ();
4324 #endif
4325 force_one_exit_fallthru ();
4326 rtl_register_cfg_hooks ();
4327 if (reload_completed && !HAVE_epilogue)
4328 fixup_fallthru_exit_predecessor ();
4329 fixup_reorder_chain ();
4330
4331 rebuild_jump_labels (get_insns ());
4332 delete_dead_jumptables ();
4333
4334 #ifdef ENABLE_CHECKING
4335 verify_insn_chain ();
4336 verify_flow_info ();
4337 #endif
4338 }
4339
4340
4341 /* Same as split_block but update cfg_layout structures. */
4342
4343 static basic_block
4344 cfg_layout_split_block (basic_block bb, void *insnp)
4345 {
4346 rtx insn = (rtx) insnp;
4347 basic_block new_bb = rtl_split_block (bb, insn);
4348
4349 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4350 BB_FOOTER (bb) = NULL;
4351
4352 return new_bb;
4353 }
4354
4355 /* Redirect edge E to DEST. */
4356 static edge
4357 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4358 {
4359 basic_block src = e->src;
4360 edge ret;
4361
4362 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4363 return NULL;
4364
4365 if (e->dest == dest)
4366 return e;
4367
4368 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4369 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4370 {
4371 df_set_bb_dirty (src);
4372 return ret;
4373 }
4374
4375 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4376 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4377 {
4378 if (dump_file)
4379 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4380 e->src->index, dest->index);
4381
4382 df_set_bb_dirty (e->src);
4383 redirect_edge_succ (e, dest);
4384 return e;
4385 }
4386
4387 /* Redirect_edge_and_branch may decide to turn the branch into a fallthru
4388 edge in case the basic blocks appear to be in sequence. Avoid this
4389 transformation. */
4390
4391 if (e->flags & EDGE_FALLTHRU)
4392 {
4393 /* Redirect any branch edges unified with the fallthru one. */
4394 if (JUMP_P (BB_END (src))
4395 && label_is_jump_target_p (BB_HEAD (e->dest),
4396 BB_END (src)))
4397 {
4398 edge redirected;
4399
4400 if (dump_file)
4401 fprintf (dump_file, "Fallthru edge unified with branch "
4402 "%i->%i redirected to %i\n",
4403 e->src->index, e->dest->index, dest->index);
4404 e->flags &= ~EDGE_FALLTHRU;
4405 redirected = redirect_branch_edge (e, dest);
4406 gcc_assert (redirected);
4407 redirected->flags |= EDGE_FALLTHRU;
4408 df_set_bb_dirty (redirected->src);
4409 return redirected;
4410 }
4411 /* In case we are redirecting the fallthru edge to the branch edge
4412 of a conditional jump, remove the jump. */
4413 if (EDGE_COUNT (src->succs) == 2)
4414 {
4415 /* Find the edge that is different from E. */
4416 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4417
4418 if (s->dest == dest
4419 && any_condjump_p (BB_END (src))
4420 && onlyjump_p (BB_END (src)))
4421 delete_insn (BB_END (src));
4422 }
4423 if (dump_file)
4424 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4425 e->src->index, e->dest->index, dest->index);
4426 ret = redirect_edge_succ_nodup (e, dest);
4427 }
4428 else
4429 ret = redirect_branch_edge (e, dest);
4430
4431 /* We don't want simplejumps in the insn stream during cfglayout. */
4432 gcc_assert (!simplejump_p (BB_END (src)));
4433
4434 df_set_bb_dirty (src);
4435 return ret;
4436 }
4437
4438 /* Simple wrapper as we can always redirect fallthru edges. */
4439 static basic_block
4440 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4441 {
4442 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4443
4444 gcc_assert (redirected);
4445 return NULL;
4446 }
4447
4448 /* Same as delete_basic_block but update cfg_layout structures. */
4449
4450 static void
4451 cfg_layout_delete_block (basic_block bb)
4452 {
4453 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remains;
4454 rtx_insn **to;
4455
4456 if (BB_HEADER (bb))
4457 {
4458 next = BB_HEAD (bb);
4459 if (prev)
4460 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4461 else
4462 set_first_insn (BB_HEADER (bb));
4463 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4464 insn = BB_HEADER (bb);
4465 while (NEXT_INSN (insn))
4466 insn = NEXT_INSN (insn);
4467 SET_NEXT_INSN (insn) = next;
4468 SET_PREV_INSN (next) = insn;
4469 }
4470 next = NEXT_INSN (BB_END (bb));
4471 if (BB_FOOTER (bb))
4472 {
4473 insn = BB_FOOTER (bb);
4474 while (insn)
4475 {
4476 if (BARRIER_P (insn))
4477 {
4478 if (PREV_INSN (insn))
4479 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4480 else
4481 BB_FOOTER (bb) = NEXT_INSN (insn);
4482 if (NEXT_INSN (insn))
4483 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4484 }
4485 if (LABEL_P (insn))
4486 break;
4487 insn = NEXT_INSN (insn);
4488 }
4489 if (BB_FOOTER (bb))
4490 {
4491 insn = BB_END (bb);
4492 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4493 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4494 while (NEXT_INSN (insn))
4495 insn = NEXT_INSN (insn);
4496 SET_NEXT_INSN (insn) = next;
4497 if (next)
4498 SET_PREV_INSN (next) = insn;
4499 else
4500 set_last_insn (insn);
4501 }
4502 }
4503 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4504 to = &BB_HEADER (bb->next_bb);
4505 else
4506 to = &cfg_layout_function_footer;
4507
4508 rtl_delete_block (bb);
4509
4510 if (prev)
4511 prev = NEXT_INSN (prev);
4512 else
4513 prev = get_insns ();
4514 if (next)
4515 next = PREV_INSN (next);
4516 else
4517 next = get_last_insn ();
4518
4519 if (next && NEXT_INSN (next) != prev)
4520 {
4521 remains = unlink_insn_chain (prev, next);
4522 insn = remains;
4523 while (NEXT_INSN (insn))
4524 insn = NEXT_INSN (insn);
4525 SET_NEXT_INSN (insn) = *to;
4526 if (*to)
4527 SET_PREV_INSN (*to) = insn;
4528 *to = remains;
4529 }
4530 }
4531
4532 /* Return true when blocks A and B can be safely merged. */
4533
4534 static bool
4535 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4536 {
4537 /* If we are partitioning hot/cold basic blocks, we don't want to
4538 mess up unconditional or indirect jumps that cross between hot
4539 and cold sections.
4540
4541 Basic block partitioning may result in some jumps that appear to
4542 be optimizable (or blocks that appear to be mergeable), but which really
4543 must be left untouched (they are required to make it safely across
4544 partition boundaries). See the comments at the top of
4545 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4546
4547 if (BB_PARTITION (a) != BB_PARTITION (b))
4548 return false;
4549
4550 /* Protect the loop latches. */
4551 if (current_loops && b->loop_father->latch == b)
4552 return false;
4553
4554 /* If we would end up moving B's instructions, make sure it doesn't fall
4555 through into the exit block, since we cannot recover from a fallthrough
4556 edge into the exit block occurring in the middle of a function. */
4557 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4558 {
4559 edge e = find_fallthru_edge (b->succs);
4560 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4561 return false;
4562 }
4563
4564 /* There must be exactly one edge in between the blocks. */
4565 return (single_succ_p (a)
4566 && single_succ (a) == b
4567 && single_pred_p (b)
4568 && a != b
4569 /* Must be simple edge. */
4570 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4571 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4572 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4573 /* If the jump insn has side effects, we can't kill the edge.
4574 When not optimizing, try_redirect_by_replacing_jump will
4575 not allow us to redirect an edge by replacing a table jump. */
4576 && (!JUMP_P (BB_END (a))
4577 || ((!optimize || reload_completed)
4578 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4579 }
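
/* For instance (hypothetical blocks): if A ends in a simple jump whose
   only successor is B, B has no other predecessors, and both blocks live
   in the same partition, every check above passes; the jump itself is
   then removed by the dummy redirection in cfg_layout_merge_blocks.  */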
4580
4581 /* Merge block A and B. The blocks must be mergeable. */
4582
4583 static void
4584 cfg_layout_merge_blocks (basic_block a, basic_block b)
4585 {
4586 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4587 rtx_insn *insn;
4588
4589 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4590
4591 if (dump_file)
4592 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4593 a->index);
4594
4595 /* If there was a CODE_LABEL beginning B, delete it. */
4596 if (LABEL_P (BB_HEAD (b)))
4597 {
4598 delete_insn (BB_HEAD (b));
4599 }
4600
4601 /* We should have a fallthru edge in A; otherwise we can do a dummy
4602 redirection to get it cleaned up. */
4603 if (JUMP_P (BB_END (a)))
4604 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4605 gcc_assert (!JUMP_P (BB_END (a)));
4606
4607 /* When not optimizing and the edge is the only place in RTL which holds
4608 some unique locus, emit a nop with that locus in between. */
4609 if (!optimize)
4610 emit_nop_for_unique_locus_between (a, b);
4611
4612 /* Move things from b->footer after a->footer. */
4613 if (BB_FOOTER (b))
4614 {
4615 if (!BB_FOOTER (a))
4616 BB_FOOTER (a) = BB_FOOTER (b);
4617 else
4618 {
4619 rtx_insn *last = BB_FOOTER (a);
4620
4621 while (NEXT_INSN (last))
4622 last = NEXT_INSN (last);
4623 SET_NEXT_INSN (last) = BB_FOOTER (b);
4624 SET_PREV_INSN (BB_FOOTER (b)) = last;
4625 }
4626 BB_FOOTER (b) = NULL;
4627 }
4628
4629 /* Move things from b->header before a->footer.
4630 Note that this may include dead tablejump data, but we don't clean
4631 those up until we go out of cfglayout mode. */
4632 if (BB_HEADER (b))
4633 {
4634 if (! BB_FOOTER (a))
4635 BB_FOOTER (a) = BB_HEADER (b);
4636 else
4637 {
4638 rtx_insn *last = BB_HEADER (b);
4639
4640 while (NEXT_INSN (last))
4641 last = NEXT_INSN (last);
4642 SET_NEXT_INSN (last) = BB_FOOTER (a);
4643 SET_PREV_INSN (BB_FOOTER (a)) = last;
4644 BB_FOOTER (a) = BB_HEADER (b);
4645 }
4646 BB_HEADER (b) = NULL;
4647 }
4648
4649 /* If the basic blocks are not adjacent, move B's insns after A's. */
4650 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4651 {
4652 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4653
4654 emit_insn_after_noloc (insn, BB_END (a), a);
4655 }
4656 /* Otherwise just re-associate the instructions. */
4657 else
4658 {
4659 insn = BB_HEAD (b);
4660 BB_END (a) = BB_END (b);
4661 }
4662
4663 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4664 We need to call it explicitly. */
4665 update_bb_for_insn_chain (insn, BB_END (b), a);
4666
4667 /* Skip possible DELETED_LABEL insn. */
4668 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4669 insn = NEXT_INSN (insn);
4670 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4671 BB_HEAD (b) = BB_END (b) = NULL;
4672 delete_insn (insn);
4673
4674 df_bb_delete (b->index);
4675
4676 /* If B was a forwarder block, propagate the locus on the edge. */
4677 if (forwarder_p
4678 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4679 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4680
4681 if (dump_file)
4682 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4683 }
4684
4685 /* Split edge E. */
4686
4687 static basic_block
4688 cfg_layout_split_edge (edge e)
4689 {
4690 basic_block new_bb =
4691 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4692 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4693 NULL_RTX, e->src);
4694
4695 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4696 BB_COPY_PARTITION (new_bb, e->src);
4697 else
4698 BB_COPY_PARTITION (new_bb, e->dest);
4699 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4700 redirect_edge_and_branch_force (e, new_bb);
4701
4702 return new_bb;
4703 }
4704
4705 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4706
4707 static void
4708 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4709 {
4710 }
4711
4712 /* Return true if BB contains only labels or non-executable
4713 instructions. */
4714
4715 static bool
4716 rtl_block_empty_p (basic_block bb)
4717 {
4718 rtx_insn *insn;
4719
4720 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4721 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4722 return true;
4723
4724 FOR_BB_INSNS (bb, insn)
4725 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4726 return false;
4727
4728 return true;
4729 }
4730
4731 /* Split a basic block if it ends with a conditional branch and if
4732 the other part of the block is not empty. */
4733
4734 static basic_block
4735 rtl_split_block_before_cond_jump (basic_block bb)
4736 {
4737 rtx_insn *insn;
4738 rtx_insn *split_point = NULL;
4739 rtx_insn *last = NULL;
4740 bool found_code = false;
4741
4742 FOR_BB_INSNS (bb, insn)
4743 {
4744 if (any_condjump_p (insn))
4745 split_point = last;
4746 else if (NONDEBUG_INSN_P (insn))
4747 found_code = true;
4748 last = insn;
4749 }
4750
4751 /* Split only if we found a conditional jump and real code besides it. */
4752 if (found_code && split_point)
4753 return split_block (bb, split_point)->dest;
4754 else
4755 return NULL;
4756 }
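
/* Example on a hypothetical insn stream: for a block containing

       r0 = r1 + r2;
       if (r0 > 0) goto L;

   SPLIT_POINT ends up at the addition, so the block is split just before
   the conditional jump.  A block holding nothing but the jump (or only
   labels and debug insns) is left alone and NULL is returned.  */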
4757
4758 /* Return true if BB ends with a call, possibly followed by some
4759 instructions that must stay with the call, false otherwise. */
4760
4761 static bool
4762 rtl_block_ends_with_call_p (basic_block bb)
4763 {
4764 rtx_insn *insn = BB_END (bb);
4765
4766 while (!CALL_P (insn)
4767 && insn != BB_HEAD (bb)
4768 && (keep_with_call_p (insn)
4769 || NOTE_P (insn)
4770 || DEBUG_INSN_P (insn)))
4771 insn = PREV_INSN (insn);
4772 return (CALL_P (insn));
4773 }
4774
4775 /* Return true if BB ends with a conditional branch, false otherwise. */
4776
4777 static bool
4778 rtl_block_ends_with_condjump_p (const_basic_block bb)
4779 {
4780 return any_condjump_p (BB_END (bb));
4781 }
4782
4783 /* Return true if we need to add a fake edge to the exit block.
4784 Helper function for rtl_flow_call_edges_add. */
4785
4786 static bool
4787 need_fake_edge_p (const rtx_insn *insn)
4788 {
4789 if (!INSN_P (insn))
4790 return false;
4791
4792 if ((CALL_P (insn)
4793 && !SIBLING_CALL_P (insn)
4794 && !find_reg_note (insn, REG_NORETURN, NULL)
4795 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4796 return true;
4797
4798 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4799 && MEM_VOLATILE_P (PATTERN (insn)))
4800 || (GET_CODE (PATTERN (insn)) == PARALLEL
4801 && asm_noperands (insn) != -1
4802 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4803 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4804 }
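
/* For example (hypothetical insns), a plain call or a volatile asm such as

       call foo                  ; foo may call exit () or longjmp ()
       asm volatile ("...")      ; may not fall through

   needs a fake edge, whereas a sibling call, a call carrying a
   REG_NORETURN note, or a const/pure call does not.  */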
4805
4806 /* Add fake edges to the function exit for any non-constant, non-noreturn
4807 calls and volatile inline assembly in the bitmap of blocks specified by
4808 BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of
4809 blocks that were split.
4810
4811 The goal is to expose cases in which entering a basic block does not imply
4812 that all subsequent instructions must be executed. */
4813
4814 static int
4815 rtl_flow_call_edges_add (sbitmap blocks)
4816 {
4817 int i;
4818 int blocks_split = 0;
4819 int last_bb = last_basic_block_for_fn (cfun);
4820 bool check_last_block = false;
4821
4822 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4823 return 0;
4824
4825 if (! blocks)
4826 check_last_block = true;
4827 else
4828 check_last_block = bitmap_bit_p (blocks,
4829 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4830
4831 /* In the last basic block, before epilogue generation, there will be
4832 a fallthru edge to EXIT. Special care is required if the last insn
4833 of the last basic block is a call because make_edge folds duplicate
4834 edges, which would result in the fallthru edge also being marked
4835 fake, which would result in the fallthru edge being removed by
4836 remove_fake_edges, which would result in an invalid CFG.
4837
4838 Moreover, we can't elide the outgoing fake edge, since the block
4839 profiler needs to take this into account in order to solve the minimal
4840 spanning tree in the case that the call doesn't return.
4841
4842 Handle this by adding a dummy instruction in a new last basic block. */
4843 if (check_last_block)
4844 {
4845 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4846 rtx_insn *insn = BB_END (bb);
4847
4848 /* Back up past insns that must be kept in the same block as a call. */
4849 while (insn != BB_HEAD (bb)
4850 && keep_with_call_p (insn))
4851 insn = PREV_INSN (insn);
4852
4853 if (need_fake_edge_p (insn))
4854 {
4855 edge e;
4856
4857 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4858 if (e)
4859 {
4860 insert_insn_on_edge (gen_use (const0_rtx), e);
4861 commit_edge_insertions ();
4862 }
4863 }
4864 }
4865
4866 /* Now add fake edges to the function exit for any non-constant
4867 calls since there is no way that we can determine if they will
4868 return or not... */
4869
4870 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4871 {
4872 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4873 rtx_insn *insn;
4874 rtx_insn *prev_insn;
4875
4876 if (!bb)
4877 continue;
4878
4879 if (blocks && !bitmap_bit_p (blocks, i))
4880 continue;
4881
4882 for (insn = BB_END (bb); ; insn = prev_insn)
4883 {
4884 prev_insn = PREV_INSN (insn);
4885 if (need_fake_edge_p (insn))
4886 {
4887 edge e;
4888 rtx_insn *split_at_insn = insn;
4889
4890 /* Don't split the block between a call and an insn that should
4891 remain in the same block as the call. */
4892 if (CALL_P (insn))
4893 while (split_at_insn != BB_END (bb)
4894 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4895 split_at_insn = NEXT_INSN (split_at_insn);
4896
4897 /* The handling above of the final block before the epilogue
4898 should be enough to verify that there is no edge to the exit
4899 block in CFG already. Calling make_edge in such case would
4900 cause us to mark that edge as fake and remove it later. */
4901
4902 #ifdef ENABLE_CHECKING
4903 if (split_at_insn == BB_END (bb))
4904 {
4905 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4906 gcc_assert (e == NULL);
4907 }
4908 #endif
4909
4910 /* Note that the following may create a new basic block
4911 and renumber the existing basic blocks. */
4912 if (split_at_insn != BB_END (bb))
4913 {
4914 e = split_block (bb, split_at_insn);
4915 if (e)
4916 blocks_split++;
4917 }
4918
4919 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4920 }
4921
4922 if (insn == BB_HEAD (bb))
4923 break;
4924 }
4925 }
4926
4927 if (blocks_split)
4928 verify_flow_info ();
4929
4930 return blocks_split;
4931 }
4932
4933 /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
4934 the conditional branch target, SECOND_HEAD should be the fall-thru;
4935 there is no need to handle the fall-thru here because the loop
4936 versioning code handles it. SECOND_HEAD exists because the tree-level
4937 hook needs it, and both hooks must have the same signature. */
4938 static void
4939 rtl_lv_add_condition_to_bb (basic_block first_head,
4940 basic_block second_head ATTRIBUTE_UNUSED,
4941 basic_block cond_bb, void *comp_rtx)
4942 {
4943 rtx_code_label *label;
4944 rtx_insn *seq, *jump;
4945 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4946 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4947 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4948 machine_mode mode;
4949
4950
4951 label = block_label (first_head);
4952 mode = GET_MODE (op0);
4953 if (mode == VOIDmode)
4954 mode = GET_MODE (op1);
4955
4956 start_sequence ();
4957 op0 = force_operand (op0, NULL_RTX);
4958 op1 = force_operand (op1, NULL_RTX);
4959 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label, -1);
4960 jump = get_last_insn ();
4961 JUMP_LABEL (jump) = label;
4962 LABEL_NUSES (label)++;
4963 seq = get_insns ();
4964 end_sequence ();
4965
4966 /* Add the new cond, in the new head. */
4967 emit_insn_after (seq, BB_END (cond_bb));
4968 }
4969
4970
4971 /* Given a block B with a conditional branch at its end, store the
4972 branch edge in *BRANCH_EDGE and the fall-thru edge in
4973 *FALLTHRU_EDGE respectively. */
4974 static void
4975 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4976 edge *fallthru_edge)
4977 {
4978 edge e = EDGE_SUCC (b, 0);
4979
4980 if (e->flags & EDGE_FALLTHRU)
4981 {
4982 *fallthru_edge = e;
4983 *branch_edge = EDGE_SUCC (b, 1);
4984 }
4985 else
4986 {
4987 *branch_edge = e;
4988 *fallthru_edge = EDGE_SUCC (b, 1);
4989 }
4990 }
4991
4992 void
4993 init_rtl_bb_info (basic_block bb)
4994 {
4995 gcc_assert (!bb->il.x.rtl);
4996 bb->il.x.head_ = NULL;
4997 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
4998 }
4999
5000 /* Returns true if it is possible to remove edge E by redirecting
5001 it to the destination of the other edge from E->src. */
5002
5003 static bool
5004 rtl_can_remove_branch_p (const_edge e)
5005 {
5006 const_basic_block src = e->src;
5007 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5008 const rtx_insn *insn = BB_END (src);
5009 rtx set;
5010
5011 /* The conditions are taken from try_redirect_by_replacing_jump. */
5012 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5013 return false;
5014
5015 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5016 return false;
5017
5018 if (BB_PARTITION (src) != BB_PARTITION (target))
5019 return false;
5020
5021 if (!onlyjump_p (insn)
5022 || tablejump_p (insn, NULL, NULL))
5023 return false;
5024
5025 set = single_set (insn);
5026 if (!set || side_effects_p (set))
5027 return false;
5028
5029 return true;
5030 }
5031
5032 static basic_block
5033 rtl_duplicate_bb (basic_block bb)
5034 {
5035 bb = cfg_layout_duplicate_bb (bb);
5036 bb->aux = NULL;
5037 return bb;
5038 }
5039
5040 /* Do book-keeping of basic block BB for the profile consistency checker.
5041 If AFTER_PASS is 0, do pre-pass accounting; if AFTER_PASS is 1,
5042 do post-pass accounting. Store the counts in RECORD. */
5043 static void
5044 rtl_account_profile_record (basic_block bb, int after_pass,
5045 struct profile_record *record)
5046 {
5047 rtx_insn *insn;
5048 FOR_BB_INSNS (bb, insn)
5049 if (INSN_P (insn))
5050 {
5051 record->size[after_pass]
5052 += insn_rtx_cost (PATTERN (insn), false);
5053 if (profile_status_for_fn (cfun) == PROFILE_READ)
5054 record->time[after_pass]
5055 += insn_rtx_cost (PATTERN (insn), true) * bb->count;
5056 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5057 record->time[after_pass]
5058 += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
5059 }
5060 }
5061
5062 /* Implementation of CFG manipulation for linearized RTL. */
5063 struct cfg_hooks rtl_cfg_hooks = {
5064 "rtl",
5065 rtl_verify_flow_info,
5066 rtl_dump_bb,
5067 rtl_dump_bb_for_graph,
5068 rtl_create_basic_block,
5069 rtl_redirect_edge_and_branch,
5070 rtl_redirect_edge_and_branch_force,
5071 rtl_can_remove_branch_p,
5072 rtl_delete_block,
5073 rtl_split_block,
5074 rtl_move_block_after,
5075 rtl_can_merge_blocks, /* can_merge_blocks_p */
5076 rtl_merge_blocks,
5077 rtl_predict_edge,
5078 rtl_predicted_by_p,
5079 cfg_layout_can_duplicate_bb_p,
5080 rtl_duplicate_bb,
5081 rtl_split_edge,
5082 rtl_make_forwarder_block,
5083 rtl_tidy_fallthru_edge,
5084 rtl_force_nonfallthru,
5085 rtl_block_ends_with_call_p,
5086 rtl_block_ends_with_condjump_p,
5087 rtl_flow_call_edges_add,
5088 NULL, /* execute_on_growing_pred */
5089 NULL, /* execute_on_shrinking_pred */
5090 NULL, /* duplicate loop for trees */
5091 NULL, /* lv_add_condition_to_bb */
5092 NULL, /* lv_adjust_loop_header_phi*/
5093 NULL, /* extract_cond_bb_edges */
5094 NULL, /* flush_pending_stmts */
5095 rtl_block_empty_p, /* block_empty_p */
5096 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5097 rtl_account_profile_record,
5098 };
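
/* This table (and cfg_layout_rtl_cfg_hooks below) is consumed through the
   generic wrappers declared in cfghooks.h; for example, a call such as

       split_block (bb, insn);

   dispatches to rtl_split_block or cfg_layout_split_block depending on
   whether rtl_register_cfg_hooks () or cfg_layout_rtl_register_cfg_hooks ()
   was installed last.  */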
5099
5100 /* Implementation of CFG manipulation for cfg layout RTL, where
5101 basic blocks connected via fallthru edges do not have to be adjacent.
5102 This representation will hopefully become the default one in a future
5103 version of the compiler. */
5104
5105 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5106 "cfglayout mode",
5107 rtl_verify_flow_info_1,
5108 rtl_dump_bb,
5109 rtl_dump_bb_for_graph,
5110 cfg_layout_create_basic_block,
5111 cfg_layout_redirect_edge_and_branch,
5112 cfg_layout_redirect_edge_and_branch_force,
5113 rtl_can_remove_branch_p,
5114 cfg_layout_delete_block,
5115 cfg_layout_split_block,
5116 rtl_move_block_after,
5117 cfg_layout_can_merge_blocks_p,
5118 cfg_layout_merge_blocks,
5119 rtl_predict_edge,
5120 rtl_predicted_by_p,
5121 cfg_layout_can_duplicate_bb_p,
5122 cfg_layout_duplicate_bb,
5123 cfg_layout_split_edge,
5124 rtl_make_forwarder_block,
5125 NULL, /* tidy_fallthru_edge */
5126 rtl_force_nonfallthru,
5127 rtl_block_ends_with_call_p,
5128 rtl_block_ends_with_condjump_p,
5129 rtl_flow_call_edges_add,
5130 NULL, /* execute_on_growing_pred */
5131 NULL, /* execute_on_shrinking_pred */
5132 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5133 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5134 NULL, /* lv_adjust_loop_header_phi*/
5135 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5136 NULL, /* flush_pending_stmts */
5137 rtl_block_empty_p, /* block_empty_p */
5138 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5139 rtl_account_profile_record,
5140 };
5141
5142 #include "gt-cfgrtl.h"