re PR target/83488 (ICE on a CET test-case)
[gcc.git] / gcc / cfgrtl.c
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and analyze it
21 that are aware of the RTL intermediate language.
22
23 Available functionality:
24 - Basic CFG/RTL manipulation API documented in cfghooks.h
25 - CFG-aware instruction chain manipulation
26 delete_insn, delete_insn_chain
27 - Edge splitting and committing to edges
28 insert_insn_on_edge, commit_edge_insertions
29 - CFG updating after insn simplification
30 purge_dead_edges, purge_all_dead_edges
31 - CFG fixing after coarse manipulation
32 fixup_abnormal_edges
33
34 Functions not supposed for generic use:
35 - Infrastructure to determine quickly basic block for insn
36 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
37 - Edge redirection with updating and optimizing of insn chain
38 block_label, tidy_fallthru_edge, force_nonfallthru */
39 \f
40 #include "config.h"
41 #include "system.h"
42 #include "coretypes.h"
43 #include "backend.h"
44 #include "target.h"
45 #include "rtl.h"
46 #include "tree.h"
47 #include "cfghooks.h"
48 #include "df.h"
49 #include "insn-config.h"
50 #include "memmodel.h"
51 #include "emit-rtl.h"
52 #include "cfgrtl.h"
53 #include "cfganal.h"
54 #include "cfgbuild.h"
55 #include "cfgcleanup.h"
56 #include "bb-reorder.h"
57 #include "rtl-error.h"
58 #include "insn-attr.h"
59 #include "dojump.h"
60 #include "expr.h"
61 #include "cfgloop.h"
62 #include "tree-pass.h"
63 #include "print-rtl.h"
64
65 /* Holds the interesting leading and trailing notes for the function.
66 Only applicable if the CFG is in cfglayout mode. */
67 static GTY(()) rtx_insn *cfg_layout_function_footer;
68 static GTY(()) rtx_insn *cfg_layout_function_header;
69
70 static rtx_insn *skip_insns_after_block (basic_block);
71 static void record_effective_endpoints (void);
72 static void fixup_reorder_chain (void);
73
74 void verify_insn_chain (void);
75 static void fixup_fallthru_exit_predecessor (void);
76 static int can_delete_note_p (const rtx_note *);
77 static int can_delete_label_p (const rtx_code_label *);
78 static basic_block rtl_split_edge (edge);
79 static bool rtl_move_block_after (basic_block, basic_block);
80 static int rtl_verify_flow_info (void);
81 static basic_block cfg_layout_split_block (basic_block, void *);
82 static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
83 static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
84 static void cfg_layout_delete_block (basic_block);
85 static void rtl_delete_block (basic_block);
86 static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
87 static edge rtl_redirect_edge_and_branch (edge, basic_block);
88 static basic_block rtl_split_block (basic_block, void *);
89 static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
90 static int rtl_verify_flow_info_1 (void);
91 static void rtl_make_forwarder_block (edge);
92 \f
93 /* Return true if NOTE is not one of the ones that must be kept paired,
94 so that we may simply delete it. */
95
96 static int
97 can_delete_note_p (const rtx_note *note)
98 {
99 switch (NOTE_KIND (note))
100 {
101 case NOTE_INSN_DELETED:
102 case NOTE_INSN_BASIC_BLOCK:
103 case NOTE_INSN_EPILOGUE_BEG:
104 return true;
105
106 default:
107 return false;
108 }
109 }
110
111 /* True if a given label can be deleted. */
112
113 static int
114 can_delete_label_p (const rtx_code_label *label)
115 {
116 return (!LABEL_PRESERVE_P (label)
117 /* User declared labels must be preserved. */
118 && LABEL_NAME (label) == 0
119 && !vec_safe_contains<rtx_insn *> (forced_labels,
120 const_cast<rtx_code_label *> (label)));
121 }
122
123 /* Delete INSN by patching it out. */
124
125 void
126 delete_insn (rtx_insn *insn)
127 {
128 rtx note;
129 bool really_delete = true;
130
131 if (LABEL_P (insn))
132 {
133 /* Some labels can't be directly removed from the INSN chain, as they
134 might be references via variables, constant pool etc.
135 Convert them to the special NOTE_INSN_DELETED_LABEL note. */
136 if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
137 {
138 const char *name = LABEL_NAME (insn);
139 basic_block bb = BLOCK_FOR_INSN (insn);
140 rtx_insn *bb_note = NEXT_INSN (insn);
141
142 really_delete = false;
143 PUT_CODE (insn, NOTE);
144 NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
145 NOTE_DELETED_LABEL_NAME (insn) = name;
146
147 /* If the note following the label starts a basic block, and the
148 label is a member of the same basic block, interchange the two. */
149 if (bb_note != NULL_RTX
150 && NOTE_INSN_BASIC_BLOCK_P (bb_note)
151 && bb != NULL
152 && bb == BLOCK_FOR_INSN (bb_note))
153 {
154 reorder_insns_nobb (insn, insn, bb_note);
155 BB_HEAD (bb) = bb_note;
156 if (BB_END (bb) == bb_note)
157 BB_END (bb) = insn;
158 }
159 }
160
161 remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
162 }
163
164 if (really_delete)
165 {
166 /* If this insn has already been deleted, something is very wrong. */
167 gcc_assert (!insn->deleted ());
168 if (INSN_P (insn))
169 df_insn_delete (insn);
170 remove_insn (insn);
171 insn->set_deleted ();
172 }
173
174 /* If deleting a jump, decrement the use count of the label. Deleting
175 the label itself should happen in the normal course of block merging. */
176 if (JUMP_P (insn))
177 {
178 if (JUMP_LABEL (insn)
179 && LABEL_P (JUMP_LABEL (insn)))
180 LABEL_NUSES (JUMP_LABEL (insn))--;
181
182 /* If there are more targets, remove them too. */
183 while ((note
184 = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
185 && LABEL_P (XEXP (note, 0)))
186 {
187 LABEL_NUSES (XEXP (note, 0))--;
188 remove_note (insn, note);
189 }
190 }
191
192 /* Also if deleting any insn that references a label as an operand. */
193 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
194 && LABEL_P (XEXP (note, 0)))
195 {
196 LABEL_NUSES (XEXP (note, 0))--;
197 remove_note (insn, note);
198 }
199
200 if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
201 {
202 rtvec vec = table->get_labels ();
203 int len = GET_NUM_ELEM (vec);
204 int i;
205
206 for (i = 0; i < len; i++)
207 {
208 rtx label = XEXP (RTVEC_ELT (vec, i), 0);
209
210 /* When deleting code in bulk (e.g. removing many unreachable
211 blocks) we can delete a label that's a target of the vector
212 before deleting the vector itself. */
213 if (!NOTE_P (label))
214 LABEL_NUSES (label)--;
215 }
216 }
217 }
218
219 /* Like delete_insn but also purge dead edges from BB.
220 Return true if any edges are eliminated. */
221
222 bool
223 delete_insn_and_edges (rtx_insn *insn)
224 {
225 bool purge = false;
226
227 if (INSN_P (insn)
228 && BLOCK_FOR_INSN (insn)
229 && BB_END (BLOCK_FOR_INSN (insn)) == insn)
230 purge = true;
231 delete_insn (insn);
232 if (purge)
233 return purge_dead_edges (BLOCK_FOR_INSN (insn));
234 return false;
235 }
236
237 /* Unlink a chain of insns between START and FINISH, leaving notes
238 that must be paired. If CLEAR_BB is true, we set bb field for
239 insns that cannot be removed to NULL. */
240
241 void
242 delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
243 {
244 /* Unchain the insns one by one. It would be quicker to delete all of these
245 with a single unchaining, rather than one at a time, but we need to keep
246 the NOTE's. */
247 rtx_insn *current = finish;
248 while (1)
249 {
250 rtx_insn *prev = PREV_INSN (current);
251 if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
252 ;
253 else
254 delete_insn (current);
255
256 if (clear_bb && !current->deleted ())
257 set_block_for_insn (current, NULL);
258
259 if (current == start)
260 break;
261 current = prev;
262 }
263 }
264 \f
265 /* Create a new basic block consisting of the instructions between HEAD and END
266 inclusive. This function is designed to allow fast BB construction - reuses
267 the note and basic block struct in BB_NOTE, if any and do not grow
268 BASIC_BLOCK chain and should be used directly only by CFG construction code.
269 END can be NULL in to create new empty basic block before HEAD. Both END
270 and HEAD can be NULL to create basic block at the end of INSN chain.
271 AFTER is the basic block we should be put after. */
272
273 basic_block
274 create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
275 basic_block after)
276 {
277 basic_block bb;
278
279 if (bb_note
280 && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
281 && bb->aux == NULL)
282 {
283 /* If we found an existing note, thread it back onto the chain. */
284
285 rtx_insn *after;
286
287 if (LABEL_P (head))
288 after = head;
289 else
290 {
291 after = PREV_INSN (head);
292 head = bb_note;
293 }
294
295 if (after != bb_note && NEXT_INSN (after) != bb_note)
296 reorder_insns_nobb (bb_note, bb_note, after);
297 }
298 else
299 {
300 /* Otherwise we must create a note and a basic block structure. */
301
302 bb = alloc_block ();
303
304 init_rtl_bb_info (bb);
305 if (!head && !end)
306 head = end = bb_note
307 = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
308 else if (LABEL_P (head) && end)
309 {
310 bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
311 if (head == end)
312 end = bb_note;
313 }
314 else
315 {
316 bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
317 head = bb_note;
318 if (!end)
319 end = head;
320 }
321
322 NOTE_BASIC_BLOCK (bb_note) = bb;
323 }
324
325 /* Always include the bb note in the block. */
326 if (NEXT_INSN (end) == bb_note)
327 end = bb_note;
328
329 BB_HEAD (bb) = head;
330 BB_END (bb) = end;
331 bb->index = last_basic_block_for_fn (cfun)++;
332 bb->flags = BB_NEW | BB_RTL;
333 link_block (bb, after);
334 SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
335 df_bb_refs_record (bb->index, false);
336 update_bb_for_insn (bb);
337 BB_SET_PARTITION (bb, BB_UNPARTITIONED);
338
339 /* Tag the block so that we know it has been used when considering
340 other basic block notes. */
341 bb->aux = bb;
342
343 return bb;
344 }
345
346 /* Create new basic block consisting of instructions in between HEAD and END
347 and place it to the BB chain after block AFTER. END can be NULL to
348 create a new empty basic block before HEAD. Both END and HEAD can be
349 NULL to create basic block at the end of INSN chain. */
350
351 static basic_block
352 rtl_create_basic_block (void *headp, void *endp, basic_block after)
353 {
354 rtx_insn *head = (rtx_insn *) headp;
355 rtx_insn *end = (rtx_insn *) endp;
356 basic_block bb;
357
358 /* Grow the basic block array if needed. */
359 if ((size_t) last_basic_block_for_fn (cfun)
360 >= basic_block_info_for_fn (cfun)->length ())
361 {
362 size_t new_size =
363 (last_basic_block_for_fn (cfun)
364 + (last_basic_block_for_fn (cfun) + 3) / 4);
365 vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
366 }
367
368 n_basic_blocks_for_fn (cfun)++;
369
370 bb = create_basic_block_structure (head, end, NULL, after);
371 bb->aux = NULL;
372 return bb;
373 }
374
375 static basic_block
376 cfg_layout_create_basic_block (void *head, void *end, basic_block after)
377 {
378 basic_block newbb = rtl_create_basic_block (head, end, after);
379
380 return newbb;
381 }
382 \f
383 /* Delete the insns in a (non-live) block. We physically delete every
384 non-deleted-note insn, and update the flow graph appropriately.
385
386 Return nonzero if we deleted an exception handler. */
387
388 /* ??? Preserving all such notes strikes me as wrong. It would be nice
389 to post-process the stream to remove empty blocks, loops, ranges, etc. */
390
391 static void
392 rtl_delete_block (basic_block b)
393 {
394 rtx_insn *insn, *end;
395
396 /* If the head of this block is a CODE_LABEL, then it might be the
397 label for an exception handler which can't be reached. We need
398 to remove the label from the exception_handler_label list. */
399 insn = BB_HEAD (b);
400
401 end = get_last_bb_insn (b);
402
403 /* Selectively delete the entire chain. */
404 BB_HEAD (b) = NULL;
405 delete_insn_chain (insn, end, true);
406
407
408 if (dump_file)
409 fprintf (dump_file, "deleting block %d\n", b->index);
410 df_bb_delete (b->index);
411 }
412 \f
413 /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
414
415 void
416 compute_bb_for_insn (void)
417 {
418 basic_block bb;
419
420 FOR_EACH_BB_FN (bb, cfun)
421 {
422 rtx_insn *end = BB_END (bb);
423 rtx_insn *insn;
424
425 for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
426 {
427 BLOCK_FOR_INSN (insn) = bb;
428 if (insn == end)
429 break;
430 }
431 }
432 }
433
434 /* Release the basic_block_for_insn array. */
435
436 unsigned int
437 free_bb_for_insn (void)
438 {
439 rtx_insn *insn;
440 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
441 if (!BARRIER_P (insn))
442 BLOCK_FOR_INSN (insn) = NULL;
443 return 0;
444 }
445
446 namespace {
447
448 const pass_data pass_data_free_cfg =
449 {
450 RTL_PASS, /* type */
451 "*free_cfg", /* name */
452 OPTGROUP_NONE, /* optinfo_flags */
453 TV_NONE, /* tv_id */
454 0, /* properties_required */
455 0, /* properties_provided */
456 PROP_cfg, /* properties_destroyed */
457 0, /* todo_flags_start */
458 0, /* todo_flags_finish */
459 };
460
461 class pass_free_cfg : public rtl_opt_pass
462 {
463 public:
464 pass_free_cfg (gcc::context *ctxt)
465 : rtl_opt_pass (pass_data_free_cfg, ctxt)
466 {}
467
468 /* opt_pass methods: */
469 virtual unsigned int execute (function *);
470
471 }; // class pass_free_cfg
472
473 unsigned int
474 pass_free_cfg::execute (function *)
475 {
476 /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
477 valid at that point so it would be too late to call df_analyze. */
478 if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
479 {
480 df_note_add_problem ();
481 df_analyze ();
482 }
483
484 if (crtl->has_bb_partition)
485 insert_section_boundary_note ();
486
487 free_bb_for_insn ();
488 return 0;
489 }
490
491 } // anon namespace
492
493 rtl_opt_pass *
494 make_pass_free_cfg (gcc::context *ctxt)
495 {
496 return new pass_free_cfg (ctxt);
497 }
498
499 /* Return RTX to emit after when we want to emit code on the entry of function. */
500 rtx_insn *
501 entry_of_function (void)
502 {
503 return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
504 BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
505 }
506
507 /* Emit INSN at the entry point of the function, ensuring that it is only
508 executed once per function. */
509 void
510 emit_insn_at_entry (rtx insn)
511 {
512 edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
513 edge e = ei_safe_edge (ei);
514 gcc_assert (e->flags & EDGE_FALLTHRU);
515
516 insert_insn_on_edge (insn, e);
517 commit_edge_insertions ();
518 }
519
520 /* Update BLOCK_FOR_INSN of insns between BEGIN and END
521 (or BARRIER if found) and notify df of the bb change.
522 The insn chain range is inclusive
523 (i.e. both BEGIN and END will be updated. */
524
525 static void
526 update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
527 {
528 rtx_insn *insn;
529
530 end = NEXT_INSN (end);
531 for (insn = begin; insn != end; insn = NEXT_INSN (insn))
532 if (!BARRIER_P (insn))
533 df_insn_change_bb (insn, bb);
534 }
535
536 /* Update BLOCK_FOR_INSN of insns in BB to BB,
537 and notify df of the change. */
538
539 void
540 update_bb_for_insn (basic_block bb)
541 {
542 update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
543 }
544
545 \f
546 /* Like active_insn_p, except keep the return value clobber around
547 even after reload. */
548
549 static bool
550 flow_active_insn_p (const rtx_insn *insn)
551 {
552 if (active_insn_p (insn))
553 return true;
554
555 /* A clobber of the function return value exists for buggy
556 programs that fail to return a value. Its effect is to
557 keep the return value from being live across the entire
558 function. If we allow it to be skipped, we introduce the
559 possibility for register lifetime confusion. */
560 if (GET_CODE (PATTERN (insn)) == CLOBBER
561 && REG_P (XEXP (PATTERN (insn), 0))
562 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
563 return true;
564
565 return false;
566 }
567
568 /* Return true if the block has no effect and only forwards control flow to
569 its single destination. */
570
571 bool
572 contains_no_active_insn_p (const_basic_block bb)
573 {
574 rtx_insn *insn;
575
576 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
577 || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
578 || !single_succ_p (bb)
579 || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
580 return false;
581
582 for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
583 if (INSN_P (insn) && flow_active_insn_p (insn))
584 return false;
585
586 return (!INSN_P (insn)
587 || (JUMP_P (insn) && simplejump_p (insn))
588 || !flow_active_insn_p (insn));
589 }
590
591 /* Likewise, but protect loop latches, headers and preheaders. */
592 /* FIXME: Make this a cfg hook. */
593
594 bool
595 forwarder_block_p (const_basic_block bb)
596 {
597 if (!contains_no_active_insn_p (bb))
598 return false;
599
600 /* Protect loop latches, headers and preheaders. */
601 if (current_loops)
602 {
603 basic_block dest;
604 if (bb->loop_father->header == bb)
605 return false;
606 dest = EDGE_SUCC (bb, 0)->dest;
607 if (dest->loop_father->header == dest)
608 return false;
609 }
610
611 return true;
612 }
613
614 /* Return nonzero if we can reach target from src by falling through. */
615 /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */
616
617 bool
618 can_fallthru (basic_block src, basic_block target)
619 {
620 rtx_insn *insn = BB_END (src);
621 rtx_insn *insn2;
622 edge e;
623 edge_iterator ei;
624
625 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
626 return true;
627 if (src->next_bb != target)
628 return false;
629
630 /* ??? Later we may add code to move jump tables offline. */
631 if (tablejump_p (insn, NULL, NULL))
632 return false;
633
634 FOR_EACH_EDGE (e, ei, src->succs)
635 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
636 && e->flags & EDGE_FALLTHRU)
637 return false;
638
639 insn2 = BB_HEAD (target);
640 if (!active_insn_p (insn2))
641 insn2 = next_active_insn (insn2);
642
643 return next_active_insn (insn) == insn2;
644 }
645
646 /* Return nonzero if we could reach target from src by falling through,
647 if the target was made adjacent. If we already have a fall-through
648 edge to the exit block, we can't do that. */
649 static bool
650 could_fall_through (basic_block src, basic_block target)
651 {
652 edge e;
653 edge_iterator ei;
654
655 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
656 return true;
657 FOR_EACH_EDGE (e, ei, src->succs)
658 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
659 && e->flags & EDGE_FALLTHRU)
660 return 0;
661 return true;
662 }
663 \f
664 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
665 rtx_note *
666 bb_note (basic_block bb)
667 {
668 rtx_insn *note;
669
670 note = BB_HEAD (bb);
671 if (LABEL_P (note))
672 note = NEXT_INSN (note);
673
674 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
675 return as_a <rtx_note *> (note);
676 }
677
678 /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
679 note associated with the BLOCK. */
680
681 static rtx_insn *
682 first_insn_after_basic_block_note (basic_block block)
683 {
684 rtx_insn *insn;
685
686 /* Get the first instruction in the block. */
687 insn = BB_HEAD (block);
688
689 if (insn == NULL_RTX)
690 return NULL;
691 if (LABEL_P (insn))
692 insn = NEXT_INSN (insn);
693 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
694
695 return NEXT_INSN (insn);
696 }
697
698 /* Creates a new basic block just after basic block BB by splitting
699 everything after specified instruction INSNP. */
700
701 static basic_block
702 rtl_split_block (basic_block bb, void *insnp)
703 {
704 basic_block new_bb;
705 rtx_insn *insn = (rtx_insn *) insnp;
706 edge e;
707 edge_iterator ei;
708
709 if (!insn)
710 {
711 insn = first_insn_after_basic_block_note (bb);
712
713 if (insn)
714 {
715 rtx_insn *next = insn;
716
717 insn = PREV_INSN (insn);
718
719 /* If the block contains only debug insns, insn would have
720 been NULL in a non-debug compilation, and then we'd end
721 up emitting a DELETED note. For -fcompare-debug
722 stability, emit the note too. */
723 if (insn != BB_END (bb)
724 && DEBUG_INSN_P (next)
725 && DEBUG_INSN_P (BB_END (bb)))
726 {
727 while (next != BB_END (bb) && DEBUG_INSN_P (next))
728 next = NEXT_INSN (next);
729
730 if (next == BB_END (bb))
731 emit_note_after (NOTE_INSN_DELETED, next);
732 }
733 }
734 else
735 insn = get_last_insn ();
736 }
737
738 /* We probably should check type of the insn so that we do not create
739 inconsistent cfg. It is checked in verify_flow_info anyway, so do not
740 bother. */
741 if (insn == BB_END (bb))
742 emit_note_after (NOTE_INSN_DELETED, insn);
743
744 /* Create the new basic block. */
745 new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
746 BB_COPY_PARTITION (new_bb, bb);
747 BB_END (bb) = insn;
748
749 /* Redirect the outgoing edges. */
750 new_bb->succs = bb->succs;
751 bb->succs = NULL;
752 FOR_EACH_EDGE (e, ei, new_bb->succs)
753 e->src = new_bb;
754
755 /* The new block starts off being dirty. */
756 df_set_bb_dirty (bb);
757 return new_bb;
758 }
759
760 /* Return true if the single edge between blocks A and B is the only place
761 in RTL which holds some unique locus. */
762
763 static bool
764 unique_locus_on_edge_between_p (basic_block a, basic_block b)
765 {
766 const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
767 rtx_insn *insn, *end;
768
769 if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
770 return false;
771
772 /* First scan block A backward. */
773 insn = BB_END (a);
774 end = PREV_INSN (BB_HEAD (a));
775 while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
776 insn = PREV_INSN (insn);
777
778 if (insn != end && INSN_LOCATION (insn) == goto_locus)
779 return false;
780
781 /* Then scan block B forward. */
782 insn = BB_HEAD (b);
783 if (insn)
784 {
785 end = NEXT_INSN (BB_END (b));
786 while (insn != end && !NONDEBUG_INSN_P (insn))
787 insn = NEXT_INSN (insn);
788
789 if (insn != end && INSN_HAS_LOCATION (insn)
790 && INSN_LOCATION (insn) == goto_locus)
791 return false;
792 }
793
794 return true;
795 }
796
797 /* If the single edge between blocks A and B is the only place in RTL which
798 holds some unique locus, emit a nop with that locus between the blocks. */
799
800 static void
801 emit_nop_for_unique_locus_between (basic_block a, basic_block b)
802 {
803 if (!unique_locus_on_edge_between_p (a, b))
804 return;
805
806 BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
807 INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
808 }
809
810 /* Blocks A and B are to be merged into a single block A. The insns
811 are already contiguous. */
812
813 static void
814 rtl_merge_blocks (basic_block a, basic_block b)
815 {
816 rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
817 rtx_insn *del_first = NULL, *del_last = NULL;
818 rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
819 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
820 int b_empty = 0;
821
822 if (dump_file)
823 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
824 a->index);
825
826 while (DEBUG_INSN_P (b_end))
827 b_end = PREV_INSN (b_debug_start = b_end);
828
829 /* If there was a CODE_LABEL beginning B, delete it. */
830 if (LABEL_P (b_head))
831 {
832 /* Detect basic blocks with nothing but a label. This can happen
833 in particular at the end of a function. */
834 if (b_head == b_end)
835 b_empty = 1;
836
837 del_first = del_last = b_head;
838 b_head = NEXT_INSN (b_head);
839 }
840
841 /* Delete the basic block note and handle blocks containing just that
842 note. */
843 if (NOTE_INSN_BASIC_BLOCK_P (b_head))
844 {
845 if (b_head == b_end)
846 b_empty = 1;
847 if (! del_last)
848 del_first = b_head;
849
850 del_last = b_head;
851 b_head = NEXT_INSN (b_head);
852 }
853
854 /* If there was a jump out of A, delete it. */
855 if (JUMP_P (a_end))
856 {
857 rtx_insn *prev;
858
859 for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
860 if (!NOTE_P (prev)
861 || NOTE_INSN_BASIC_BLOCK_P (prev)
862 || prev == BB_HEAD (a))
863 break;
864
865 del_first = a_end;
866
867 /* If this was a conditional jump, we need to also delete
868 the insn that set cc0. */
869 if (HAVE_cc0 && only_sets_cc0_p (prev))
870 {
871 rtx_insn *tmp = prev;
872
873 prev = prev_nonnote_insn (prev);
874 if (!prev)
875 prev = BB_HEAD (a);
876 del_first = tmp;
877 }
878
879 a_end = PREV_INSN (del_first);
880 }
881 else if (BARRIER_P (NEXT_INSN (a_end)))
882 del_first = NEXT_INSN (a_end);
883
884 /* Delete everything marked above as well as crap that might be
885 hanging out between the two blocks. */
886 BB_END (a) = a_end;
887 BB_HEAD (b) = b_empty ? NULL : b_head;
888 delete_insn_chain (del_first, del_last, true);
889
890 /* When not optimizing and the edge is the only place in RTL which holds
891 some unique locus, emit a nop with that locus in between. */
892 if (!optimize)
893 {
894 emit_nop_for_unique_locus_between (a, b);
895 a_end = BB_END (a);
896 }
897
898 /* Reassociate the insns of B with A. */
899 if (!b_empty)
900 {
901 update_bb_for_insn_chain (a_end, b_debug_end, a);
902
903 BB_END (a) = b_debug_end;
904 BB_HEAD (b) = NULL;
905 }
906 else if (b_end != b_debug_end)
907 {
908 /* Move any deleted labels and other notes between the end of A
909 and the debug insns that make up B after the debug insns,
910 bringing the debug insns into A while keeping the notes after
911 the end of A. */
912 if (NEXT_INSN (a_end) != b_debug_start)
913 reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
914 b_debug_end);
915 update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
916 BB_END (a) = b_debug_end;
917 }
918
919 df_bb_delete (b->index);
920
921 /* If B was a forwarder block, propagate the locus on the edge. */
922 if (forwarder_p
923 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
924 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
925
926 if (dump_file)
927 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
928 }
929
930
931 /* Return true when block A and B can be merged. */
932
933 static bool
934 rtl_can_merge_blocks (basic_block a, basic_block b)
935 {
936 /* If we are partitioning hot/cold basic blocks, we don't want to
937 mess up unconditional or indirect jumps that cross between hot
938 and cold sections.
939
940 Basic block partitioning may result in some jumps that appear to
941 be optimizable (or blocks that appear to be mergeable), but which really
942 must be left untouched (they are required to make it safely across
943 partition boundaries). See the comments at the top of
944 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
945
946 if (BB_PARTITION (a) != BB_PARTITION (b))
947 return false;
948
949 /* Protect the loop latches. */
950 if (current_loops && b->loop_father->latch == b)
951 return false;
952
953 /* There must be exactly one edge in between the blocks. */
954 return (single_succ_p (a)
955 && single_succ (a) == b
956 && single_pred_p (b)
957 && a != b
958 /* Must be simple edge. */
959 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
960 && a->next_bb == b
961 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
962 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
963 /* If the jump insn has side effects,
964 we can't kill the edge. */
965 && (!JUMP_P (BB_END (a))
966 || (reload_completed
967 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
968 }
969 \f
970 /* Return the label in the head of basic block BLOCK. Create one if it doesn't
971 exist. */
972
973 rtx_code_label *
974 block_label (basic_block block)
975 {
976 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
977 return NULL;
978
979 if (!LABEL_P (BB_HEAD (block)))
980 {
981 BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
982 }
983
984 return as_a <rtx_code_label *> (BB_HEAD (block));
985 }
986
987 /* Attempt to perform edge redirection by replacing possibly complex jump
988 instruction by unconditional jump or removing jump completely. This can
989 apply only if all edges now point to the same block. The parameters and
990 return values are equivalent to redirect_edge_and_branch. */
991
992 edge
993 try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
994 {
995 basic_block src = e->src;
996 rtx_insn *insn = BB_END (src), *kill_from;
997 rtx set;
998 int fallthru = 0;
999
1000 /* If we are partitioning hot/cold basic blocks, we don't want to
1001 mess up unconditional or indirect jumps that cross between hot
1002 and cold sections.
1003
1004 Basic block partitioning may result in some jumps that appear to
1005 be optimizable (or blocks that appear to be mergeable), but which really
1006 must be left untouched (they are required to make it safely across
1007 partition boundaries). See the comments at the top of
1008 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1009
1010 if (BB_PARTITION (src) != BB_PARTITION (target))
1011 return NULL;
1012
1013 /* We can replace or remove a complex jump only when we have exactly
1014 two edges. Also, if we have exactly one outgoing edge, we can
1015 redirect that. */
1016 if (EDGE_COUNT (src->succs) >= 3
1017 /* Verify that all targets will be TARGET. Specifically, the
1018 edge that is not E must also go to TARGET. */
1019 || (EDGE_COUNT (src->succs) == 2
1020 && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
1021 return NULL;
1022
1023 if (!onlyjump_p (insn))
1024 return NULL;
1025 if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
1026 return NULL;
1027
1028 /* Avoid removing branch with side effects. */
1029 set = single_set (insn);
1030 if (!set || side_effects_p (set))
1031 return NULL;
1032
1033 /* In case we zap a conditional jump, we'll need to kill
1034 the cc0 setter too. */
1035 kill_from = insn;
1036 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
1037 && only_sets_cc0_p (PREV_INSN (insn)))
1038 kill_from = PREV_INSN (insn);
1039
1040 /* See if we can create the fallthru edge. */
1041 if (in_cfglayout || can_fallthru (src, target))
1042 {
1043 if (dump_file)
1044 fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
1045 fallthru = 1;
1046
1047 /* Selectively unlink whole insn chain. */
1048 if (in_cfglayout)
1049 {
1050 rtx_insn *insn = BB_FOOTER (src);
1051
1052 delete_insn_chain (kill_from, BB_END (src), false);
1053
1054 /* Remove barriers but keep jumptables. */
1055 while (insn)
1056 {
1057 if (BARRIER_P (insn))
1058 {
1059 if (PREV_INSN (insn))
1060 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1061 else
1062 BB_FOOTER (src) = NEXT_INSN (insn);
1063 if (NEXT_INSN (insn))
1064 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
1065 }
1066 if (LABEL_P (insn))
1067 break;
1068 insn = NEXT_INSN (insn);
1069 }
1070 }
1071 else
1072 delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
1073 false);
1074 }
1075
1076 /* If this already is simplejump, redirect it. */
1077 else if (simplejump_p (insn))
1078 {
1079 if (e->dest == target)
1080 return NULL;
1081 if (dump_file)
1082 fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
1083 INSN_UID (insn), e->dest->index, target->index);
1084 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1085 block_label (target), 0))
1086 {
1087 gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
1088 return NULL;
1089 }
1090 }
1091
1092 /* Cannot do anything for target exit block. */
1093 else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1094 return NULL;
1095
1096 /* Or replace possibly complicated jump insn by simple jump insn. */
1097 else
1098 {
1099 rtx_code_label *target_label = block_label (target);
1100 rtx_insn *barrier;
1101 rtx_insn *label;
1102 rtx_jump_table_data *table;
1103
1104 emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
1105 JUMP_LABEL (BB_END (src)) = target_label;
1106 LABEL_NUSES (target_label)++;
1107 if (dump_file)
1108 fprintf (dump_file, "Replacing insn %i by jump %i\n",
1109 INSN_UID (insn), INSN_UID (BB_END (src)));
1110
1111
1112 delete_insn_chain (kill_from, insn, false);
1113
1114 /* Recognize a tablejump that we are converting to a
1115 simple jump and remove its associated CODE_LABEL
1116 and ADDR_VEC or ADDR_DIFF_VEC. */
1117 if (tablejump_p (insn, &label, &table))
1118 delete_insn_chain (label, table, false);
1119
1120 barrier = next_nonnote_nondebug_insn (BB_END (src));
1121 if (!barrier || !BARRIER_P (barrier))
1122 emit_barrier_after (BB_END (src));
1123 else
1124 {
1125 if (barrier != NEXT_INSN (BB_END (src)))
1126 {
1127 /* Move the jump before barrier so that the notes
1128 which originally were or were created before jump table are
1129 inside the basic block. */
1130 rtx_insn *new_insn = BB_END (src);
1131
1132 update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
1133 PREV_INSN (barrier), src);
1134
1135 SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
1136 SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
1137
1138 SET_NEXT_INSN (new_insn) = barrier;
1139 SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;
1140
1141 SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
1142 SET_PREV_INSN (barrier) = new_insn;
1143 }
1144 }
1145 }
1146
1147 /* Keep only one edge out and set proper flags. */
1148 if (!single_succ_p (src))
1149 remove_edge (e);
1150 gcc_assert (single_succ_p (src));
1151
1152 e = single_succ_edge (src);
1153 if (fallthru)
1154 e->flags = EDGE_FALLTHRU;
1155 else
1156 e->flags = 0;
1157
1158 e->probability = profile_probability::always ();
1159
1160 if (e->dest != target)
1161 redirect_edge_succ (e, target);
1162 return e;
1163 }
1164
1165 /* Subroutine of redirect_branch_edge that tries to patch the jump
1166 instruction INSN so that it reaches block NEW. Do this
1167 only when it originally reached block OLD. Return true if this
1168 worked or the original target wasn't OLD, return false if redirection
1169 doesn't work. */
1170
1171 static bool
1172 patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
1173 {
1174 rtx_jump_table_data *table;
1175 rtx tmp;
1176 /* Recognize a tablejump and adjust all matching cases. */
1177 if (tablejump_p (insn, NULL, &table))
1178 {
1179 rtvec vec;
1180 int j;
1181 rtx_code_label *new_label = block_label (new_bb);
1182
1183 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1184 return false;
1185 vec = table->get_labels ();
1186
1187 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
1188 if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
1189 {
1190 RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
1191 --LABEL_NUSES (old_label);
1192 ++LABEL_NUSES (new_label);
1193 }
1194
1195 /* Handle casesi dispatch insns. */
1196 if ((tmp = single_set (insn)) != NULL
1197 && SET_DEST (tmp) == pc_rtx
1198 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
1199 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
1200 && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
1201 {
1202 XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
1203 new_label);
1204 --LABEL_NUSES (old_label);
1205 ++LABEL_NUSES (new_label);
1206 }
1207 }
1208 else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
1209 {
1210 int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
1211 rtx note;
1212
1213 if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1214 return false;
1215 rtx_code_label *new_label = block_label (new_bb);
1216
1217 for (i = 0; i < n; ++i)
1218 {
1219 rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
1220 gcc_assert (GET_CODE (old_ref) == LABEL_REF);
1221 if (XEXP (old_ref, 0) == old_label)
1222 {
1223 ASM_OPERANDS_LABEL (tmp, i)
1224 = gen_rtx_LABEL_REF (Pmode, new_label);
1225 --LABEL_NUSES (old_label);
1226 ++LABEL_NUSES (new_label);
1227 }
1228 }
1229
1230 if (JUMP_LABEL (insn) == old_label)
1231 {
1232 JUMP_LABEL (insn) = new_label;
1233 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1234 if (note)
1235 remove_note (insn, note);
1236 }
1237 else
1238 {
1239 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1240 if (note)
1241 remove_note (insn, note);
1242 if (JUMP_LABEL (insn) != new_label
1243 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1244 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1245 }
1246 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1247 != NULL_RTX)
1248 XEXP (note, 0) = new_label;
1249 }
1250 else
1251 {
1252 /* ?? We may play the games with moving the named labels from
1253 one basic block to the other in case only one computed_jump is
1254 available. */
1255 if (computed_jump_p (insn)
1256 /* A return instruction can't be redirected. */
1257 || returnjump_p (insn))
1258 return false;
1259
1260 if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
1261 {
1262 /* If the insn doesn't go where we think, we're confused. */
1263 gcc_assert (JUMP_LABEL (insn) == old_label);
1264
1265 /* If the substitution doesn't succeed, die. This can happen
1266 if the back end emitted unrecognizable instructions or if
1267 target is exit block on some arches. */
1268 if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
1269 block_label (new_bb), 0))
1270 {
1271 gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
1272 return false;
1273 }
1274 }
1275 }
1276 return true;
1277 }
1278
1279
1280 /* Redirect edge representing branch of (un)conditional jump or tablejump,
1281 NULL on failure */
1282 static edge
1283 redirect_branch_edge (edge e, basic_block target)
1284 {
1285 rtx_insn *old_label = BB_HEAD (e->dest);
1286 basic_block src = e->src;
1287 rtx_insn *insn = BB_END (src);
1288
1289 /* We can only redirect non-fallthru edges of jump insn. */
1290 if (e->flags & EDGE_FALLTHRU)
1291 return NULL;
1292 else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1293 return NULL;
1294
1295 if (!currently_expanding_to_rtl)
1296 {
1297 if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
1298 return NULL;
1299 }
1300 else
1301 /* When expanding this BB might actually contain multiple
1302 jumps (i.e. not yet split by find_many_sub_basic_blocks).
1303 Redirect all of those that match our label. */
1304 FOR_BB_INSNS (src, insn)
1305 if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1306 old_label, target))
1307 return NULL;
1308
1309 if (dump_file)
1310 fprintf (dump_file, "Edge %i->%i redirected to %i\n",
1311 e->src->index, e->dest->index, target->index);
1312
1313 if (e->dest != target)
1314 e = redirect_edge_succ_nodup (e, target);
1315
1316 return e;
1317 }
1318
1319 /* Called when edge E has been redirected to a new destination,
1320 in order to update the region crossing flag on the edge and
1321 jump. */
1322
1323 static void
1324 fixup_partition_crossing (edge e)
1325 {
1326 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1327 == EXIT_BLOCK_PTR_FOR_FN (cfun))
1328 return;
1329 /* If we redirected an existing edge, it may already be marked
1330 crossing, even though the new src is missing a reg crossing note.
1331 But make sure reg crossing note doesn't already exist before
1332 inserting. */
1333 if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1334 {
1335 e->flags |= EDGE_CROSSING;
1336 if (JUMP_P (BB_END (e->src)))
1337 CROSSING_JUMP_P (BB_END (e->src)) = 1;
1338 }
1339 else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1340 {
1341 e->flags &= ~EDGE_CROSSING;
1342 /* Remove the section crossing note from jump at end of
1343 src if it exists, and if no other successors are
1344 still crossing. */
1345 if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1346 {
1347 bool has_crossing_succ = false;
1348 edge e2;
1349 edge_iterator ei;
1350 FOR_EACH_EDGE (e2, ei, e->src->succs)
1351 {
1352 has_crossing_succ |= (e2->flags & EDGE_CROSSING);
1353 if (has_crossing_succ)
1354 break;
1355 }
1356 if (!has_crossing_succ)
1357 CROSSING_JUMP_P (BB_END (e->src)) = 0;
1358 }
1359 }
1360 }
1361
1362 /* Called when block BB has been reassigned to the cold partition,
1363 because it is now dominated by another cold block,
1364 to ensure that the region crossing attributes are updated. */
1365
1366 static void
1367 fixup_new_cold_bb (basic_block bb)
1368 {
1369 edge e;
1370 edge_iterator ei;
1371
1372 /* This is called when a hot bb is found to now be dominated
1373 by a cold bb and therefore needs to become cold. Therefore,
1374 its preds will no longer be region crossing. Any non-dominating
1375 preds that were previously hot would also have become cold
1376 in the caller for the same region. Any preds that were previously
1377 region-crossing will be adjusted in fixup_partition_crossing. */
1378 FOR_EACH_EDGE (e, ei, bb->preds)
1379 {
1380 fixup_partition_crossing (e);
1381 }
1382
1383 /* Possibly need to make bb's successor edges region crossing,
1384 or remove stale region crossing. */
1385 FOR_EACH_EDGE (e, ei, bb->succs)
1386 {
1387 /* We can't have fall-through edges across partition boundaries.
1388 Note that force_nonfallthru will do any necessary partition
1389 boundary fixup by calling fixup_partition_crossing itself. */
1390 if ((e->flags & EDGE_FALLTHRU)
1391 && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1392 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1393 force_nonfallthru (e);
1394 else
1395 fixup_partition_crossing (e);
1396 }
1397 }
1398
1399 /* Attempt to change code to redirect edge E to TARGET. Don't do that on
1400 expense of adding new instructions or reordering basic blocks.
1401
1402 Function can be also called with edge destination equivalent to the TARGET.
1403 Then it should try the simplifications and do nothing if none is possible.
1404
1405 Return edge representing the branch if transformation succeeded. Return NULL
1406 on failure.
1407 We still return NULL in case E already destinated TARGET and we didn't
1408 managed to simplify instruction stream. */
1409
1410 static edge
1411 rtl_redirect_edge_and_branch (edge e, basic_block target)
1412 {
1413 edge ret;
1414 basic_block src = e->src;
1415 basic_block dest = e->dest;
1416
1417 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
1418 return NULL;
1419
1420 if (dest == target)
1421 return e;
1422
1423 if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1424 {
1425 df_set_bb_dirty (src);
1426 fixup_partition_crossing (ret);
1427 return ret;
1428 }
1429
1430 ret = redirect_branch_edge (e, target);
1431 if (!ret)
1432 return NULL;
1433
1434 df_set_bb_dirty (src);
1435 fixup_partition_crossing (ret);
1436 return ret;
1437 }
1438
1439 /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */
1440
1441 void
1442 emit_barrier_after_bb (basic_block bb)
1443 {
1444 rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1445 gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1446 || current_ir_type () == IR_RTL_CFGLAYOUT);
1447 if (current_ir_type () == IR_RTL_CFGLAYOUT)
1448 {
1449 rtx_insn *insn = unlink_insn_chain (barrier, barrier);
1450
1451 if (BB_FOOTER (bb))
1452 {
1453 rtx_insn *footer_tail = BB_FOOTER (bb);
1454
1455 while (NEXT_INSN (footer_tail))
1456 footer_tail = NEXT_INSN (footer_tail);
1457 if (!BARRIER_P (footer_tail))
1458 {
1459 SET_NEXT_INSN (footer_tail) = insn;
1460 SET_PREV_INSN (insn) = footer_tail;
1461 }
1462 }
1463 else
1464 BB_FOOTER (bb) = insn;
1465 }
1466 }
1467
1468 /* Like force_nonfallthru below, but additionally performs redirection
1469 Used by redirect_edge_and_branch_force. JUMP_LABEL is used only
1470 when redirecting to the EXIT_BLOCK, it is either ret_rtx or
1471 simple_return_rtx, indicating which kind of returnjump to create.
1472 It should be NULL otherwise. */
1473
1474 basic_block
1475 force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
1476 {
1477 basic_block jump_block, new_bb = NULL, src = e->src;
1478 rtx note;
1479 edge new_edge;
1480 int abnormal_edge_flags = 0;
1481 bool asm_goto_edge = false;
1482 int loc;
1483
1484 /* In the case the last instruction is conditional jump to the next
1485 instruction, first redirect the jump itself and then continue
1486 by creating a basic block afterwards to redirect fallthru edge. */
1487 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1488 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1489 && any_condjump_p (BB_END (e->src))
1490 && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1491 {
1492 rtx note;
1493 edge b = unchecked_make_edge (e->src, target, 0);
1494 bool redirected;
1495
1496 redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1497 block_label (target), 0);
1498 gcc_assert (redirected);
1499
1500 note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1501 if (note)
1502 {
1503 int prob = XINT (note, 0);
1504
1505 b->probability = profile_probability::from_reg_br_prob_note (prob);
1506 e->probability -= e->probability;
1507 }
1508 }
1509
1510 if (e->flags & EDGE_ABNORMAL)
1511 {
1512 /* Irritating special case - fallthru edge to the same block as abnormal
1513 edge.
1514 We can't redirect abnormal edge, but we still can split the fallthru
1515 one and create separate abnormal edge to original destination.
1516 This allows bb-reorder to make such edge non-fallthru. */
1517 gcc_assert (e->dest == target);
1518 abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
1519 e->flags &= EDGE_FALLTHRU;
1520 }
1521 else
1522 {
1523 gcc_assert (e->flags & EDGE_FALLTHRU);
1524 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1525 {
1526 /* We can't redirect the entry block. Create an empty block
1527 at the start of the function which we use to add the new
1528 jump. */
1529 edge tmp;
1530 edge_iterator ei;
1531 bool found = false;
1532
1533 basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1534 ENTRY_BLOCK_PTR_FOR_FN (cfun));
1535 bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1536
1537 /* Make sure new block ends up in correct hot/cold section. */
1538 BB_COPY_PARTITION (bb, e->dest);
1539
1540 /* Change the existing edge's source to be the new block, and add
1541 a new edge from the entry block to the new block. */
1542 e->src = bb;
1543 for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1544 (tmp = ei_safe_edge (ei)); )
1545 {
1546 if (tmp == e)
1547 {
1548 ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1549 found = true;
1550 break;
1551 }
1552 else
1553 ei_next (&ei);
1554 }
1555
1556 gcc_assert (found);
1557
1558 vec_safe_push (bb->succs, e);
1559 make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1560 EDGE_FALLTHRU);
1561 }
1562 }
1563
1564 /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
1565 don't point to the target or fallthru label. */
1566 if (JUMP_P (BB_END (e->src))
1567 && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1568 && (e->flags & EDGE_FALLTHRU)
1569 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1570 {
1571 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1572 bool adjust_jump_target = false;
1573
1574 for (i = 0; i < n; ++i)
1575 {
1576 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1577 {
1578 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1579 XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1580 LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1581 adjust_jump_target = true;
1582 }
1583 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1584 asm_goto_edge = true;
1585 }
1586 if (adjust_jump_target)
1587 {
1588 rtx_insn *insn = BB_END (e->src);
1589 rtx note;
1590 rtx_insn *old_label = BB_HEAD (e->dest);
1591 rtx_insn *new_label = BB_HEAD (target);
1592
1593 if (JUMP_LABEL (insn) == old_label)
1594 {
1595 JUMP_LABEL (insn) = new_label;
1596 note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
1597 if (note)
1598 remove_note (insn, note);
1599 }
1600 else
1601 {
1602 note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
1603 if (note)
1604 remove_note (insn, note);
1605 if (JUMP_LABEL (insn) != new_label
1606 && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
1607 add_reg_note (insn, REG_LABEL_TARGET, new_label);
1608 }
1609 while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
1610 != NULL_RTX)
1611 XEXP (note, 0) = new_label;
1612 }
1613 }
1614
1615 if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1616 {
1617 rtx_insn *new_head;
1618 profile_count count = e->count ();
1619 profile_probability probability = e->probability;
1620 /* Create the new structures. */
1621
1622 /* If the old block ended with a tablejump, skip its table
1623 by searching forward from there. Otherwise start searching
1624 forward from the last instruction of the old block. */
1625 rtx_jump_table_data *table;
1626 if (tablejump_p (BB_END (e->src), NULL, &table))
1627 new_head = table;
1628 else
1629 new_head = BB_END (e->src);
1630 new_head = NEXT_INSN (new_head);
1631 /* Make sure we don't split a call and its corresponding
1632 CALL_ARG_LOCATION note. */
1633 if (new_head && NOTE_P (new_head)
1634 && NOTE_KIND (new_head) == NOTE_INSN_CALL_ARG_LOCATION)
1635 new_head = NEXT_INSN (new_head);
1636
1637 jump_block = create_basic_block (new_head, NULL, e->src);
1638 jump_block->count = count;
1639
1640 /* Make sure new block ends up in correct hot/cold section. */
1641
1642 BB_COPY_PARTITION (jump_block, e->src);
1643
1644 /* Wire edge in. */
1645 new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
1646 new_edge->probability = probability;
1647
1648 /* Redirect old edge. */
1649 redirect_edge_pred (e, jump_block);
1650 e->probability = profile_probability::always ();
1651
1652 /* If e->src was previously region crossing, it no longer is
1653 and the reg crossing note should be removed. */
1654 fixup_partition_crossing (new_edge);
1655
1656 /* If asm goto has any label refs to target's label,
1657 add also edge from asm goto bb to target. */
1658 if (asm_goto_edge)
1659 {
1660 new_edge->probability = new_edge->probability.apply_scale (1, 2);
1661 jump_block->count = jump_block->count.apply_scale (1, 2);
1662 edge new_edge2 = make_edge (new_edge->src, target,
1663 e->flags & ~EDGE_FALLTHRU);
1664 new_edge2->probability = probability - new_edge->probability;
1665 }
1666
1667 new_bb = jump_block;
1668 }
1669 else
1670 jump_block = e->src;
1671
1672 loc = e->goto_locus;
1673 e->flags &= ~EDGE_FALLTHRU;
1674 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1675 {
1676 if (jump_label == ret_rtx)
1677 emit_jump_insn_after_setloc (targetm.gen_return (),
1678 BB_END (jump_block), loc);
1679 else
1680 {
1681 gcc_assert (jump_label == simple_return_rtx);
1682 emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1683 BB_END (jump_block), loc);
1684 }
1685 set_return_jump_label (BB_END (jump_block));
1686 }
1687 else
1688 {
1689 rtx_code_label *label = block_label (target);
1690 emit_jump_insn_after_setloc (targetm.gen_jump (label),
1691 BB_END (jump_block), loc);
1692 JUMP_LABEL (BB_END (jump_block)) = label;
1693 LABEL_NUSES (label)++;
1694 }
1695
1696 /* We might be in cfg layout mode, and if so, the following routine will
1697 insert the barrier correctly. */
1698 emit_barrier_after_bb (jump_block);
1699 redirect_edge_succ_nodup (e, target);
1700
1701 if (abnormal_edge_flags)
1702 make_edge (src, target, abnormal_edge_flags);
1703
1704 df_mark_solutions_dirty ();
1705 fixup_partition_crossing (e);
1706 return new_bb;
1707 }
1708
1709 /* Edge E is assumed to be fallthru edge. Emit needed jump instruction
1710 (and possibly create new basic block) to make edge non-fallthru.
1711 Return newly created BB or NULL if none. */
1712
1713 static basic_block
1714 rtl_force_nonfallthru (edge e)
1715 {
1716 return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1717 }
1718
1719 /* Redirect edge even at the expense of creating new jump insn or
1720 basic block. Return new basic block if created, NULL otherwise.
1721 Conversion must be possible. */
1722
1723 static basic_block
1724 rtl_redirect_edge_and_branch_force (edge e, basic_block target)
1725 {
1726 if (redirect_edge_and_branch (e, target)
1727 || e->dest == target)
1728 return NULL;
1729
1730 /* In case the edge redirection failed, try to force it to be non-fallthru
1731 and redirect newly created simplejump. */
1732 df_set_bb_dirty (e->src);
1733 return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1734 }
1735
1736 /* The given edge should potentially be a fallthru edge. If that is in
1737 fact true, delete the jump and barriers that are in the way. */
1738
1739 static void
1740 rtl_tidy_fallthru_edge (edge e)
1741 {
1742 rtx_insn *q;
1743 basic_block b = e->src, c = b->next_bb;
1744
1745 /* ??? In a late-running flow pass, other folks may have deleted basic
1746 blocks by nopping out blocks, leaving multiple BARRIERs between here
1747 and the target label. They ought to be chastised and fixed.
1748
1749 We can also wind up with a sequence of undeletable labels between
1750 one block and the next.
1751
1752 So search through a sequence of barriers, labels, and notes for
1753 the head of block C and assert that we really do fall through. */
1754
1755 for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1756 if (NONDEBUG_INSN_P (q))
1757 return;
1758
1759 /* Remove what will soon cease being the jump insn from the source block.
1760 If block B consisted only of this single jump, turn it into a deleted
1761 note. */
1762 q = BB_END (b);
1763 if (JUMP_P (q)
1764 && onlyjump_p (q)
1765 && (any_uncondjump_p (q)
1766 || single_succ_p (b)))
1767 {
1768 rtx_insn *label;
1769 rtx_jump_table_data *table;
1770
1771 if (tablejump_p (q, &label, &table))
1772 {
1773 /* The label is likely mentioned in some instruction before
1774 the tablejump and might not be DCEd, so turn it into
1775 a note instead and move before the tablejump that is going to
1776 be deleted. */
1777 const char *name = LABEL_NAME (label);
1778 PUT_CODE (label, NOTE);
1779 NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1780 NOTE_DELETED_LABEL_NAME (label) = name;
1781 reorder_insns (label, label, PREV_INSN (q));
1782 delete_insn (table);
1783 }
1784
1785 /* If this was a conditional jump, we need to also delete
1786 the insn that set cc0. */
1787 if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
1788 q = PREV_INSN (q);
1789
1790 q = PREV_INSN (q);
1791 }
1792 /* Unconditional jumps with side-effects (i.e. which we can't just delete
1793 together with the barrier) should never have a fallthru edge. */
1794 else if (JUMP_P (q) && any_uncondjump_p (q))
1795 return;
1796
1797 /* Selectively unlink the sequence. */
1798 if (q != PREV_INSN (BB_HEAD (c)))
1799 delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1800
1801 e->flags |= EDGE_FALLTHRU;
1802 }
1803 \f
1804 /* Should move basic block BB after basic block AFTER. NIY. */
1805
1806 static bool
1807 rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1808 basic_block after ATTRIBUTE_UNUSED)
1809 {
1810 return false;
1811 }
1812
1813 /* Locate the last bb in the same partition as START_BB. */
1814
1815 static basic_block
1816 last_bb_in_partition (basic_block start_bb)
1817 {
1818 basic_block bb;
1819 FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1820 {
1821 if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1822 return bb;
1823 }
1824 /* Return bb before the exit block. */
1825 return bb->prev_bb;
1826 }
1827
1828 /* Split a (typically critical) edge. Return the new block.
1829 The edge must not be abnormal.
1830
1831 ??? The code generally expects to be called on critical edges.
1832 The case of a block ending in an unconditional jump to a
1833 block with multiple predecessors is not handled optimally. */
1834
1835 static basic_block
1836 rtl_split_edge (edge edge_in)
1837 {
1838 basic_block bb, new_bb;
1839 rtx_insn *before;
1840
1841 /* Abnormal edges cannot be split. */
1842 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1843
1844 /* We are going to place the new block in front of edge destination.
1845 Avoid existence of fallthru predecessors. */
1846 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1847 {
1848 edge e = find_fallthru_edge (edge_in->dest->preds);
1849
1850 if (e)
1851 force_nonfallthru (e);
1852 }
1853
1854 /* Create the basic block note. */
1855 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1856 before = BB_HEAD (edge_in->dest);
1857 else
1858 before = NULL;
1859
1860 /* If this is a fall through edge to the exit block, the blocks might be
1861 not adjacent, and the right place is after the source. */
1862 if ((edge_in->flags & EDGE_FALLTHRU)
1863 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1864 {
1865 before = NEXT_INSN (BB_END (edge_in->src));
1866 bb = create_basic_block (before, NULL, edge_in->src);
1867 BB_COPY_PARTITION (bb, edge_in->src);
1868 }
1869 else
1870 {
1871 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1872 {
1873 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1874 BB_COPY_PARTITION (bb, edge_in->dest);
1875 }
1876 else
1877 {
1878 basic_block after = edge_in->dest->prev_bb;
1879 /* If this is post-bb reordering, and the edge crosses a partition
1880 boundary, the new block needs to be inserted in the bb chain
1881 at the end of the src partition (since we put the new bb into
1882 that partition, see below). Otherwise we may end up creating
1883 an extra partition crossing in the chain, which is illegal.
1884 It can't go after the src, because src may have a fall-through
1885 to a different block. */
1886 if (crtl->bb_reorder_complete
1887 && (edge_in->flags & EDGE_CROSSING))
1888 {
1889 after = last_bb_in_partition (edge_in->src);
1890 before = get_last_bb_insn (after);
1891 /* The instruction following the last bb in partition should
1892 be a barrier, since it cannot end in a fall-through. */
1893 gcc_checking_assert (BARRIER_P (before));
1894 before = NEXT_INSN (before);
1895 }
1896 bb = create_basic_block (before, NULL, after);
1897 /* Put the split bb into the src partition, to avoid creating
1898 a situation where a cold bb dominates a hot bb, in the case
1899 where src is cold and dest is hot. The src will dominate
1900 the new bb (whereas it might not have dominated dest). */
1901 BB_COPY_PARTITION (bb, edge_in->src);
1902 }
1903 }
1904
1905 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1906
1907 /* Can't allow a region crossing edge to be fallthrough. */
1908 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1909 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1910 {
1911 new_bb = force_nonfallthru (single_succ_edge (bb));
1912 gcc_assert (!new_bb);
1913 }
1914
1915 /* For non-fallthru edges, we must adjust the predecessor's
1916 jump instruction to target our new block. */
1917 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1918 {
1919 edge redirected = redirect_edge_and_branch (edge_in, bb);
1920 gcc_assert (redirected);
1921 }
1922 else
1923 {
1924 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1925 {
1926 /* For asm goto even splitting of fallthru edge might
1927 need insn patching, as other labels might point to the
1928 old label. */
1929 rtx_insn *last = BB_END (edge_in->src);
1930 if (last
1931 && JUMP_P (last)
1932 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1933 && (extract_asm_operands (PATTERN (last))
1934 || JUMP_LABEL (last) == before)
1935 && patch_jump_insn (last, before, bb))
1936 df_set_bb_dirty (edge_in->src);
1937 }
1938 redirect_edge_succ (edge_in, bb);
1939 }
1940
1941 return bb;
1942 }
1943
1944 /* Queue instructions for insertion on an edge between two basic blocks.
1945 The new instructions and basic blocks (if any) will not appear in the
1946 CFG until commit_edge_insertions is called. */
1947
1948 void
1949 insert_insn_on_edge (rtx pattern, edge e)
1950 {
1951 /* We cannot insert instructions on an abnormal critical edge.
1952 It will be easier to find the culprit if we die now. */
1953 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1954
1955 if (e->insns.r == NULL_RTX)
1956 start_sequence ();
1957 else
1958 push_to_sequence (e->insns.r);
1959
1960 emit_insn (pattern);
1961
1962 e->insns.r = get_insns ();
1963 end_sequence ();
1964 }
1965
1966 /* Update the CFG for the instructions queued on edge E. */
1967
1968 void
1969 commit_one_edge_insertion (edge e)
1970 {
1971 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1972 basic_block bb;
1973
1974 /* Pull the insns off the edge now since the edge might go away. */
1975 insns = e->insns.r;
1976 e->insns.r = NULL;
1977
1978 /* Figure out where to put these insns. If the destination has
1979 one predecessor, insert there. Except for the exit block. */
1980 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1981 {
1982 bb = e->dest;
1983
1984 /* Get the location correct wrt a code label, and "nice" wrt
1985 a basic block note, and before everything else. */
1986 tmp = BB_HEAD (bb);
1987 if (LABEL_P (tmp))
1988 tmp = NEXT_INSN (tmp);
1989 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1990 tmp = NEXT_INSN (tmp);
1991 if (tmp == BB_HEAD (bb))
1992 before = tmp;
1993 else if (tmp)
1994 after = PREV_INSN (tmp);
1995 else
1996 after = get_last_insn ();
1997 }
1998
1999 /* If the source has one successor and the edge is not abnormal,
2000 insert there. Except for the entry block.
2001 Don't do this if the predecessor ends in a jump other than
2002 unconditional simple jump. E.g. for asm goto that points all
2003 its labels at the fallthru basic block, we can't insert instructions
2004 before the asm goto, as the asm goto can have various of side effects,
2005 and can't emit instructions after the asm goto, as it must end
2006 the basic block. */
2007 else if ((e->flags & EDGE_ABNORMAL) == 0
2008 && single_succ_p (e->src)
2009 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2010 && (!JUMP_P (BB_END (e->src))
2011 || simplejump_p (BB_END (e->src))))
2012 {
2013 bb = e->src;
2014
2015 /* It is possible to have a non-simple jump here. Consider a target
2016 where some forms of unconditional jumps clobber a register. This
2017 happens on the fr30 for example.
2018
2019 We know this block has a single successor, so we can just emit
2020 the queued insns before the jump. */
2021 if (JUMP_P (BB_END (bb)))
2022 before = BB_END (bb);
2023 else
2024 {
2025 /* We'd better be fallthru, or we've lost track of what's what. */
2026 gcc_assert (e->flags & EDGE_FALLTHRU);
2027
2028 after = BB_END (bb);
2029 }
2030 }
2031
2032 /* Otherwise we must split the edge. */
2033 else
2034 {
2035 bb = split_edge (e);
2036
2037 /* If E crossed a partition boundary, we needed to make bb end in
2038 a region-crossing jump, even though it was originally fallthru. */
2039 if (JUMP_P (BB_END (bb)))
2040 before = BB_END (bb);
2041 else
2042 after = BB_END (bb);
2043 }
2044
2045 /* Now that we've found the spot, do the insertion. */
2046 if (before)
2047 {
2048 emit_insn_before_noloc (insns, before, bb);
2049 last = prev_nonnote_insn (before);
2050 }
2051 else
2052 last = emit_insn_after_noloc (insns, after, bb);
2053
2054 if (returnjump_p (last))
2055 {
2056 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2057 This is not currently a problem because this only happens
2058 for the (single) epilogue, which already has a fallthru edge
2059 to EXIT. */
2060
2061 e = single_succ_edge (bb);
2062 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2063 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2064
2065 e->flags &= ~EDGE_FALLTHRU;
2066 emit_barrier_after (last);
2067
2068 if (before)
2069 delete_insn (before);
2070 }
2071 else
2072 gcc_assert (!JUMP_P (last));
2073 }
2074
2075 /* Update the CFG for all queued instructions. */
2076
2077 void
2078 commit_edge_insertions (void)
2079 {
2080 basic_block bb;
2081
2082 /* Optimization passes that invoke this routine can cause hot blocks
2083 previously reached by both hot and cold blocks to become dominated only
2084 by cold blocks. This will cause the verification below to fail,
2085 and lead to now cold code in the hot section. In some cases this
2086 may only be visible after newly unreachable blocks are deleted,
2087 which will be done by fixup_partitions. */
2088 fixup_partitions ();
2089
2090 checking_verify_flow_info ();
2091
2092 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2093 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2094 {
2095 edge e;
2096 edge_iterator ei;
2097
2098 FOR_EACH_EDGE (e, ei, bb->succs)
2099 if (e->insns.r)
2100 commit_one_edge_insertion (e);
2101 }
2102 }
2103 \f
2104
2105 /* Print out RTL-specific basic block information (live information
2106 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2107 documented in dumpfile.h. */
2108
2109 static void
2110 rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2111 {
2112 char *s_indent;
2113
2114 s_indent = (char *) alloca ((size_t) indent + 1);
2115 memset (s_indent, ' ', (size_t) indent);
2116 s_indent[indent] = '\0';
2117
2118 if (df && (flags & TDF_DETAILS))
2119 {
2120 df_dump_top (bb, outf);
2121 putc ('\n', outf);
2122 }
2123
2124 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2125 {
2126 rtx_insn *last = BB_END (bb);
2127 if (last)
2128 last = NEXT_INSN (last);
2129 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2130 {
2131 if (flags & TDF_DETAILS)
2132 df_dump_insn_top (insn, outf);
2133 if (! (flags & TDF_SLIM))
2134 print_rtl_single (outf, insn);
2135 else
2136 dump_insn_slim (outf, insn);
2137 if (flags & TDF_DETAILS)
2138 df_dump_insn_bottom (insn, outf);
2139 }
2140 }
2141
2142 if (df && (flags & TDF_DETAILS))
2143 {
2144 df_dump_bottom (bb, outf);
2145 putc ('\n', outf);
2146 }
2147
2148 }
2149 \f
2150 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2151 for the start of each basic block. FLAGS are the TDF_* masks documented
2152 in dumpfile.h. */
2153
2154 void
2155 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2156 {
2157 const rtx_insn *tmp_rtx;
2158 if (rtx_first == 0)
2159 fprintf (outf, "(nil)\n");
2160 else
2161 {
2162 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2163 int max_uid = get_max_uid ();
2164 basic_block *start = XCNEWVEC (basic_block, max_uid);
2165 basic_block *end = XCNEWVEC (basic_block, max_uid);
2166 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2167 basic_block bb;
2168
2169 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2170 insns, but the CFG is not maintained so the basic block info
2171 is not reliable. Therefore it's omitted from the dumps. */
2172 if (! (cfun->curr_properties & PROP_cfg))
2173 flags &= ~TDF_BLOCKS;
2174
2175 if (df)
2176 df_dump_start (outf);
2177
2178 if (flags & TDF_BLOCKS)
2179 {
2180 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2181 {
2182 rtx_insn *x;
2183
2184 start[INSN_UID (BB_HEAD (bb))] = bb;
2185 end[INSN_UID (BB_END (bb))] = bb;
2186 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2187 {
2188 enum bb_state state = IN_MULTIPLE_BB;
2189
2190 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2191 state = IN_ONE_BB;
2192 in_bb_p[INSN_UID (x)] = state;
2193
2194 if (x == BB_END (bb))
2195 break;
2196 }
2197 }
2198 }
2199
2200 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2201 {
2202 if (flags & TDF_BLOCKS)
2203 {
2204 bb = start[INSN_UID (tmp_rtx)];
2205 if (bb != NULL)
2206 {
2207 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2208 if (df && (flags & TDF_DETAILS))
2209 df_dump_top (bb, outf);
2210 }
2211
2212 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2213 && !NOTE_P (tmp_rtx)
2214 && !BARRIER_P (tmp_rtx))
2215 fprintf (outf, ";; Insn is not within a basic block\n");
2216 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2217 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2218 }
2219
2220 if (flags & TDF_DETAILS)
2221 df_dump_insn_top (tmp_rtx, outf);
2222 if (! (flags & TDF_SLIM))
2223 print_rtl_single (outf, tmp_rtx);
2224 else
2225 dump_insn_slim (outf, tmp_rtx);
2226 if (flags & TDF_DETAILS)
2227 df_dump_insn_bottom (tmp_rtx, outf);
2228
2229 if (flags & TDF_BLOCKS)
2230 {
2231 bb = end[INSN_UID (tmp_rtx)];
2232 if (bb != NULL)
2233 {
2234 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2235 if (df && (flags & TDF_DETAILS))
2236 df_dump_bottom (bb, outf);
2237 putc ('\n', outf);
2238 }
2239 }
2240 }
2241
2242 free (start);
2243 free (end);
2244 free (in_bb_p);
2245 }
2246 }
2247 \f
2248 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2249
2250 void
2251 update_br_prob_note (basic_block bb)
2252 {
2253 rtx note;
2254 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2255 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2256 {
2257 if (note)
2258 {
2259 rtx *note_link, this_rtx;
2260
2261 note_link = &REG_NOTES (BB_END (bb));
2262 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2263 if (this_rtx == note)
2264 {
2265 *note_link = XEXP (this_rtx, 1);
2266 break;
2267 }
2268 }
2269 return;
2270 }
2271 if (!note
2272 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2273 return;
2274 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2275 }
2276
2277 /* Get the last insn associated with block BB (that includes barriers and
2278 tablejumps after BB). */
2279 rtx_insn *
2280 get_last_bb_insn (basic_block bb)
2281 {
2282 rtx_jump_table_data *table;
2283 rtx_insn *tmp;
2284 rtx_insn *end = BB_END (bb);
2285
2286 /* Include any jump table following the basic block. */
2287 if (tablejump_p (end, NULL, &table))
2288 end = table;
2289
2290 /* Include any barriers that may follow the basic block. */
2291 tmp = next_nonnote_nondebug_insn_bb (end);
2292 while (tmp && BARRIER_P (tmp))
2293 {
2294 end = tmp;
2295 tmp = next_nonnote_nondebug_insn_bb (end);
2296 }
2297
2298 return end;
2299 }
2300
2301 /* Add all BBs reachable from entry via hot paths into the SET. */
2302
2303 void
2304 find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2305 {
2306 auto_vec<basic_block, 64> worklist;
2307
2308 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2309 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2310
2311 while (worklist.length () > 0)
2312 {
2313 basic_block bb = worklist.pop ();
2314 edge_iterator ei;
2315 edge e;
2316
2317 FOR_EACH_EDGE (e, ei, bb->succs)
2318 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2319 && !set->add (e->dest))
2320 worklist.safe_push (e->dest);
2321 }
2322 }
2323
2324 /* Sanity check partition hotness to ensure that basic blocks in
2325   the cold partition don't dominate basic blocks in the hot partition.
2326 If FLAG_ONLY is true, report violations as errors. Otherwise
2327 re-mark the dominated blocks as cold, since this is run after
2328 cfg optimizations that may make hot blocks previously reached
2329 by both hot and cold blocks now only reachable along cold paths. */
2330
2331 static vec<basic_block>
2332 find_partition_fixes (bool flag_only)
2333 {
2334 basic_block bb;
2335 vec<basic_block> bbs_in_cold_partition = vNULL;
2336 vec<basic_block> bbs_to_fix = vNULL;
2337 hash_set<basic_block> set;
2338
2339 /* Callers check this. */
2340 gcc_checking_assert (crtl->has_bb_partition);
2341
2342 find_bbs_reachable_by_hot_paths (&set);
2343
2344 FOR_EACH_BB_FN (bb, cfun)
2345 if (!set.contains (bb)
2346 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2347 {
2348 if (flag_only)
2349 error ("non-cold basic block %d reachable only "
2350 "by paths crossing the cold partition", bb->index);
2351 else
2352 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2353 bbs_to_fix.safe_push (bb);
2354 bbs_in_cold_partition.safe_push (bb);
2355 }
2356
2357 return bbs_to_fix;
2358 }
2359
2360 /* Perform cleanup on the hot/cold bb partitioning after optimization
2361 passes that modify the cfg. */
2362
2363 void
2364 fixup_partitions (void)
2365 {
2366 basic_block bb;
2367
2368 if (!crtl->has_bb_partition)
2369 return;
2370
2371 /* Delete any blocks that became unreachable and weren't
2372 already cleaned up, for example during edge forwarding
2373 and convert_jumps_to_returns. This will expose more
2374 opportunities for fixing the partition boundaries here.
2375 Also, the calculation of the dominance graph during verification
2376 will assert if there are unreachable nodes. */
2377 delete_unreachable_blocks ();
2378
2379 /* If there are partitions, do a sanity check on them: A basic block in
2380   a cold partition cannot dominate a basic block in a hot partition.
2381 Fixup any that now violate this requirement, as a result of edge
2382 forwarding and unreachable block deletion.  */
2383 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2384
2385 /* Do the partition fixup after all necessary blocks have been converted to
2386 cold, so that we only update the region crossings the minimum number of
2387 places, which can require forcing edges to be non fallthru. */
2388 while (! bbs_to_fix.is_empty ())
2389 {
2390 bb = bbs_to_fix.pop ();
2391 fixup_new_cold_bb (bb);
2392 }
2393 }
2394
2395 /* Verify, in the basic block chain, that there is at most one switch
2396 between hot/cold partitions. This condition will not be true until
2397 after reorder_basic_blocks is called. */
2398
2399 static int
2400 verify_hot_cold_block_grouping (void)
2401 {
2402 basic_block bb;
2403 int err = 0;
2404 bool switched_sections = false;
2405 int current_partition = BB_UNPARTITIONED;
2406
2407 /* Even after bb reordering is complete, we go into cfglayout mode
2408 again (in compgoto). Ensure we don't call this before going back
2409 into linearized RTL when any layout fixes would have been committed. */
2410 if (!crtl->bb_reorder_complete
2411 || current_ir_type () != IR_RTL_CFGRTL)
2412 return err;
2413
2414 FOR_EACH_BB_FN (bb, cfun)
2415 {
2416 if (current_partition != BB_UNPARTITIONED
2417 && BB_PARTITION (bb) != current_partition)
2418 {
2419 if (switched_sections)
2420 {
2421 error ("multiple hot/cold transitions found (bb %i)",
2422 bb->index);
2423 err = 1;
2424 }
2425 else
2426 switched_sections = true;
2427
2428 if (!crtl->has_bb_partition)
2429 error ("partition found but function partition flag not set");
2430 }
2431 current_partition = BB_PARTITION (bb);
2432 }
2433
2434 return err;
2435 }
2436 \f
2437
2438 /* Perform several checks on the edges out of each block, such as
2439 the consistency of the branch probabilities, the correctness
2440 of hot/cold partition crossing edges, and the number of expected
2441 successor edges. Also verify that the dominance relationship
2442 between hot/cold blocks is sane. */
2443
2444 static int
2445 rtl_verify_edges (void)
2446 {
2447 int err = 0;
2448 basic_block bb;
2449
2450 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2451 {
2452 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2453 int n_eh = 0, n_abnormal = 0;
2454 edge e, fallthru = NULL;
2455 edge_iterator ei;
2456 rtx note;
2457 bool has_crossing_edge = false;
2458
2459 if (JUMP_P (BB_END (bb))
2460 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2461 && EDGE_COUNT (bb->succs) >= 2
2462 && any_condjump_p (BB_END (bb)))
2463 {
2464 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2465 {
2466 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2467 {
2468 error ("verify_flow_info: "
2469 "REG_BR_PROB is set but cfg probability is not");
2470 err = 1;
2471 }
2472 }
2473 else if (XINT (note, 0)
2474 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2475 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2476 {
2477 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2478 XINT (note, 0),
2479 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2480 err = 1;
2481 }
2482 }
2483
2484 FOR_EACH_EDGE (e, ei, bb->succs)
2485 {
2486 bool is_crossing;
2487
2488 if (e->flags & EDGE_FALLTHRU)
2489 n_fallthru++, fallthru = e;
2490
2491 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2492 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2493 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2494 has_crossing_edge |= is_crossing;
2495 if (e->flags & EDGE_CROSSING)
2496 {
2497 if (!is_crossing)
2498 {
2499 error ("EDGE_CROSSING incorrectly set across same section");
2500 err = 1;
2501 }
2502 if (e->flags & EDGE_FALLTHRU)
2503 {
2504 error ("fallthru edge crosses section boundary in bb %i",
2505 e->src->index);
2506 err = 1;
2507 }
2508 if (e->flags & EDGE_EH)
2509 {
2510 error ("EH edge crosses section boundary in bb %i",
2511 e->src->index);
2512 err = 1;
2513 }
2514 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2515 {
2516 error ("No region crossing jump at section boundary in bb %i",
2517 bb->index);
2518 err = 1;
2519 }
2520 }
2521 else if (is_crossing)
2522 {
2523 error ("EDGE_CROSSING missing across section boundary");
2524 err = 1;
2525 }
2526
2527 if ((e->flags & ~(EDGE_DFS_BACK
2528 | EDGE_CAN_FALLTHRU
2529 | EDGE_IRREDUCIBLE_LOOP
2530 | EDGE_LOOP_EXIT
2531 | EDGE_CROSSING
2532 | EDGE_PRESERVE)) == 0)
2533 n_branch++;
2534
2535 if (e->flags & EDGE_ABNORMAL_CALL)
2536 n_abnormal_call++;
2537
2538 if (e->flags & EDGE_SIBCALL)
2539 n_sibcall++;
2540
2541 if (e->flags & EDGE_EH)
2542 n_eh++;
2543
2544 if (e->flags & EDGE_ABNORMAL)
2545 n_abnormal++;
2546 }
2547
2548 if (!has_crossing_edge
2549 && JUMP_P (BB_END (bb))
2550 && CROSSING_JUMP_P (BB_END (bb)))
2551 {
2552 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2553 error ("Region crossing jump across same section in bb %i",
2554 bb->index);
2555 err = 1;
2556 }
2557
2558 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2559 {
2560 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2561 err = 1;
2562 }
2563 if (n_eh > 1)
2564 {
2565 error ("too many exception handling edges in bb %i", bb->index);
2566 err = 1;
2567 }
2568 if (n_branch
2569 && (!JUMP_P (BB_END (bb))
2570 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2571 || any_condjump_p (BB_END (bb))))))
2572 {
2573 error ("too many outgoing branch edges from bb %i", bb->index);
2574 err = 1;
2575 }
2576 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2577 {
2578 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2579 err = 1;
2580 }
2581 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2582 {
2583 error ("wrong number of branch edges after unconditional jump"
2584 " in bb %i", bb->index);
2585 err = 1;
2586 }
2587 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2588 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2589 {
2590 error ("wrong amount of branch edges after conditional jump"
2591 " in bb %i", bb->index);
2592 err = 1;
2593 }
2594 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2595 {
2596 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2597 err = 1;
2598 }
2599 if (n_sibcall && !CALL_P (BB_END (bb)))
2600 {
2601 error ("sibcall edges for non-call insn in bb %i", bb->index);
2602 err = 1;
2603 }
2604 if (n_abnormal > n_eh
2605 && !(CALL_P (BB_END (bb))
2606 && n_abnormal == n_abnormal_call + n_sibcall)
2607 && (!JUMP_P (BB_END (bb))
2608 || any_condjump_p (BB_END (bb))
2609 || any_uncondjump_p (BB_END (bb))))
2610 {
2611 error ("abnormal edges for no purpose in bb %i", bb->index);
2612 err = 1;
2613 }
2614 }
2615
2616 /* If there are partitions, do a sanity check on them: A basic block in
2617   a cold partition cannot dominate a basic block in a hot partition.  */
2618 if (crtl->has_bb_partition && !err)
2619 {
2620 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2621 err = !bbs_to_fix.is_empty ();
2622 }
2623
2624 /* Clean up. */
2625 return err;
2626 }
2627
2628 /* Checks on the instructions within blocks. Currently checks that each
2629 block starts with a basic block note, and that basic block notes and
2630 control flow jumps are not found in the middle of the block. */
2631
2632 static int
2633 rtl_verify_bb_insns (void)
2634 {
2635 rtx_insn *x;
2636 int err = 0;
2637 basic_block bb;
2638
2639 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2640 {
2641 /* Now check the header of basic
2642 block. It ought to contain optional CODE_LABEL followed
2643 by NOTE_BASIC_BLOCK. */
2644 x = BB_HEAD (bb);
2645 if (LABEL_P (x))
2646 {
2647 if (BB_END (bb) == x)
2648 {
2649 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2650 bb->index);
2651 err = 1;
2652 }
2653
2654 x = NEXT_INSN (x);
2655 }
2656
2657 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2658 {
2659 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2660 bb->index);
2661 err = 1;
2662 }
2663
2664 if (BB_END (bb) == x)
2665 /* Do checks for empty blocks here. */
2666 ;
2667 else
2668 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2669 {
2670 if (NOTE_INSN_BASIC_BLOCK_P (x))
2671 {
2672 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2673 INSN_UID (x), bb->index);
2674 err = 1;
2675 }
2676
2677 if (x == BB_END (bb))
2678 break;
2679
2680 if (control_flow_insn_p (x))
2681 {
2682 error ("in basic block %d:", bb->index);
2683 fatal_insn ("flow control insn inside a basic block", x);
2684 }
2685 }
2686 }
2687
2688 /* Clean up. */
2689 return err;
2690 }
2691
2692 /* Verify that block pointers for instructions in basic blocks, headers and
2693 footers are set appropriately. */
2694
2695 static int
2696 rtl_verify_bb_pointers (void)
2697 {
2698 int err = 0;
2699 basic_block bb;
2700
2701 /* Check the general integrity of the basic blocks. */
2702 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2703 {
2704 rtx_insn *insn;
2705
2706 if (!(bb->flags & BB_RTL))
2707 {
2708 error ("BB_RTL flag not set for block %d", bb->index);
2709 err = 1;
2710 }
2711
2712 FOR_BB_INSNS (bb, insn)
2713 if (BLOCK_FOR_INSN (insn) != bb)
2714 {
2715 error ("insn %d basic block pointer is %d, should be %d",
2716 INSN_UID (insn),
2717 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2718 bb->index);
2719 err = 1;
2720 }
2721
2722 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2723 if (!BARRIER_P (insn)
2724 && BLOCK_FOR_INSN (insn) != NULL)
2725 {
2726 error ("insn %d in header of bb %d has non-NULL basic block",
2727 INSN_UID (insn), bb->index);
2728 err = 1;
2729 }
2730 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2731 if (!BARRIER_P (insn)
2732 && BLOCK_FOR_INSN (insn) != NULL)
2733 {
2734 error ("insn %d in footer of bb %d has non-NULL basic block",
2735 INSN_UID (insn), bb->index);
2736 err = 1;
2737 }
2738 }
2739
2740 /* Clean up. */
2741 return err;
2742 }
2743
2744 /* Verify the CFG and RTL consistency common for both underlying RTL and
2745 cfglayout RTL.
2746
2747 Currently it does following checks:
2748
2749 - overlapping of basic blocks
2750 - insns with wrong BLOCK_FOR_INSN pointers
2751 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2752 - tails of basic blocks (ensure that boundary is necessary)
2753 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2754 and NOTE_INSN_BASIC_BLOCK
2755 - verify that no fall_thru edge crosses hot/cold partition boundaries
2756 - verify that there are no pending RTL branch predictions
2757 - verify that hot blocks are not dominated by cold blocks
2758
2759 In future it can be extended check a lot of other stuff as well
2760 (reachability of basic blocks, life information, etc. etc.). */
2761
2762 static int
2763 rtl_verify_flow_info_1 (void)
2764 {
2765 int err = 0;
2766
2767 err |= rtl_verify_bb_pointers ();
2768
2769 err |= rtl_verify_bb_insns ();
2770
2771 err |= rtl_verify_edges ();
2772
2773 return err;
2774 }
2775
2776 /* Walk the instruction chain and verify that bb head/end pointers
2777 are correct, and that instructions are in exactly one bb and have
2778 correct block pointers. */
2779
2780 static int
2781 rtl_verify_bb_insn_chain (void)
2782 {
2783 basic_block bb;
2784 int err = 0;
2785 rtx_insn *x;
2786 rtx_insn *last_head = get_last_insn ();
2787 basic_block *bb_info;
2788 const int max_uid = get_max_uid ();
2789
2790 bb_info = XCNEWVEC (basic_block, max_uid);
2791
2792 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2793 {
2794 rtx_insn *head = BB_HEAD (bb);
2795 rtx_insn *end = BB_END (bb);
2796
2797 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2798 {
2799 /* Verify the end of the basic block is in the INSN chain. */
2800 if (x == end)
2801 break;
2802
2803 /* And that the code outside of basic blocks has NULL bb field. */
2804 if (!BARRIER_P (x)
2805 && BLOCK_FOR_INSN (x) != NULL)
2806 {
2807 error ("insn %d outside of basic blocks has non-NULL bb field",
2808 INSN_UID (x));
2809 err = 1;
2810 }
2811 }
2812
2813 if (!x)
2814 {
2815 error ("end insn %d for block %d not found in the insn stream",
2816 INSN_UID (end), bb->index);
2817 err = 1;
2818 }
2819
2820 /* Work backwards from the end to the head of the basic block
2821 to verify the head is in the RTL chain. */
2822 for (; x != NULL_RTX; x = PREV_INSN (x))
2823 {
2824 /* While walking over the insn chain, verify insns appear
2825 in only one basic block. */
2826 if (bb_info[INSN_UID (x)] != NULL)
2827 {
2828 error ("insn %d is in multiple basic blocks (%d and %d)",
2829 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2830 err = 1;
2831 }
2832
2833 bb_info[INSN_UID (x)] = bb;
2834
2835 if (x == head)
2836 break;
2837 }
2838 if (!x)
2839 {
2840 error ("head insn %d for block %d not found in the insn stream",
2841 INSN_UID (head), bb->index);
2842 err = 1;
2843 }
2844
2845 last_head = PREV_INSN (x);
2846 }
2847
2848 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2849 {
2850 /* Check that the code before the first basic block has NULL
2851 bb field. */
2852 if (!BARRIER_P (x)
2853 && BLOCK_FOR_INSN (x) != NULL)
2854 {
2855 error ("insn %d outside of basic blocks has non-NULL bb field",
2856 INSN_UID (x));
2857 err = 1;
2858 }
2859 }
2860 free (bb_info);
2861
2862 return err;
2863 }
2864
2865 /* Verify that fallthru edges point to adjacent blocks in layout order and
2866 that barriers exist after non-fallthru blocks. */
2867
2868 static int
2869 rtl_verify_fallthru (void)
2870 {
2871 basic_block bb;
2872 int err = 0;
2873
2874 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2875 {
2876 edge e;
2877
2878 e = find_fallthru_edge (bb->succs);
2879 if (!e)
2880 {
2881 rtx_insn *insn;
2882
2883 /* Ensure existence of barrier in BB with no fallthru edges. */
2884 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2885 {
2886 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2887 {
2888 error ("missing barrier after block %i", bb->index);
2889 err = 1;
2890 break;
2891 }
2892 if (BARRIER_P (insn))
2893 break;
2894 }
2895 }
2896 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2897 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2898 {
2899 rtx_insn *insn;
2900
2901 if (e->src->next_bb != e->dest)
2902 {
2903 error
2904 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2905 e->src->index, e->dest->index);
2906 err = 1;
2907 }
2908 else
2909 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2910 insn = NEXT_INSN (insn))
2911 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
2912 {
2913 error ("verify_flow_info: Incorrect fallthru %i->%i",
2914 e->src->index, e->dest->index);
2915 fatal_insn ("wrong insn in the fallthru edge", insn);
2916 err = 1;
2917 }
2918 }
2919 }
2920
2921 return err;
2922 }
2923
2924 /* Verify that blocks are laid out in consecutive order. While walking the
2925 instructions, verify that all expected instructions are inside the basic
2926 blocks, and that all returns are followed by barriers. */
2927
2928 static int
2929 rtl_verify_bb_layout (void)
2930 {
2931 basic_block bb;
2932 int err = 0;
2933 rtx_insn *x, *y;
2934 int num_bb_notes;
2935 rtx_insn * const rtx_first = get_insns ();
2936 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
2937
2938 num_bb_notes = 0;
2939 last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
2940
2941 for (x = rtx_first; x; x = NEXT_INSN (x))
2942 {
2943 if (NOTE_INSN_BASIC_BLOCK_P (x))
2944 {
2945 bb = NOTE_BASIC_BLOCK (x);
2946
2947 num_bb_notes++;
2948 if (bb != last_bb_seen->next_bb)
2949 internal_error ("basic blocks not laid down consecutively");
2950
2951 curr_bb = last_bb_seen = bb;
2952 }
2953
2954 if (!curr_bb)
2955 {
2956 switch (GET_CODE (x))
2957 {
2958 case BARRIER:
2959 case NOTE:
2960 break;
2961
2962 case CODE_LABEL:
2963 /* An ADDR_VEC is placed outside any basic block. */
2964 if (NEXT_INSN (x)
2965 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2966 x = NEXT_INSN (x);
2967
2968 /* But in any case, non-deletable labels can appear anywhere. */
2969 break;
2970
2971 default:
2972 fatal_insn ("insn outside basic block", x);
2973 }
2974 }
2975
2976 if (JUMP_P (x)
2977 && returnjump_p (x) && ! condjump_p (x)
2978 && ! ((y = next_nonnote_nondebug_insn (x))
2979 && BARRIER_P (y)))
2980 fatal_insn ("return not followed by barrier", x);
2981
2982 if (curr_bb && x == BB_END (curr_bb))
2983 curr_bb = NULL;
2984 }
2985
2986 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2987 internal_error
2988 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2989 num_bb_notes, n_basic_blocks_for_fn (cfun));
2990
2991 return err;
2992 }
2993
2994 /* Verify the CFG and RTL consistency common for both underlying RTL and
2995 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2996
2997 Currently it does following checks:
2998 - all checks of rtl_verify_flow_info_1
2999 - test head/end pointers
3000 - check that blocks are laid out in consecutive order
3001 - check that all insns are in the basic blocks
3002 (except the switch handling code, barriers and notes)
3003 - check that all returns are followed by barriers
3004 - check that all fallthru edge points to the adjacent blocks
3005 - verify that there is a single hot/cold partition boundary after bbro */
3006
3007 static int
3008 rtl_verify_flow_info (void)
3009 {
3010 int err = 0;
3011
3012 err |= rtl_verify_flow_info_1 ();
3013
3014 err |= rtl_verify_bb_insn_chain ();
3015
3016 err |= rtl_verify_fallthru ();
3017
3018 err |= rtl_verify_bb_layout ();
3019
3020 err |= verify_hot_cold_block_grouping ();
3021
3022 return err;
3023 }
3024 \f
3025 /* Assume that the preceding pass has possibly eliminated jump instructions
3026 or converted the unconditional jumps. Eliminate the edges from CFG.
3027 Return true if any edges are eliminated. */
3028
3029 bool
3030 purge_dead_edges (basic_block bb)
3031 {
3032 edge e;
3033 rtx_insn *insn = BB_END (bb);
3034 rtx note;
3035 bool purged = false;
3036 bool found;
3037 edge_iterator ei;
3038
3039 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
3040 do
3041 insn = PREV_INSN (insn);
3042 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3043
3044 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3045 if (NONJUMP_INSN_P (insn)
3046 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3047 {
3048 rtx eqnote;
3049
3050 if (! may_trap_p (PATTERN (insn))
3051 || ((eqnote = find_reg_equal_equiv_note (insn))
3052 && ! may_trap_p (XEXP (eqnote, 0))))
3053 remove_note (insn, note);
3054 }
3055
3056 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3057 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3058 {
3059 bool remove = false;
3060
3061 /* There are three types of edges we need to handle correctly here: EH
3062 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3063 latter can appear when nonlocal gotos are used. */
3064 if (e->flags & EDGE_ABNORMAL_CALL)
3065 {
3066 if (!CALL_P (insn))
3067 remove = true;
3068 else if (can_nonlocal_goto (insn))
3069 ;
3070 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3071 ;
3072 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3073 ;
3074 else
3075 remove = true;
3076 }
3077 else if (e->flags & EDGE_EH)
3078 remove = !can_throw_internal (insn);
3079
3080 if (remove)
3081 {
3082 remove_edge (e);
3083 df_set_bb_dirty (bb);
3084 purged = true;
3085 }
3086 else
3087 ei_next (&ei);
3088 }
3089
3090 if (JUMP_P (insn))
3091 {
3092 rtx note;
3093 edge b,f;
3094 edge_iterator ei;
3095
3096 /* We do care only about conditional jumps and simplejumps. */
3097 if (!any_condjump_p (insn)
3098 && !returnjump_p (insn)
3099 && !simplejump_p (insn))
3100 return purged;
3101
3102 /* Branch probability/prediction notes are defined only for
3103 condjumps. We've possibly turned condjump into simplejump. */
3104 if (simplejump_p (insn))
3105 {
3106 note = find_reg_note (insn, REG_BR_PROB, NULL);
3107 if (note)
3108 remove_note (insn, note);
3109 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3110 remove_note (insn, note);
3111 }
3112
3113 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3114 {
3115 /* Avoid abnormal flags to leak from computed jumps turned
3116 into simplejumps. */
3117
3118 e->flags &= ~EDGE_ABNORMAL;
3119
3120 /* See if this edge is one we should keep. */
3121 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3122 /* A conditional jump can fall through into the next
3123 block, so we should keep the edge. */
3124 {
3125 ei_next (&ei);
3126 continue;
3127 }
3128 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3129 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3130 /* If the destination block is the target of the jump,
3131 keep the edge. */
3132 {
3133 ei_next (&ei);
3134 continue;
3135 }
3136 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3137 && returnjump_p (insn))
3138 /* If the destination block is the exit block, and this
3139 instruction is a return, then keep the edge. */
3140 {
3141 ei_next (&ei);
3142 continue;
3143 }
3144 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3145 /* Keep the edges that correspond to exceptions thrown by
3146 this instruction and rematerialize the EDGE_ABNORMAL
3147 flag we just cleared above. */
3148 {
3149 e->flags |= EDGE_ABNORMAL;
3150 ei_next (&ei);
3151 continue;
3152 }
3153
3154 /* We do not need this edge. */
3155 df_set_bb_dirty (bb);
3156 purged = true;
3157 remove_edge (e);
3158 }
3159
3160 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3161 return purged;
3162
3163 if (dump_file)
3164 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3165
3166 if (!optimize)
3167 return purged;
3168
3169 /* Redistribute probabilities. */
3170 if (single_succ_p (bb))
3171 {
3172 single_succ_edge (bb)->probability = profile_probability::always ();
3173 }
3174 else
3175 {
3176 note = find_reg_note (insn, REG_BR_PROB, NULL);
3177 if (!note)
3178 return purged;
3179
3180 b = BRANCH_EDGE (bb);
3181 f = FALLTHRU_EDGE (bb);
3182 b->probability = profile_probability::from_reg_br_prob_note
3183 (XINT (note, 0));
3184 f->probability = b->probability.invert ();
3185 }
3186
3187 return purged;
3188 }
3189 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3190 {
3191 /* First, there should not be any EH or ABCALL edges resulting
3192 from non-local gotos and the like. If there were, we shouldn't
3193 have created the sibcall in the first place. Second, there
3194 should of course never have been a fallthru edge. */
3195 gcc_assert (single_succ_p (bb));
3196 gcc_assert (single_succ_edge (bb)->flags
3197 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3198
3199 return 0;
3200 }
3201
3202 /* If we don't see a jump insn, we don't know exactly why the block would
3203 have been broken at this point. Look for a simple, non-fallthru edge,
3204 as these are only created by conditional branches. If we find such an
3205 edge we know that there used to be a jump here and can then safely
3206 remove all non-fallthru edges. */
3207 found = false;
3208 FOR_EACH_EDGE (e, ei, bb->succs)
3209 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3210 {
3211 found = true;
3212 break;
3213 }
3214
3215 if (!found)
3216 return purged;
3217
3218 /* Remove all but the fake and fallthru edges. The fake edge may be
3219 the only successor for this block in the case of noreturn
3220 calls. */
3221 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3222 {
3223 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3224 {
3225 df_set_bb_dirty (bb);
3226 remove_edge (e);
3227 purged = true;
3228 }
3229 else
3230 ei_next (&ei);
3231 }
3232
3233 gcc_assert (single_succ_p (bb));
3234
3235 single_succ_edge (bb)->probability = profile_probability::always ();
3236
3237 if (dump_file)
3238 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3239 bb->index);
3240 return purged;
3241 }
3242
3243 /* Search all basic blocks for potentially dead edges and purge them. Return
3244 true if some edge has been eliminated. */
3245
3246 bool
3247 purge_all_dead_edges (void)
3248 {
3249 int purged = false;
3250 basic_block bb;
3251
3252 FOR_EACH_BB_FN (bb, cfun)
3253 {
3254 bool purged_here = purge_dead_edges (bb);
3255
3256 purged |= purged_here;
3257 }
3258
3259 return purged;
3260 }
3261
3262 /* This is used by a few passes that emit some instructions after abnormal
3263 calls, moving the basic block's end, while they in fact do want to emit
3264 them on the fallthru edge. Look for abnormal call edges, find backward
3265 the call in the block and insert the instructions on the edge instead.
3266
3267 Similarly, handle instructions throwing exceptions internally.
3268
3269 Return true when instructions have been found and inserted on edges. */
3270
3271 bool
3272 fixup_abnormal_edges (void)
3273 {
3274 bool inserted = false;
3275 basic_block bb;
3276
3277 FOR_EACH_BB_FN (bb, cfun)
3278 {
3279 edge e;
3280 edge_iterator ei;
3281
3282 /* Look for cases we are interested in - calls or instructions causing
3283 exceptions. */
3284 FOR_EACH_EDGE (e, ei, bb->succs)
3285 if ((e->flags & EDGE_ABNORMAL_CALL)
3286 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3287 == (EDGE_ABNORMAL | EDGE_EH)))
3288 break;
3289
3290 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3291 {
3292 rtx_insn *insn;
3293
3294 /* Get past the new insns generated. Allow notes, as the insns
3295 may be already deleted. */
3296 insn = BB_END (bb);
3297 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3298 && !can_throw_internal (insn)
3299 && insn != BB_HEAD (bb))
3300 insn = PREV_INSN (insn);
3301
3302 if (CALL_P (insn) || can_throw_internal (insn))
3303 {
3304 rtx_insn *stop, *next;
3305
3306 e = find_fallthru_edge (bb->succs);
3307
3308 stop = NEXT_INSN (BB_END (bb));
3309 BB_END (bb) = insn;
3310
3311 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3312 {
3313 next = NEXT_INSN (insn);
3314 if (INSN_P (insn))
3315 {
3316 delete_insn (insn);
3317
3318 /* Sometimes there's still the return value USE.
3319 If it's placed after a trapping call (i.e. that
3320 call is the last insn anyway), we have no fallthru
3321 edge. Simply delete this use and don't try to insert
3322 on the non-existent edge. */
3323 if (GET_CODE (PATTERN (insn)) != USE)
3324 {
3325 /* We're not deleting it, we're moving it. */
3326 insn->set_undeleted ();
3327 SET_PREV_INSN (insn) = NULL_RTX;
3328 SET_NEXT_INSN (insn) = NULL_RTX;
3329
3330 insert_insn_on_edge (insn, e);
3331 inserted = true;
3332 }
3333 }
3334 else if (!BARRIER_P (insn))
3335 set_block_for_insn (insn, NULL);
3336 }
3337 }
3338
3339 /* It may be that we don't find any trapping insn. In this
3340 case we discovered quite late that the insn that had been
3341 marked as can_throw_internal in fact couldn't trap at all.
3342 So we should in fact delete the EH edges out of the block. */
3343 else
3344 purge_dead_edges (bb);
3345 }
3346 }
3347
3348 return inserted;
3349 }
3350 \f
3351 /* Cut the insns from FIRST to LAST out of the insns stream. */
3352
3353 rtx_insn *
3354 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3355 {
3356 rtx_insn *prevfirst = PREV_INSN (first);
3357 rtx_insn *nextlast = NEXT_INSN (last);
3358
3359 SET_PREV_INSN (first) = NULL;
3360 SET_NEXT_INSN (last) = NULL;
3361 if (prevfirst)
3362 SET_NEXT_INSN (prevfirst) = nextlast;
3363 if (nextlast)
3364 SET_PREV_INSN (nextlast) = prevfirst;
3365 else
3366 set_last_insn (prevfirst);
3367 if (!prevfirst)
3368 set_first_insn (nextlast);
3369 return first;
3370 }
3371 \f
3372 /* Skip over inter-block insns occurring after BB which are typically
3373 associated with BB (e.g., barriers). If there are any such insns,
3374 we return the last one. Otherwise, we return the end of BB. */
3375
3376 static rtx_insn *
3377 skip_insns_after_block (basic_block bb)
3378 {
3379 rtx_insn *insn, *last_insn, *next_head, *prev;
3380
3381 next_head = NULL;
3382 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3383 next_head = BB_HEAD (bb->next_bb);
3384
3385 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3386 {
3387 if (insn == next_head)
3388 break;
3389
3390 switch (GET_CODE (insn))
3391 {
3392 case BARRIER:
3393 last_insn = insn;
3394 continue;
3395
3396 case NOTE:
3397 switch (NOTE_KIND (insn))
3398 {
3399 case NOTE_INSN_BLOCK_END:
3400 gcc_unreachable ();
3401 continue;
3402 default:
3403 continue;
3404 break;
3405 }
3406 break;
3407
3408 case CODE_LABEL:
3409 if (NEXT_INSN (insn)
3410 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3411 {
3412 insn = NEXT_INSN (insn);
3413 last_insn = insn;
3414 continue;
3415 }
3416 break;
3417
3418 default:
3419 break;
3420 }
3421
3422 break;
3423 }
3424
3425 /* It is possible to hit contradictory sequence. For instance:
3426
3427 jump_insn
3428 NOTE_INSN_BLOCK_BEG
3429 barrier
3430
3431 Where barrier belongs to jump_insn, but the note does not. This can be
3432 created by removing the basic block originally following
3433 NOTE_INSN_BLOCK_BEG. In such case reorder the notes. */
3434
3435 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3436 {
3437 prev = PREV_INSN (insn);
3438 if (NOTE_P (insn))
3439 switch (NOTE_KIND (insn))
3440 {
3441 case NOTE_INSN_BLOCK_END:
3442 gcc_unreachable ();
3443 break;
3444 case NOTE_INSN_DELETED:
3445 case NOTE_INSN_DELETED_LABEL:
3446 case NOTE_INSN_DELETED_DEBUG_LABEL:
3447 continue;
3448 default:
3449 reorder_insns (insn, insn, last_insn);
3450 }
3451 }
3452
3453 return last_insn;
3454 }
3455
3456 /* Locate or create a label for a given basic block. */
3457
3458 static rtx_insn *
3459 label_for_bb (basic_block bb)
3460 {
3461 rtx_insn *label = BB_HEAD (bb);
3462
3463 if (!LABEL_P (label))
3464 {
3465 if (dump_file)
3466 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3467
3468 label = block_label (bb);
3469 }
3470
3471 return label;
3472 }
3473
3474 /* Locate the effective beginning and end of the insn chain for each
3475 block, as defined by skip_insns_after_block above. */
3476
3477 static void
3478 record_effective_endpoints (void)
3479 {
3480 rtx_insn *next_insn;
3481 basic_block bb;
3482 rtx_insn *insn;
3483
3484 for (insn = get_insns ();
3485 insn
3486 && NOTE_P (insn)
3487 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3488 insn = NEXT_INSN (insn))
3489 continue;
3490 /* No basic blocks at all? */
3491 gcc_assert (insn);
3492
3493 if (PREV_INSN (insn))
3494 cfg_layout_function_header =
3495 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3496 else
3497 cfg_layout_function_header = NULL;
3498
3499 next_insn = get_insns ();
3500 FOR_EACH_BB_FN (bb, cfun)
3501 {
3502 rtx_insn *end;
3503
3504 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3505 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3506 PREV_INSN (BB_HEAD (bb)));
3507 end = skip_insns_after_block (bb);
3508 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3509 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3510 next_insn = NEXT_INSN (BB_END (bb));
3511 }
3512
3513 cfg_layout_function_footer = next_insn;
3514 if (cfg_layout_function_footer)
3515 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3516 }
3517 \f
3518 namespace {
3519
3520 const pass_data pass_data_into_cfg_layout_mode =
3521 {
3522 RTL_PASS, /* type */
3523 "into_cfglayout", /* name */
3524 OPTGROUP_NONE, /* optinfo_flags */
3525 TV_CFG, /* tv_id */
3526 0, /* properties_required */
3527 PROP_cfglayout, /* properties_provided */
3528 0, /* properties_destroyed */
3529 0, /* todo_flags_start */
3530 0, /* todo_flags_finish */
3531 };
3532
3533 class pass_into_cfg_layout_mode : public rtl_opt_pass
3534 {
3535 public:
3536 pass_into_cfg_layout_mode (gcc::context *ctxt)
3537 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3538 {}
3539
3540 /* opt_pass methods: */
3541 virtual unsigned int execute (function *)
3542 {
3543 cfg_layout_initialize (0);
3544 return 0;
3545 }
3546
3547 }; // class pass_into_cfg_layout_mode
3548
3549 } // anon namespace
3550
3551 rtl_opt_pass *
3552 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3553 {
3554 return new pass_into_cfg_layout_mode (ctxt);
3555 }
3556
3557 namespace {
3558
3559 const pass_data pass_data_outof_cfg_layout_mode =
3560 {
3561 RTL_PASS, /* type */
3562 "outof_cfglayout", /* name */
3563 OPTGROUP_NONE, /* optinfo_flags */
3564 TV_CFG, /* tv_id */
3565 0, /* properties_required */
3566 0, /* properties_provided */
3567 PROP_cfglayout, /* properties_destroyed */
3568 0, /* todo_flags_start */
3569 0, /* todo_flags_finish */
3570 };
3571
3572 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3573 {
3574 public:
3575 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3576 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3577 {}
3578
3579 /* opt_pass methods: */
3580 virtual unsigned int execute (function *);
3581
3582 }; // class pass_outof_cfg_layout_mode
3583
3584 unsigned int
3585 pass_outof_cfg_layout_mode::execute (function *fun)
3586 {
3587 basic_block bb;
3588
3589 FOR_EACH_BB_FN (bb, fun)
3590 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3591 bb->aux = bb->next_bb;
3592
3593 cfg_layout_finalize ();
3594
3595 return 0;
3596 }
3597
3598 } // anon namespace
3599
3600 rtl_opt_pass *
3601 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3602 {
3603 return new pass_outof_cfg_layout_mode (ctxt);
3604 }
3605 \f
3606
3607 /* Link the basic blocks in the correct order, compacting the basic
3608 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3609 function also clears the basic block header and footer fields.
3610
3611 This function is usually called after a pass (e.g. tracer) finishes
3612 some transformations while in cfglayout mode. The required sequence
3613 of the basic blocks is in a linked list along the bb->aux field.
3614 This functions re-links the basic block prev_bb and next_bb pointers
3615 accordingly, and it compacts and renumbers the blocks.
3616
3617 FIXME: This currently works only for RTL, but the only RTL-specific
3618 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3619 to GIMPLE a long time ago, but it doesn't relink the basic block
3620 chain. It could do that (to give better initial RTL) if this function
3621 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3622
3623 void
3624 relink_block_chain (bool stay_in_cfglayout_mode)
3625 {
3626 basic_block bb, prev_bb;
3627 int index;
3628
3629 /* Maybe dump the re-ordered sequence. */
3630 if (dump_file)
3631 {
3632 fprintf (dump_file, "Reordered sequence:\n");
3633 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3634 NUM_FIXED_BLOCKS;
3635 bb;
3636 bb = (basic_block) bb->aux, index++)
3637 {
3638 fprintf (dump_file, " %i ", index);
3639 if (get_bb_original (bb))
3640 fprintf (dump_file, "duplicate of %i ",
3641 get_bb_original (bb)->index);
3642 else if (forwarder_block_p (bb)
3643 && !LABEL_P (BB_HEAD (bb)))
3644 fprintf (dump_file, "compensation ");
3645 else
3646 fprintf (dump_file, "bb %i ", bb->index);
3647 }
3648 }
3649
3650 /* Now reorder the blocks. */
3651 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3652 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3653 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3654 {
3655 bb->prev_bb = prev_bb;
3656 prev_bb->next_bb = bb;
3657 }
3658 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3659 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3660
3661 /* Then, clean up the aux fields. */
3662 FOR_ALL_BB_FN (bb, cfun)
3663 {
3664 bb->aux = NULL;
3665 if (!stay_in_cfglayout_mode)
3666 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3667 }
3668
3669 /* Maybe reset the original copy tables, they are not valid anymore
3670 when we renumber the basic blocks in compact_blocks. If we are
3671 are going out of cfglayout mode, don't re-allocate the tables. */
3672 if (original_copy_tables_initialized_p ())
3673 free_original_copy_tables ();
3674 if (stay_in_cfglayout_mode)
3675 initialize_original_copy_tables ();
3676
3677 /* Finally, put basic_block_info in the new order. */
3678 compact_blocks ();
3679 }
3680 \f
3681
3682 /* Given a reorder chain, rearrange the code to match. */
3683
3684 static void
3685 fixup_reorder_chain (void)
3686 {
3687 basic_block bb;
3688 rtx_insn *insn = NULL;
3689
3690 if (cfg_layout_function_header)
3691 {
3692 set_first_insn (cfg_layout_function_header);
3693 insn = cfg_layout_function_header;
3694 while (NEXT_INSN (insn))
3695 insn = NEXT_INSN (insn);
3696 }
3697
3698 /* First do the bulk reordering -- rechain the blocks without regard to
3699 the needed changes to jumps and labels. */
3700
3701 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3702 bb->aux)
3703 {
3704 if (BB_HEADER (bb))
3705 {
3706 if (insn)
3707 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3708 else
3709 set_first_insn (BB_HEADER (bb));
3710 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3711 insn = BB_HEADER (bb);
3712 while (NEXT_INSN (insn))
3713 insn = NEXT_INSN (insn);
3714 }
3715 if (insn)
3716 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3717 else
3718 set_first_insn (BB_HEAD (bb));
3719 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3720 insn = BB_END (bb);
3721 if (BB_FOOTER (bb))
3722 {
3723 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3724 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3725 while (NEXT_INSN (insn))
3726 insn = NEXT_INSN (insn);
3727 }
3728 }
3729
3730 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3731 if (cfg_layout_function_footer)
3732 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3733
3734 while (NEXT_INSN (insn))
3735 insn = NEXT_INSN (insn);
3736
3737 set_last_insn (insn);
3738 if (flag_checking)
3739 verify_insn_chain ();
3740
3741 /* Now add jumps and labels as needed to match the blocks new
3742 outgoing edges. */
3743
3744 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3745 bb->aux)
3746 {
3747 edge e_fall, e_taken, e;
3748 rtx_insn *bb_end_insn;
3749 rtx ret_label = NULL_RTX;
3750 basic_block nb;
3751 edge_iterator ei;
3752
3753 if (EDGE_COUNT (bb->succs) == 0)
3754 continue;
3755
3756 /* Find the old fallthru edge, and another non-EH edge for
3757 a taken jump. */
3758 e_taken = e_fall = NULL;
3759
3760 FOR_EACH_EDGE (e, ei, bb->succs)
3761 if (e->flags & EDGE_FALLTHRU)
3762 e_fall = e;
3763 else if (! (e->flags & EDGE_EH))
3764 e_taken = e;
3765
3766 bb_end_insn = BB_END (bb);
3767 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3768 {
3769 ret_label = JUMP_LABEL (bb_end_jump);
3770 if (any_condjump_p (bb_end_jump))
3771 {
3772 /* This might happen if the conditional jump has side
3773 effects and could therefore not be optimized away.
3774 Make the basic block to end with a barrier in order
3775 to prevent rtl_verify_flow_info from complaining. */
3776 if (!e_fall)
3777 {
3778 gcc_assert (!onlyjump_p (bb_end_jump)
3779 || returnjump_p (bb_end_jump)
3780 || (e_taken->flags & EDGE_CROSSING));
3781 emit_barrier_after (bb_end_jump);
3782 continue;
3783 }
3784
3785 /* If the old fallthru is still next, nothing to do. */
3786 if (bb->aux == e_fall->dest
3787 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3788 continue;
3789
3790 /* The degenerated case of conditional jump jumping to the next
3791 instruction can happen for jumps with side effects. We need
3792 to construct a forwarder block and this will be done just
3793 fine by force_nonfallthru below. */
3794 if (!e_taken)
3795 ;
3796
3797 /* There is another special case: if *neither* block is next,
3798 such as happens at the very end of a function, then we'll
3799 need to add a new unconditional jump. Choose the taken
3800 edge based on known or assumed probability. */
3801 else if (bb->aux != e_taken->dest)
3802 {
3803 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3804
3805 if (note
3806 && profile_probability::from_reg_br_prob_note
3807 (XINT (note, 0)) < profile_probability::even ()
3808 && invert_jump (bb_end_jump,
3809 (e_fall->dest
3810 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3811 ? NULL_RTX
3812 : label_for_bb (e_fall->dest)), 0))
3813 {
3814 e_fall->flags &= ~EDGE_FALLTHRU;
3815 gcc_checking_assert (could_fall_through
3816 (e_taken->src, e_taken->dest));
3817 e_taken->flags |= EDGE_FALLTHRU;
3818 update_br_prob_note (bb);
3819 e = e_fall, e_fall = e_taken, e_taken = e;
3820 }
3821 }
3822
3823 /* If the "jumping" edge is a crossing edge, and the fall
3824 through edge is non-crossing, leave things as they are. */
3825 else if ((e_taken->flags & EDGE_CROSSING)
3826 && !(e_fall->flags & EDGE_CROSSING))
3827 continue;
3828
3829 /* Otherwise we can try to invert the jump. This will
3830 basically never fail, however, keep up the pretense. */
3831 else if (invert_jump (bb_end_jump,
3832 (e_fall->dest
3833 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3834 ? NULL_RTX
3835 : label_for_bb (e_fall->dest)), 0))
3836 {
3837 e_fall->flags &= ~EDGE_FALLTHRU;
3838 gcc_checking_assert (could_fall_through
3839 (e_taken->src, e_taken->dest));
3840 e_taken->flags |= EDGE_FALLTHRU;
3841 update_br_prob_note (bb);
3842 if (LABEL_NUSES (ret_label) == 0
3843 && single_pred_p (e_taken->dest))
3844 delete_insn (as_a<rtx_insn *> (ret_label));
3845 continue;
3846 }
3847 }
3848 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3849 {
3850 /* If the old fallthru is still next or if
3851 asm goto doesn't have a fallthru (e.g. when followed by
3852 __builtin_unreachable ()), nothing to do. */
3853 if (! e_fall
3854 || bb->aux == e_fall->dest
3855 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3856 continue;
3857
3858 /* Otherwise we'll have to use the fallthru fixup below. */
3859 }
3860 else
3861 {
3862 /* Otherwise we have some return, switch or computed
3863 jump. In the 99% case, there should not have been a
3864 fallthru edge. */
3865 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3866 continue;
3867 }
3868 }
3869 else
3870 {
3871 /* No fallthru implies a noreturn function with EH edges, or
3872 something similarly bizarre. In any case, we don't need to
3873 do anything. */
3874 if (! e_fall)
3875 continue;
3876
3877 /* If the fallthru block is still next, nothing to do. */
3878 if (bb->aux == e_fall->dest)
3879 continue;
3880
3881 /* A fallthru to exit block. */
3882 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3883 continue;
3884 }
3885
3886 /* We got here if we need to add a new jump insn.
3887 Note force_nonfallthru can delete E_FALL and thus we have to
3888 save E_FALL->src prior to the call to force_nonfallthru. */
3889 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3890 if (nb)
3891 {
3892 nb->aux = bb->aux;
3893 bb->aux = nb;
3894 /* Don't process this new block. */
3895 bb = nb;
3896 }
3897 }
3898
3899 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3900
3901 /* Annoying special case - jump around dead jumptables left in the code. */
3902 FOR_EACH_BB_FN (bb, cfun)
3903 {
3904 edge e = find_fallthru_edge (bb->succs);
3905
3906 if (e && !can_fallthru (e->src, e->dest))
3907 force_nonfallthru (e);
3908 }
3909
3910 /* Ensure goto_locus from edges has some instructions with that locus
3911 in RTL. */
3912 if (!optimize)
3913 FOR_EACH_BB_FN (bb, cfun)
3914 {
3915 edge e;
3916 edge_iterator ei;
3917
3918 FOR_EACH_EDGE (e, ei, bb->succs)
3919 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3920 && !(e->flags & EDGE_ABNORMAL))
3921 {
3922 edge e2;
3923 edge_iterator ei2;
3924 basic_block dest, nb;
3925 rtx_insn *end;
3926
3927 insn = BB_END (e->src);
3928 end = PREV_INSN (BB_HEAD (e->src));
3929 while (insn != end
3930 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3931 insn = PREV_INSN (insn);
3932 if (insn != end
3933 && INSN_LOCATION (insn) == e->goto_locus)
3934 continue;
3935 if (simplejump_p (BB_END (e->src))
3936 && !INSN_HAS_LOCATION (BB_END (e->src)))
3937 {
3938 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3939 continue;
3940 }
3941 dest = e->dest;
3942 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3943 {
3944 /* Non-fallthru edges to the exit block cannot be split. */
3945 if (!(e->flags & EDGE_FALLTHRU))
3946 continue;
3947 }
3948 else
3949 {
3950 insn = BB_HEAD (dest);
3951 end = NEXT_INSN (BB_END (dest));
3952 while (insn != end && !NONDEBUG_INSN_P (insn))
3953 insn = NEXT_INSN (insn);
3954 if (insn != end && INSN_HAS_LOCATION (insn)
3955 && INSN_LOCATION (insn) == e->goto_locus)
3956 continue;
3957 }
3958 nb = split_edge (e);
3959 if (!INSN_P (BB_END (nb)))
3960 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3961 nb);
3962 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3963
3964 /* If there are other incoming edges to the destination block
3965 with the same goto locus, redirect them to the new block as
3966 well, this can prevent other such blocks from being created
3967 in subsequent iterations of the loop. */
3968 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3969 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3970 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3971 && e->goto_locus == e2->goto_locus)
3972 redirect_edge_and_branch (e2, nb);
3973 else
3974 ei_next (&ei2);
3975 }
3976 }
3977 }
3978 \f
3979 /* Perform sanity checks on the insn chain.
3980 1. Check that next/prev pointers are consistent in both the forward and
3981 reverse direction.
3982 2. Count insns in chain, going both directions, and check if equal.
3983 3. Check that get_last_insn () returns the actual end of chain. */
3984
3985 DEBUG_FUNCTION void
3986 verify_insn_chain (void)
3987 {
3988 rtx_insn *x, *prevx, *nextx;
3989 int insn_cnt1, insn_cnt2;
3990
3991 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3992 x != 0;
3993 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3994 gcc_assert (PREV_INSN (x) == prevx);
3995
3996 gcc_assert (prevx == get_last_insn ());
3997
3998 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3999 x != 0;
4000 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4001 gcc_assert (NEXT_INSN (x) == nextx);
4002
4003 gcc_assert (insn_cnt1 == insn_cnt2);
4004 }
4005 \f
4006 /* If we have assembler epilogues, the block falling through to exit must
4007 be the last one in the reordered chain when we reach final. Ensure
4008 that this condition is met. */
4009 static void
4010 fixup_fallthru_exit_predecessor (void)
4011 {
4012 edge e;
4013 basic_block bb = NULL;
4014
4015 /* This transformation is not valid before reload, because we might
4016 separate a call from the instruction that copies the return
4017 value. */
4018 gcc_assert (reload_completed);
4019
4020 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4021 if (e)
4022 bb = e->src;
4023
4024 if (bb && bb->aux)
4025 {
4026 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4027
4028 /* If the very first block is the one with the fall-through exit
4029 edge, we have to split that block. */
4030 if (c == bb)
4031 {
4032 bb = split_block_after_labels (bb)->dest;
4033 bb->aux = c->aux;
4034 c->aux = bb;
4035 BB_FOOTER (bb) = BB_FOOTER (c);
4036 BB_FOOTER (c) = NULL;
4037 }
4038
4039 while (c->aux != bb)
4040 c = (basic_block) c->aux;
4041
4042 c->aux = bb->aux;
4043 while (c->aux)
4044 c = (basic_block) c->aux;
4045
4046 c->aux = bb;
4047 bb->aux = NULL;
4048 }
4049 }
4050
4051 /* In case there are more than one fallthru predecessors of exit, force that
4052 there is only one. */
4053
4054 static void
4055 force_one_exit_fallthru (void)
4056 {
4057 edge e, predecessor = NULL;
4058 bool more = false;
4059 edge_iterator ei;
4060 basic_block forwarder, bb;
4061
4062 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4063 if (e->flags & EDGE_FALLTHRU)
4064 {
4065 if (predecessor == NULL)
4066 predecessor = e;
4067 else
4068 {
4069 more = true;
4070 break;
4071 }
4072 }
4073
4074 if (!more)
4075 return;
4076
4077 /* Exit has several fallthru predecessors. Create a forwarder block for
4078 them. */
4079 forwarder = split_edge (predecessor);
4080 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4081 (e = ei_safe_edge (ei)); )
4082 {
4083 if (e->src == forwarder
4084 || !(e->flags & EDGE_FALLTHRU))
4085 ei_next (&ei);
4086 else
4087 redirect_edge_and_branch_force (e, forwarder);
4088 }
4089
4090 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4091 exit block. */
4092 FOR_EACH_BB_FN (bb, cfun)
4093 {
4094 if (bb->aux == NULL && bb != forwarder)
4095 {
4096 bb->aux = forwarder;
4097 break;
4098 }
4099 }
4100 }
4101 \f
4102 /* Return true in case it is possible to duplicate the basic block BB. */
4103
4104 static bool
4105 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4106 {
4107 /* Do not attempt to duplicate tablejumps, as we need to unshare
4108 the dispatch table. This is difficult to do, as the instructions
4109 computing jump destination may be hoisted outside the basic block. */
4110 if (tablejump_p (BB_END (bb), NULL, NULL))
4111 return false;
4112
4113 /* Do not duplicate blocks containing insns that can't be copied. */
4114 if (targetm.cannot_copy_insn_p)
4115 {
4116 rtx_insn *insn = BB_HEAD (bb);
4117 while (1)
4118 {
4119 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4120 return false;
4121 if (insn == BB_END (bb))
4122 break;
4123 insn = NEXT_INSN (insn);
4124 }
4125 }
4126
4127 return true;
4128 }
4129
4130 rtx_insn *
4131 duplicate_insn_chain (rtx_insn *from, rtx_insn *to)
4132 {
4133 rtx_insn *insn, *next, *copy;
4134 rtx_note *last;
4135
4136 /* Avoid updating of boundaries of previous basic block. The
4137 note will get removed from insn stream in fixup. */
4138 last = emit_note (NOTE_INSN_DELETED);
4139
4140 /* Create copy at the end of INSN chain. The chain will
4141 be reordered later. */
4142 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4143 {
4144 switch (GET_CODE (insn))
4145 {
4146 case DEBUG_INSN:
4147 /* Don't duplicate label debug insns. */
4148 if (DEBUG_BIND_INSN_P (insn)
4149 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4150 break;
4151 /* FALLTHRU */
4152 case INSN:
4153 case CALL_INSN:
4154 case JUMP_INSN:
4155 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4156 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4157 && ANY_RETURN_P (JUMP_LABEL (insn)))
4158 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4159 maybe_copy_prologue_epilogue_insn (insn, copy);
4160 break;
4161
4162 case JUMP_TABLE_DATA:
4163 /* Avoid copying of dispatch tables. We never duplicate
4164 tablejumps, so this can hit only in case the table got
4165 moved far from original jump.
4166 Avoid copying following barrier as well if any
4167 (and debug insns in between). */
4168 for (next = NEXT_INSN (insn);
4169 next != NEXT_INSN (to);
4170 next = NEXT_INSN (next))
4171 if (!DEBUG_INSN_P (next))
4172 break;
4173 if (next != NEXT_INSN (to) && BARRIER_P (next))
4174 insn = next;
4175 break;
4176
4177 case CODE_LABEL:
4178 break;
4179
4180 case BARRIER:
4181 emit_barrier ();
4182 break;
4183
4184 case NOTE:
4185 switch (NOTE_KIND (insn))
4186 {
4187 /* In case prologue is empty and function contain label
4188 in first BB, we may want to copy the block. */
4189 case NOTE_INSN_PROLOGUE_END:
4190
4191 case NOTE_INSN_DELETED:
4192 case NOTE_INSN_DELETED_LABEL:
4193 case NOTE_INSN_DELETED_DEBUG_LABEL:
4194 /* No problem to strip these. */
4195 case NOTE_INSN_FUNCTION_BEG:
4196 /* There is always just single entry to function. */
4197 case NOTE_INSN_BASIC_BLOCK:
4198 /* We should only switch text sections once. */
4199 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4200 break;
4201
4202 case NOTE_INSN_EPILOGUE_BEG:
4203 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4204 emit_note_copy (as_a <rtx_note *> (insn));
4205 break;
4206
4207 default:
4208 /* All other notes should have already been eliminated. */
4209 gcc_unreachable ();
4210 }
4211 break;
4212 default:
4213 gcc_unreachable ();
4214 }
4215 }
4216 insn = NEXT_INSN (last);
4217 delete_insn (last);
4218 return insn;
4219 }
4220
4221 /* Create a duplicate of the basic block BB. */
4222
4223 static basic_block
4224 cfg_layout_duplicate_bb (basic_block bb)
4225 {
4226 rtx_insn *insn;
4227 basic_block new_bb;
4228
4229 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4230 new_bb = create_basic_block (insn,
4231 insn ? get_last_insn () : NULL,
4232 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4233
4234 BB_COPY_PARTITION (new_bb, bb);
4235 if (BB_HEADER (bb))
4236 {
4237 insn = BB_HEADER (bb);
4238 while (NEXT_INSN (insn))
4239 insn = NEXT_INSN (insn);
4240 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4241 if (insn)
4242 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4243 }
4244
4245 if (BB_FOOTER (bb))
4246 {
4247 insn = BB_FOOTER (bb);
4248 while (NEXT_INSN (insn))
4249 insn = NEXT_INSN (insn);
4250 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4251 if (insn)
4252 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4253 }
4254
4255 return new_bb;
4256 }
4257
4258 \f
4259 /* Main entry point to this module - initialize the datastructures for
4260 CFG layout changes. It keeps LOOPS up-to-date if not null.
4261
4262 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4263
4264 void
4265 cfg_layout_initialize (int flags)
4266 {
4267 rtx_insn_list *x;
4268 basic_block bb;
4269
4270 /* Once bb partitioning is complete, cfg layout mode should not be
4271 re-entered. Entering cfg layout mode may require fixups. As an
4272 example, if edge forwarding performed when optimizing the cfg
4273 layout required moving a block from the hot to the cold
4274 section. This would create an illegal partitioning unless some
4275 manual fixup was performed. */
4276 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4277
4278 initialize_original_copy_tables ();
4279
4280 cfg_layout_rtl_register_cfg_hooks ();
4281
4282 record_effective_endpoints ();
4283
4284 /* Make sure that the targets of non local gotos are marked. */
4285 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4286 {
4287 bb = BLOCK_FOR_INSN (x->insn ());
4288 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4289 }
4290
4291 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4292 }
4293
4294 /* Splits superblocks. */
4295 void
4296 break_superblocks (void)
4297 {
4298 bool need = false;
4299 basic_block bb;
4300
4301 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4302 bitmap_clear (superblocks);
4303
4304 FOR_EACH_BB_FN (bb, cfun)
4305 if (bb->flags & BB_SUPERBLOCK)
4306 {
4307 bb->flags &= ~BB_SUPERBLOCK;
4308 bitmap_set_bit (superblocks, bb->index);
4309 need = true;
4310 }
4311
4312 if (need)
4313 {
4314 rebuild_jump_labels (get_insns ());
4315 find_many_sub_basic_blocks (superblocks);
4316 }
4317 }
4318
4319 /* Finalize the changes: reorder insn list according to the sequence specified
4320 by aux pointers, enter compensation code, rebuild scope forest. */
4321
4322 void
4323 cfg_layout_finalize (void)
4324 {
4325 checking_verify_flow_info ();
4326 free_dominance_info (CDI_DOMINATORS);
4327 force_one_exit_fallthru ();
4328 rtl_register_cfg_hooks ();
4329 if (reload_completed && !targetm.have_epilogue ())
4330 fixup_fallthru_exit_predecessor ();
4331 fixup_reorder_chain ();
4332
4333 rebuild_jump_labels (get_insns ());
4334 delete_dead_jumptables ();
4335
4336 if (flag_checking)
4337 verify_insn_chain ();
4338 checking_verify_flow_info ();
4339 }
4340
4341
4342 /* Same as split_block but update cfg_layout structures. */
4343
4344 static basic_block
4345 cfg_layout_split_block (basic_block bb, void *insnp)
4346 {
4347 rtx insn = (rtx) insnp;
4348 basic_block new_bb = rtl_split_block (bb, insn);
4349
4350 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4351 BB_FOOTER (bb) = NULL;
4352
4353 return new_bb;
4354 }
4355
4356 /* Redirect Edge to DEST. */
4357 static edge
4358 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4359 {
4360 basic_block src = e->src;
4361 edge ret;
4362
4363 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4364 return NULL;
4365
4366 if (e->dest == dest)
4367 return e;
4368
4369 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4370 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4371 {
4372 df_set_bb_dirty (src);
4373 return ret;
4374 }
4375
4376 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4377 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4378 {
4379 if (dump_file)
4380 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4381 e->src->index, dest->index);
4382
4383 df_set_bb_dirty (e->src);
4384 redirect_edge_succ (e, dest);
4385 return e;
4386 }
4387
4388 /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
4389 in the case the basic block appears to be in sequence. Avoid this
4390 transformation. */
4391
4392 if (e->flags & EDGE_FALLTHRU)
4393 {
4394 /* Redirect any branch edges unified with the fallthru one. */
4395 if (JUMP_P (BB_END (src))
4396 && label_is_jump_target_p (BB_HEAD (e->dest),
4397 BB_END (src)))
4398 {
4399 edge redirected;
4400
4401 if (dump_file)
4402 fprintf (dump_file, "Fallthru edge unified with branch "
4403 "%i->%i redirected to %i\n",
4404 e->src->index, e->dest->index, dest->index);
4405 e->flags &= ~EDGE_FALLTHRU;
4406 redirected = redirect_branch_edge (e, dest);
4407 gcc_assert (redirected);
4408 redirected->flags |= EDGE_FALLTHRU;
4409 df_set_bb_dirty (redirected->src);
4410 return redirected;
4411 }
4412 /* In case we are redirecting fallthru edge to the branch edge
4413 of conditional jump, remove it. */
4414 if (EDGE_COUNT (src->succs) == 2)
4415 {
4416 /* Find the edge that is different from E. */
4417 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4418
4419 if (s->dest == dest
4420 && any_condjump_p (BB_END (src))
4421 && onlyjump_p (BB_END (src)))
4422 delete_insn (BB_END (src));
4423 }
4424 if (dump_file)
4425 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4426 e->src->index, e->dest->index, dest->index);
4427 ret = redirect_edge_succ_nodup (e, dest);
4428 }
4429 else
4430 ret = redirect_branch_edge (e, dest);
4431
4432 /* We don't want simplejumps in the insn stream during cfglayout. */
4433 gcc_assert (!simplejump_p (BB_END (src)));
4434
4435 df_set_bb_dirty (src);
4436 return ret;
4437 }
4438
4439 /* Simple wrapper as we always can redirect fallthru edges. */
4440 static basic_block
4441 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4442 {
4443 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4444
4445 gcc_assert (redirected);
4446 return NULL;
4447 }
4448
4449 /* Same as delete_basic_block but update cfg_layout structures. */
4450
4451 static void
4452 cfg_layout_delete_block (basic_block bb)
4453 {
4454 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4455 rtx_insn **to;
4456
4457 if (BB_HEADER (bb))
4458 {
4459 next = BB_HEAD (bb);
4460 if (prev)
4461 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4462 else
4463 set_first_insn (BB_HEADER (bb));
4464 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4465 insn = BB_HEADER (bb);
4466 while (NEXT_INSN (insn))
4467 insn = NEXT_INSN (insn);
4468 SET_NEXT_INSN (insn) = next;
4469 SET_PREV_INSN (next) = insn;
4470 }
4471 next = NEXT_INSN (BB_END (bb));
4472 if (BB_FOOTER (bb))
4473 {
4474 insn = BB_FOOTER (bb);
4475 while (insn)
4476 {
4477 if (BARRIER_P (insn))
4478 {
4479 if (PREV_INSN (insn))
4480 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4481 else
4482 BB_FOOTER (bb) = NEXT_INSN (insn);
4483 if (NEXT_INSN (insn))
4484 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4485 }
4486 if (LABEL_P (insn))
4487 break;
4488 insn = NEXT_INSN (insn);
4489 }
4490 if (BB_FOOTER (bb))
4491 {
4492 insn = BB_END (bb);
4493 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4494 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4495 while (NEXT_INSN (insn))
4496 insn = NEXT_INSN (insn);
4497 SET_NEXT_INSN (insn) = next;
4498 if (next)
4499 SET_PREV_INSN (next) = insn;
4500 else
4501 set_last_insn (insn);
4502 }
4503 }
4504 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4505 to = &BB_HEADER (bb->next_bb);
4506 else
4507 to = &cfg_layout_function_footer;
4508
4509 rtl_delete_block (bb);
4510
4511 if (prev)
4512 prev = NEXT_INSN (prev);
4513 else
4514 prev = get_insns ();
4515 if (next)
4516 next = PREV_INSN (next);
4517 else
4518 next = get_last_insn ();
4519
4520 if (next && NEXT_INSN (next) != prev)
4521 {
4522 remaints = unlink_insn_chain (prev, next);
4523 insn = remaints;
4524 while (NEXT_INSN (insn))
4525 insn = NEXT_INSN (insn);
4526 SET_NEXT_INSN (insn) = *to;
4527 if (*to)
4528 SET_PREV_INSN (*to) = insn;
4529 *to = remaints;
4530 }
4531 }
4532
4533 /* Return true when blocks A and B can be safely merged. */
4534
4535 static bool
4536 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4537 {
4538 /* If we are partitioning hot/cold basic blocks, we don't want to
4539 mess up unconditional or indirect jumps that cross between hot
4540 and cold sections.
4541
4542 Basic block partitioning may result in some jumps that appear to
4543 be optimizable (or blocks that appear to be mergeable), but which really
4544 must be left untouched (they are required to make it safely across
4545 partition boundaries). See the comments at the top of
4546 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4547
4548 if (BB_PARTITION (a) != BB_PARTITION (b))
4549 return false;
4550
4551 /* Protect the loop latches. */
4552 if (current_loops && b->loop_father->latch == b)
4553 return false;
4554
4555 /* If we would end up moving B's instructions, make sure it doesn't fall
4556 through into the exit block, since we cannot recover from a fallthrough
4557 edge into the exit block occurring in the middle of a function. */
4558 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4559 {
4560 edge e = find_fallthru_edge (b->succs);
4561 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4562 return false;
4563 }
4564
4565 /* There must be exactly one edge in between the blocks. */
4566 return (single_succ_p (a)
4567 && single_succ (a) == b
4568 && single_pred_p (b) == 1
4569 && a != b
4570 /* Must be simple edge. */
4571 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4572 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4573 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4574 /* If the jump insn has side effects, we can't kill the edge.
4575 When not optimizing, try_redirect_by_replacing_jump will
4576 not allow us to redirect an edge by replacing a table jump. */
4577 && (!JUMP_P (BB_END (a))
4578 || ((!optimize || reload_completed)
4579 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4580 }
4581
4582 /* Merge block A and B. The blocks must be mergeable. */
4583
4584 static void
4585 cfg_layout_merge_blocks (basic_block a, basic_block b)
4586 {
4587 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4588 rtx_insn *insn;
4589
4590 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4591
4592 if (dump_file)
4593 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4594 a->index);
4595
4596 /* If there was a CODE_LABEL beginning B, delete it. */
4597 if (LABEL_P (BB_HEAD (b)))
4598 {
4599 delete_insn (BB_HEAD (b));
4600 }
4601
4602 /* We should have fallthru edge in a, or we can do dummy redirection to get
4603 it cleaned up. */
4604 if (JUMP_P (BB_END (a)))
4605 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4606 gcc_assert (!JUMP_P (BB_END (a)));
4607
4608 /* When not optimizing and the edge is the only place in RTL which holds
4609 some unique locus, emit a nop with that locus in between. */
4610 if (!optimize)
4611 emit_nop_for_unique_locus_between (a, b);
4612
4613 /* Move things from b->footer after a->footer. */
4614 if (BB_FOOTER (b))
4615 {
4616 if (!BB_FOOTER (a))
4617 BB_FOOTER (a) = BB_FOOTER (b);
4618 else
4619 {
4620 rtx_insn *last = BB_FOOTER (a);
4621
4622 while (NEXT_INSN (last))
4623 last = NEXT_INSN (last);
4624 SET_NEXT_INSN (last) = BB_FOOTER (b);
4625 SET_PREV_INSN (BB_FOOTER (b)) = last;
4626 }
4627 BB_FOOTER (b) = NULL;
4628 }
4629
4630 /* Move things from b->header before a->footer.
4631 Note that this may include dead tablejump data, but we don't clean
4632 those up until we go out of cfglayout mode. */
4633 if (BB_HEADER (b))
4634 {
4635 if (! BB_FOOTER (a))
4636 BB_FOOTER (a) = BB_HEADER (b);
4637 else
4638 {
4639 rtx_insn *last = BB_HEADER (b);
4640
4641 while (NEXT_INSN (last))
4642 last = NEXT_INSN (last);
4643 SET_NEXT_INSN (last) = BB_FOOTER (a);
4644 SET_PREV_INSN (BB_FOOTER (a)) = last;
4645 BB_FOOTER (a) = BB_HEADER (b);
4646 }
4647 BB_HEADER (b) = NULL;
4648 }
4649
4650 /* In the case basic blocks are not adjacent, move them around. */
4651 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4652 {
4653 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4654
4655 emit_insn_after_noloc (insn, BB_END (a), a);
4656 }
4657 /* Otherwise just re-associate the instructions. */
4658 else
4659 {
4660 insn = BB_HEAD (b);
4661 BB_END (a) = BB_END (b);
4662 }
4663
4664 /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4665 We need to explicitly call. */
4666 update_bb_for_insn_chain (insn, BB_END (b), a);
4667
4668 /* Skip possible DELETED_LABEL insn. */
4669 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4670 insn = NEXT_INSN (insn);
4671 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4672 BB_HEAD (b) = BB_END (b) = NULL;
4673 delete_insn (insn);
4674
4675 df_bb_delete (b->index);
4676
4677 /* If B was a forwarder block, propagate the locus on the edge. */
4678 if (forwarder_p
4679 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4680 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4681
4682 if (dump_file)
4683 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4684 }
4685
4686 /* Split edge E. */
4687
4688 static basic_block
4689 cfg_layout_split_edge (edge e)
4690 {
4691 basic_block new_bb =
4692 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4693 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4694 NULL_RTX, e->src);
4695
4696 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4697 BB_COPY_PARTITION (new_bb, e->src);
4698 else
4699 BB_COPY_PARTITION (new_bb, e->dest);
4700 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4701 redirect_edge_and_branch_force (e, new_bb);
4702
4703 return new_bb;
4704 }
4705
4706 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4707
4708 static void
4709 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4710 {
4711 }
4712
4713 /* Return true if BB contains only labels or non-executable
4714 instructions. */
4715
4716 static bool
4717 rtl_block_empty_p (basic_block bb)
4718 {
4719 rtx_insn *insn;
4720
4721 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4722 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4723 return true;
4724
4725 FOR_BB_INSNS (bb, insn)
4726 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4727 return false;
4728
4729 return true;
4730 }
4731
4732 /* Split a basic block if it ends with a conditional branch and if
4733 the other part of the block is not empty. */
4734
4735 static basic_block
4736 rtl_split_block_before_cond_jump (basic_block bb)
4737 {
4738 rtx_insn *insn;
4739 rtx_insn *split_point = NULL;
4740 rtx_insn *last = NULL;
4741 bool found_code = false;
4742
4743 FOR_BB_INSNS (bb, insn)
4744 {
4745 if (any_condjump_p (insn))
4746 split_point = last;
4747 else if (NONDEBUG_INSN_P (insn))
4748 found_code = true;
4749 last = insn;
4750 }
4751
4752 /* Did not find everything. */
4753 if (found_code && split_point)
4754 return split_block (bb, split_point)->dest;
4755 else
4756 return NULL;
4757 }
4758
4759 /* Return 1 if BB ends with a call, possibly followed by some
4760 instructions that must stay with the call, 0 otherwise. */
4761
4762 static bool
4763 rtl_block_ends_with_call_p (basic_block bb)
4764 {
4765 rtx_insn *insn = BB_END (bb);
4766
4767 while (!CALL_P (insn)
4768 && insn != BB_HEAD (bb)
4769 && (keep_with_call_p (insn)
4770 || NOTE_P (insn)
4771 || DEBUG_INSN_P (insn)))
4772 insn = PREV_INSN (insn);
4773 return (CALL_P (insn));
4774 }
4775
4776 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4777
4778 static bool
4779 rtl_block_ends_with_condjump_p (const_basic_block bb)
4780 {
4781 return any_condjump_p (BB_END (bb));
4782 }
4783
4784 /* Return true if we need to add fake edge to exit.
4785 Helper function for rtl_flow_call_edges_add. */
4786
4787 static bool
4788 need_fake_edge_p (const rtx_insn *insn)
4789 {
4790 if (!INSN_P (insn))
4791 return false;
4792
4793 if ((CALL_P (insn)
4794 && !SIBLING_CALL_P (insn)
4795 && !find_reg_note (insn, REG_NORETURN, NULL)
4796 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4797 return true;
4798
4799 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4800 && MEM_VOLATILE_P (PATTERN (insn)))
4801 || (GET_CODE (PATTERN (insn)) == PARALLEL
4802 && asm_noperands (insn) != -1
4803 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4804 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4805 }
4806
4807 /* Add fake edges to the function exit for any non constant and non noreturn
4808 calls, volatile inline assembly in the bitmap of blocks specified by
4809 BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks
4810 that were split.
4811
4812 The goal is to expose cases in which entering a basic block does not imply
4813 that all subsequent instructions must be executed. */
4814
4815 static int
4816 rtl_flow_call_edges_add (sbitmap blocks)
4817 {
4818 int i;
4819 int blocks_split = 0;
4820 int last_bb = last_basic_block_for_fn (cfun);
4821 bool check_last_block = false;
4822
4823 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4824 return 0;
4825
4826 if (! blocks)
4827 check_last_block = true;
4828 else
4829 check_last_block = bitmap_bit_p (blocks,
4830 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4831
4832 /* In the last basic block, before epilogue generation, there will be
4833 a fallthru edge to EXIT. Special care is required if the last insn
4834 of the last basic block is a call because make_edge folds duplicate
4835 edges, which would result in the fallthru edge also being marked
4836 fake, which would result in the fallthru edge being removed by
4837 remove_fake_edges, which would result in an invalid CFG.
4838
4839 Moreover, we can't elide the outgoing fake edge, since the block
4840 profiler needs to take this into account in order to solve the minimal
4841 spanning tree in the case that the call doesn't return.
4842
4843 Handle this by adding a dummy instruction in a new last basic block. */
4844 if (check_last_block)
4845 {
4846 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4847 rtx_insn *insn = BB_END (bb);
4848
4849 /* Back up past insns that must be kept in the same block as a call. */
4850 while (insn != BB_HEAD (bb)
4851 && keep_with_call_p (insn))
4852 insn = PREV_INSN (insn);
4853
4854 if (need_fake_edge_p (insn))
4855 {
4856 edge e;
4857
4858 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4859 if (e)
4860 {
4861 insert_insn_on_edge (gen_use (const0_rtx), e);
4862 commit_edge_insertions ();
4863 }
4864 }
4865 }
4866
4867 /* Now add fake edges to the function exit for any non constant
4868 calls since there is no way that we can determine if they will
4869 return or not... */
4870
4871 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4872 {
4873 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
4874 rtx_insn *insn;
4875 rtx_insn *prev_insn;
4876
4877 if (!bb)
4878 continue;
4879
4880 if (blocks && !bitmap_bit_p (blocks, i))
4881 continue;
4882
4883 for (insn = BB_END (bb); ; insn = prev_insn)
4884 {
4885 prev_insn = PREV_INSN (insn);
4886 if (need_fake_edge_p (insn))
4887 {
4888 edge e;
4889 rtx_insn *split_at_insn = insn;
4890
4891 /* Don't split the block between a call and an insn that should
4892 remain in the same block as the call. */
4893 if (CALL_P (insn))
4894 while (split_at_insn != BB_END (bb)
4895 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4896 split_at_insn = NEXT_INSN (split_at_insn);
4897
4898 /* The handling above of the final block before the epilogue
4899 should be enough to verify that there is no edge to the exit
4900 block in CFG already. Calling make_edge in such case would
4901 cause us to mark that edge as fake and remove it later. */
4902
4903 if (flag_checking && split_at_insn == BB_END (bb))
4904 {
4905 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4906 gcc_assert (e == NULL);
4907 }
4908
4909 /* Note that the following may create a new basic block
4910 and renumber the existing basic blocks. */
4911 if (split_at_insn != BB_END (bb))
4912 {
4913 e = split_block (bb, split_at_insn);
4914 if (e)
4915 blocks_split++;
4916 }
4917
4918 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
4919 ne->probability = profile_probability::guessed_never ();
4920 }
4921
4922 if (insn == BB_HEAD (bb))
4923 break;
4924 }
4925 }
4926
4927 if (blocks_split)
4928 verify_flow_info ();
4929
4930 return blocks_split;
4931 }
4932
4933 /* Add COMP_RTX as a condition at end of COND_BB. FIRST_HEAD is
4934 the conditional branch target, SECOND_HEAD should be the fall-thru
4935 there is no need to handle this here the loop versioning code handles
4936 this. the reason for SECON_HEAD is that it is needed for condition
4937 in trees, and this should be of the same type since it is a hook. */
4938 static void
4939 rtl_lv_add_condition_to_bb (basic_block first_head ,
4940 basic_block second_head ATTRIBUTE_UNUSED,
4941 basic_block cond_bb, void *comp_rtx)
4942 {
4943 rtx_code_label *label;
4944 rtx_insn *seq, *jump;
4945 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4946 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4947 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4948 machine_mode mode;
4949
4950
4951 label = block_label (first_head);
4952 mode = GET_MODE (op0);
4953 if (mode == VOIDmode)
4954 mode = GET_MODE (op1);
4955
4956 start_sequence ();
4957 op0 = force_operand (op0, NULL_RTX);
4958 op1 = force_operand (op1, NULL_RTX);
4959 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
4960 profile_probability::uninitialized ());
4961 jump = get_last_insn ();
4962 JUMP_LABEL (jump) = label;
4963 LABEL_NUSES (label)++;
4964 seq = get_insns ();
4965 end_sequence ();
4966
4967 /* Add the new cond, in the new head. */
4968 emit_insn_after (seq, BB_END (cond_bb));
4969 }
4970
4971
4972 /* Given a block B with unconditional branch at its end, get the
4973 store the return the branch edge and the fall-thru edge in
4974 BRANCH_EDGE and FALLTHRU_EDGE respectively. */
4975 static void
4976 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4977 edge *fallthru_edge)
4978 {
4979 edge e = EDGE_SUCC (b, 0);
4980
4981 if (e->flags & EDGE_FALLTHRU)
4982 {
4983 *fallthru_edge = e;
4984 *branch_edge = EDGE_SUCC (b, 1);
4985 }
4986 else
4987 {
4988 *branch_edge = e;
4989 *fallthru_edge = EDGE_SUCC (b, 1);
4990 }
4991 }
4992
4993 void
4994 init_rtl_bb_info (basic_block bb)
4995 {
4996 gcc_assert (!bb->il.x.rtl);
4997 bb->il.x.head_ = NULL;
4998 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
4999 }
5000
5001 /* Returns true if it is possible to remove edge E by redirecting
5002 it to the destination of the other edge from E->src. */
5003
5004 static bool
5005 rtl_can_remove_branch_p (const_edge e)
5006 {
5007 const_basic_block src = e->src;
5008 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5009 const rtx_insn *insn = BB_END (src);
5010 rtx set;
5011
5012 /* The conditions are taken from try_redirect_by_replacing_jump. */
5013 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5014 return false;
5015
5016 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5017 return false;
5018
5019 if (BB_PARTITION (src) != BB_PARTITION (target))
5020 return false;
5021
5022 if (!onlyjump_p (insn)
5023 || tablejump_p (insn, NULL, NULL))
5024 return false;
5025
5026 set = single_set (insn);
5027 if (!set || side_effects_p (set))
5028 return false;
5029
5030 return true;
5031 }
5032
5033 static basic_block
5034 rtl_duplicate_bb (basic_block bb)
5035 {
5036 bb = cfg_layout_duplicate_bb (bb);
5037 bb->aux = NULL;
5038 return bb;
5039 }
5040
5041 /* Do book-keeping of basic block BB for the profile consistency checker.
5042 If AFTER_PASS is 0, do pre-pass accounting, or if AFTER_PASS is 1
5043 then do post-pass accounting. Store the counting in RECORD. */
5044 static void
5045 rtl_account_profile_record (basic_block bb, int after_pass,
5046 struct profile_record *record)
5047 {
5048 rtx_insn *insn;
5049 FOR_BB_INSNS (bb, insn)
5050 if (INSN_P (insn))
5051 {
5052 record->size[after_pass] += insn_cost (insn, false);
5053 if (bb->count.initialized_p ())
5054 record->time[after_pass]
5055 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5056 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5057 record->time[after_pass]
5058 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5059 }
5060 }
5061
5062 /* Implementation of CFG manipulation for linearized RTL. */
5063 struct cfg_hooks rtl_cfg_hooks = {
5064 "rtl",
5065 rtl_verify_flow_info,
5066 rtl_dump_bb,
5067 rtl_dump_bb_for_graph,
5068 rtl_create_basic_block,
5069 rtl_redirect_edge_and_branch,
5070 rtl_redirect_edge_and_branch_force,
5071 rtl_can_remove_branch_p,
5072 rtl_delete_block,
5073 rtl_split_block,
5074 rtl_move_block_after,
5075 rtl_can_merge_blocks, /* can_merge_blocks_p */
5076 rtl_merge_blocks,
5077 rtl_predict_edge,
5078 rtl_predicted_by_p,
5079 cfg_layout_can_duplicate_bb_p,
5080 rtl_duplicate_bb,
5081 rtl_split_edge,
5082 rtl_make_forwarder_block,
5083 rtl_tidy_fallthru_edge,
5084 rtl_force_nonfallthru,
5085 rtl_block_ends_with_call_p,
5086 rtl_block_ends_with_condjump_p,
5087 rtl_flow_call_edges_add,
5088 NULL, /* execute_on_growing_pred */
5089 NULL, /* execute_on_shrinking_pred */
5090 NULL, /* duplicate loop for trees */
5091 NULL, /* lv_add_condition_to_bb */
5092 NULL, /* lv_adjust_loop_header_phi*/
5093 NULL, /* extract_cond_bb_edges */
5094 NULL, /* flush_pending_stmts */
5095 rtl_block_empty_p, /* block_empty_p */
5096 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5097 rtl_account_profile_record,
5098 };
5099
5100 /* Implementation of CFG manipulation for cfg layout RTL, where
5101 basic block connected via fallthru edges does not have to be adjacent.
5102 This representation will hopefully become the default one in future
5103 version of the compiler. */
5104
5105 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5106 "cfglayout mode",
5107 rtl_verify_flow_info_1,
5108 rtl_dump_bb,
5109 rtl_dump_bb_for_graph,
5110 cfg_layout_create_basic_block,
5111 cfg_layout_redirect_edge_and_branch,
5112 cfg_layout_redirect_edge_and_branch_force,
5113 rtl_can_remove_branch_p,
5114 cfg_layout_delete_block,
5115 cfg_layout_split_block,
5116 rtl_move_block_after,
5117 cfg_layout_can_merge_blocks_p,
5118 cfg_layout_merge_blocks,
5119 rtl_predict_edge,
5120 rtl_predicted_by_p,
5121 cfg_layout_can_duplicate_bb_p,
5122 cfg_layout_duplicate_bb,
5123 cfg_layout_split_edge,
5124 rtl_make_forwarder_block,
5125 NULL, /* tidy_fallthru_edge */
5126 rtl_force_nonfallthru,
5127 rtl_block_ends_with_call_p,
5128 rtl_block_ends_with_condjump_p,
5129 rtl_flow_call_edges_add,
5130 NULL, /* execute_on_growing_pred */
5131 NULL, /* execute_on_shrinking_pred */
5132 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5133 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5134 NULL, /* lv_adjust_loop_header_phi*/
5135 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5136 NULL, /* flush_pending_stmts */
5137 rtl_block_empty_p, /* block_empty_p */
5138 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5139 rtl_account_profile_record,
5140 };
5141
5142 #include "gt-cfgrtl.h"