/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
         delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
         insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
         purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
         fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to quickly determine the basic block containing an insn
         compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
     - Edge redirection with updating and optimizing of the insn chain
         block_label, tidy_fallthru_edge, force_nonfallthru  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "insn-config.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "bb-reorder.h"
#include "rtl-error.h"
#include "insn-attr.h"
#include "dojump.h"
#include "expr.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "print-rtl.h"
#include "rtl-iter.h"
#include "gimplify.h"

/* Disable warnings about missing quoting in GCC diagnostics.  */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const rtx_note *);
static int can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
\f
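
/* Editor's note: a minimal usage sketch of the edge-insertion API listed in
   the header comment above.  It is not part of this file (hence '#if 0');
   the function name and the nop payload are invented for illustration.  */
#if 0
static void
example_insert_code_on_edge (edge e)
{
  /* Queue an insn on E; nothing is emitted into the insn stream yet.  */
  insert_insn_on_edge (gen_nop (), e);

  /* Materialize all queued insertions, splitting edges where needed.  */
  commit_edge_insertions ();
}
#endif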
/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static int
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static int
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
          /* User declared labels must be preserved.  */
          && LABEL_NAME (label) == 0
          && !vec_safe_contains<rtx_insn *> (forced_labels,
                                             const_cast<rtx_code_label *> (label)));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx_insn *insn)
{
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
         might be referenced via variables, the constant pool etc.
         Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
        {
          const char *name = LABEL_NAME (insn);
          basic_block bb = BLOCK_FOR_INSN (insn);
          rtx_insn *bb_note = NEXT_INSN (insn);

          really_delete = false;
          PUT_CODE (insn, NOTE);
          NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
          NOTE_DELETED_LABEL_NAME (insn) = name;

          /* If the note following the label starts a basic block, and the
             label is a member of the same basic block, interchange the two.  */
          if (bb_note != NULL_RTX
              && NOTE_INSN_BASIC_BLOCK_P (bb_note)
              && bb != NULL
              && bb == BLOCK_FOR_INSN (bb_note))
            {
              reorder_insns_nobb (insn, insn, bb_note);
              BB_HEAD (bb) = bb_note;
              if (BB_END (bb) == bb_note)
                BB_END (bb) = insn;
            }
        }

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
        df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
          && LABEL_P (JUMP_LABEL (insn)))
        LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
              = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
             && LABEL_P (XEXP (note, 0)))
        {
          LABEL_NUSES (XEXP (note, 0))--;
          remove_note (insn, note);
        }
    }

  /* Likewise if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
         && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
        {
          rtx label = XEXP (RTVEC_ELT (vec, i), 0);

          /* When deleting code in bulk (e.g. removing many unreachable
             blocks) we can delete a label that's a target of the vector
             before deleting the vector itself.  */
          if (!NOTE_P (label))
            LABEL_NUSES (label)--;
        }
    }
}

/* Like delete_insn but also purge dead edges from BB.
   Return true if any edges are eliminated.  */

bool
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
    {
      basic_block bb = BLOCK_FOR_INSN (insn);
      if (BB_END (bb) == insn)
        purge = true;
      else if (DEBUG_INSN_P (BB_END (bb)))
        for (rtx_insn *dinsn = NEXT_INSN (insn);
             DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
          if (BB_END (bb) == dinsn)
            {
              purge = true;
              break;
            }
    }
  delete_insn (insn);
  if (purge)
    return purge_dead_edges (BLOCK_FOR_INSN (insn));
  return false;
}

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to NULL
   for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
{
  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  rtx_insn *current = finish;
  while (1)
    {
      rtx_insn *prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
        ;
      else
        delete_insn (current);

      if (clear_bb && !current->deleted ())
        set_block_for_insn (current, NULL);

      if (current == start)
        break;
      current = prev;
    }
}
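
/* Editor's note: a hedged sketch, not part of this file, showing the common
   way a caller removes a whole block's insns with delete_insn_chain.  It is
   a simplification of rtl_delete_block below, which actually uses
   get_last_bb_insn to also cover a trailing barrier or jump table; the
   function name is invented for illustration.  */
#if 0
static void
example_delete_block_insns (basic_block bb)
{
  /* Delete every insn of BB, keeping notes that must stay paired and
     clearing the BB field of any insns that survive.  */
  delete_insn_chain (BB_HEAD (bb), BB_END (bb), true);
}
#endif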
\f
/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction -
   it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain, and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block the new block
   should be placed after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
                              basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
        after = head;
      else
        {
          after = PREV_INSN (head);
          head = bb_note;
        }

      if (after != bb_note && NEXT_INSN (after) != bb_note)
        reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
        head = end = bb_note
          = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
        {
          bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
          if (head == end)
            end = bb_note;
        }
      else
        {
          bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
          head = bb_note;
          if (!end)
            end = head;
        }

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD and
   END and place it in the BB chain after block AFTER.  END can be NULL to
   create a new empty basic block before HEAD.  Both END and HEAD can be
   NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
                           last_basic_block_for_fn (cfun) + 1);

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}
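
/* Editor's note: a hedged sketch, not part of this file.  Generic code
   reaches the worker above through the create_basic_block () cfg hook;
   per the comment above, passing NULL for both HEAD and END appends an
   empty block at the end of the insn chain.  The function name is
   invented for illustration.  */
#if 0
static basic_block
example_append_empty_block (basic_block after)
{
  return create_basic_block (NULL, NULL, after);
}
#endif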

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}
\f
/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}
\f
/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
        {
          BLOCK_FOR_INSN (insn) = bb;
          if (insn == end)
            break;
        }
    }
}

/* Release the basic_block_for_insn array.  */

unsigned int
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
  return 0;
}
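
/* Editor's note: a hedged sketch, not part of this file.  Passes that need
   BLOCK_FOR_INSN typically bracket their work with the two routines above;
   the function name is invented for illustration.  */
#if 0
static void
example_with_bb_for_insn (void)
{
  compute_bb_for_insn ();
  /* ...work that queries BLOCK_FOR_INSN (insn)...  */
  free_bb_for_insn ();
}
#endif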

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
  /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the insn after which to emit code when we want to insert code at
   the entry of the function.  */

rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
          BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */

void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (BARRIERs are skipped) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

\f
/* Like active_insn_p, except keep the return value use or clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.
     Similarly, keep a USE of the function return value, otherwise
     the USE is dropped and we could fail to thread jump if USE
     appears on some paths and not on others, see PR90257.  */
  if ((GET_CODE (PATTERN (insn)) == CLOBBER
       || GET_CODE (PATTERN (insn)) == USE)
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
        return false;
    }

  return true;
}
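
/* Editor's note: a hedged sketch, not part of this file, of a typical use
   of forwarder_block_p: skip through trivial forwarder blocks to find the
   real destination.  The function name is invented for illustration.  */
#if 0
static basic_block
example_skip_forwarders (basic_block bb)
{
  while (forwarder_block_p (bb))
    bb = single_succ (bb);
  return bb;
}
#endif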

/* Return true if we can reach target from src by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return true if we could reach target from src by falling through,
   if the target were made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */

static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}
\f
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Create a new basic block just after basic block BB by splitting
   everything after the specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
        {
          rtx_insn *next = insn;

          insn = PREV_INSN (insn);

          /* If the block contains only debug insns, insn would have
             been NULL in a non-debug compilation, and then we'd end
             up emitting a DELETED note.  For -fcompare-debug
             stability, emit the note too.  */
          if (insn != BB_END (bb)
              && DEBUG_INSN_P (next)
              && DEBUG_INSN_P (BB_END (bb)))
            {
              while (next != BB_END (bb) && DEBUG_INSN_P (next))
                next = NEXT_INSN (next);

              if (next == BB_END (bb))
                emit_note_after (NOTE_INSN_DELETED, next);
            }
        }
      else
        insn = get_last_insn ();
    }

  /* We probably should check the type of the insn so that we do not create
     an inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}
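
/* Editor's note: a hedged sketch, not part of this file.  Generic callers
   split a block through the split_block () cfg hook, which returns the edge
   connecting the two halves; the new block is that edge's destination.  The
   function name is invented for illustration.  */
#if 0
static basic_block
example_split_after (basic_block bb, rtx_insn *insn)
{
  edge e = split_block (bb, insn);
  return e->dest;
}
#endif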

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && INSN_LOCATION (insn) == goto_locus)
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
        insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
          && INSN_LOCATION (insn) == goto_locus)
        return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  /* If B is a forwarder block whose outgoing edge has no location, we'll
     propagate the locus of the edge between A and B onto it.  */
  const bool forward_edge_locus
    = (b->flags & BB_FORWARDER_BLOCK) != 0
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  int b_empty = 0;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
             a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
         in particular at the end of a function.  */
      if (b_head == b_end)
        b_empty = 1;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
        b_empty = 1;
      if (! del_last)
        del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
        if (!NOTE_P (prev)
            || NOTE_INSN_BASIC_BLOCK_P (prev)
            || prev == BB_HEAD (a))
          break;

      del_first = a_end;

      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (HAVE_cc0 && only_sets_cc0_p (prev))
        {
          rtx_insn *tmp = prev;

          prev = prev_nonnote_insn (prev);
          if (!prev)
            prev = BB_HEAD (a);
          del_first = tmp;
        }

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* If not optimizing, preserve the locus of the single edge between
     blocks A and B if necessary by emitting a nop.  */
  if (!optimize
      && !forward_edge_locus
      && !DECL_IGNORED_P (current_function_decl))
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
         and the debug insns that make up B after the debug insns,
         bringing the debug insns into A while keeping the notes after
         the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
        reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
                            b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  if (forward_edge_locus)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
          && single_succ (a) == b
          && single_pred_p (b)
          && a != b
          /* Must be a simple edge.  */
          && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
          && a->next_bb == b
          && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
          && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
          /* If the jump insn has side effects,
             we can't kill the edge.  */
          && (!JUMP_P (BB_END (a))
              || (reload_completed
                  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
\f
/* Return the label in the head of basic block BLOCK.  Create one if it
   doesn't exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));

  return as_a <rtx_code_label *> (BB_HEAD (block));
}
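
/* Editor's note: a hedged sketch, not part of this file, of the common
   pattern around block_label (): fetch or create the target block's label,
   then retarget a jump at it (redirect_jump keeps LABEL_NUSES in sync).
   The function name is invented for illustration.  */
#if 0
static bool
example_retarget (rtx_jump_insn *jump, basic_block target)
{
  rtx_code_label *label = block_label (target);
  return redirect_jump (jump, label, 0) != 0;
}
#endif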

/* Remove all barriers from BB_FOOTER of a BB.  */

static void
remove_barriers_from_footer (basic_block bb)
{
  rtx_insn *insn = BB_FOOTER (bb);

  /* Remove barriers but keep jumptables.  */
  while (insn)
    {
      if (BARRIER_P (insn))
        {
          if (PREV_INSN (insn))
            SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
          else
            BB_FOOTER (bb) = NEXT_INSN (insn);
          if (NEXT_INSN (insn))
            SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
        }
      if (LABEL_P (insn))
        return;
      insn = NEXT_INSN (insn);
    }
}

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump completely.
   This can apply only if all edges now point to the same block.  The
   parameters and return values are equivalent to redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src), *kill_from;
  rtx set;
  int fallthru = 0;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
         edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
          && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing a branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
      && only_sets_cc0_p (PREV_INSN (insn)))
    kill_from = PREV_INSN (insn);

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
        fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink the whole insn chain.  */
      if (in_cfglayout)
        {
          delete_insn_chain (kill_from, BB_END (src), false);
          remove_barriers_from_footer (src);
        }
      else
        delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
                           false);
    }

  /* If this already is a simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
        return NULL;
      if (dump_file)
        fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
                 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
                          block_label (target), 0))
        {
          gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
          return NULL;
        }
    }

  /* Cannot do anything for the target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace a possibly complicated jump insn by a simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx_insn *label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
        fprintf (dump_file, "Replacing insn %i by jump %i\n",
                 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (kill_from, insn, false);

      /* Recognize a tablejump that we are converting to a
         simple jump and remove its associated CODE_LABEL
         and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
        delete_insn_chain (label, table, false);

      barrier = next_nonnote_nondebug_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
        emit_barrier_after (BB_END (src));
      else
        {
          if (barrier != NEXT_INSN (BB_END (src)))
            {
              /* Move the jump before the barrier so that the notes
                 which originally were or were created before the jump
                 table are inside the basic block.  */
              rtx_insn *new_insn = BB_END (src);

              update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
                                        PREV_INSN (barrier), src);

              SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
              SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

              SET_NEXT_INSN (new_insn) = barrier;
              SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

              SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
              SET_PREV_INSN (barrier) = new_insn;
            }
        }
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = profile_probability::always ();

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this
   only when it originally reached the block now headed by OLD_LABEL.
   Return true if this worked or the original target wasn't OLD_LABEL;
   return false if redirection doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
        return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
        if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
          {
            RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
            --LABEL_NUSES (old_label);
            ++LABEL_NUSES (new_label);
          }

      /* Handle casesi dispatch insns.  */
      if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
          && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
        {
          XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
                                                       new_label);
          --LABEL_NUSES (old_label);
          ++LABEL_NUSES (new_label);
        }
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
        return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
        {
          rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
          gcc_assert (GET_CODE (old_ref) == LABEL_REF);
          if (XEXP (old_ref, 0) == old_label)
            {
              ASM_OPERANDS_LABEL (tmp, i)
                = gen_rtx_LABEL_REF (Pmode, new_label);
              --LABEL_NUSES (old_label);
              ++LABEL_NUSES (new_label);
            }
        }

      if (JUMP_LABEL (insn) == old_label)
        {
          JUMP_LABEL (insn) = new_label;
          note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
          if (note)
            remove_note (insn, note);
        }
      else
        {
          note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
          if (note)
            remove_note (insn, note);
          if (JUMP_LABEL (insn) != new_label
              && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
            add_reg_note (insn, REG_LABEL_TARGET, new_label);
        }
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
             != NULL_RTX)
        XEXP (note, 0) = new_label;
    }
  else
    {
      /* ??? We could play games with moving named labels from
         one basic block to the other in case only one computed jump is
         available.  */
      if (computed_jump_p (insn)
          /* A return instruction can't be redirected.  */
          || returnjump_p (insn))
        return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
        {
          /* If the insn doesn't go where we think, we're confused.  */
          gcc_assert (JUMP_LABEL (insn) == old_label);

          /* If the substitution doesn't succeed, die.  This can happen
             if the back end emitted unrecognizable instructions or if
             the target is the exit block on some arches.  Or for crossing
             jumps.  */
          if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
                              block_label (new_bb), 0))
            {
              gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                          || CROSSING_JUMP_P (insn));
              return false;
            }
        }
    }
  return true;
}

/* Redirect the edge representing the branch of an (un)conditional jump or
   tablejump; return NULL on failure.  */

static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of a jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
        return NULL;
    }
  else
    /* When expanding, this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
                                             old_label, target))
        return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
             e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure the reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src)))
        CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from the jump at the end of
         src if it exists, and if no other successors are
         still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
        {
          bool has_crossing_succ = false;
          edge e2;
          edge_iterator ei;
          FOR_EACH_EDGE (e2, ei, e->src->succs)
            {
              has_crossing_succ |= (e2->flags & EDGE_CROSSING);
              if (has_crossing_succ)
                break;
            }
          if (!has_crossing_succ)
            CROSSING_JUMP_P (BB_END (e->src)) = 0;
        }
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
         Note that force_nonfallthru will do any necessary partition
         boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
          && BB_PARTITION (bb) != BB_PARTITION (e->dest)
          && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
        force_nonfallthru (e);
      else
        fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do so at the
   expense of adding new instructions or reordering basic blocks.

   The function can also be called with an edge destination equivalent to
   TARGET.  Then it should try the simplifications and do nothing if none
   is possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.
   We still return NULL if E already pointed to TARGET and we didn't
   manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}
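
/* Editor's note: a hedged sketch, not part of this file.  Callers normally
   reach the function above through the cfg hooks: try the cheap redirection
   first, then fall back to the forced variant, which may create a new jump
   and basic block.  The function name is invented for illustration.  */
#if 0
static void
example_redirect (edge e, basic_block target)
{
  if (!redirect_edge_and_branch (e, target))
    redirect_edge_and_branch_force (e, target);
}
#endif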

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
              || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
        {
          rtx_insn *footer_tail = BB_FOOTER (bb);

          while (NEXT_INSN (footer_tail))
            footer_tail = NEXT_INSN (footer_tail);
          if (!BARRIER_P (footer_tail))
            {
              SET_NEXT_INSN (footer_tail) = insn;
              SET_PREV_INSN (insn) = footer_tail;
            }
        }
      else
        BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In the case the last instruction is a conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect the fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
                                  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
        {
          int prob = XINT (note, 0);

          b->probability = profile_probability::from_reg_br_prob_note (prob);
          e->probability -= e->probability;
        }
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - a fallthru edge to the same block as the
         abnormal edge.
         We can't redirect the abnormal edge, but we still can split the
         fallthru one and create a separate abnormal edge to the original
         destination.  This allows bb-reorder to make such an edge
         non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        {
          /* We can't redirect the entry block.  Create an empty block
             at the start of the function which we use to add the new
             jump.  */
          edge tmp;
          edge_iterator ei;
          bool found = false;

          basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
                                               ENTRY_BLOCK_PTR_FOR_FN (cfun));
          bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;

          /* Make sure the new block ends up in the correct hot/cold
             section.  */
          BB_COPY_PARTITION (bb, e->dest);

          /* Change the existing edge's source to be the new block, and add
             a new edge from the entry block to the new block.  */
          e->src = bb;
          for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
               (tmp = ei_safe_edge (ei)); )
            {
              if (tmp == e)
                {
                  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
                  found = true;
                  break;
                }
              else
                ei_next (&ei);
            }

          gcc_assert (found);

          vec_safe_push (bb->succs, e);
          make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
                                 EDGE_FALLTHRU);
        }
    }

  /* If e->src ends with an asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
        {
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
            {
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
              XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
              adjust_jump_target = true;
            }
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
            asm_goto_edge = true;
        }
      if (adjust_jump_target)
        {
          rtx_insn *insn = BB_END (e->src);
          rtx note;
          rtx_insn *old_label = BB_HEAD (e->dest);
          rtx_insn *new_label = BB_HEAD (target);

          if (JUMP_LABEL (insn) == old_label)
            {
              JUMP_LABEL (insn) = new_label;
              note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
              if (note)
                remove_note (insn, note);
            }
          else
            {
              note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
              if (note)
                remove_note (insn, note);
              if (JUMP_LABEL (insn) != new_label
                  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
                add_reg_note (insn, REG_LABEL_TARGET, new_label);
            }
          while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
                 != NULL_RTX)
            XEXP (note, 0) = new_label;
        }
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      profile_count count = e->count ();
      profile_probability probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
         by searching forward from there.  Otherwise start searching
         forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
        new_head = table;
      else
        new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;

      /* Make sure the new block ends up in the correct hot/cold section.  */
      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire the edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;

      /* Redirect the old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = profile_probability::always ();

      /* If e->src was previously region crossing, it no longer is
         and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If the asm goto has any label refs to target's label,
         add also an edge from the asm goto bb to target.  */
      if (asm_goto_edge)
        {
          new_edge->probability = new_edge->probability.apply_scale (1, 2);
          jump_block->count = jump_block->count.apply_scale (1, 2);
          edge new_edge2 = make_edge (new_edge->src, target,
                                      e->flags & ~EDGE_FALLTHRU);
          new_edge2->probability = probability - new_edge->probability;
        }

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
        emit_jump_insn_after_setloc (targetm.gen_return (),
                                     BB_END (jump_block), loc);
      else
        {
          gcc_assert (jump_label == simple_return_rtx);
          emit_jump_insn_after_setloc (targetm.gen_simple_return (),
                                       BB_END (jump_block), loc);
        }
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (targetm.gen_jump (label),
                                   BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
   (and possibly create a new basic block) to make the edge non-fallthru.
   Return the newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect edge E even at the expense of creating a new jump insn or
   basic block.  Return the new basic block if created, NULL otherwise.
   The conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect the newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (NONDEBUG_INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
          || single_succ_p (b)))
    {
      rtx_insn *label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
        {
          /* The label is likely mentioned in some instruction before
             the tablejump and might not be DCEd, so turn it into
             a note instead, and move it before the tablejump that is
             going to be deleted.  */
          const char *name = LABEL_NAME (label);
          PUT_CODE (label, NOTE);
          NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
          NOTE_DELETED_LABEL_NAME (label) = name;
          reorder_insns (label, label, PREV_INSN (q));
          delete_insn (table);
        }

      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
        q = PREV_INSN (q);

      q = PREV_INSN (q);
    }
  /* Unconditional jumps with side-effects (i.e. which we can't just delete
     together with the barrier) should never have a fallthru edge.  */
  else if (JUMP_P (q) && any_uncondjump_p (q))
    return;

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
\f
/* Should move basic block BB after basic block AFTER.  Not implemented
   yet.  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
                      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
        return bb;
    }
  /* Return the bb before the exit block.  */
  return bb->prev_bb;
}
1850
1851 /* Split a (typically critical) edge. Return the new block.
1852 The edge must not be abnormal.
1853
1854 ??? The code generally expects to be called on critical edges.
1855 The case of a block ending in an unconditional jump to a
1856 block with multiple predecessors is not handled optimally. */
1857
1858 static basic_block
1859 rtl_split_edge (edge edge_in)
1860 {
1861 basic_block bb, new_bb;
1862 rtx_insn *before;
1863
1864 /* Abnormal edges cannot be split. */
1865 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1866
1867 /* We are going to place the new block in front of the edge's destination.
1868 Make sure the destination has no other fallthru predecessor. */
1869 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1870 {
1871 edge e = find_fallthru_edge (edge_in->dest->preds);
1872
1873 if (e)
1874 force_nonfallthru (e);
1875 }
1876
1877 /* Create the basic block note. */
1878 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1879 before = BB_HEAD (edge_in->dest);
1880 else
1881 before = NULL;
1882
1883 /* If this is a fall through edge to the exit block, the blocks might
1884 not be adjacent, and the right place is after the source. */
1885 if ((edge_in->flags & EDGE_FALLTHRU)
1886 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1887 {
1888 before = NEXT_INSN (BB_END (edge_in->src));
1889 bb = create_basic_block (before, NULL, edge_in->src);
1890 BB_COPY_PARTITION (bb, edge_in->src);
1891 }
1892 else
1893 {
1894 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1895 {
1896 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1897 BB_COPY_PARTITION (bb, edge_in->dest);
1898 }
1899 else
1900 {
1901 basic_block after = edge_in->dest->prev_bb;
1902 /* If this is post-bb reordering, and the edge crosses a partition
1903 boundary, the new block needs to be inserted in the bb chain
1904 at the end of the src partition (since we put the new bb into
1905 that partition, see below). Otherwise we may end up creating
1906 an extra partition crossing in the chain, which is illegal.
1907 It can't go after the src, because src may have a fall-through
1908 to a different block. */
1909 if (crtl->bb_reorder_complete
1910 && (edge_in->flags & EDGE_CROSSING))
1911 {
1912 after = last_bb_in_partition (edge_in->src);
1913 before = get_last_bb_insn (after);
1914 /* The instruction following the last bb in the partition should
1915 be a barrier, since that bb cannot end in a fall-through. */
1916 gcc_checking_assert (BARRIER_P (before));
1917 before = NEXT_INSN (before);
1918 }
1919 bb = create_basic_block (before, NULL, after);
1920 /* Put the split bb into the src partition, to avoid creating
1921 a situation where a cold bb dominates a hot bb, in the case
1922 where src is cold and dest is hot. The src will dominate
1923 the new bb (whereas it might not have dominated dest). */
1924 BB_COPY_PARTITION (bb, edge_in->src);
1925 }
1926 }
1927
1928 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1929
1930 /* Can't allow a region crossing edge to be fallthrough. */
1931 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1932 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1933 {
1934 new_bb = force_nonfallthru (single_succ_edge (bb));
1935 gcc_assert (!new_bb);
1936 }
1937
1938 /* For non-fallthru edges, we must adjust the predecessor's
1939 jump instruction to target our new block. */
1940 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1941 {
1942 edge redirected = redirect_edge_and_branch (edge_in, bb);
1943 gcc_assert (redirected);
1944 }
1945 else
1946 {
1947 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1948 {
1949 /* For an asm goto, even splitting of a fallthru edge might
1950 need insn patching, as other labels might point to the
1951 old label. */
1952 rtx_insn *last = BB_END (edge_in->src);
1953 if (last
1954 && JUMP_P (last)
1955 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1956 && (extract_asm_operands (PATTERN (last))
1957 || JUMP_LABEL (last) == before)
1958 && patch_jump_insn (last, before, bb))
1959 df_set_bb_dirty (edge_in->src);
1960 }
1961 redirect_edge_succ (edge_in, bb);
1962 }
1963
1964 return bb;
1965 }
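
/* A sketch of the common case handled above, with made-up block
   numbers: splitting the critical edge 2->5 creates a new block N
   placed right before block 5,

	2 --(jump)--> 5		becomes		2 --(jump)--> N
						N --(fallthru)--> 5

   and block 2's jump is redirected to N, so insns can later be
   placed "on the edge" by emitting them into N.  */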
1966
1967 /* Queue instructions for insertion on an edge between two basic blocks.
1968 The new instructions and basic blocks (if any) will not appear in the
1969 CFG until commit_edge_insertions is called. */
1970
1971 void
1972 insert_insn_on_edge (rtx pattern, edge e)
1973 {
1974 /* We cannot insert instructions on an abnormal critical edge.
1975 It will be easier to find the culprit if we die now. */
1976 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1977
1978 if (e->insns.r == NULL_RTX)
1979 start_sequence ();
1980 else
1981 push_to_sequence (e->insns.r);
1982
1983 emit_insn (pattern);
1984
1985 e->insns.r = get_insns ();
1986 end_sequence ();
1987 }
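
/* An illustrative use of the queueing API above (hypothetical
   operands; gen_move_insn is just one way to build the insn):

	insert_insn_on_edge (gen_move_insn (dest_reg, src_reg), e);
	...
	commit_edge_insertions ();

   Insns queued on an edge accumulate on e->insns.r and are only
   materialized, possibly by splitting the edge, at commit time.  */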
1988
1989 /* Update the CFG for the instructions queued on edge E. */
1990
1991 void
1992 commit_one_edge_insertion (edge e)
1993 {
1994 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1995 basic_block bb;
1996
1997 /* Pull the insns off the edge now since the edge might go away. */
1998 insns = e->insns.r;
1999 e->insns.r = NULL;
2000
2001 /* Figure out where to put these insns. If the destination has
2002 one predecessor, insert there. Except for the exit block. */
2003 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2004 {
2005 bb = e->dest;
2006
2007 /* Get the location correct wrt a code label, and "nice" wrt
2008 a basic block note, and before everything else. */
2009 tmp = BB_HEAD (bb);
2010 if (LABEL_P (tmp))
2011 tmp = NEXT_INSN (tmp);
2012 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2013 tmp = NEXT_INSN (tmp);
2014 if (tmp == BB_HEAD (bb))
2015 before = tmp;
2016 else if (tmp)
2017 after = PREV_INSN (tmp);
2018 else
2019 after = get_last_insn ();
2020 }
2021
2022 /* If the source has one successor and the edge is not abnormal,
2023 insert there. Except for the entry block.
2024 Don't do this if the predecessor ends in a jump other than an
2025 unconditional simple jump. E.g. for an asm goto that points all
2026 its labels at the fallthru basic block, we can't insert instructions
2027 before the asm goto, as the asm goto can have various side effects,
2028 and we can't emit instructions after the asm goto, as it must end
2029 the basic block. */
2030 else if ((e->flags & EDGE_ABNORMAL) == 0
2031 && single_succ_p (e->src)
2032 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2033 && (!JUMP_P (BB_END (e->src))
2034 || simplejump_p (BB_END (e->src))))
2035 {
2036 bb = e->src;
2037
2038 /* It is possible to have a non-simple jump here. Consider a target
2039 where some forms of unconditional jumps clobber a register. This
2040 happens on the fr30 for example.
2041
2042 We know this block has a single successor, so we can just emit
2043 the queued insns before the jump. */
2044 if (JUMP_P (BB_END (bb)))
2045 before = BB_END (bb);
2046 else
2047 {
2048 /* We'd better be fallthru, or we've lost track of what's what. */
2049 gcc_assert (e->flags & EDGE_FALLTHRU);
2050
2051 after = BB_END (bb);
2052 }
2053 }
2054
2055 /* Otherwise we must split the edge. */
2056 else
2057 {
2058 bb = split_edge (e);
2059
2060 /* If E crossed a partition boundary, we needed to make bb end in
2061 a region-crossing jump, even though it was originally fallthru. */
2062 if (JUMP_P (BB_END (bb)))
2063 before = BB_END (bb);
2064 else
2065 after = BB_END (bb);
2066 }
2067
2068 /* Now that we've found the spot, do the insertion. */
2069 if (before)
2070 {
2071 emit_insn_before_noloc (insns, before, bb);
2072 last = prev_nonnote_insn (before);
2073 }
2074 else
2075 last = emit_insn_after_noloc (insns, after, bb);
2076
2077 if (returnjump_p (last))
2078 {
2079 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2080 This is not currently a problem because this only happens
2081 for the (single) epilogue, which already has a fallthru edge
2082 to EXIT. */
2083
2084 e = single_succ_edge (bb);
2085 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2086 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2087
2088 e->flags &= ~EDGE_FALLTHRU;
2089 emit_barrier_after (last);
2090
2091 if (before)
2092 delete_insn (before);
2093 }
2094 else
2095 gcc_assert (!JUMP_P (last));
2096 }
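
/* To summarize the placement choices made above: the queued insns go
   at the head of E->dest if it has a single predecessor, at the tail
   of E->src if it has a single successor and ends in nothing worse
   than a simplejump, and otherwise into a block newly split out of
   the edge itself.  */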
2097
2098 /* Update the CFG for all queued instructions. */
2099
2100 void
2101 commit_edge_insertions (void)
2102 {
2103 basic_block bb;
2104
2105 /* Optimization passes that invoke this routine can cause hot blocks
2106 previously reached by both hot and cold blocks to become dominated only
2107 by cold blocks. This will cause the verification below to fail,
2108 and leave now-cold code in the hot section. In some cases this
2109 may only be visible after newly unreachable blocks are deleted,
2110 which will be done by fixup_partitions. */
2111 fixup_partitions ();
2112
2113 if (!currently_expanding_to_rtl)
2114 checking_verify_flow_info ();
2115
2116 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2117 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2118 {
2119 edge e;
2120 edge_iterator ei;
2121
2122 FOR_EACH_EDGE (e, ei, bb->succs)
2123 if (e->insns.r)
2124 {
2125 if (currently_expanding_to_rtl)
2126 rebuild_jump_labels_chain (e->insns.r);
2127 commit_one_edge_insertion (e);
2128 }
2129 }
2130 }
2131 \f
2132
2133 /* Print out RTL-specific basic block information (live information
2134 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2135 documented in dumpfile.h. */
2136
2137 static void
2138 rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2139 {
2140 char *s_indent;
2141
2142 s_indent = (char *) alloca ((size_t) indent + 1);
2143 memset (s_indent, ' ', (size_t) indent);
2144 s_indent[indent] = '\0';
2145
2146 if (df && (flags & TDF_DETAILS))
2147 {
2148 df_dump_top (bb, outf);
2149 putc ('\n', outf);
2150 }
2151
2152 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2153 {
2154 rtx_insn *last = BB_END (bb);
2155 if (last)
2156 last = NEXT_INSN (last);
2157 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2158 {
2159 if (flags & TDF_DETAILS)
2160 df_dump_insn_top (insn, outf);
2161 if (! (flags & TDF_SLIM))
2162 print_rtl_single (outf, insn);
2163 else
2164 dump_insn_slim (outf, insn);
2165 if (flags & TDF_DETAILS)
2166 df_dump_insn_bottom (insn, outf);
2167 }
2168 }
2169
2170 if (df && (flags & TDF_DETAILS))
2171 {
2172 df_dump_bottom (bb, outf);
2173 putc ('\n', outf);
2174 }
2175
2176 }
2177 \f
2178 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2179 for the start of each basic block. FLAGS are the TDF_* masks documented
2180 in dumpfile.h. */
2181
2182 void
2183 print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2184 {
2185 const rtx_insn *tmp_rtx;
2186 if (rtx_first == 0)
2187 fprintf (outf, "(nil)\n");
2188 else
2189 {
2190 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2191 int max_uid = get_max_uid ();
2192 basic_block *start = XCNEWVEC (basic_block, max_uid);
2193 basic_block *end = XCNEWVEC (basic_block, max_uid);
2194 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2195 basic_block bb;
2196
2197 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2198 insns, but the CFG is not maintained so the basic block info
2199 is not reliable. Therefore it's omitted from the dumps. */
2200 if (! (cfun->curr_properties & PROP_cfg))
2201 flags &= ~TDF_BLOCKS;
2202
2203 if (df)
2204 df_dump_start (outf);
2205
2206 if (cfun->curr_properties & PROP_cfg)
2207 {
2208 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2209 {
2210 rtx_insn *x;
2211
2212 start[INSN_UID (BB_HEAD (bb))] = bb;
2213 end[INSN_UID (BB_END (bb))] = bb;
2214 if (flags & TDF_BLOCKS)
2215 {
2216 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2217 {
2218 enum bb_state state = IN_MULTIPLE_BB;
2219
2220 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2221 state = IN_ONE_BB;
2222 in_bb_p[INSN_UID (x)] = state;
2223
2224 if (x == BB_END (bb))
2225 break;
2226 }
2227 }
2228 }
2229 }
2230
2231 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2232 {
2233 if (flags & TDF_BLOCKS)
2234 {
2235 bb = start[INSN_UID (tmp_rtx)];
2236 if (bb != NULL)
2237 {
2238 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2239 if (df && (flags & TDF_DETAILS))
2240 df_dump_top (bb, outf);
2241 }
2242
2243 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2244 && !NOTE_P (tmp_rtx)
2245 && !BARRIER_P (tmp_rtx))
2246 fprintf (outf, ";; Insn is not within a basic block\n");
2247 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2248 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2249 }
2250
2251 if (flags & TDF_DETAILS)
2252 df_dump_insn_top (tmp_rtx, outf);
2253 if (! (flags & TDF_SLIM))
2254 print_rtl_single (outf, tmp_rtx);
2255 else
2256 dump_insn_slim (outf, tmp_rtx);
2257 if (flags & TDF_DETAILS)
2258 df_dump_insn_bottom (tmp_rtx, outf);
2259
2260 bb = end[INSN_UID (tmp_rtx)];
2261 if (bb != NULL)
2262 {
2263 if (flags & TDF_BLOCKS)
2264 {
2265 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2266 if (df && (flags & TDF_DETAILS))
2267 df_dump_bottom (bb, outf);
2268 putc ('\n', outf);
2269 }
2270 /* Emit a hint if the fallthrough target of the current basic block
2271 isn't the block placed right after it. */
2272 else if (EDGE_COUNT (bb->succs) > 0)
2273 {
2274 gcc_assert (BB_END (bb) == tmp_rtx);
2275 const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
2276 /* Bypass intervening deleted-insn notes and debug insns. */
2277 while (ninsn
2278 && !NONDEBUG_INSN_P (ninsn)
2279 && !start[INSN_UID (ninsn)])
2280 ninsn = NEXT_INSN (ninsn);
2281 edge e = find_fallthru_edge (bb->succs);
2282 if (e && ninsn)
2283 {
2284 basic_block dest = e->dest;
2285 if (start[INSN_UID (ninsn)] != dest)
2286 fprintf (outf, "%s ; pc falls through to BB %d\n",
2287 print_rtx_head, dest->index);
2288 }
2289 }
2290 }
2291 }
2292
2293 free (start);
2294 free (end);
2295 free (in_bb_p);
2296 }
2297 }
2298 \f
2299 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2300
2301 void
2302 update_br_prob_note (basic_block bb)
2303 {
2304 rtx note;
2305 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2306 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2307 {
2308 if (note)
2309 {
2310 rtx *note_link, this_rtx;
2311
2312 note_link = &REG_NOTES (BB_END (bb));
2313 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2314 if (this_rtx == note)
2315 {
2316 *note_link = XEXP (this_rtx, 1);
2317 break;
2318 }
2319 }
2320 return;
2321 }
2322 if (!note
2323 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2324 return;
2325 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2326 }
2327
2328 /* Get the last insn associated with block BB (that includes barriers and
2329 tablejumps after BB). */
2330 rtx_insn *
2331 get_last_bb_insn (basic_block bb)
2332 {
2333 rtx_jump_table_data *table;
2334 rtx_insn *tmp;
2335 rtx_insn *end = BB_END (bb);
2336
2337 /* Include any jump table following the basic block. */
2338 if (tablejump_p (end, NULL, &table))
2339 end = table;
2340
2341 /* Include any barriers that may follow the basic block. */
2342 tmp = next_nonnote_nondebug_insn_bb (end);
2343 while (tmp && BARRIER_P (tmp))
2344 {
2345 end = tmp;
2346 tmp = next_nonnote_nondebug_insn_bb (end);
2347 }
2348
2349 return end;
2350 }
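
/* A sketch of the insn layout handled above for a block ending in a
   tablejump; the UIDs are made up:

	(jump_insn 17 ...)		<- BB_END (bb)
	(code_label 18)
	(jump_table_data 19 ...)	<- included via tablejump_p
	(barrier 20)			<- included by the barrier loop

   get_last_bb_insn returns the barrier 20 here.  */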
2351
2352 /* Add all BBs reachable from entry via hot paths into the SET. */
2353
2354 void
2355 find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2356 {
2357 auto_vec<basic_block, 64> worklist;
2358
2359 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2360 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2361
2362 while (worklist.length () > 0)
2363 {
2364 basic_block bb = worklist.pop ();
2365 edge_iterator ei;
2366 edge e;
2367
2368 FOR_EACH_EDGE (e, ei, bb->succs)
2369 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2370 && !set->add (e->dest))
2371 worklist.safe_push (e->dest);
2372 }
2373 }
2374
2375 /* Sanity check partition hotness to ensure that basic blocks in
2376 the cold partition don't dominate basic blocks in the hot partition.
2377 If FLAG_ONLY is true, report violations as errors. Otherwise
2378 re-mark the dominated blocks as cold, since this is run after
2379 cfg optimizations that may make hot blocks previously reached
2380 by both hot and cold blocks now only reachable along cold paths. */
2381
2382 static vec<basic_block>
2383 find_partition_fixes (bool flag_only)
2384 {
2385 basic_block bb;
2386 vec<basic_block> bbs_to_fix = vNULL;
2387 hash_set<basic_block> set;
2388
2389 /* Callers check this. */
2390 gcc_checking_assert (crtl->has_bb_partition);
2391
2392 find_bbs_reachable_by_hot_paths (&set);
2393
2394 FOR_EACH_BB_FN (bb, cfun)
2395 if (!set.contains (bb)
2396 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2397 {
2398 if (flag_only)
2399 error ("non-cold basic block %d reachable only "
2400 "by paths crossing the cold partition", bb->index);
2401 else
2402 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2403 bbs_to_fix.safe_push (bb);
2404 }
2405
2406 return bbs_to_fix;
2407 }
2408
2409 /* Perform cleanup on the hot/cold bb partitioning after optimization
2410 passes that modify the cfg. */
2411
2412 void
2413 fixup_partitions (void)
2414 {
2415 basic_block bb;
2416
2417 if (!crtl->has_bb_partition)
2418 return;
2419
2420 /* Delete any blocks that became unreachable and weren't
2421 already cleaned up, for example during edge forwarding
2422 and convert_jumps_to_returns. This will expose more
2423 opportunities for fixing the partition boundaries here.
2424 Also, the calculation of the dominance graph during verification
2425 will assert if there are unreachable nodes. */
2426 delete_unreachable_blocks ();
2427
2428 /* If there are partitions, do a sanity check on them: a basic block in
2429 a cold partition cannot dominate a basic block in a hot partition.
2430 Fix up any that now violate this requirement, as a result of edge
2431 forwarding and unreachable block deletion. */
2432 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2433
2434 /* Do the partition fixup after all necessary blocks have been converted to
2435 cold, so that we only update the region crossings in the minimum number
2436 of places, which can require forcing edges to be non-fallthru. */
2437 while (! bbs_to_fix.is_empty ())
2438 {
2439 bb = bbs_to_fix.pop ();
2440 fixup_new_cold_bb (bb);
2441 }
2442 }
2443
2444 /* Verify, in the basic block chain, that there is at most one switch
2445 between hot/cold partitions. This condition will not be true until
2446 after reorder_basic_blocks is called. */
2447
2448 static int
2449 verify_hot_cold_block_grouping (void)
2450 {
2451 basic_block bb;
2452 int err = 0;
2453 bool switched_sections = false;
2454 int current_partition = BB_UNPARTITIONED;
2455
2456 /* Even after bb reordering is complete, we go into cfglayout mode
2457 again (in compgoto). Ensure we don't call this before going back
2458 into linearized RTL, when any layout fixups will have been committed. */
2459 if (!crtl->bb_reorder_complete
2460 || current_ir_type () != IR_RTL_CFGRTL)
2461 return err;
2462
2463 FOR_EACH_BB_FN (bb, cfun)
2464 {
2465 if (current_partition != BB_UNPARTITIONED
2466 && BB_PARTITION (bb) != current_partition)
2467 {
2468 if (switched_sections)
2469 {
2470 error ("multiple hot/cold transitions found (bb %i)",
2471 bb->index);
2472 err = 1;
2473 }
2474 else
2475 switched_sections = true;
2476
2477 if (!crtl->has_bb_partition)
2478 error ("partition found but function partition flag not set");
2479 }
2480 current_partition = BB_PARTITION (bb);
2481 }
2482
2483 return err;
2484 }
2485 \f
2486
2487 /* Perform several checks on the edges out of each block, such as
2488 the consistency of the branch probabilities, the correctness
2489 of hot/cold partition crossing edges, and the number of expected
2490 successor edges. Also verify that the dominance relationship
2491 between hot/cold blocks is sane. */
2492
2493 static int
2494 rtl_verify_edges (void)
2495 {
2496 int err = 0;
2497 basic_block bb;
2498
2499 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2500 {
2501 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2502 int n_eh = 0, n_abnormal = 0;
2503 edge e, fallthru = NULL;
2504 edge_iterator ei;
2505 rtx note;
2506 bool has_crossing_edge = false;
2507
2508 if (JUMP_P (BB_END (bb))
2509 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2510 && EDGE_COUNT (bb->succs) >= 2
2511 && any_condjump_p (BB_END (bb)))
2512 {
2513 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2514 {
2515 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2516 {
2517 error ("verify_flow_info: "
2518 "REG_BR_PROB is set but cfg probability is not");
2519 err = 1;
2520 }
2521 }
2522 else if (XINT (note, 0)
2523 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2524 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2525 {
2526 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2527 XINT (note, 0),
2528 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2529 err = 1;
2530 }
2531 }
2532
2533 FOR_EACH_EDGE (e, ei, bb->succs)
2534 {
2535 bool is_crossing;
2536
2537 if (e->flags & EDGE_FALLTHRU)
2538 n_fallthru++, fallthru = e;
2539
2540 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2541 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2542 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2543 has_crossing_edge |= is_crossing;
2544 if (e->flags & EDGE_CROSSING)
2545 {
2546 if (!is_crossing)
2547 {
2548 error ("EDGE_CROSSING incorrectly set across same section");
2549 err = 1;
2550 }
2551 if (e->flags & EDGE_FALLTHRU)
2552 {
2553 error ("fallthru edge crosses section boundary in bb %i",
2554 e->src->index);
2555 err = 1;
2556 }
2557 if (e->flags & EDGE_EH)
2558 {
2559 error ("EH edge crosses section boundary in bb %i",
2560 e->src->index);
2561 err = 1;
2562 }
2563 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2564 {
2565 error ("No region crossing jump at section boundary in bb %i",
2566 bb->index);
2567 err = 1;
2568 }
2569 }
2570 else if (is_crossing)
2571 {
2572 error ("EDGE_CROSSING missing across section boundary");
2573 err = 1;
2574 }
2575
2576 if ((e->flags & ~(EDGE_DFS_BACK
2577 | EDGE_CAN_FALLTHRU
2578 | EDGE_IRREDUCIBLE_LOOP
2579 | EDGE_LOOP_EXIT
2580 | EDGE_CROSSING
2581 | EDGE_PRESERVE)) == 0)
2582 n_branch++;
2583
2584 if (e->flags & EDGE_ABNORMAL_CALL)
2585 n_abnormal_call++;
2586
2587 if (e->flags & EDGE_SIBCALL)
2588 n_sibcall++;
2589
2590 if (e->flags & EDGE_EH)
2591 n_eh++;
2592
2593 if (e->flags & EDGE_ABNORMAL)
2594 n_abnormal++;
2595 }
2596
2597 if (!has_crossing_edge
2598 && JUMP_P (BB_END (bb))
2599 && CROSSING_JUMP_P (BB_END (bb)))
2600 {
2601 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2602 error ("Region crossing jump across same section in bb %i",
2603 bb->index);
2604 err = 1;
2605 }
2606
2607 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2608 {
2609 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2610 err = 1;
2611 }
2612 if (n_eh > 1)
2613 {
2614 error ("too many exception handling edges in bb %i", bb->index);
2615 err = 1;
2616 }
2617 if (n_branch
2618 && (!JUMP_P (BB_END (bb))
2619 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2620 || any_condjump_p (BB_END (bb))))))
2621 {
2622 error ("too many outgoing branch edges from bb %i", bb->index);
2623 err = 1;
2624 }
2625 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2626 {
2627 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2628 err = 1;
2629 }
2630 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2631 {
2632 error ("wrong number of branch edges after unconditional jump"
2633 " in bb %i", bb->index);
2634 err = 1;
2635 }
2636 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2637 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2638 {
2639 error ("wrong amount of branch edges after conditional jump"
2640 " in bb %i", bb->index);
2641 err = 1;
2642 }
2643 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2644 {
2645 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2646 err = 1;
2647 }
2648 if (n_sibcall && !CALL_P (BB_END (bb)))
2649 {
2650 error ("sibcall edges for non-call insn in bb %i", bb->index);
2651 err = 1;
2652 }
2653 if (n_abnormal > n_eh
2654 && !(CALL_P (BB_END (bb))
2655 && n_abnormal == n_abnormal_call + n_sibcall)
2656 && (!JUMP_P (BB_END (bb))
2657 || any_condjump_p (BB_END (bb))
2658 || any_uncondjump_p (BB_END (bb))))
2659 {
2660 error ("abnormal edges for no purpose in bb %i", bb->index);
2661 err = 1;
2662 }
2663
2664 int has_eh = -1;
2665 FOR_EACH_EDGE (e, ei, bb->preds)
2666 {
2667 if (has_eh == -1)
2668 has_eh = (e->flags & EDGE_EH);
2669 if ((e->flags & EDGE_EH) == has_eh)
2670 continue;
2671 error ("EH incoming edge mixed with non-EH incoming edges "
2672 "in bb %i", bb->index);
2673 err = 1;
2674 break;
2675 }
2676 }
2677
2678 /* If there are partitions, do a sanity check on them: a basic block in
2679 a cold partition cannot dominate a basic block in a hot partition. */
2680 if (crtl->has_bb_partition && !err
2681 && current_ir_type () == IR_RTL_CFGLAYOUT)
2682 {
2683 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2684 err = !bbs_to_fix.is_empty ();
2685 }
2686
2687 /* Clean up. */
2688 return err;
2689 }
2690
2691 /* Checks on the instructions within blocks. Currently checks that each
2692 block starts with a basic block note, and that basic block notes and
2693 control flow jumps are not found in the middle of the block. */
2694
2695 static int
2696 rtl_verify_bb_insns (void)
2697 {
2698 rtx_insn *x;
2699 int err = 0;
2700 basic_block bb;
2701
2702 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2703 {
2704 /* Now check the header of the basic
2705 block. It ought to contain an optional CODE_LABEL followed
2706 by NOTE_BASIC_BLOCK. */
2707 x = BB_HEAD (bb);
2708 if (LABEL_P (x))
2709 {
2710 if (BB_END (bb) == x)
2711 {
2712 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2713 bb->index);
2714 err = 1;
2715 }
2716
2717 x = NEXT_INSN (x);
2718 }
2719
2720 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2721 {
2722 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2723 bb->index);
2724 err = 1;
2725 }
2726
2727 if (BB_END (bb) == x)
2728 /* Do checks for empty blocks here. */
2729 ;
2730 else
2731 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2732 {
2733 if (NOTE_INSN_BASIC_BLOCK_P (x))
2734 {
2735 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2736 INSN_UID (x), bb->index);
2737 err = 1;
2738 }
2739
2740 if (x == BB_END (bb))
2741 break;
2742
2743 if (control_flow_insn_p (x))
2744 {
2745 error ("in basic block %d:", bb->index);
2746 fatal_insn ("flow control insn inside a basic block", x);
2747 }
2748 }
2749 }
2750
2751 /* Clean up. */
2752 return err;
2753 }
2754
2755 /* Verify that block pointers for instructions in basic blocks, headers and
2756 footers are set appropriately. */
2757
2758 static int
2759 rtl_verify_bb_pointers (void)
2760 {
2761 int err = 0;
2762 basic_block bb;
2763
2764 /* Check the general integrity of the basic blocks. */
2765 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2766 {
2767 rtx_insn *insn;
2768
2769 if (!(bb->flags & BB_RTL))
2770 {
2771 error ("BB_RTL flag not set for block %d", bb->index);
2772 err = 1;
2773 }
2774
2775 FOR_BB_INSNS (bb, insn)
2776 if (BLOCK_FOR_INSN (insn) != bb)
2777 {
2778 error ("insn %d basic block pointer is %d, should be %d",
2779 INSN_UID (insn),
2780 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2781 bb->index);
2782 err = 1;
2783 }
2784
2785 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2786 if (!BARRIER_P (insn)
2787 && BLOCK_FOR_INSN (insn) != NULL)
2788 {
2789 error ("insn %d in header of bb %d has non-NULL basic block",
2790 INSN_UID (insn), bb->index);
2791 err = 1;
2792 }
2793 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2794 if (!BARRIER_P (insn)
2795 && BLOCK_FOR_INSN (insn) != NULL)
2796 {
2797 error ("insn %d in footer of bb %d has non-NULL basic block",
2798 INSN_UID (insn), bb->index);
2799 err = 1;
2800 }
2801 }
2802
2803 /* Clean up. */
2804 return err;
2805 }
2806
2807 /* Verify the CFG and RTL consistency common for both underlying RTL and
2808 cfglayout RTL.
2809
2810 Currently it does the following checks:
2811
2812 - overlapping of basic blocks
2813 - insns with wrong BLOCK_FOR_INSN pointers
2814 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2815 - tails of basic blocks (ensure that boundary is necessary)
2816 - scans the body of the basic block for JUMP_INSN, CODE_LABEL
2817 and NOTE_INSN_BASIC_BLOCK
2818 - verify that no fallthru edge crosses hot/cold partition boundaries
2819 - verify that there are no pending RTL branch predictions
2820 - verify that hot blocks are not dominated by cold blocks
2821
2822 In the future it can be extended to check a lot of other stuff as well
2823 (reachability of basic blocks, liveness information, etc.). */
2824
2825 static int
2826 rtl_verify_flow_info_1 (void)
2827 {
2828 int err = 0;
2829
2830 err |= rtl_verify_bb_pointers ();
2831
2832 err |= rtl_verify_bb_insns ();
2833
2834 err |= rtl_verify_edges ();
2835
2836 return err;
2837 }
2838
2839 /* Walk the instruction chain and verify that bb head/end pointers
2840 are correct, and that instructions are in exactly one bb and have
2841 correct block pointers. */
2842
2843 static int
2844 rtl_verify_bb_insn_chain (void)
2845 {
2846 basic_block bb;
2847 int err = 0;
2848 rtx_insn *x;
2849 rtx_insn *last_head = get_last_insn ();
2850 basic_block *bb_info;
2851 const int max_uid = get_max_uid ();
2852
2853 bb_info = XCNEWVEC (basic_block, max_uid);
2854
2855 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2856 {
2857 rtx_insn *head = BB_HEAD (bb);
2858 rtx_insn *end = BB_END (bb);
2859
2860 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2861 {
2862 /* Verify the end of the basic block is in the INSN chain. */
2863 if (x == end)
2864 break;
2865
2866 /* And that the code outside of basic blocks has NULL bb field. */
2867 if (!BARRIER_P (x)
2868 && BLOCK_FOR_INSN (x) != NULL)
2869 {
2870 error ("insn %d outside of basic blocks has non-NULL bb field",
2871 INSN_UID (x));
2872 err = 1;
2873 }
2874 }
2875
2876 if (!x)
2877 {
2878 error ("end insn %d for block %d not found in the insn stream",
2879 INSN_UID (end), bb->index);
2880 err = 1;
2881 }
2882
2883 /* Work backwards from the end to the head of the basic block
2884 to verify the head is in the RTL chain. */
2885 for (; x != NULL_RTX; x = PREV_INSN (x))
2886 {
2887 /* While walking over the insn chain, verify insns appear
2888 in only one basic block. */
2889 if (bb_info[INSN_UID (x)] != NULL)
2890 {
2891 error ("insn %d is in multiple basic blocks (%d and %d)",
2892 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2893 err = 1;
2894 }
2895
2896 bb_info[INSN_UID (x)] = bb;
2897
2898 if (x == head)
2899 break;
2900 }
2901 if (!x)
2902 {
2903 error ("head insn %d for block %d not found in the insn stream",
2904 INSN_UID (head), bb->index);
2905 err = 1;
2906 }
2907
2908 last_head = PREV_INSN (x);
2909 }
2910
2911 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2912 {
2913 /* Check that the code before the first basic block has NULL
2914 bb field. */
2915 if (!BARRIER_P (x)
2916 && BLOCK_FOR_INSN (x) != NULL)
2917 {
2918 error ("insn %d outside of basic blocks has non-NULL bb field",
2919 INSN_UID (x));
2920 err = 1;
2921 }
2922 }
2923 free (bb_info);
2924
2925 return err;
2926 }
2927
2928 /* Verify that fallthru edges point to adjacent blocks in layout order and
2929 that barriers exist after non-fallthru blocks. */
2930
2931 static int
2932 rtl_verify_fallthru (void)
2933 {
2934 basic_block bb;
2935 int err = 0;
2936
2937 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2938 {
2939 edge e;
2940
2941 e = find_fallthru_edge (bb->succs);
2942 if (!e)
2943 {
2944 rtx_insn *insn;
2945
2946 /* Ensure existence of barrier in BB with no fallthru edges. */
2947 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2948 {
2949 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2950 {
2951 error ("missing barrier after block %i", bb->index);
2952 err = 1;
2953 break;
2954 }
2955 if (BARRIER_P (insn))
2956 break;
2957 }
2958 }
2959 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2960 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2961 {
2962 rtx_insn *insn;
2963
2964 if (e->src->next_bb != e->dest)
2965 {
2966 error
2967 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2968 e->src->index, e->dest->index);
2969 err = 1;
2970 }
2971 else
2972 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2973 insn = NEXT_INSN (insn))
2974 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
2975 {
2976 error ("verify_flow_info: Incorrect fallthru %i->%i",
2977 e->src->index, e->dest->index);
2978 fatal_insn ("wrong insn in the fallthru edge", insn);
2979 err = 1;
2980 }
2981 }
2982 }
2983
2984 return err;
2985 }
2986
2987 /* Verify that blocks are laid out in consecutive order. While walking the
2988 instructions, verify that all expected instructions are inside the basic
2989 blocks, and that all returns are followed by barriers. */
2990
2991 static int
2992 rtl_verify_bb_layout (void)
2993 {
2994 basic_block bb;
2995 int err = 0;
2996 rtx_insn *x, *y;
2997 int num_bb_notes;
2998 rtx_insn * const rtx_first = get_insns ();
2999 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
3000
3001 num_bb_notes = 0;
3002
3003 for (x = rtx_first; x; x = NEXT_INSN (x))
3004 {
3005 if (NOTE_INSN_BASIC_BLOCK_P (x))
3006 {
3007 bb = NOTE_BASIC_BLOCK (x);
3008
3009 num_bb_notes++;
3010 if (bb != last_bb_seen->next_bb)
3011 internal_error ("basic blocks not laid down consecutively");
3012
3013 curr_bb = last_bb_seen = bb;
3014 }
3015
3016 if (!curr_bb)
3017 {
3018 switch (GET_CODE (x))
3019 {
3020 case BARRIER:
3021 case NOTE:
3022 break;
3023
3024 case CODE_LABEL:
3025 /* An ADDR_VEC is placed outside any basic block. */
3026 if (NEXT_INSN (x)
3027 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3028 x = NEXT_INSN (x);
3029
3030 /* But in any case, non-deletable labels can appear anywhere. */
3031 break;
3032
3033 default:
3034 fatal_insn ("insn outside basic block", x);
3035 }
3036 }
3037
3038 if (JUMP_P (x)
3039 && returnjump_p (x) && ! condjump_p (x)
3040 && ! ((y = next_nonnote_nondebug_insn (x))
3041 && BARRIER_P (y)))
3042 fatal_insn ("return not followed by barrier", x);
3043
3044 if (curr_bb && x == BB_END (curr_bb))
3045 curr_bb = NULL;
3046 }
3047
3048 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3049 internal_error
3050 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3051 num_bb_notes, n_basic_blocks_for_fn (cfun));
3052
3053 return err;
3054 }
3055
3056 /* Verify the CFG and RTL consistency common for both underlying RTL and
3057 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3058
3059 Currently it does the following checks:
3060 - all checks of rtl_verify_flow_info_1
3061 - test head/end pointers
3062 - check that blocks are laid out in consecutive order
3063 - check that all insns are in the basic blocks
3064 (except the switch handling code, barriers and notes)
3065 - check that all returns are followed by barriers
3066 - check that all fallthru edges point to the adjacent blocks
3067 - verify that there is a single hot/cold partition boundary after bbro */
3068
3069 static int
3070 rtl_verify_flow_info (void)
3071 {
3072 int err = 0;
3073
3074 err |= rtl_verify_flow_info_1 ();
3075
3076 err |= rtl_verify_bb_insn_chain ();
3077
3078 err |= rtl_verify_fallthru ();
3079
3080 err |= rtl_verify_bb_layout ();
3081
3082 err |= verify_hot_cold_block_grouping ();
3083
3084 return err;
3085 }
3086 \f
3087 /* Assume that the preceding pass has possibly eliminated jump instructions
3088 or converted the unconditional jumps. Eliminate the edges from the CFG.
3089 Return true if any edges are eliminated. */
3090
3091 bool
3092 purge_dead_edges (basic_block bb)
3093 {
3094 edge e;
3095 rtx_insn *insn = BB_END (bb);
3096 rtx note;
3097 bool purged = false;
3098 bool found;
3099 edge_iterator ei;
3100
3101 if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3102 do
3103 insn = PREV_INSN (insn);
3104 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3105
3106 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3107 if (NONJUMP_INSN_P (insn)
3108 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3109 {
3110 rtx eqnote;
3111
3112 if (! may_trap_p (PATTERN (insn))
3113 || ((eqnote = find_reg_equal_equiv_note (insn))
3114 && ! may_trap_p (XEXP (eqnote, 0))))
3115 remove_note (insn, note);
3116 }
3117
3118 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3119 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3120 {
3121 bool remove = false;
3122
3123 /* There are three types of edges we need to handle correctly here: EH
3124 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3125 latter can appear when nonlocal gotos are used. */
3126 if (e->flags & EDGE_ABNORMAL_CALL)
3127 {
3128 if (!CALL_P (insn))
3129 remove = true;
3130 else if (can_nonlocal_goto (insn))
3131 ;
3132 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3133 ;
3134 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3135 ;
3136 else
3137 remove = true;
3138 }
3139 else if (e->flags & EDGE_EH)
3140 remove = !can_throw_internal (insn);
3141
3142 if (remove)
3143 {
3144 remove_edge (e);
3145 df_set_bb_dirty (bb);
3146 purged = true;
3147 }
3148 else
3149 ei_next (&ei);
3150 }
3151
3152 if (JUMP_P (insn))
3153 {
3154 rtx note;
3155 edge b,f;
3156 edge_iterator ei;
3157
3158 /* We only care about conditional jumps and simplejumps. */
3159 if (!any_condjump_p (insn)
3160 && !returnjump_p (insn)
3161 && !simplejump_p (insn))
3162 return purged;
3163
3164 /* Branch probability/prediction notes are defined only for
3165 condjumps. We've possibly turned the condjump into a simplejump. */
3166 if (simplejump_p (insn))
3167 {
3168 note = find_reg_note (insn, REG_BR_PROB, NULL);
3169 if (note)
3170 remove_note (insn, note);
3171 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3172 remove_note (insn, note);
3173 }
3174
3175 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3176 {
3177 /* Avoid letting abnormal flags leak from computed jumps turned
3178 into simplejumps. */
3179
3180 e->flags &= ~EDGE_ABNORMAL;
3181
3182 /* See if this edge is one we should keep. */
3183 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3184 /* A conditional jump can fall through into the next
3185 block, so we should keep the edge. */
3186 {
3187 ei_next (&ei);
3188 continue;
3189 }
3190 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3191 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3192 /* If the destination block is the target of the jump,
3193 keep the edge. */
3194 {
3195 ei_next (&ei);
3196 continue;
3197 }
3198 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3199 && returnjump_p (insn))
3200 /* If the destination block is the exit block, and this
3201 instruction is a return, then keep the edge. */
3202 {
3203 ei_next (&ei);
3204 continue;
3205 }
3206 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3207 /* Keep the edges that correspond to exceptions thrown by
3208 this instruction and rematerialize the EDGE_ABNORMAL
3209 flag we just cleared above. */
3210 {
3211 e->flags |= EDGE_ABNORMAL;
3212 ei_next (&ei);
3213 continue;
3214 }
3215
3216 /* We do not need this edge. */
3217 df_set_bb_dirty (bb);
3218 purged = true;
3219 remove_edge (e);
3220 }
3221
3222 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3223 return purged;
3224
3225 if (dump_file)
3226 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3227
3228 if (!optimize)
3229 return purged;
3230
3231 /* Redistribute probabilities. */
3232 if (single_succ_p (bb))
3233 {
3234 single_succ_edge (bb)->probability = profile_probability::always ();
3235 }
3236 else
3237 {
3238 note = find_reg_note (insn, REG_BR_PROB, NULL);
3239 if (!note)
3240 return purged;
3241
3242 b = BRANCH_EDGE (bb);
3243 f = FALLTHRU_EDGE (bb);
3244 b->probability = profile_probability::from_reg_br_prob_note
3245 (XINT (note, 0));
3246 f->probability = b->probability.invert ();
3247 }
3248
3249 return purged;
3250 }
3251 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3252 {
3253 /* First, there should not be any EH or ABCALL edges resulting
3254 from non-local gotos and the like. If there were, we shouldn't
3255 have created the sibcall in the first place. Second, there
3256 should of course never have been a fallthru edge. */
3257 gcc_assert (single_succ_p (bb));
3258 gcc_assert (single_succ_edge (bb)->flags
3259 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3260
3261 return false;
3262 }
3263
3264 /* If we don't see a jump insn, we don't know exactly why the block would
3265 have been broken at this point. Look for a simple, non-fallthru edge,
3266 as these are only created by conditional branches. If we find such an
3267 edge we know that there used to be a jump here and can then safely
3268 remove all non-fallthru edges. */
3269 found = false;
3270 FOR_EACH_EDGE (e, ei, bb->succs)
3271 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3272 {
3273 found = true;
3274 break;
3275 }
3276
3277 if (!found)
3278 return purged;
3279
3280 /* Remove all but the fake and fallthru edges. The fake edge may be
3281 the only successor for this block in the case of noreturn
3282 calls. */
3283 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3284 {
3285 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3286 {
3287 df_set_bb_dirty (bb);
3288 remove_edge (e);
3289 purged = true;
3290 }
3291 else
3292 ei_next (&ei);
3293 }
3294
3295 gcc_assert (single_succ_p (bb));
3296
3297 single_succ_edge (bb)->probability = profile_probability::always ();
3298
3299 if (dump_file)
3300 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3301 bb->index);
3302 return purged;
3303 }
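
/* As an example of the jump case above: if a pass simplifies a
   conditional jump at the end of a block into an unconditional
   simplejump, the block's old fallthru successor edge no longer
   corresponds to any path through the insn. purge_dead_edges
   removes it, and if a single successor remains, that edge gets
   probability always ().  */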
3304
3305 /* Search all basic blocks for potentially dead edges and purge them. Return
3306 true if some edge has been eliminated. */
3307
3308 bool
3309 purge_all_dead_edges (void)
3310 {
3311 bool purged = false;
3312 basic_block bb;
3313
3314 FOR_EACH_BB_FN (bb, cfun)
3315 {
3316 bool purged_here = purge_dead_edges (bb);
3317
3318 purged |= purged_here;
3319 }
3320
3321 return purged;
3322 }
3323
3324 /* This is used by a few passes that emit some instructions after abnormal
3325 calls, moving the basic block's end, while they in fact do want to emit
3326 them on the fallthru edge. Look for abnormal call edges, search backward
3327 for the call in the block, and insert the instructions on the edge instead.
3328
3329 Similarly, handle instructions throwing exceptions internally.
3330
3331 Return true when instructions have been found and inserted on edges. */
3332
3333 bool
3334 fixup_abnormal_edges (void)
3335 {
3336 bool inserted = false;
3337 basic_block bb;
3338
3339 FOR_EACH_BB_FN (bb, cfun)
3340 {
3341 edge e;
3342 edge_iterator ei;
3343
3344 /* Look for cases we are interested in - calls or instructions causing
3345 exceptions. */
3346 FOR_EACH_EDGE (e, ei, bb->succs)
3347 if ((e->flags & EDGE_ABNORMAL_CALL)
3348 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3349 == (EDGE_ABNORMAL | EDGE_EH)))
3350 break;
3351
3352 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3353 {
3354 rtx_insn *insn;
3355
3356 /* Get past the new insns generated. Allow notes, as the insns
3357 may already be deleted. */
3358 insn = BB_END (bb);
3359 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3360 && !can_throw_internal (insn)
3361 && insn != BB_HEAD (bb))
3362 insn = PREV_INSN (insn);
3363
3364 if (CALL_P (insn) || can_throw_internal (insn))
3365 {
3366 rtx_insn *stop, *next;
3367
3368 e = find_fallthru_edge (bb->succs);
3369
3370 stop = NEXT_INSN (BB_END (bb));
3371 BB_END (bb) = insn;
3372
3373 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3374 {
3375 next = NEXT_INSN (insn);
3376 if (INSN_P (insn))
3377 {
3378 delete_insn (insn);
3379
3380 /* Sometimes there's still the return value USE.
3381 If it's placed after a trapping call (i.e. that
3382 call is the last insn anyway), we have no fallthru
3383 edge. Simply delete this use and don't try to insert
3384 on the non-existent edge.
3385 Similarly, sometimes a call that can throw is
3386 followed in the source with __builtin_unreachable (),
3387 meaning that there is UB if the call returns rather
3388 than throws. If there weren't any instructions
3389 following such calls before, supposedly even the ones
3390 we've deleted aren't significant and can be
3391 removed. */
3392 if (e)
3393 {
3394 /* We're not deleting it, we're moving it. */
3395 insn->set_undeleted ();
3396 SET_PREV_INSN (insn) = NULL_RTX;
3397 SET_NEXT_INSN (insn) = NULL_RTX;
3398
3399 insert_insn_on_edge (insn, e);
3400 inserted = true;
3401 }
3402 }
3403 else if (!BARRIER_P (insn))
3404 set_block_for_insn (insn, NULL);
3405 }
3406 }
3407
3408 /* It may be that we don't find any trapping insn. In this
3409 case we discovered quite late that the insn that had been
3410 marked as can_throw_internal in fact couldn't trap at all.
3411 So we should in fact delete the EH edges out of the block. */
3412 else
3413 purge_dead_edges (bb);
3414 }
3415 }
3416
3417 return inserted;
3418 }
3419 \f
3420 /* Cut the insns from FIRST to LAST out of the insns stream. */
3421
3422 rtx_insn *
3423 unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3424 {
3425 rtx_insn *prevfirst = PREV_INSN (first);
3426 rtx_insn *nextlast = NEXT_INSN (last);
3427
3428 SET_PREV_INSN (first) = NULL;
3429 SET_NEXT_INSN (last) = NULL;
3430 if (prevfirst)
3431 SET_NEXT_INSN (prevfirst) = nextlast;
3432 if (nextlast)
3433 SET_PREV_INSN (nextlast) = prevfirst;
3434 else
3435 set_last_insn (prevfirst);
3436 if (!prevfirst)
3437 set_first_insn (nextlast);
3438 return first;
3439 }
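
/* A sketch of the splice performed above:

	p <-> FIRST <-> ... <-> LAST <-> n

   becomes the two chains

	p <-> n	    and	    FIRST <-> ... <-> LAST

   with set_first_insn/set_last_insn updated when FIRST or LAST sat
   at an end of the function's insn stream.  */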
3440 \f
3441 /* Skip over inter-block insns occurring after BB which are typically
3442 associated with BB (e.g., barriers). If there are any such insns,
3443 we return the last one. Otherwise, we return the end of BB. */
3444
3445 static rtx_insn *
3446 skip_insns_after_block (basic_block bb)
3447 {
3448 rtx_insn *insn, *last_insn, *next_head, *prev;
3449
3450 next_head = NULL;
3451 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3452 next_head = BB_HEAD (bb->next_bb);
3453
3454 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3455 {
3456 if (insn == next_head)
3457 break;
3458
3459 switch (GET_CODE (insn))
3460 {
3461 case BARRIER:
3462 last_insn = insn;
3463 continue;
3464
3465 case NOTE:
3466 switch (NOTE_KIND (insn))
3467 {
3468 case NOTE_INSN_BLOCK_END:
3469 gcc_unreachable ();
3470 continue;
3471 default:
3472 continue;
3473 break;
3474 }
3475 break;
3476
3477 case CODE_LABEL:
3478 if (NEXT_INSN (insn)
3479 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3480 {
3481 insn = NEXT_INSN (insn);
3482 last_insn = insn;
3483 continue;
3484 }
3485 break;
3486
3487 default:
3488 break;
3489 }
3490
3491 break;
3492 }
3493
3494 /* It is possible to hit a contradictory sequence. For instance:
3495
3496 jump_insn
3497 NOTE_INSN_BLOCK_BEG
3498 barrier
3499
3500 where the barrier belongs to the jump_insn, but the note does not. This
3501 can be created by removing the basic block originally following
3502 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3503
3504 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3505 {
3506 prev = PREV_INSN (insn);
3507 if (NOTE_P (insn))
3508 switch (NOTE_KIND (insn))
3509 {
3510 case NOTE_INSN_BLOCK_END:
3511 gcc_unreachable ();
3512 break;
3513 case NOTE_INSN_DELETED:
3514 case NOTE_INSN_DELETED_LABEL:
3515 case NOTE_INSN_DELETED_DEBUG_LABEL:
3516 continue;
3517 default:
3518 reorder_insns (insn, insn, last_insn);
3519 }
3520 }
3521
3522 return last_insn;
3523 }
3524
3525 /* Locate or create a label for a given basic block. */
3526
3527 static rtx_insn *
3528 label_for_bb (basic_block bb)
3529 {
3530 rtx_insn *label = BB_HEAD (bb);
3531
3532 if (!LABEL_P (label))
3533 {
3534 if (dump_file)
3535 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3536
3537 label = block_label (bb);
3538 }
3539
3540 return label;
3541 }
3542
3543 /* Locate the effective beginning and end of the insn chain for each
3544 block, as defined by skip_insns_after_block above. */
3545
3546 static void
3547 record_effective_endpoints (void)
3548 {
3549 rtx_insn *next_insn;
3550 basic_block bb;
3551 rtx_insn *insn;
3552
3553 for (insn = get_insns ();
3554 insn
3555 && NOTE_P (insn)
3556 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3557 insn = NEXT_INSN (insn))
3558 continue;
3559 /* No basic blocks at all? */
3560 gcc_assert (insn);
3561
3562 if (PREV_INSN (insn))
3563 cfg_layout_function_header =
3564 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3565 else
3566 cfg_layout_function_header = NULL;
3567
3568 next_insn = get_insns ();
3569 FOR_EACH_BB_FN (bb, cfun)
3570 {
3571 rtx_insn *end;
3572
3573 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3574 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3575 PREV_INSN (BB_HEAD (bb)));
3576 end = skip_insns_after_block (bb);
3577 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3578 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3579 next_insn = NEXT_INSN (BB_END (bb));
3580 }
3581
3582 cfg_layout_function_footer = next_insn;
3583 if (cfg_layout_function_footer)
3584 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3585 }
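
/* A sketch of the result of the unlinking above, for one block in
   cfglayout mode:

	insns before BB_HEAD (bb)	->  BB_HEADER (bb)
	BB_HEAD (bb) .. BB_END (bb)	    stay in the main chain
	barriers etc. after BB_END	->  BB_FOOTER (bb)

   with cfg_layout_function_header and cfg_layout_function_footer
   holding whatever precedes the first block and follows the last.  */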
3586 \f
3587 namespace {
3588
3589 const pass_data pass_data_into_cfg_layout_mode =
3590 {
3591 RTL_PASS, /* type */
3592 "into_cfglayout", /* name */
3593 OPTGROUP_NONE, /* optinfo_flags */
3594 TV_CFG, /* tv_id */
3595 0, /* properties_required */
3596 PROP_cfglayout, /* properties_provided */
3597 0, /* properties_destroyed */
3598 0, /* todo_flags_start */
3599 0, /* todo_flags_finish */
3600 };
3601
3602 class pass_into_cfg_layout_mode : public rtl_opt_pass
3603 {
3604 public:
3605 pass_into_cfg_layout_mode (gcc::context *ctxt)
3606 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3607 {}
3608
3609 /* opt_pass methods: */
3610 virtual unsigned int execute (function *)
3611 {
3612 cfg_layout_initialize (0);
3613 return 0;
3614 }
3615
3616 }; // class pass_into_cfg_layout_mode
3617
3618 } // anon namespace
3619
3620 rtl_opt_pass *
3621 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3622 {
3623 return new pass_into_cfg_layout_mode (ctxt);
3624 }
3625
3626 namespace {
3627
3628 const pass_data pass_data_outof_cfg_layout_mode =
3629 {
3630 RTL_PASS, /* type */
3631 "outof_cfglayout", /* name */
3632 OPTGROUP_NONE, /* optinfo_flags */
3633 TV_CFG, /* tv_id */
3634 0, /* properties_required */
3635 0, /* properties_provided */
3636 PROP_cfglayout, /* properties_destroyed */
3637 0, /* todo_flags_start */
3638 0, /* todo_flags_finish */
3639 };
3640
3641 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3642 {
3643 public:
3644 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3645 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3646 {}
3647
3648 /* opt_pass methods: */
3649 virtual unsigned int execute (function *);
3650
3651 }; // class pass_outof_cfg_layout_mode
3652
3653 unsigned int
3654 pass_outof_cfg_layout_mode::execute (function *fun)
3655 {
3656 basic_block bb;
3657
3658 FOR_EACH_BB_FN (bb, fun)
3659 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3660 bb->aux = bb->next_bb;
3661
3662 cfg_layout_finalize ();
3663
3664 return 0;
3665 }
3666
3667 } // anon namespace
3668
3669 rtl_opt_pass *
3670 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3671 {
3672 return new pass_outof_cfg_layout_mode (ctxt);
3673 }
3674 \f
3675
3676 /* Link the basic blocks in the correct order, compacting the basic
3677 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3678 function also clears the basic block header and footer fields.
3679
3680 This function is usually called after a pass (e.g. tracer) finishes
3681 some transformations while in cfglayout mode. The required sequence
3682 of the basic blocks is in a linked list along the bb->aux field.
3683 This function re-links the basic block prev_bb and next_bb pointers
3684 accordingly, and it compacts and renumbers the blocks.
3685
3686 FIXME: This currently works only for RTL, but the only RTL-specific
3687 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3688 to GIMPLE a long time ago, but it doesn't relink the basic block
3689 chain. It could do that (to give better initial RTL) if this function
3690 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3691
3692 void
3693 relink_block_chain (bool stay_in_cfglayout_mode)
3694 {
3695 basic_block bb, prev_bb;
3696 int index;
3697
3698 /* Maybe dump the re-ordered sequence. */
3699 if (dump_file)
3700 {
3701 fprintf (dump_file, "Reordered sequence:\n");
3702 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3703 NUM_FIXED_BLOCKS;
3704 bb;
3705 bb = (basic_block) bb->aux, index++)
3706 {
3707 fprintf (dump_file, " %i ", index);
3708 if (get_bb_original (bb))
3709 fprintf (dump_file, "duplicate of %i\n",
3710 get_bb_original (bb)->index);
3711 else if (forwarder_block_p (bb)
3712 && !LABEL_P (BB_HEAD (bb)))
3713 fprintf (dump_file, "compensation\n");
3714 else
3715 fprintf (dump_file, "bb %i\n", bb->index);
3716 }
3717 }
3718
3719 /* Now reorder the blocks. */
3720 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3721 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3722 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3723 {
3724 bb->prev_bb = prev_bb;
3725 prev_bb->next_bb = bb;
3726 }
3727 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3728 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3729
3730 /* Then, clean up the aux fields. */
3731 FOR_ALL_BB_FN (bb, cfun)
3732 {
3733 bb->aux = NULL;
3734 if (!stay_in_cfglayout_mode)
3735 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3736 }
3737
3738 /* Maybe reset the original copy tables; they are not valid anymore
3739 when we renumber the basic blocks in compact_blocks. If we are
3740 going out of cfglayout mode, don't re-allocate the tables. */
3741 if (original_copy_tables_initialized_p ())
3742 free_original_copy_tables ();
3743 if (stay_in_cfglayout_mode)
3744 initialize_original_copy_tables ();
3745
3746 /* Finally, put basic_block_info in the new order. */
3747 compact_blocks ();
3748 }
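
/* A sketch of the expected calling convention: a pass links the
   desired block order through bb->aux and then calls the function
   above, e.g.

	FOR_EACH_BB_FN (bb, cfun)
	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    bb->aux = bb->next_bb;
	relink_block_chain (true);

   (the identity order here is only for illustration; compare
   pass_outof_cfg_layout_mode::execute above, which sets up bb->aux
   the same way before leaving cfglayout mode).  */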
3749 \f
3750
3751 /* Given a reorder chain, rearrange the code to match. */
3752
3753 static void
3754 fixup_reorder_chain (void)
3755 {
3756 basic_block bb;
3757 rtx_insn *insn = NULL;
3758
3759 if (cfg_layout_function_header)
3760 {
3761 set_first_insn (cfg_layout_function_header);
3762 insn = cfg_layout_function_header;
3763 while (NEXT_INSN (insn))
3764 insn = NEXT_INSN (insn);
3765 }
3766
3767 /* First do the bulk reordering -- rechain the blocks without regard to
3768 the needed changes to jumps and labels. */
3769
3770 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3771 bb->aux)
3772 {
3773 if (BB_HEADER (bb))
3774 {
3775 if (insn)
3776 SET_NEXT_INSN (insn) = BB_HEADER (bb);
3777 else
3778 set_first_insn (BB_HEADER (bb));
3779 SET_PREV_INSN (BB_HEADER (bb)) = insn;
3780 insn = BB_HEADER (bb);
3781 while (NEXT_INSN (insn))
3782 insn = NEXT_INSN (insn);
3783 }
3784 if (insn)
3785 SET_NEXT_INSN (insn) = BB_HEAD (bb);
3786 else
3787 set_first_insn (BB_HEAD (bb));
3788 SET_PREV_INSN (BB_HEAD (bb)) = insn;
3789 insn = BB_END (bb);
3790 if (BB_FOOTER (bb))
3791 {
3792 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3793 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3794 while (NEXT_INSN (insn))
3795 insn = NEXT_INSN (insn);
3796 }
3797 }
3798
3799 SET_NEXT_INSN (insn) = cfg_layout_function_footer;
3800 if (cfg_layout_function_footer)
3801 SET_PREV_INSN (cfg_layout_function_footer) = insn;
3802
3803 while (NEXT_INSN (insn))
3804 insn = NEXT_INSN (insn);
3805
3806 set_last_insn (insn);
3807 if (flag_checking)
3808 verify_insn_chain ();
3809
3810 /* Now add jumps and labels as needed to match the blocks' new
3811 outgoing edges. */
3812
3813 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ; bb = (basic_block)
3814 bb->aux)
3815 {
3816 edge e_fall, e_taken, e;
3817 rtx_insn *bb_end_insn;
3818 rtx ret_label = NULL_RTX;
3819 basic_block nb;
3820 edge_iterator ei;
3821
3822 if (EDGE_COUNT (bb->succs) == 0)
3823 continue;
3824
3825 /* Find the old fallthru edge, and another non-EH edge for
3826 a taken jump. */
3827 e_taken = e_fall = NULL;
3828
3829 FOR_EACH_EDGE (e, ei, bb->succs)
3830 if (e->flags & EDGE_FALLTHRU)
3831 e_fall = e;
3832 else if (! (e->flags & EDGE_EH))
3833 e_taken = e;
3834
3835 bb_end_insn = BB_END (bb);
3836 if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
3837 {
3838 ret_label = JUMP_LABEL (bb_end_jump);
3839 if (any_condjump_p (bb_end_jump))
3840 {
	      /* This might happen if the conditional jump has side
		 effects and could therefore not be optimized away.
		 Make the basic block end with a barrier in order
		 to prevent rtl_verify_flow_info from complaining.  */
3845 if (!e_fall)
3846 {
3847 gcc_assert (!onlyjump_p (bb_end_jump)
3848 || returnjump_p (bb_end_jump)
3849 || (e_taken->flags & EDGE_CROSSING));
3850 emit_barrier_after (bb_end_jump);
3851 continue;
3852 }
3853
3854 /* If the old fallthru is still next, nothing to do. */
3855 if (bb->aux == e_fall->dest
3856 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3857 continue;
3858
	    /* The degenerate case of a conditional jump jumping to the
	       next instruction can happen for jumps with side effects.
	       We need to construct a forwarder block, and that will be
	       done just fine by force_nonfallthru below.  */
3863 if (!e_taken)
3864 ;
3865
3866 /* There is another special case: if *neither* block is next,
3867 such as happens at the very end of a function, then we'll
3868 need to add a new unconditional jump. Choose the taken
3869 edge based on known or assumed probability. */
3870 else if (bb->aux != e_taken->dest)
3871 {
3872 rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
3873
3874 if (note
3875 && profile_probability::from_reg_br_prob_note
3876 (XINT (note, 0)) < profile_probability::even ()
3877 && invert_jump (bb_end_jump,
3878 (e_fall->dest
3879 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3880 ? NULL_RTX
3881 : label_for_bb (e_fall->dest)), 0))
3882 {
3883 e_fall->flags &= ~EDGE_FALLTHRU;
3884 gcc_checking_assert (could_fall_through
3885 (e_taken->src, e_taken->dest));
3886 e_taken->flags |= EDGE_FALLTHRU;
3887 update_br_prob_note (bb);
3888 e = e_fall, e_fall = e_taken, e_taken = e;
3889 }
3890 }
3891
3892 /* If the "jumping" edge is a crossing edge, and the fall
3893 through edge is non-crossing, leave things as they are. */
3894 else if ((e_taken->flags & EDGE_CROSSING)
3895 && !(e_fall->flags & EDGE_CROSSING))
3896 continue;
3897
	    /* Otherwise we can try to invert the jump.  This will
	       basically never fail, but keep up the pretense anyway.  */
3900 else if (invert_jump (bb_end_jump,
3901 (e_fall->dest
3902 == EXIT_BLOCK_PTR_FOR_FN (cfun)
3903 ? NULL_RTX
3904 : label_for_bb (e_fall->dest)), 0))
3905 {
3906 e_fall->flags &= ~EDGE_FALLTHRU;
3907 gcc_checking_assert (could_fall_through
3908 (e_taken->src, e_taken->dest));
3909 e_taken->flags |= EDGE_FALLTHRU;
3910 update_br_prob_note (bb);
3911 if (LABEL_NUSES (ret_label) == 0
3912 && single_pred_p (e_taken->dest))
3913 delete_insn (as_a<rtx_insn *> (ret_label));
3914 continue;
3915 }
3916 }
3917 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3918 {
3919 /* If the old fallthru is still next or if
3920 asm goto doesn't have a fallthru (e.g. when followed by
3921 __builtin_unreachable ()), nothing to do. */
3922 if (! e_fall
3923 || bb->aux == e_fall->dest
3924 || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3925 continue;
3926
3927 /* Otherwise we'll have to use the fallthru fixup below. */
3928 }
3929 else
3930 {
3931 /* Otherwise we have some return, switch or computed
3932 jump. In the 99% case, there should not have been a
3933 fallthru edge. */
3934 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3935 continue;
3936 }
3937 }
3938 else
3939 {
3940 /* No fallthru implies a noreturn function with EH edges, or
3941 something similarly bizarre. In any case, we don't need to
3942 do anything. */
3943 if (! e_fall)
3944 continue;
3945
3946 /* If the fallthru block is still next, nothing to do. */
3947 if (bb->aux == e_fall->dest)
3948 continue;
3949
3950 /* A fallthru to exit block. */
3951 if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3952 continue;
3953 }
3954
3955 /* We got here if we need to add a new jump insn.
3956 Note force_nonfallthru can delete E_FALL and thus we have to
3957 save E_FALL->src prior to the call to force_nonfallthru. */
3958 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3959 if (nb)
3960 {
3961 nb->aux = bb->aux;
3962 bb->aux = nb;
3963 /* Don't process this new block. */
3964 bb = nb;
3965 }
3966 }
3967
3968 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3969
3970 /* Annoying special case - jump around dead jumptables left in the code. */
3971 FOR_EACH_BB_FN (bb, cfun)
3972 {
3973 edge e = find_fallthru_edge (bb->succs);
3974
3975 if (e && !can_fallthru (e->src, e->dest))
3976 force_nonfallthru (e);
3977 }
3978
3979 /* Ensure goto_locus from edges has some instructions with that locus in RTL
3980 when not optimizing. */
3981 if (!optimize && !DECL_IGNORED_P (current_function_decl))
3982 FOR_EACH_BB_FN (bb, cfun)
3983 {
3984 edge e;
3985 edge_iterator ei;
3986
3987 FOR_EACH_EDGE (e, ei, bb->succs)
3988 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3989 && !(e->flags & EDGE_ABNORMAL))
3990 {
3991 edge e2;
3992 edge_iterator ei2;
3993 basic_block dest, nb;
3994 rtx_insn *end;
3995
3996 insn = BB_END (e->src);
3997 end = PREV_INSN (BB_HEAD (e->src));
3998 while (insn != end
3999 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
4000 insn = PREV_INSN (insn);
4001 if (insn != end
4002 && INSN_LOCATION (insn) == e->goto_locus)
4003 continue;
4004 if (simplejump_p (BB_END (e->src))
4005 && !INSN_HAS_LOCATION (BB_END (e->src)))
4006 {
4007 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
4008 continue;
4009 }
4010 dest = e->dest;
4011 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4012 {
4013 /* Non-fallthru edges to the exit block cannot be split. */
4014 if (!(e->flags & EDGE_FALLTHRU))
4015 continue;
4016 }
4017 else
4018 {
4019 insn = BB_HEAD (dest);
4020 end = NEXT_INSN (BB_END (dest));
4021 while (insn != end && !NONDEBUG_INSN_P (insn))
4022 insn = NEXT_INSN (insn);
4023 if (insn != end && INSN_HAS_LOCATION (insn)
4024 && INSN_LOCATION (insn) == e->goto_locus)
4025 continue;
4026 }
4027 nb = split_edge (e);
4028 if (!INSN_P (BB_END (nb)))
4029 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
4030 nb);
4031 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
4032
	    /* If there are other incoming edges to the destination block
	       with the same goto locus, redirect them to the new block as
	       well; this can prevent other such blocks from being created
	       in subsequent iterations of the loop.  */
4037 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
4038 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
4039 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
4040 && e->goto_locus == e2->goto_locus)
4041 redirect_edge_and_branch (e2, nb);
4042 else
4043 ei_next (&ei2);
4044 }
4045 }
4046 }
4047 \f
4048 /* Perform sanity checks on the insn chain.
4049 1. Check that next/prev pointers are consistent in both the forward and
4050 reverse direction.
4051 2. Count insns in chain, going both directions, and check if equal.
4052 3. Check that get_last_insn () returns the actual end of chain. */
4053
4054 DEBUG_FUNCTION void
4055 verify_insn_chain (void)
4056 {
4057 rtx_insn *x, *prevx, *nextx;
4058 int insn_cnt1, insn_cnt2;
4059
4060 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4061 x != 0;
4062 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4063 gcc_assert (PREV_INSN (x) == prevx);
4064
4065 gcc_assert (prevx == get_last_insn ());
4066
4067 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4068 x != 0;
4069 nextx = x, insn_cnt2++, x = PREV_INSN (x))
4070 gcc_assert (NEXT_INSN (x) == nextx);
4071
4072 gcc_assert (insn_cnt1 == insn_cnt2);
4073 }
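
/* As a usage sketch (mirroring fixup_reorder_chain above), a pass that
   has just spliced insns by hand can cheaply assert the invariant that
   PREV_INSN (NEXT_INSN (x)) == x for every insn X in the chain:

     if (flag_checking)
       verify_insn_chain ();  */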
4074 \f
4075 /* If we have assembler epilogues, the block falling through to exit must
4076 be the last one in the reordered chain when we reach final. Ensure
4077 that this condition is met. */
4078 static void
4079 fixup_fallthru_exit_predecessor (void)
4080 {
4081 edge e;
4082 basic_block bb = NULL;
4083
4084 /* This transformation is not valid before reload, because we might
4085 separate a call from the instruction that copies the return
4086 value. */
4087 gcc_assert (reload_completed);
4088
4089 e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4090 if (e)
4091 bb = e->src;
4092
4093 if (bb && bb->aux)
4094 {
4095 basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4096
4097 /* If the very first block is the one with the fall-through exit
4098 edge, we have to split that block. */
4099 if (c == bb)
4100 {
4101 bb = split_block_after_labels (bb)->dest;
4102 bb->aux = c->aux;
4103 c->aux = bb;
4104 BB_FOOTER (bb) = BB_FOOTER (c);
4105 BB_FOOTER (c) = NULL;
4106 }
4107
4108 while (c->aux != bb)
4109 c = (basic_block) c->aux;
4110
4111 c->aux = bb->aux;
4112 while (c->aux)
4113 c = (basic_block) c->aux;
4114
4115 c->aux = bb;
4116 bb->aux = NULL;
4117 }
4118 }
4119
/* If the exit block has more than one fallthru predecessor, force it to
   have just one.  */
4122
4123 static void
4124 force_one_exit_fallthru (void)
4125 {
4126 edge e, predecessor = NULL;
4127 bool more = false;
4128 edge_iterator ei;
4129 basic_block forwarder, bb;
4130
4131 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4132 if (e->flags & EDGE_FALLTHRU)
4133 {
4134 if (predecessor == NULL)
4135 predecessor = e;
4136 else
4137 {
4138 more = true;
4139 break;
4140 }
4141 }
4142
4143 if (!more)
4144 return;
4145
4146 /* Exit has several fallthru predecessors. Create a forwarder block for
4147 them. */
4148 forwarder = split_edge (predecessor);
4149 for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4150 (e = ei_safe_edge (ei)); )
4151 {
4152 if (e->src == forwarder
4153 || !(e->flags & EDGE_FALLTHRU))
4154 ei_next (&ei);
4155 else
4156 redirect_edge_and_branch_force (e, forwarder);
4157 }
4158
4159 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4160 exit block. */
4161 FOR_EACH_BB_FN (bb, cfun)
4162 {
4163 if (bb->aux == NULL && bb != forwarder)
4164 {
4165 bb->aux = forwarder;
4166 break;
4167 }
4168 }
4169 }
4170 \f
/* Return true if it is possible to duplicate the basic block BB.  */
4172
4173 static bool
4174 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4175 {
  /* Do not attempt to duplicate tablejumps, as we need to unshare
     the dispatch table.  This is difficult to do, as the instructions
     computing the jump destination may be hoisted outside the basic
     block.  */
4179 if (tablejump_p (BB_END (bb), NULL, NULL))
4180 return false;
4181
4182 /* Do not duplicate blocks containing insns that can't be copied. */
4183 if (targetm.cannot_copy_insn_p)
4184 {
4185 rtx_insn *insn = BB_HEAD (bb);
4186 while (1)
4187 {
4188 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4189 return false;
4190 if (insn == BB_END (bb))
4191 break;
4192 insn = NEXT_INSN (insn);
4193 }
4194 }
4195
4196 return true;
4197 }
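
/* Callers normally reach the predicate above through the cfghooks.h
   wrappers rather than directly.  An illustrative sketch, assuming the
   wrapper signatures from cfghooks.h (E and AFTER are hypothetical):

     if (can_duplicate_block_p (bb))
       new_bb = duplicate_block (bb, e, after);  */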
4198
4199 rtx_insn *
4200 duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
4201 class loop *loop, copy_bb_data *id)
4202 {
4203 rtx_insn *insn, *next, *copy;
4204 rtx_note *last;
4205
  /* Avoid updating the boundaries of the previous basic block.  The
     note will be removed from the insn stream in fixup.  */
4208 last = emit_note (NOTE_INSN_DELETED);
4209
  /* Create a copy at the end of the insn chain.  The chain will
     be reordered later.  */
4212 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4213 {
4214 switch (GET_CODE (insn))
4215 {
4216 case DEBUG_INSN:
4217 /* Don't duplicate label debug insns. */
4218 if (DEBUG_BIND_INSN_P (insn)
4219 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4220 break;
4221 /* FALLTHRU */
4222 case INSN:
4223 case CALL_INSN:
4224 case JUMP_INSN:
4225 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4226 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4227 && ANY_RETURN_P (JUMP_LABEL (insn)))
4228 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4229 maybe_copy_prologue_epilogue_insn (insn, copy);
	  /* If requested, remap dependence info of cliques brought in
	     via inlining.  */
4232 if (id)
4233 {
4234 subrtx_iterator::array_type array;
4235 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
4236 if (MEM_P (*iter) && MEM_EXPR (*iter))
4237 {
4238 tree op = MEM_EXPR (*iter);
4239 if (TREE_CODE (op) == WITH_SIZE_EXPR)
4240 op = TREE_OPERAND (op, 0);
4241 while (handled_component_p (op))
4242 op = TREE_OPERAND (op, 0);
4243 if ((TREE_CODE (op) == MEM_REF
4244 || TREE_CODE (op) == TARGET_MEM_REF)
4245 && MR_DEPENDENCE_CLIQUE (op) > 1
4246 && (!loop
4247 || (MR_DEPENDENCE_CLIQUE (op)
4248 != loop->owned_clique)))
4249 {
4250 if (!id->dependence_map)
4251 id->dependence_map = new hash_map<dependence_hash,
4252 unsigned short>;
4253 bool existed;
4254 unsigned short &newc = id->dependence_map->get_or_insert
4255 (MR_DEPENDENCE_CLIQUE (op), &existed);
4256 if (!existed)
4257 {
4258 gcc_assert
4259 (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
4260 newc = ++cfun->last_clique;
4261 }
4262 /* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
4263 since MEM_EXPR is shared so make a copy and
4264 walk to the subtree again. */
4265 tree new_expr = unshare_expr (MEM_EXPR (*iter));
4266 if (TREE_CODE (new_expr) == WITH_SIZE_EXPR)
4267 new_expr = TREE_OPERAND (new_expr, 0);
4268 while (handled_component_p (new_expr))
4269 new_expr = TREE_OPERAND (new_expr, 0);
4270 MR_DEPENDENCE_CLIQUE (new_expr) = newc;
4271 set_mem_expr (const_cast <rtx> (*iter), new_expr);
4272 }
4273 }
4274 }
4275 break;
4276
4277 case JUMP_TABLE_DATA:
	  /* Avoid copying dispatch tables.  We never duplicate
	     tablejumps, so this can only happen if the table was
	     moved far from the original jump.
	     Avoid copying the following barrier as well, if any
	     (and any debug insns in between).  */
4283 for (next = NEXT_INSN (insn);
4284 next != NEXT_INSN (to);
4285 next = NEXT_INSN (next))
4286 if (!DEBUG_INSN_P (next))
4287 break;
4288 if (next != NEXT_INSN (to) && BARRIER_P (next))
4289 insn = next;
4290 break;
4291
4292 case CODE_LABEL:
4293 break;
4294
4295 case BARRIER:
4296 emit_barrier ();
4297 break;
4298
4299 case NOTE:
4300 switch (NOTE_KIND (insn))
4301 {
	    /* In case the prologue is empty and the function contains a
	       label in its first BB, we may want to copy the block.  */
4304 case NOTE_INSN_PROLOGUE_END:
4305
4306 case NOTE_INSN_DELETED:
4307 case NOTE_INSN_DELETED_LABEL:
4308 case NOTE_INSN_DELETED_DEBUG_LABEL:
4309 /* No problem to strip these. */
4310 case NOTE_INSN_FUNCTION_BEG:
	    /* There is always just a single entry to the function.  */
4312 case NOTE_INSN_BASIC_BLOCK:
4313 /* We should only switch text sections once. */
4314 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4315 break;
4316
4317 case NOTE_INSN_EPILOGUE_BEG:
4318 case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4319 emit_note_copy (as_a <rtx_note *> (insn));
4320 break;
4321
4322 default:
4323 /* All other notes should have already been eliminated. */
4324 gcc_unreachable ();
4325 }
4326 break;
4327 default:
4328 gcc_unreachable ();
4329 }
4330 }
4331 insn = NEXT_INSN (last);
4332 delete_insn (last);
4333 return insn;
4334 }
4335
4336 /* Create a duplicate of the basic block BB. */
4337
4338 static basic_block
4339 cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *id)
4340 {
4341 rtx_insn *insn;
4342 basic_block new_bb;
4343
4344 class loop *loop = (id && current_loops) ? bb->loop_father : NULL;
4345
4346 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb), loop, id);
4347 new_bb = create_basic_block (insn,
4348 insn ? get_last_insn () : NULL,
4349 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4350
4351 BB_COPY_PARTITION (new_bb, bb);
4352 if (BB_HEADER (bb))
4353 {
4354 insn = BB_HEADER (bb);
4355 while (NEXT_INSN (insn))
4356 insn = NEXT_INSN (insn);
4357 insn = duplicate_insn_chain (BB_HEADER (bb), insn, loop, id);
4358 if (insn)
4359 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4360 }
4361
4362 if (BB_FOOTER (bb))
4363 {
4364 insn = BB_FOOTER (bb);
4365 while (NEXT_INSN (insn))
4366 insn = NEXT_INSN (insn);
4367 insn = duplicate_insn_chain (BB_FOOTER (bb), insn, loop, id);
4368 if (insn)
4369 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4370 }
4371
4372 return new_bb;
4373 }
4374
4375 \f
/* Main entry point to this module - initialize the data structures for
   CFG layout changes.  It keeps LOOPS up-to-date if not null.
4378
4379 FLAGS is a set of additional flags to pass to cleanup_cfg(). */
4380
4381 void
4382 cfg_layout_initialize (int flags)
4383 {
4384 rtx_insn_list *x;
4385 basic_block bb;
4386
  /* Once bb partitioning is complete, cfg layout mode should not be
     re-entered.  Entering cfg layout mode may require fixups.  As an
     example, edge forwarding performed when optimizing the cfg layout
     may require moving a block from the hot to the cold section; this
     would create an illegal partitioning unless some manual fixup was
     performed.  */
4393 gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4394
4395 initialize_original_copy_tables ();
4396
4397 cfg_layout_rtl_register_cfg_hooks ();
4398
4399 record_effective_endpoints ();
4400
  /* Make sure that the targets of non-local gotos are marked.  */
4402 for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4403 {
4404 bb = BLOCK_FOR_INSN (x->insn ());
4405 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4406 }
4407
4408 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4409 }
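
/* A sketch of the typical lifecycle of cfglayout mode (illustrative
   only): a pass enters the mode, manipulates the CFG while fallthru
   edges need not be physically adjacent, and then leaves it:

     cfg_layout_initialize (0);
     ... transform the CFG here ...
     cfg_layout_finalize ();  */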
4410
4411 /* Splits superblocks. */
4412 void
4413 break_superblocks (void)
4414 {
4415 bool need = false;
4416 basic_block bb;
4417
4418 auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4419 bitmap_clear (superblocks);
4420
4421 FOR_EACH_BB_FN (bb, cfun)
4422 if (bb->flags & BB_SUPERBLOCK)
4423 {
4424 bb->flags &= ~BB_SUPERBLOCK;
4425 bitmap_set_bit (superblocks, bb->index);
4426 need = true;
4427 }
4428
4429 if (need)
4430 {
4431 rebuild_jump_labels (get_insns ());
4432 find_many_sub_basic_blocks (superblocks);
4433 }
4434 }
4435
/* Finalize the changes: reorder the insn list according to the sequence
   specified by the aux pointers, insert compensation code, and rebuild
   the scope forest.  */
4438
4439 void
4440 cfg_layout_finalize (void)
4441 {
4442 free_dominance_info (CDI_DOMINATORS);
4443 force_one_exit_fallthru ();
4444 rtl_register_cfg_hooks ();
4445 if (reload_completed && !targetm.have_epilogue ())
4446 fixup_fallthru_exit_predecessor ();
4447 fixup_reorder_chain ();
4448
4449 rebuild_jump_labels (get_insns ());
4450 delete_dead_jumptables ();
4451
4452 if (flag_checking)
4453 verify_insn_chain ();
4454 checking_verify_flow_info ();
4455 }
4456
4457
4458 /* Same as split_block but update cfg_layout structures. */
4459
4460 static basic_block
4461 cfg_layout_split_block (basic_block bb, void *insnp)
4462 {
4463 rtx insn = (rtx) insnp;
4464 basic_block new_bb = rtl_split_block (bb, insn);
4465
4466 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4467 BB_FOOTER (bb) = NULL;
4468
4469 return new_bb;
4470 }
4471
/* Redirect edge E to DEST.  */
4473 static edge
4474 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4475 {
4476 basic_block src = e->src;
4477 edge ret;
4478
4479 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4480 return NULL;
4481
4482 if (e->dest == dest)
4483 return e;
4484
4485 if (e->flags & EDGE_CROSSING
4486 && BB_PARTITION (e->src) == BB_PARTITION (dest)
4487 && simplejump_p (BB_END (src)))
4488 {
4489 if (dump_file)
4490 fprintf (dump_file,
4491 "Removing crossing jump while redirecting edge form %i to %i\n",
4492 e->src->index, dest->index);
4493 delete_insn (BB_END (src));
4494 remove_barriers_from_footer (src);
4495 e->flags |= EDGE_FALLTHRU;
4496 }
4497
4498 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4499 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4500 {
4501 df_set_bb_dirty (src);
4502 return ret;
4503 }
4504
4505 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4506 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4507 {
4508 if (dump_file)
4509 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4510 e->src->index, dest->index);
4511
4512 df_set_bb_dirty (e->src);
4513 redirect_edge_succ (e, dest);
4514 return e;
4515 }
4516
  /* redirect_edge_and_branch may decide to turn the branch into a
     fallthru edge in case the basic blocks appear to be in sequence.
     Avoid this transformation.  */
4520
4521 if (e->flags & EDGE_FALLTHRU)
4522 {
4523 /* Redirect any branch edges unified with the fallthru one. */
4524 if (JUMP_P (BB_END (src))
4525 && label_is_jump_target_p (BB_HEAD (e->dest),
4526 BB_END (src)))
4527 {
4528 edge redirected;
4529
4530 if (dump_file)
4531 fprintf (dump_file, "Fallthru edge unified with branch "
4532 "%i->%i redirected to %i\n",
4533 e->src->index, e->dest->index, dest->index);
4534 e->flags &= ~EDGE_FALLTHRU;
4535 redirected = redirect_branch_edge (e, dest);
4536 gcc_assert (redirected);
4537 redirected->flags |= EDGE_FALLTHRU;
4538 df_set_bb_dirty (redirected->src);
4539 return redirected;
4540 }
      /* In case we are redirecting the fallthru edge to the branch edge
	 of a conditional jump, remove it.  */
4543 if (EDGE_COUNT (src->succs) == 2)
4544 {
4545 /* Find the edge that is different from E. */
4546 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4547
4548 if (s->dest == dest
4549 && any_condjump_p (BB_END (src))
4550 && onlyjump_p (BB_END (src)))
4551 delete_insn (BB_END (src));
4552 }
4553 if (dump_file)
4554 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4555 e->src->index, e->dest->index, dest->index);
4556 ret = redirect_edge_succ_nodup (e, dest);
4557 }
4558 else
4559 ret = redirect_branch_edge (e, dest);
4560
4561 if (!ret)
4562 return NULL;
4563
4564 fixup_partition_crossing (ret);
4565 /* We don't want simplejumps in the insn stream during cfglayout. */
4566 gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4567
4568 df_set_bb_dirty (src);
4569 return ret;
4570 }
4571
/* Simple wrapper as we can always redirect fallthru edges.  */
4573 static basic_block
4574 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4575 {
4576 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4577
4578 gcc_assert (redirected);
4579 return NULL;
4580 }
4581
4582 /* Same as delete_basic_block but update cfg_layout structures. */
4583
4584 static void
4585 cfg_layout_delete_block (basic_block bb)
4586 {
4587 rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
4588 rtx_insn **to;
4589
4590 if (BB_HEADER (bb))
4591 {
4592 next = BB_HEAD (bb);
4593 if (prev)
4594 SET_NEXT_INSN (prev) = BB_HEADER (bb);
4595 else
4596 set_first_insn (BB_HEADER (bb));
4597 SET_PREV_INSN (BB_HEADER (bb)) = prev;
4598 insn = BB_HEADER (bb);
4599 while (NEXT_INSN (insn))
4600 insn = NEXT_INSN (insn);
4601 SET_NEXT_INSN (insn) = next;
4602 SET_PREV_INSN (next) = insn;
4603 }
4604 next = NEXT_INSN (BB_END (bb));
4605 if (BB_FOOTER (bb))
4606 {
4607 insn = BB_FOOTER (bb);
4608 while (insn)
4609 {
4610 if (BARRIER_P (insn))
4611 {
4612 if (PREV_INSN (insn))
4613 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4614 else
4615 BB_FOOTER (bb) = NEXT_INSN (insn);
4616 if (NEXT_INSN (insn))
4617 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4618 }
4619 if (LABEL_P (insn))
4620 break;
4621 insn = NEXT_INSN (insn);
4622 }
4623 if (BB_FOOTER (bb))
4624 {
4625 insn = BB_END (bb);
4626 SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4627 SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4628 while (NEXT_INSN (insn))
4629 insn = NEXT_INSN (insn);
4630 SET_NEXT_INSN (insn) = next;
4631 if (next)
4632 SET_PREV_INSN (next) = insn;
4633 else
4634 set_last_insn (insn);
4635 }
4636 }
4637 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4638 to = &BB_HEADER (bb->next_bb);
4639 else
4640 to = &cfg_layout_function_footer;
4641
4642 rtl_delete_block (bb);
4643
4644 if (prev)
4645 prev = NEXT_INSN (prev);
4646 else
4647 prev = get_insns ();
4648 if (next)
4649 next = PREV_INSN (next);
4650 else
4651 next = get_last_insn ();
4652
4653 if (next && NEXT_INSN (next) != prev)
4654 {
4655 remaints = unlink_insn_chain (prev, next);
4656 insn = remaints;
4657 while (NEXT_INSN (insn))
4658 insn = NEXT_INSN (insn);
4659 SET_NEXT_INSN (insn) = *to;
4660 if (*to)
4661 SET_PREV_INSN (*to) = insn;
4662 *to = remaints;
4663 }
4664 }
4665
4666 /* Return true when blocks A and B can be safely merged. */
4667
4668 static bool
4669 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4670 {
4671 /* If we are partitioning hot/cold basic blocks, we don't want to
4672 mess up unconditional or indirect jumps that cross between hot
4673 and cold sections.
4674
4675 Basic block partitioning may result in some jumps that appear to
4676 be optimizable (or blocks that appear to be mergeable), but which really
4677 must be left untouched (they are required to make it safely across
4678 partition boundaries). See the comments at the top of
4679 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4680
4681 if (BB_PARTITION (a) != BB_PARTITION (b))
4682 return false;
4683
4684 /* Protect the loop latches. */
4685 if (current_loops && b->loop_father->latch == b)
4686 return false;
4687
4688 /* If we would end up moving B's instructions, make sure it doesn't fall
4689 through into the exit block, since we cannot recover from a fallthrough
4690 edge into the exit block occurring in the middle of a function. */
4691 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4692 {
4693 edge e = find_fallthru_edge (b->succs);
4694 if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4695 return false;
4696 }
4697
4698 /* There must be exactly one edge in between the blocks. */
4699 return (single_succ_p (a)
4700 && single_succ (a) == b
	  && single_pred_p (b)
4702 && a != b
4703 /* Must be simple edge. */
4704 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4705 && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4706 && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4707 /* If the jump insn has side effects, we can't kill the edge.
4708 When not optimizing, try_redirect_by_replacing_jump will
4709 not allow us to redirect an edge by replacing a table jump. */
4710 && (!JUMP_P (BB_END (a))
4711 || ((!optimize || reload_completed)
4712 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4713 }
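
/* As elsewhere, passes usually go through the cfghooks.h wrappers
   rather than calling this predicate directly.  An illustrative
   sketch:

     if (can_merge_blocks_p (a, b))
       merge_blocks (a, b);  */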
4714
4715 /* Merge block A and B. The blocks must be mergeable. */
4716
4717 static void
4718 cfg_layout_merge_blocks (basic_block a, basic_block b)
4719 {
4720 /* If B is a forwarder block whose outgoing edge has no location, we'll
4721 propagate the locus of the edge between A and B onto it. */
4722 const bool forward_edge_locus
4723 = (b->flags & BB_FORWARDER_BLOCK) != 0
4724 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
4725 rtx_insn *insn;
4726
4727 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4728
4729 if (dump_file)
4730 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4731 a->index);
4732
4733 /* If there was a CODE_LABEL beginning B, delete it. */
4734 if (LABEL_P (BB_HEAD (b)))
4735 {
4736 delete_insn (BB_HEAD (b));
4737 }
4738
  /* We should have a fallthru edge in A; if not, we can do a dummy
     redirection to get it cleaned up.  */
4741 if (JUMP_P (BB_END (a)))
4742 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4743 gcc_assert (!JUMP_P (BB_END (a)));
4744
4745 /* If not optimizing, preserve the locus of the single edge between
4746 blocks A and B if necessary by emitting a nop. */
4747 if (!optimize
4748 && !forward_edge_locus
4749 && !DECL_IGNORED_P (current_function_decl))
4750 emit_nop_for_unique_locus_between (a, b);
4751
4752 /* Move things from b->footer after a->footer. */
4753 if (BB_FOOTER (b))
4754 {
4755 if (!BB_FOOTER (a))
4756 BB_FOOTER (a) = BB_FOOTER (b);
4757 else
4758 {
4759 rtx_insn *last = BB_FOOTER (a);
4760
4761 while (NEXT_INSN (last))
4762 last = NEXT_INSN (last);
4763 SET_NEXT_INSN (last) = BB_FOOTER (b);
4764 SET_PREV_INSN (BB_FOOTER (b)) = last;
4765 }
4766 BB_FOOTER (b) = NULL;
4767 }
4768
4769 /* Move things from b->header before a->footer.
4770 Note that this may include dead tablejump data, but we don't clean
4771 those up until we go out of cfglayout mode. */
4772 if (BB_HEADER (b))
4773 {
4774 if (! BB_FOOTER (a))
4775 BB_FOOTER (a) = BB_HEADER (b);
4776 else
4777 {
4778 rtx_insn *last = BB_HEADER (b);
4779
4780 while (NEXT_INSN (last))
4781 last = NEXT_INSN (last);
4782 SET_NEXT_INSN (last) = BB_FOOTER (a);
4783 SET_PREV_INSN (BB_FOOTER (a)) = last;
4784 BB_FOOTER (a) = BB_HEADER (b);
4785 }
4786 BB_HEADER (b) = NULL;
4787 }
4788
4789 /* In the case basic blocks are not adjacent, move them around. */
4790 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4791 {
4792 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4793
4794 emit_insn_after_noloc (insn, BB_END (a), a);
4795 }
4796 /* Otherwise just re-associate the instructions. */
4797 else
4798 {
4799 insn = BB_HEAD (b);
4800 BB_END (a) = BB_END (b);
4801 }
4802
  /* emit_insn_after_noloc doesn't call df_insn_change_bb.
     We need to call it explicitly.  */
4805 update_bb_for_insn_chain (insn, BB_END (b), a);
4806
4807 /* Skip possible DELETED_LABEL insn. */
4808 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4809 insn = NEXT_INSN (insn);
4810 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4811 BB_HEAD (b) = BB_END (b) = NULL;
4812 delete_insn (insn);
4813
4814 df_bb_delete (b->index);
4815
4816 if (forward_edge_locus)
4817 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4818
4819 if (dump_file)
4820 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4821 }
4822
4823 /* Split edge E. */
4824
4825 static basic_block
4826 cfg_layout_split_edge (edge e)
4827 {
4828 basic_block new_bb =
4829 create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4830 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4831 NULL_RTX, e->src);
4832
4833 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4834 BB_COPY_PARTITION (new_bb, e->src);
4835 else
4836 BB_COPY_PARTITION (new_bb, e->dest);
4837 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4838 redirect_edge_and_branch_force (e, new_bb);
4839
4840 return new_bb;
4841 }
4842
4843 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4844
4845 static void
4846 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4847 {
4848 }
4849
4850 /* Return true if BB contains only labels or non-executable
4851 instructions. */
4852
4853 static bool
4854 rtl_block_empty_p (basic_block bb)
4855 {
4856 rtx_insn *insn;
4857
4858 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4859 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4860 return true;
4861
4862 FOR_BB_INSNS (bb, insn)
4863 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4864 return false;
4865
4866 return true;
4867 }
4868
4869 /* Split a basic block if it ends with a conditional branch and if
4870 the other part of the block is not empty. */
4871
4872 static basic_block
4873 rtl_split_block_before_cond_jump (basic_block bb)
4874 {
4875 rtx_insn *insn;
4876 rtx_insn *split_point = NULL;
4877 rtx_insn *last = NULL;
4878 bool found_code = false;
4879
4880 FOR_BB_INSNS (bb, insn)
4881 {
4882 if (any_condjump_p (insn))
4883 split_point = last;
4884 else if (NONDEBUG_INSN_P (insn))
4885 found_code = true;
4886 last = insn;
4887 }
4888
  /* Split only if we found both a conditional jump and some other real
     code before it.  */
4890 if (found_code && split_point)
4891 return split_block (bb, split_point)->dest;
4892 else
4893 return NULL;
4894 }
4895
4896 /* Return 1 if BB ends with a call, possibly followed by some
4897 instructions that must stay with the call, 0 otherwise. */
4898
4899 static bool
4900 rtl_block_ends_with_call_p (basic_block bb)
4901 {
4902 rtx_insn *insn = BB_END (bb);
4903
4904 while (!CALL_P (insn)
4905 && insn != BB_HEAD (bb)
4906 && (keep_with_call_p (insn)
4907 || NOTE_P (insn)
4908 || DEBUG_INSN_P (insn)))
4909 insn = PREV_INSN (insn);
4910 return (CALL_P (insn));
4911 }
4912
4913 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4914
4915 static bool
4916 rtl_block_ends_with_condjump_p (const_basic_block bb)
4917 {
4918 return any_condjump_p (BB_END (bb));
4919 }
4920
/* Return true if we need to add a fake edge to the exit block.
   Helper function for rtl_flow_call_edges_add.  */
4923
4924 static bool
4925 need_fake_edge_p (const rtx_insn *insn)
4926 {
4927 if (!INSN_P (insn))
4928 return false;
4929
4930 if ((CALL_P (insn)
4931 && !SIBLING_CALL_P (insn)
4932 && !find_reg_note (insn, REG_NORETURN, NULL)
4933 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4934 return true;
4935
4936 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4937 && MEM_VOLATILE_P (PATTERN (insn)))
4938 || (GET_CODE (PATTERN (insn)) == PARALLEL
4939 && asm_noperands (insn) != -1
4940 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4941 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4942 }
4943
/* Add fake edges to the function exit for any non-constant and
   non-noreturn calls and for volatile inline assembly, in the bitmap of
   blocks specified by BLOCKS or in the whole CFG if BLOCKS is zero.
   Return the number of blocks that were split.
4948
4949 The goal is to expose cases in which entering a basic block does not imply
4950 that all subsequent instructions must be executed. */
4951
4952 static int
4953 rtl_flow_call_edges_add (sbitmap blocks)
4954 {
4955 int i;
4956 int blocks_split = 0;
4957 int last_bb = last_basic_block_for_fn (cfun);
4958 bool check_last_block = false;
4959
4960 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4961 return 0;
4962
4963 if (! blocks)
4964 check_last_block = true;
4965 else
4966 check_last_block = bitmap_bit_p (blocks,
4967 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
4968
4969 /* In the last basic block, before epilogue generation, there will be
4970 a fallthru edge to EXIT. Special care is required if the last insn
4971 of the last basic block is a call because make_edge folds duplicate
4972 edges, which would result in the fallthru edge also being marked
4973 fake, which would result in the fallthru edge being removed by
4974 remove_fake_edges, which would result in an invalid CFG.
4975
4976 Moreover, we can't elide the outgoing fake edge, since the block
4977 profiler needs to take this into account in order to solve the minimal
4978 spanning tree in the case that the call doesn't return.
4979
4980 Handle this by adding a dummy instruction in a new last basic block. */
4981 if (check_last_block)
4982 {
4983 basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
4984 rtx_insn *insn = BB_END (bb);
4985
4986 /* Back up past insns that must be kept in the same block as a call. */
4987 while (insn != BB_HEAD (bb)
4988 && keep_with_call_p (insn))
4989 insn = PREV_INSN (insn);
4990
4991 if (need_fake_edge_p (insn))
4992 {
4993 edge e;
4994
4995 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
4996 if (e)
4997 {
4998 insert_insn_on_edge (gen_use (const0_rtx), e);
4999 commit_edge_insertions ();
5000 }
5001 }
5002 }
5003
  /* Now add fake edges to the function exit for any non-constant calls,
     since there is no way that we can determine whether they will
     return or not...  */
5007
5008 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
5009 {
5010 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
5011 rtx_insn *insn;
5012 rtx_insn *prev_insn;
5013
5014 if (!bb)
5015 continue;
5016
5017 if (blocks && !bitmap_bit_p (blocks, i))
5018 continue;
5019
5020 for (insn = BB_END (bb); ; insn = prev_insn)
5021 {
5022 prev_insn = PREV_INSN (insn);
5023 if (need_fake_edge_p (insn))
5024 {
5025 edge e;
5026 rtx_insn *split_at_insn = insn;
5027
5028 /* Don't split the block between a call and an insn that should
5029 remain in the same block as the call. */
5030 if (CALL_P (insn))
5031 while (split_at_insn != BB_END (bb)
5032 && keep_with_call_p (NEXT_INSN (split_at_insn)))
5033 split_at_insn = NEXT_INSN (split_at_insn);
5034
5035 /* The handling above of the final block before the epilogue
5036 should be enough to verify that there is no edge to the exit
5037 block in CFG already. Calling make_edge in such case would
5038 cause us to mark that edge as fake and remove it later. */
5039
5040 if (flag_checking && split_at_insn == BB_END (bb))
5041 {
5042 e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
5043 gcc_assert (e == NULL);
5044 }
5045
5046 /* Note that the following may create a new basic block
5047 and renumber the existing basic blocks. */
5048 if (split_at_insn != BB_END (bb))
5049 {
5050 e = split_block (bb, split_at_insn);
5051 if (e)
5052 blocks_split++;
5053 }
5054
5055 edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
5056 ne->probability = profile_probability::guessed_never ();
5057 }
5058
5059 if (insn == BB_HEAD (bb))
5060 break;
5061 }
5062 }
5063
5064 if (blocks_split)
5065 verify_flow_info ();
5066
5067 return blocks_split;
5068 }
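
/* A hedged usage sketch, assuming the flow_call_edges_add wrapper from
   cfghooks.h, where a NULL bitmap means the whole CFG:

     int blocks_split = flow_call_edges_add (NULL);  */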
5069
/* Add COMP_RTX as a condition at the end of COND_BB.  FIRST_HEAD is
   the conditional branch target, SECOND_HEAD should be the fall-thru
   block.  There is no need to handle the fall-thru block here; the
   loop versioning code takes care of that.  The reason SECOND_HEAD is
   passed at all is that it is needed for the condition in trees, and
   this hook should have the same type in both IRs.  */
5075 static void
5076 rtl_lv_add_condition_to_bb (basic_block first_head ,
5077 basic_block second_head ATTRIBUTE_UNUSED,
5078 basic_block cond_bb, void *comp_rtx)
5079 {
5080 rtx_code_label *label;
5081 rtx_insn *seq, *jump;
5082 rtx op0 = XEXP ((rtx)comp_rtx, 0);
5083 rtx op1 = XEXP ((rtx)comp_rtx, 1);
5084 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
5085 machine_mode mode;
5086
5087
5088 label = block_label (first_head);
5089 mode = GET_MODE (op0);
5090 if (mode == VOIDmode)
5091 mode = GET_MODE (op1);
5092
5093 start_sequence ();
5094 op0 = force_operand (op0, NULL_RTX);
5095 op1 = force_operand (op1, NULL_RTX);
5096 do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
5097 profile_probability::uninitialized ());
5098 jump = get_last_insn ();
5099 JUMP_LABEL (jump) = label;
5100 LABEL_NUSES (label)++;
5101 seq = get_insns ();
5102 end_sequence ();
5103
  /* Add the new condition at the end of COND_BB.  */
5105 emit_insn_after (seq, BB_END (cond_bb));
5106 }
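
/* The function above uses the standard sequence idiom for emitting a
   multi-insn construct.  A minimal sketch of that idiom (DEST, SRC and
   BB are hypothetical):

     start_sequence ();
     emit_move_insn (dest, src);
     rtx_insn *seq = get_insns ();
     end_sequence ();
     emit_insn_after (seq, BB_END (bb));  */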
5107
5108
/* Given a block B with a conditional branch at its end, store the
   branch edge in *BRANCH_EDGE and the fall-thru edge in
   *FALLTHRU_EDGE.  */
5112 static void
5113 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
5114 edge *fallthru_edge)
5115 {
5116 edge e = EDGE_SUCC (b, 0);
5117
5118 if (e->flags & EDGE_FALLTHRU)
5119 {
5120 *fallthru_edge = e;
5121 *branch_edge = EDGE_SUCC (b, 1);
5122 }
5123 else
5124 {
5125 *branch_edge = e;
5126 *fallthru_edge = EDGE_SUCC (b, 1);
5127 }
5128 }
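
/* An illustrative call through the corresponding cfghooks.h wrapper,
   assuming BB ends in a conditional jump:

     edge branch_edge, fallthru_edge;
     extract_cond_bb_edges (bb, &branch_edge, &fallthru_edge);  */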
5129
5130 void
5131 init_rtl_bb_info (basic_block bb)
5132 {
5133 gcc_assert (!bb->il.x.rtl);
5134 bb->il.x.head_ = NULL;
5135 bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
5136 }
5137
5138 /* Returns true if it is possible to remove edge E by redirecting
5139 it to the destination of the other edge from E->src. */
5140
5141 static bool
5142 rtl_can_remove_branch_p (const_edge e)
5143 {
5144 const_basic_block src = e->src;
5145 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
5146 const rtx_insn *insn = BB_END (src);
5147 rtx set;
5148
5149 /* The conditions are taken from try_redirect_by_replacing_jump. */
5150 if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
5151 return false;
5152
5153 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
5154 return false;
5155
5156 if (BB_PARTITION (src) != BB_PARTITION (target))
5157 return false;
5158
5159 if (!onlyjump_p (insn)
5160 || tablejump_p (insn, NULL, NULL))
5161 return false;
5162
5163 set = single_set (insn);
5164 if (!set || side_effects_p (set))
5165 return false;
5166
5167 return true;
5168 }
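
/* A sketch of the intended pairing with the cfghooks.h wrapper that
   performs the actual removal:

     if (can_remove_branch_p (e))
       remove_branch (e);  */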
5169
5170 static basic_block
5171 rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
5172 {
5173 bb = cfg_layout_duplicate_bb (bb, id);
5174 bb->aux = NULL;
5175 return bb;
5176 }
5177
/* Do book-keeping of basic block BB for the profile consistency checker.
   Store the counts in RECORD.  */
5180 static void
5181 rtl_account_profile_record (basic_block bb, struct profile_record *record)
5182 {
5183 rtx_insn *insn;
5184 FOR_BB_INSNS (bb, insn)
5185 if (INSN_P (insn))
5186 {
5187 record->size += insn_cost (insn, false);
5188 if (bb->count.initialized_p ())
5189 record->time
5190 += insn_cost (insn, true) * bb->count.to_gcov_type ();
5191 else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
5192 record->time
5193 += insn_cost (insn, true) * bb->count.to_frequency (cfun);
5194 }
5195 }
5196
5197 /* Implementation of CFG manipulation for linearized RTL. */
5198 struct cfg_hooks rtl_cfg_hooks = {
5199 "rtl",
5200 rtl_verify_flow_info,
5201 rtl_dump_bb,
5202 rtl_dump_bb_for_graph,
5203 rtl_create_basic_block,
5204 rtl_redirect_edge_and_branch,
5205 rtl_redirect_edge_and_branch_force,
5206 rtl_can_remove_branch_p,
5207 rtl_delete_block,
5208 rtl_split_block,
5209 rtl_move_block_after,
5210 rtl_can_merge_blocks, /* can_merge_blocks_p */
5211 rtl_merge_blocks,
5212 rtl_predict_edge,
5213 rtl_predicted_by_p,
5214 cfg_layout_can_duplicate_bb_p,
5215 rtl_duplicate_bb,
5216 rtl_split_edge,
5217 rtl_make_forwarder_block,
5218 rtl_tidy_fallthru_edge,
5219 rtl_force_nonfallthru,
5220 rtl_block_ends_with_call_p,
5221 rtl_block_ends_with_condjump_p,
5222 rtl_flow_call_edges_add,
5223 NULL, /* execute_on_growing_pred */
5224 NULL, /* execute_on_shrinking_pred */
5225 NULL, /* duplicate loop for trees */
5226 NULL, /* lv_add_condition_to_bb */
5227 NULL, /* lv_adjust_loop_header_phi*/
5228 NULL, /* extract_cond_bb_edges */
5229 NULL, /* flush_pending_stmts */
5230 rtl_block_empty_p, /* block_empty_p */
5231 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5232 rtl_account_profile_record,
5233 };
5234
/* Implementation of CFG manipulation for cfg layout RTL, where
   basic blocks connected via fallthru edges do not have to be adjacent.
   This representation will hopefully become the default one in future
   versions of the compiler.  */
5239
5240 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5241 "cfglayout mode",
5242 rtl_verify_flow_info_1,
5243 rtl_dump_bb,
5244 rtl_dump_bb_for_graph,
5245 cfg_layout_create_basic_block,
5246 cfg_layout_redirect_edge_and_branch,
5247 cfg_layout_redirect_edge_and_branch_force,
5248 rtl_can_remove_branch_p,
5249 cfg_layout_delete_block,
5250 cfg_layout_split_block,
5251 rtl_move_block_after,
5252 cfg_layout_can_merge_blocks_p,
5253 cfg_layout_merge_blocks,
5254 rtl_predict_edge,
5255 rtl_predicted_by_p,
5256 cfg_layout_can_duplicate_bb_p,
5257 cfg_layout_duplicate_bb,
5258 cfg_layout_split_edge,
5259 rtl_make_forwarder_block,
5260 NULL, /* tidy_fallthru_edge */
5261 rtl_force_nonfallthru,
5262 rtl_block_ends_with_call_p,
5263 rtl_block_ends_with_condjump_p,
5264 rtl_flow_call_edges_add,
5265 NULL, /* execute_on_growing_pred */
5266 NULL, /* execute_on_shrinking_pred */
5267 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5268 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5269 NULL, /* lv_adjust_loop_header_phi*/
5270 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5271 NULL, /* flush_pending_stmts */
5272 rtl_block_empty_p, /* block_empty_p */
5273 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5274 rtl_account_profile_record,
5275 };
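
/* Which of the two hook tables above is active is controlled by the
   registration functions used in cfg_layout_initialize and
   cfg_layout_finalize above:

     cfg_layout_rtl_register_cfg_hooks ();  (enter cfglayout mode)
     rtl_register_cfg_hooks ();             (back to plain RTL)  */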
5276
5277 #include "gt-cfgrtl.h"
5278
5279 #if __GNUC__ >= 10
5280 # pragma GCC diagnostic pop
5281 #endif