gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to that
27 block's successor. Simplification of the branch instruction is performed
28 by the underlying infrastructure, so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
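/* Most of these are implemented below: edge forwarding in
   try_forward_edges, cross jumping in try_crossjump_bb, the
   jump-around-jump case in try_simplify_condjump, and block merging
   in merge_blocks_move and its helpers.  */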
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "diagnostic-core.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
58 #include "dbgcnt.h"
59
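/* A forwarder block is a block that does nothing but transfer control
   to its single successor; it contains no active insns besides a
   possible unconditional jump.  The flag tested here is cached in the
   block's flags and kept current by update_forwarder_flag below.  */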
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
61
62 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
63 static bool first_pass;
64
65 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
66 static bool crossjumps_occured;
67
68 /* Set to true if we couldn't run an optimization due to stale liveness
69 information; we should run df_analyze to enable more opportunities. */
70 static bool block_was_dirty;
71
72 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
73 static bool try_crossjump_bb (int, basic_block);
74 static bool outgoing_edges_match (int, basic_block, basic_block);
75 static enum replace_direction old_insns_match_p (int, rtx, rtx);
76
77 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
78 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
79 static bool try_optimize_cfg (int);
80 static bool try_simplify_condjump (basic_block);
81 static bool try_forward_edges (int, basic_block);
82 static edge thread_jump (edge, basic_block);
83 static bool mark_effect (rtx, bitmap);
84 static void notice_new_block (basic_block);
85 static void update_forwarder_flag (basic_block);
86 static int mentions_nonequal_regs (rtx *, void *);
87 static void merge_memattrs (rtx, rtx);
88 \f
89 /* Set flags for a newly created block. */
90
91 static void
92 notice_new_block (basic_block bb)
93 {
94 if (!bb)
95 return;
96
97 if (forwarder_block_p (bb))
98 bb->flags |= BB_FORWARDER_BLOCK;
99 }
100
101 /* Recompute the forwarder flag after a block has been modified. */
102
103 static void
104 update_forwarder_flag (basic_block bb)
105 {
106 if (forwarder_block_p (bb))
107 bb->flags |= BB_FORWARDER_BLOCK;
108 else
109 bb->flags &= ~BB_FORWARDER_BLOCK;
110 }
111 \f
112 /* Simplify a conditional jump around an unconditional jump.
113 Return true if something changed. */
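/* Schematically (an illustrative sketch; L1 and L2 are made-up labels):

     Before:                    After:
       if (cond) goto L2;         if (!cond) goto L1;
       goto L1;                 L2: ...
     L2: ...

   The block containing the unconditional jump is deleted, and the
   inverted conditional branch falls through to L2.  */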
114
115 static bool
116 try_simplify_condjump (basic_block cbranch_block)
117 {
118 basic_block jump_block, jump_dest_block, cbranch_dest_block;
119 edge cbranch_jump_edge, cbranch_fallthru_edge;
120 rtx cbranch_insn;
121
122 /* Verify that there are exactly two successors. */
123 if (EDGE_COUNT (cbranch_block->succs) != 2)
124 return false;
125
126 /* Verify that we've got a normal conditional branch at the end
127 of the block. */
128 cbranch_insn = BB_END (cbranch_block);
129 if (!any_condjump_p (cbranch_insn))
130 return false;
131
132 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
133 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
134
135 /* The next block must not have multiple predecessors, must not
136 be the last block in the function, and must contain just the
137 unconditional jump. */
138 jump_block = cbranch_fallthru_edge->dest;
139 if (!single_pred_p (jump_block)
140 || jump_block->next_bb == EXIT_BLOCK_PTR
141 || !FORWARDER_BLOCK_P (jump_block))
142 return false;
143 jump_dest_block = single_succ (jump_block);
144
145 /* If we are partitioning hot/cold basic blocks, we don't want to
146 mess up unconditional or indirect jumps that cross between hot
147 and cold sections.
148
149 Basic block partitioning may result in some jumps that appear to
150 be optimizable (or blocks that appear to be mergeable), but which really
151 must be left untouched (they are required to make it safely across
152 partition boundaries). See the comments at the top of
153 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
154
155 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
156 || (cbranch_jump_edge->flags & EDGE_CROSSING))
157 return false;
158
159 /* The conditional branch must target the block after the
160 unconditional branch. */
161 cbranch_dest_block = cbranch_jump_edge->dest;
162
163 if (cbranch_dest_block == EXIT_BLOCK_PTR
164 || !can_fallthru (jump_block, cbranch_dest_block))
165 return false;
166
167 /* Invert the conditional branch. */
168 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
169 return false;
170
171 if (dump_file)
172 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
173 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
174
175 /* Success. Update the CFG to match. Note that after this point
176 the edge variable names appear backwards; the redirection is done
177 this way to preserve edge profile data. */
178 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
179 cbranch_dest_block);
180 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
181 jump_dest_block);
182 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
183 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
184 update_br_prob_note (cbranch_block);
185
186 /* Delete the block with the unconditional jump, and clean up the mess. */
187 delete_basic_block (jump_block);
188 tidy_fallthru_edge (cbranch_jump_edge);
189 update_forwarder_flag (cbranch_block);
190
191 return true;
192 }
193 \f
194 /* Attempt to prove that an operation is a no-op using CSElib, or mark its
195 effect on a register. Used by jump threading. */
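/* An illustrative sketch: a SET such as
      (set (reg 100) (const_int 1))
   marks pseudo 100 in NONEQUAL, since its value can no longer be assumed
   equal on both paths, while a self-copy
      (set (reg 100) (reg 100))
   is recognized as a no-op via cselib and marks nothing.  */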
196
197 static bool
198 mark_effect (rtx exp, regset nonequal)
199 {
200 int regno;
201 rtx dest;
202 switch (GET_CODE (exp))
203 {
204 /* In case we do clobber the register, mark it as equal, as we know the
205 value is dead so it doesn't have to match. */
206 case CLOBBER:
207 if (REG_P (XEXP (exp, 0)))
208 {
209 dest = XEXP (exp, 0);
210 regno = REGNO (dest);
211 if (HARD_REGISTER_NUM_P (regno))
212 bitmap_clear_range (nonequal, regno,
213 hard_regno_nregs[regno][GET_MODE (dest)]);
214 else
215 bitmap_clear_bit (nonequal, regno);
216 }
217 return false;
218
219 case SET:
220 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
221 return false;
222 dest = SET_DEST (exp);
223 if (dest == pc_rtx)
224 return false;
225 if (!REG_P (dest))
226 return true;
227 regno = REGNO (dest);
228 if (HARD_REGISTER_NUM_P (regno))
229 bitmap_set_range (nonequal, regno,
230 hard_regno_nregs[regno][GET_MODE (dest)]);
231 else
232 bitmap_set_bit (nonequal, regno);
233 return false;
234
235 default:
236 return false;
237 }
238 }
239
240 /* Return nonzero if *X is a register that is recorded in regset DATA.
241 Called via for_each_rtx. */
242 static int
243 mentions_nonequal_regs (rtx *x, void *data)
244 {
245 regset nonequal = (regset) data;
246 if (REG_P (*x))
247 {
248 int regno;
249
250 regno = REGNO (*x);
251 if (REGNO_REG_SET_P (nonequal, regno))
252 return 1;
253 if (regno < FIRST_PSEUDO_REGISTER)
254 {
255 int n = hard_regno_nregs[regno][GET_MODE (*x)];
256 while (--n > 0)
257 if (REGNO_REG_SET_P (nonequal, regno + n))
258 return 1;
259 }
260 }
261 return 0;
262 }
263 /* Attempt to prove that the basic block B will have no side effects and
264 always continues along the same edge if reached via E. Return that edge
265 if it exists, NULL otherwise. */
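/* For instance (a sketch): if E->SRC ends in "if (x == 0)" and we reach B
   along the edge on which that test held, and B neither has side effects
   nor changes x before its own final "if (x == 0)", then the outcome of
   B's jump is known and the corresponding edge is returned.  */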
266
267 static edge
268 thread_jump (edge e, basic_block b)
269 {
270 rtx set1, set2, cond1, cond2, insn;
271 enum rtx_code code1, code2, reversed_code2;
272 bool reverse1 = false;
273 unsigned i;
274 regset nonequal;
275 bool failed = false;
276 reg_set_iterator rsi;
277
278 if (b->flags & BB_NONTHREADABLE_BLOCK)
279 return NULL;
280
281 /* At the moment we handle only conditional jumps, but later we may want
282 to extend this code to tablejumps and others. */
283 if (EDGE_COUNT (e->src->succs) != 2)
284 return NULL;
285 if (EDGE_COUNT (b->succs) != 2)
286 {
287 b->flags |= BB_NONTHREADABLE_BLOCK;
288 return NULL;
289 }
290
291 /* Second branch must end with onlyjump, as we will eliminate the jump. */
292 if (!any_condjump_p (BB_END (e->src)))
293 return NULL;
294
295 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
296 {
297 b->flags |= BB_NONTHREADABLE_BLOCK;
298 return NULL;
299 }
300
301 set1 = pc_set (BB_END (e->src));
302 set2 = pc_set (BB_END (b));
303 if (((e->flags & EDGE_FALLTHRU) != 0)
304 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
305 reverse1 = true;
306
307 cond1 = XEXP (SET_SRC (set1), 0);
308 cond2 = XEXP (SET_SRC (set2), 0);
309 if (reverse1)
310 code1 = reversed_comparison_code (cond1, BB_END (e->src));
311 else
312 code1 = GET_CODE (cond1);
313
314 code2 = GET_CODE (cond2);
315 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
316
317 if (!comparison_dominates_p (code1, code2)
318 && !comparison_dominates_p (code1, reversed_code2))
319 return NULL;
320
321 /* Ensure that the comparison operators are equivalent.
322 ??? This is far too pessimistic. We should allow swapped operands,
323 different CCmodes, or for example comparisons for interval, that
324 dominate even when operands are not equivalent. */
325 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
326 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
327 return NULL;
328
329 /* Short circuit cases where block B contains some side effects, as we can't
330 safely bypass it. */
331 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
332 insn = NEXT_INSN (insn))
333 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
334 {
335 b->flags |= BB_NONTHREADABLE_BLOCK;
336 return NULL;
337 }
338
339 cselib_init (0);
340
341 /* First process all values computed in the source basic block. */
342 for (insn = NEXT_INSN (BB_HEAD (e->src));
343 insn != NEXT_INSN (BB_END (e->src));
344 insn = NEXT_INSN (insn))
345 if (INSN_P (insn))
346 cselib_process_insn (insn);
347
348 nonequal = BITMAP_ALLOC (NULL);
349 CLEAR_REG_SET (nonequal);
350
351 /* Now assume that we've continued along the edge E to B and continue
352 processing as if it were the same basic block.
353 Our goal is to prove that the whole block is a no-op. */
354
355 for (insn = NEXT_INSN (BB_HEAD (b));
356 insn != NEXT_INSN (BB_END (b)) && !failed;
357 insn = NEXT_INSN (insn))
358 {
359 if (INSN_P (insn))
360 {
361 rtx pat = PATTERN (insn);
362
363 if (GET_CODE (pat) == PARALLEL)
364 {
365 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
367 }
368 else
369 failed |= mark_effect (pat, nonequal);
370 }
371
372 cselib_process_insn (insn);
373 }
374
375 /* Later we should clear nonequal of dead registers. So far we don't
376 have liveness information in cfg_cleanup. */
377 if (failed)
378 {
379 b->flags |= BB_NONTHREADABLE_BLOCK;
380 goto failed_exit;
381 }
382
383 /* cond2 must not mention any register whose value is not known to be
384 equal to its value in the former block. */
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
386 goto failed_exit;
387
388 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
389 goto failed_exit;
390
391 BITMAP_FREE (nonequal);
392 cselib_finish ();
393 if ((comparison_dominates_p (code1, code2) != 0)
394 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
395 return BRANCH_EDGE (b);
396 else
397 return FALLTHRU_EDGE (b);
398
399 failed_exit:
400 BITMAP_FREE (nonequal);
401 cselib_finish ();
402 return NULL;
403 }
404 \f
405 /* Attempt to forward edges leaving basic block B.
406 Return true if successful. */
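/* E.g. a chain B -> F1 -> F2 -> T, where F1 and F2 are forwarder blocks,
   is rewritten so that B's edge points directly at T.  With
   CLEANUP_THREADING the walk may also step across blocks whose conditional
   jump outcome is proved by thread_jump above.  */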
407
408 static bool
409 try_forward_edges (int mode, basic_block b)
410 {
411 bool changed = false;
412 edge_iterator ei;
413 edge e, *threaded_edges = NULL;
414
415 /* If we are partitioning hot/cold basic blocks, we don't want to
416 mess up unconditional or indirect jumps that cross between hot
417 and cold sections.
418
419 Basic block partitioning may result in some jumps that appear to
420 be optimizable (or blocks that appear to be mergeable), but which really
421 must be left untouched (they are required to make it safely across
422 partition boundaries). See the comments at the top of
423 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
424
425 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
426 return false;
427
428 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
429 {
430 basic_block target, first;
431 int counter, goto_locus;
432 bool threaded = false;
433 int nthreaded_edges = 0;
434 bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
435
436 /* Skip complex edges because we don't know how to update them.
437
438 Still handle fallthru edges, as we may succeed in forwarding the
439 fallthru edge to the same place as the branch edge of a conditional
440 branch, turning the conditional branch into an unconditional one.
441 if (e->flags & EDGE_COMPLEX)
442 {
443 ei_next (&ei);
444 continue;
445 }
446
447 target = first = e->dest;
448 counter = NUM_FIXED_BLOCKS;
449 goto_locus = e->goto_locus;
450
451 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
452 up jumps that cross between hot/cold sections.
453
454 Basic block partitioning may result in some jumps that appear
455 to be optimizable (or blocks that appear to be mergeable), but which
456 really must be left untouched (they are required to make it safely
457 across partition boundaries). See the comments at the top of
458 bb-reorder.c:partition_hot_cold_basic_blocks for complete
459 details. */
460
461 if (first != EXIT_BLOCK_PTR
462 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
463 return false;
464
465 while (counter < n_basic_blocks)
466 {
467 basic_block new_target = NULL;
468 bool new_target_threaded = false;
469 may_thread |= (target->flags & BB_MODIFIED) != 0;
470
471 if (FORWARDER_BLOCK_P (target)
472 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
473 && single_succ (target) != EXIT_BLOCK_PTR)
474 {
475 /* Bypass trivial infinite loops. */
476 new_target = single_succ (target);
477 if (target == new_target)
478 counter = n_basic_blocks;
479 else if (!optimize)
480 {
481 /* When not optimizing, ensure that edges or forwarder
482 blocks with different locus are not optimized out. */
483 int new_locus = single_succ_edge (target)->goto_locus;
484 int locus = goto_locus;
485
486 if (new_locus && locus && !locator_eq (new_locus, locus))
487 new_target = NULL;
488 else
489 {
490 rtx last;
491
492 if (new_locus)
493 locus = new_locus;
494
495 last = BB_END (target);
496 if (DEBUG_INSN_P (last))
497 last = prev_nondebug_insn (last);
498
499 new_locus = last && INSN_P (last)
500 ? INSN_LOCATOR (last) : 0;
501
502 if (new_locus && locus && !locator_eq (new_locus, locus))
503 new_target = NULL;
504 else
505 {
506 if (new_locus)
507 locus = new_locus;
508
509 goto_locus = locus;
510 }
511 }
512 }
513 }
514
515 /* Allow threading over at most one edge at a time, to simplify
516 updating of probabilities. */
517 else if ((mode & CLEANUP_THREADING) && may_thread)
518 {
519 edge t = thread_jump (e, target);
520 if (t)
521 {
522 if (!threaded_edges)
523 threaded_edges = XNEWVEC (edge, n_basic_blocks);
524 else
525 {
526 int i;
527
528 /* Detect an infinite loop across blocks not
529 including the start block. */
530 for (i = 0; i < nthreaded_edges; ++i)
531 if (threaded_edges[i] == t)
532 break;
533 if (i < nthreaded_edges)
534 {
535 counter = n_basic_blocks;
536 break;
537 }
538 }
539
540 /* Detect an infinite loop across the start block. */
541 if (t->dest == b)
542 break;
543
544 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
545 threaded_edges[nthreaded_edges++] = t;
546
547 new_target = t->dest;
548 new_target_threaded = true;
549 }
550 }
551
552 if (!new_target)
553 break;
554
555 counter++;
556 target = new_target;
557 threaded |= new_target_threaded;
558 }
559
560 if (counter >= n_basic_blocks)
561 {
562 if (dump_file)
563 fprintf (dump_file, "Infinite loop in BB %i.\n",
564 target->index);
565 }
566 else if (target == first)
567 ; /* We didn't do anything. */
568 else
569 {
570 /* Save the values now, as the edge may get removed. */
571 gcov_type edge_count = e->count;
572 int edge_probability = e->probability;
573 int edge_frequency;
574 int n = 0;
575
576 e->goto_locus = goto_locus;
577
578 /* Don't force if target is exit block. */
579 if (threaded && target != EXIT_BLOCK_PTR)
580 {
581 notice_new_block (redirect_edge_and_branch_force (e, target));
582 if (dump_file)
583 fprintf (dump_file, "Conditionals threaded.\n");
584 }
585 else if (!redirect_edge_and_branch (e, target))
586 {
587 if (dump_file)
588 fprintf (dump_file,
589 "Forwarding edge %i->%i to %i failed.\n",
590 b->index, e->dest->index, target->index);
591 ei_next (&ei);
592 continue;
593 }
594
595 /* We successfully forwarded the edge. Now update profile
596 data: for each edge we traversed in the chain, remove
597 the original edge's execution count. */
598 edge_frequency = ((edge_probability * b->frequency
599 + REG_BR_PROB_BASE / 2)
600 / REG_BR_PROB_BASE);
601
602 do
603 {
604 edge t;
605
606 if (!single_succ_p (first))
607 {
608 gcc_assert (n < nthreaded_edges);
609 t = threaded_edges [n++];
610 gcc_assert (t->src == first);
611 update_bb_profile_for_threading (first, edge_frequency,
612 edge_count, t);
613 update_br_prob_note (first);
614 }
615 else
616 {
617 first->count -= edge_count;
618 if (first->count < 0)
619 first->count = 0;
620 first->frequency -= edge_frequency;
621 if (first->frequency < 0)
622 first->frequency = 0;
623 /* It is possible that as a result of
624 threading we've removed the edge, as it
625 is threaded to the fallthru edge. Avoid
626 getting out of sync. */
627 if (n < nthreaded_edges
628 && first == threaded_edges [n]->src)
629 n++;
630 t = single_succ_edge (first);
631 }
632
633 t->count -= edge_count;
634 if (t->count < 0)
635 t->count = 0;
636 first = t->dest;
637 }
638 while (first != target);
639
640 changed = true;
641 continue;
642 }
643 ei_next (&ei);
644 }
645
646 free (threaded_edges);
647 return changed;
648 }
649 \f
650
651 /* Blocks A and B are to be merged into a single block. A has no incoming
652 fallthru edge, so it can be moved before B without adding or modifying
653 any jumps (aside from the jump from A to B). */
654
655 static void
656 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
657 {
658 rtx barrier;
659
660 /* If we are partitioning hot/cold basic blocks, we don't want to
661 mess up unconditional or indirect jumps that cross between hot
662 and cold sections.
663
664 Basic block partitioning may result in some jumps that appear to
665 be optimizable (or blocks that appear to be mergeable), but which really
666 must be left untouched (they are required to make it safely across
667 partition boundaries). See the comments at the top of
668 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
669
670 if (BB_PARTITION (a) != BB_PARTITION (b))
671 return;
672
673 barrier = next_nonnote_insn (BB_END (a));
674 gcc_assert (BARRIER_P (barrier));
675 delete_insn (barrier);
676
677 /* Scramble the insn chain. */
678 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
679 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
680 df_set_bb_dirty (a);
681
682 if (dump_file)
683 fprintf (dump_file, "Moved block %d before %d and merged.\n",
684 a->index, b->index);
685
686 /* Swap the records for the two blocks around. */
687
688 unlink_block (a);
689 link_block (a, b->prev_bb);
690
691 /* Now blocks A and B are contiguous. Merge them. */
692 merge_blocks (a, b);
693 }
694
695 /* Blocks A and B are to be merged into a single block. B has no outgoing
696 fallthru edge, so it can be moved after A without adding or modifying
697 any jumps (aside from the jump from A to B). */
698
699 static void
700 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
701 {
702 rtx barrier, real_b_end;
703 rtx label, table;
704
705 /* If we are partitioning hot/cold basic blocks, we don't want to
706 mess up unconditional or indirect jumps that cross between hot
707 and cold sections.
708
709 Basic block partitioning may result in some jumps that appear to
710 be optimizable (or blocks that appear to be mergeable), but which really
711 must be left untouched (they are required to make it safely across
712 partition boundaries). See the comments at the top of
713 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
714
715 if (BB_PARTITION (a) != BB_PARTITION (b))
716 return;
717
718 real_b_end = BB_END (b);
719
720 /* If there is a jump table following block B, temporarily add the jump
721 table to block B so that it will also be moved to the correct location. */
722 if (tablejump_p (BB_END (b), &label, &table)
723 && prev_active_insn (label) == BB_END (b))
724 {
725 BB_END (b) = table;
726 }
727
728 /* There had better have been a barrier there. Delete it. */
729 barrier = NEXT_INSN (BB_END (b));
730 if (barrier && BARRIER_P (barrier))
731 delete_insn (barrier);
732
733
734 /* Scramble the insn chain. */
735 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
736
737 /* Restore the real end of b. */
738 BB_END (b) = real_b_end;
739
740 if (dump_file)
741 fprintf (dump_file, "Moved block %d after %d and merged.\n",
742 b->index, a->index);
743
744 /* Now blocks A and B are contiguous. Merge them. */
745 merge_blocks (a, b);
746 }
747
748 /* Attempt to merge basic blocks that are potentially non-adjacent.
749 Return NULL iff the attempt failed, otherwise return the basic block
750 where cleanup_cfg should continue. Because the merging commonly
751 moves a basic block away or introduces another optimization
752 possibility, return the basic block just before B so cleanup_cfg
753 doesn't need to iterate.
754
755 It may be a good idea to return the basic block before C in the case
756 that C has been moved after B and originally appeared earlier in the
757 insn sequence, but we have no information available about the
758 relative ordering of these two. Hopefully it is not too common. */
759
760 static basic_block
761 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
762 {
763 basic_block next;
764
765 /* If we are partitioning hot/cold basic blocks, we don't want to
766 mess up unconditional or indirect jumps that cross between hot
767 and cold sections.
768
769 Basic block partitioning may result in some jumps that appear to
770 be optimizable (or blocks that appear to be mergeable), but which really
771 must be left untouched (they are required to make it safely across
772 partition boundaries). See the comments at the top of
773 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
774
775 if (BB_PARTITION (b) != BB_PARTITION (c))
776 return NULL;
777
778 /* If B has a fallthru edge to C, no need to move anything. */
779 if (e->flags & EDGE_FALLTHRU)
780 {
781 int b_index = b->index, c_index = c->index;
782 merge_blocks (b, c);
783 update_forwarder_flag (b);
784
785 if (dump_file)
786 fprintf (dump_file, "Merged %d and %d without moving.\n",
787 b_index, c_index);
788
789 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
790 }
791
792 /* Otherwise we will need to move code around. Do that only if expensive
793 transformations are allowed. */
794 else if (mode & CLEANUP_EXPENSIVE)
795 {
796 edge tmp_edge, b_fallthru_edge;
797 bool c_has_outgoing_fallthru;
798 bool b_has_incoming_fallthru;
799
800 /* Avoid overactive code motion, as the forwarder blocks should be
801 eliminated by edge redirection instead. One exception might have
802 been if B is a forwarder block and C has no fallthru edge, but
803 that should be cleaned up by bb-reorder instead. */
804 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
805 return NULL;
806
807 /* We must make sure to not munge nesting of lexical blocks,
808 and loop notes. This is done by squeezing out all the notes
809 and leaving them there to lie. Not ideal, but functional. */
810
811 tmp_edge = find_fallthru_edge (c->succs);
812 c_has_outgoing_fallthru = (tmp_edge != NULL);
813
814 tmp_edge = find_fallthru_edge (b->preds);
815 b_has_incoming_fallthru = (tmp_edge != NULL);
816 b_fallthru_edge = tmp_edge;
817 next = b->prev_bb;
818 if (next == c)
819 next = next->prev_bb;
820
821 /* Otherwise, we're going to try to move C after B. If C does
822 not have an outgoing fallthru, then it can be moved
823 immediately after B without introducing or modifying jumps. */
824 if (! c_has_outgoing_fallthru)
825 {
826 merge_blocks_move_successor_nojumps (b, c);
827 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
828 }
829
830 /* If B does not have an incoming fallthru, then it can be moved
831 immediately before C without introducing or modifying jumps.
832 C cannot be the first block, so we do not have to worry about
833 accessing a non-existent block. */
834
835 if (b_has_incoming_fallthru)
836 {
837 basic_block bb;
838
839 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
840 return NULL;
841 bb = force_nonfallthru (b_fallthru_edge);
842 if (bb)
843 notice_new_block (bb);
844 }
845
846 merge_blocks_move_predecessor_nojumps (b, c);
847 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
848 }
849
850 return NULL;
851 }
852 \f
853
854 /* Remove those memory attributes of the MEM expressions X and Y
855 that are not equal. */
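/* E.g. if X and Y have different alias sets, both are reset to alias set 0;
   differing MEM_EXPRs and offsets are dropped, sizes are widened to the
   larger of the two (or dropped if either is unknown), and the alignment
   is lowered to the smaller one, so the result is conservatively correct
   for either insn.  */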
856
857 void
858 merge_memattrs (rtx x, rtx y)
859 {
860 int i;
861 int j;
862 enum rtx_code code;
863 const char *fmt;
864
865 if (x == y)
866 return;
867 if (x == 0 || y == 0)
868 return;
869
870 code = GET_CODE (x);
871
872 if (code != GET_CODE (y))
873 return;
874
875 if (GET_MODE (x) != GET_MODE (y))
876 return;
877
878 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
879 {
880 if (! MEM_ATTRS (x))
881 MEM_ATTRS (y) = 0;
882 else if (! MEM_ATTRS (y))
883 MEM_ATTRS (x) = 0;
884 else
885 {
886 HOST_WIDE_INT mem_size;
887
888 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
889 {
890 set_mem_alias_set (x, 0);
891 set_mem_alias_set (y, 0);
892 }
893
894 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
895 {
896 set_mem_expr (x, 0);
897 set_mem_expr (y, 0);
898 clear_mem_offset (x);
899 clear_mem_offset (y);
900 }
901 else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
902 || (MEM_OFFSET_KNOWN_P (x)
903 && MEM_OFFSET (x) != MEM_OFFSET (y)))
904 {
905 clear_mem_offset (x);
906 clear_mem_offset (y);
907 }
908
909 if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
910 {
911 mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
912 set_mem_size (x, mem_size);
913 set_mem_size (y, mem_size);
914 }
915 else
916 {
917 clear_mem_size (x);
918 clear_mem_size (y);
919 }
920
921 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
922 set_mem_align (y, MEM_ALIGN (x));
923 }
924 }
925
926 fmt = GET_RTX_FORMAT (code);
927 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
928 {
929 switch (fmt[i])
930 {
931 case 'E':
932 /* Two vectors must have the same length. */
933 if (XVECLEN (x, i) != XVECLEN (y, i))
934 return;
935
936 for (j = 0; j < XVECLEN (x, i); j++)
937 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
938
939 break;
940
941 case 'e':
942 merge_memattrs (XEXP (x, i), XEXP (y, i));
943 }
944 }
945 return;
946 }
947
948
949 /* Check whether patterns P1 and P2 are equivalent, apart from the
950 possibly different single sets S1 and S2. */
951
952 static bool
953 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
954 {
955 int i;
956 rtx e1, e2;
957
958 if (p1 == s1 && p2 == s2)
959 return true;
960
961 if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
962 return false;
963
964 if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
965 return false;
966
967 for (i = 0; i < XVECLEN (p1, 0); i++)
968 {
969 e1 = XVECEXP (p1, 0, i);
970 e2 = XVECEXP (p2, 0, i);
971 if (e1 == s1 && e2 == s2)
972 continue;
973 if (reload_completed
974 ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
975 continue;
976
977 return false;
978 }
979
980 return true;
981 }
982
983 /* Examine register notes on I1 and I2 and return:
984 - dir_forward if I1 can be replaced by I2, or
985 - dir_backward if I2 can be replaced by I1, or
986 - dir_both if both are the case. */
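/* A sketch of the dir_backward case, assuming both insns carry matching
   REG_EQUAL (const_int 7) notes:
      i1: (set (reg 100) (const_int 7))
      i2: (set (reg 100) (reg 101))
   I2 may be replaced by I1, since loading the constant is always safe,
   but not the other way around: (reg 101) need not be live or hold 7 at
   I1's location.  */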
987
988 static enum replace_direction
989 can_replace_by (rtx i1, rtx i2)
990 {
991 rtx s1, s2, d1, d2, src1, src2, note1, note2;
992 bool c1, c2;
993
994 /* Check for 2 sets. */
995 s1 = single_set (i1);
996 s2 = single_set (i2);
997 if (s1 == NULL_RTX || s2 == NULL_RTX)
998 return dir_none;
999
1000 /* Check that the 2 sets set the same dest. */
1001 d1 = SET_DEST (s1);
1002 d2 = SET_DEST (s2);
1003 if (!(reload_completed
1004 ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1005 return dir_none;
1006
1007 /* Find identical reg_equiv or reg_equal notes, which imply that the 2 sets
1008 set dest to the same value. */
1009 note1 = find_reg_equal_equiv_note (i1);
1010 note2 = find_reg_equal_equiv_note (i2);
1011 if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
1012 || !CONST_INT_P (XEXP (note1, 0)))
1013 return dir_none;
1014
1015 if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1016 return dir_none;
1017
1018 /* Although the 2 sets set dest to the same value, we cannot replace
1019 (set (dest) (const_int))
1020 by
1021 (set (dest) (reg))
1022 because we don't know if the reg is live and has the same value at the
1023 location of replacement. */
1024 src1 = SET_SRC (s1);
1025 src2 = SET_SRC (s2);
1026 c1 = CONST_INT_P (src1);
1027 c2 = CONST_INT_P (src2);
1028 if (c1 && c2)
1029 return dir_both;
1030 else if (c2)
1031 return dir_forward;
1032 else if (c1)
1033 return dir_backward;
1034
1035 return dir_none;
1036 }
1037
1038 /* Merges directions A and B. */
1039
1040 static enum replace_direction
1041 merge_dir (enum replace_direction a, enum replace_direction b)
1042 {
1043 /* Implements the following table:
1044 |bo fw bw no
1045 ---+-----------
1046 bo |bo fw bw no
1047 fw |-- fw no no
1048 bw |-- -- bw no
1049 no |-- -- -- no. */
1050
1051 if (a == b)
1052 return a;
1053
1054 if (a == dir_both)
1055 return b;
1056 if (b == dir_both)
1057 return a;
1058
1059 return dir_none;
1060 }
1061
1062 /* Examine I1 and I2 and return:
1063 - dir_forward if I1 can be replaced by I2, or
1064 - dir_backward if I2 can be replaced by I1, or
1065 - dir_both if both are the case. */
1066
1067 static enum replace_direction
1068 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
1069 {
1070 rtx p1, p2;
1071
1072 /* Verify that I1 and I2 are equivalent. */
1073 if (GET_CODE (i1) != GET_CODE (i2))
1074 return dir_none;
1075
1076 /* __builtin_unreachable() may lead to empty blocks (ending with
1077 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1078 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1079 return dir_both;
1080
1081 p1 = PATTERN (i1);
1082 p2 = PATTERN (i2);
1083
1084 if (GET_CODE (p1) != GET_CODE (p2))
1085 return dir_none;
1086
1087 /* If this is a CALL_INSN, compare register usage information.
1088 If we don't check this on stack register machines, the two
1089 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1090 numbers of stack registers in the same basic block.
1091 If we don't check this on machines with delay slots, a delay slot may
1092 be filled that clobbers a parameter expected by the subroutine.
1093
1094 ??? We take the simple route for now and assume that if they're
1095 equal, they were constructed identically.
1096
1097 Also check for identical exception regions. */
1098
1099 if (CALL_P (i1))
1100 {
1101 /* Ensure the same EH region. */
1102 rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1103 rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1104
1105 if (!n1 && n2)
1106 return dir_none;
1107
1108 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1109 return dir_none;
1110
1111 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1112 CALL_INSN_FUNCTION_USAGE (i2))
1113 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1114 return dir_none;
1115 }
1116
1117 #ifdef STACK_REGS
1118 /* If cross_jump_death_matters is not 0, the insn's mode
1119 indicates whether or not the insn contains any stack-like
1120 regs. */
1121
1122 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1123 {
1124 /* If register stack conversion has already been done, then
1125 death notes must also be compared before it is certain that
1126 the two instruction streams match. */
1127
1128 rtx note;
1129 HARD_REG_SET i1_regset, i2_regset;
1130
1131 CLEAR_HARD_REG_SET (i1_regset);
1132 CLEAR_HARD_REG_SET (i2_regset);
1133
1134 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1135 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1136 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1137
1138 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1139 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1140 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1141
1142 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1143 return dir_none;
1144 }
1145 #endif
1146
1147 if (reload_completed
1148 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1149 return dir_both;
1150
1151 return can_replace_by (i1, i2);
1152 }
1153 \f
1154 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1155 flow_find_head_matching_sequence, ensure the notes match. */
1156
1157 static void
1158 merge_notes (rtx i1, rtx i2)
1159 {
1160 /* If the merged insns have different REG_EQUAL notes, then
1161 remove them. */
1162 rtx equiv1 = find_reg_equal_equiv_note (i1);
1163 rtx equiv2 = find_reg_equal_equiv_note (i2);
1164
1165 if (equiv1 && !equiv2)
1166 remove_note (i1, equiv1);
1167 else if (!equiv1 && equiv2)
1168 remove_note (i2, equiv2);
1169 else if (equiv1 && equiv2
1170 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1171 {
1172 remove_note (i1, equiv1);
1173 remove_note (i2, equiv2);
1174 }
1175 }
1176
1177 /* Walk from I1 in BB1 backward to the next non-debug insn, and return the
1178 resulting insn in *I1 and the corresponding bb in *BB1. At the head of a
1179 bb, if there is a predecessor bb that reaches this bb via fallthru and
1180 FOLLOW_FALLTHRU is set, walk further into the predecessor bb and record
1181 this in *DID_FALLTHRU. Otherwise, stop at the head of the bb. */
1182
1183 static void
1184 walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
1185 bool *did_fallthru)
1186 {
1187 edge fallthru;
1188
1189 *did_fallthru = false;
1190
1191 /* Ignore notes. */
1192 while (!NONDEBUG_INSN_P (*i1))
1193 {
1194 if (*i1 != BB_HEAD (*bb1))
1195 {
1196 *i1 = PREV_INSN (*i1);
1197 continue;
1198 }
1199
1200 if (!follow_fallthru)
1201 return;
1202
1203 fallthru = find_fallthru_edge ((*bb1)->preds);
1204 if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
1205 || !single_succ_p (fallthru->src))
1206 return;
1207
1208 *bb1 = fallthru->src;
1209 *i1 = BB_END (*bb1);
1210 *did_fallthru = true;
1211 }
1212 }
1213
1214 /* Look through the insns at the end of BB1 and BB2 and find the longest
1215 sequence of insns that are either equivalent, or allow forward or backward
1216 replacement. Store the first insns of that sequence in *F1 and *F2 and
1217 return the sequence length.
1218
1219 DIR_P indicates the allowed replacement direction on function entry, and
1220 the actual replacement direction on function exit. If NULL, only equivalent
1221 sequences are allowed.
1222
1223 To simplify callers of this function, if the blocks match exactly,
1224 store the head of the blocks in *F1 and *F2. */
1225
1226 int
1227 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
1228 enum replace_direction *dir_p)
1229 {
1230 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1231 int ninsns = 0;
1232 rtx p1;
1233 enum replace_direction dir, last_dir, afterlast_dir;
1234 bool follow_fallthru, did_fallthru;
1235
1236 if (dir_p)
1237 dir = *dir_p;
1238 else
1239 dir = dir_both;
1240 afterlast_dir = dir;
1241 last_dir = afterlast_dir;
1242
1243 /* Skip simple jumps at the end of the blocks. Complex jumps still
1244 need to be compared for equivalence, which we'll do below. */
1245
1246 i1 = BB_END (bb1);
1247 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1248 if (onlyjump_p (i1)
1249 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1250 {
1251 last1 = i1;
1252 i1 = PREV_INSN (i1);
1253 }
1254
1255 i2 = BB_END (bb2);
1256 if (onlyjump_p (i2)
1257 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1258 {
1259 last2 = i2;
1260 /* Count everything except an unconditional jump as an insn. */
1261 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1262 ninsns++;
1263 i2 = PREV_INSN (i2);
1264 }
1265
1266 while (true)
1267 {
1268 /* In the following example, we can replace all jumps to C by jumps to A.
1269
1270 This removes 4 duplicate insns.
1271 [bb A] insn1 [bb C] insn1
1272 insn2 insn2
1273 [bb B] insn3 insn3
1274 insn4 insn4
1275 jump_insn jump_insn
1276
1277 We could also replace all jumps to A by jumps to C, but that leaves B
1278 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1279 step, all jumps to B would be replaced with jumps to the middle of C,
1280 achieving the same result with more effort.
1281 So we allow only the first possibility, which means that we don't allow
1282 fallthru in the block that's being replaced. */
1283
1284 follow_fallthru = dir_p && dir != dir_forward;
1285 walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1286 if (did_fallthru)
1287 dir = dir_backward;
1288
1289 follow_fallthru = dir_p && dir != dir_backward;
1290 walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1291 if (did_fallthru)
1292 dir = dir_forward;
1293
1294 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1295 break;
1296
1297 dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1298 if (dir == dir_none || (!dir_p && dir != dir_both))
1299 break;
1300
1301 merge_memattrs (i1, i2);
1302
1303 /* Don't begin a cross-jump with a NOTE insn. */
1304 if (INSN_P (i1))
1305 {
1306 merge_notes (i1, i2);
1307
1308 afterlast1 = last1, afterlast2 = last2;
1309 last1 = i1, last2 = i2;
1310 afterlast_dir = last_dir;
1311 last_dir = dir;
1312 p1 = PATTERN (i1);
1313 if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
1314 ninsns++;
1315 }
1316
1317 i1 = PREV_INSN (i1);
1318 i2 = PREV_INSN (i2);
1319 }
1320
1321 #ifdef HAVE_cc0
1322 /* Don't allow the insn after a compare to be shared by
1323 cross-jumping unless the compare is also shared. */
1324 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1325 last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1326 #endif
1327
1328 /* Include preceding notes and labels in the cross-jump. One,
1329 this may bring us to the head of the blocks as requested above.
1330 Two, it keeps line number notes as matched as may be. */
1331 if (ninsns)
1332 {
1333 bb1 = BLOCK_FOR_INSN (last1);
1334 while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1335 last1 = PREV_INSN (last1);
1336
1337 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1338 last1 = PREV_INSN (last1);
1339
1340 bb2 = BLOCK_FOR_INSN (last2);
1341 while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1342 last2 = PREV_INSN (last2);
1343
1344 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1345 last2 = PREV_INSN (last2);
1346
1347 *f1 = last1;
1348 *f2 = last2;
1349 }
1350
1351 if (dir_p)
1352 *dir_p = last_dir;
1353 return ninsns;
1354 }
1355
1356 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1357 the head of the two blocks. Do not include jumps at the end.
1358 If STOP_AFTER is nonzero, stop after finding that many matching
1359 instructions. */
1360
1361 int
1362 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
1363 rtx *f2, int stop_after)
1364 {
1365 rtx i1, i2, last1, last2, beforelast1, beforelast2;
1366 int ninsns = 0;
1367 edge e;
1368 edge_iterator ei;
1369 int nehedges1 = 0, nehedges2 = 0;
1370
1371 FOR_EACH_EDGE (e, ei, bb1->succs)
1372 if (e->flags & EDGE_EH)
1373 nehedges1++;
1374 FOR_EACH_EDGE (e, ei, bb2->succs)
1375 if (e->flags & EDGE_EH)
1376 nehedges2++;
1377
1378 i1 = BB_HEAD (bb1);
1379 i2 = BB_HEAD (bb2);
1380 last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
1381
1382 while (true)
1383 {
1384 /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1385 while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1386 {
1387 if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1388 break;
1389 i1 = NEXT_INSN (i1);
1390 }
1391
1392 while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1393 {
1394 if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1395 break;
1396 i2 = NEXT_INSN (i2);
1397 }
1398
1399 if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1400 || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1401 break;
1402
1403 if (NOTE_P (i1) || NOTE_P (i2)
1404 || JUMP_P (i1) || JUMP_P (i2))
1405 break;
1406
1407 /* A sanity check to make sure we're not merging insns with different
1408 effects on EH. If only one of them ends a basic block, it shouldn't
1409 have an EH edge; if both end a basic block, there should be the same
1410 number of EH edges. */
1411 if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1412 && nehedges1 > 0)
1413 || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1414 && nehedges2 > 0)
1415 || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1416 && nehedges1 != nehedges2))
1417 break;
1418
1419 if (old_insns_match_p (0, i1, i2) != dir_both)
1420 break;
1421
1422 merge_memattrs (i1, i2);
1423
1424 /* Don't begin a cross-jump with a NOTE insn. */
1425 if (INSN_P (i1))
1426 {
1427 merge_notes (i1, i2);
1428
1429 beforelast1 = last1, beforelast2 = last2;
1430 last1 = i1, last2 = i2;
1431 ninsns++;
1432 }
1433
1434 if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1435 || (stop_after > 0 && ninsns == stop_after))
1436 break;
1437
1438 i1 = NEXT_INSN (i1);
1439 i2 = NEXT_INSN (i2);
1440 }
1441
1442 #ifdef HAVE_cc0
1443 /* Don't allow a compare to be shared by cross-jumping unless the insn
1444 after the compare is also shared. */
1445 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
1446 last1 = beforelast1, last2 = beforelast2, ninsns--;
1447 #endif
1448
1449 if (ninsns)
1450 {
1451 *f1 = last1;
1452 *f2 = last2;
1453 }
1454
1455 return ninsns;
1456 }
1457
1458 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1459 the branch instruction. This means that if we commonize the control
1460 flow before the end of the basic block, the semantics remain unchanged.
1461
1462 We may assume that there exists one edge with a common destination. */
1463
1464 static bool
1465 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1466 {
1467 int nehedges1 = 0, nehedges2 = 0;
1468 edge fallthru1 = 0, fallthru2 = 0;
1469 edge e1, e2;
1470 edge_iterator ei;
1471
1472 /* If BB1 has only one successor, we may be looking at either an
1473 unconditional jump, or a fake edge to exit. */
1474 if (single_succ_p (bb1)
1475 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1476 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1477 return (single_succ_p (bb2)
1478 && (single_succ_edge (bb2)->flags
1479 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1480 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1481
1482 /* Match conditional jumps - this may get tricky when fallthru and branch
1483 edges are crossed. */
1484 if (EDGE_COUNT (bb1->succs) == 2
1485 && any_condjump_p (BB_END (bb1))
1486 && onlyjump_p (BB_END (bb1)))
1487 {
1488 edge b1, f1, b2, f2;
1489 bool reverse, match;
1490 rtx set1, set2, cond1, cond2;
1491 enum rtx_code code1, code2;
1492
1493 if (EDGE_COUNT (bb2->succs) != 2
1494 || !any_condjump_p (BB_END (bb2))
1495 || !onlyjump_p (BB_END (bb2)))
1496 return false;
1497
1498 b1 = BRANCH_EDGE (bb1);
1499 b2 = BRANCH_EDGE (bb2);
1500 f1 = FALLTHRU_EDGE (bb1);
1501 f2 = FALLTHRU_EDGE (bb2);
1502
1503 /* Get around possible forwarders on fallthru edges. Other cases
1504 should be optimized out already. */
1505 if (FORWARDER_BLOCK_P (f1->dest))
1506 f1 = single_succ_edge (f1->dest);
1507
1508 if (FORWARDER_BLOCK_P (f2->dest))
1509 f2 = single_succ_edge (f2->dest);
1510
1511 /* To simplify use of this function, return false if there are
1512 unneeded forwarder blocks. These will get eliminated later
1513 during cleanup_cfg. */
1514 if (FORWARDER_BLOCK_P (f1->dest)
1515 || FORWARDER_BLOCK_P (f2->dest)
1516 || FORWARDER_BLOCK_P (b1->dest)
1517 || FORWARDER_BLOCK_P (b2->dest))
1518 return false;
1519
1520 if (f1->dest == f2->dest && b1->dest == b2->dest)
1521 reverse = false;
1522 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1523 reverse = true;
1524 else
1525 return false;
1526
1527 set1 = pc_set (BB_END (bb1));
1528 set2 = pc_set (BB_END (bb2));
1529 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1530 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1531 reverse = !reverse;
1532
1533 cond1 = XEXP (SET_SRC (set1), 0);
1534 cond2 = XEXP (SET_SRC (set2), 0);
1535 code1 = GET_CODE (cond1);
1536 if (reverse)
1537 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1538 else
1539 code2 = GET_CODE (cond2);
1540
1541 if (code2 == UNKNOWN)
1542 return false;
1543
1544 /* Verify codes and operands match. */
1545 match = ((code1 == code2
1546 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1547 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1548 || (code1 == swap_condition (code2)
1549 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1550 XEXP (cond2, 0))
1551 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1552 XEXP (cond2, 1))));
1553
1554 /* If we return true, we will join the blocks. Which means that
1555 we will only have one branch prediction bit to work with. Thus
1556 we require the existing branches to have probabilities that are
1557 roughly similar. */
1558 if (match
1559 && optimize_bb_for_speed_p (bb1)
1560 && optimize_bb_for_speed_p (bb2))
1561 {
1562 int prob2;
1563
1564 if (b1->dest == b2->dest)
1565 prob2 = b2->probability;
1566 else
1567 /* Do not use f2 probability as f2 may be forwarded. */
1568 prob2 = REG_BR_PROB_BASE - b2->probability;
1569
1570 /* Fail if the difference in probabilities is greater than 50%.
1571 This rules out two well-predicted branches with opposite
1572 outcomes. */
1573 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1574 {
1575 if (dump_file)
1576 fprintf (dump_file,
1577 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1578 bb1->index, bb2->index, b1->probability, prob2);
1579
1580 return false;
1581 }
1582 }
1583
1584 if (dump_file && match)
1585 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1586 bb1->index, bb2->index);
1587
1588 return match;
1589 }
1590
1591 /* Generic case - we are seeing a computed jump, table jump or trapping
1592 instruction. */
1593
1594 /* Check whether there are tablejumps in the end of BB1 and BB2.
1595 Return true if they are identical. */
1596 {
1597 rtx label1, label2;
1598 rtx table1, table2;
1599
1600 if (tablejump_p (BB_END (bb1), &label1, &table1)
1601 && tablejump_p (BB_END (bb2), &label2, &table2)
1602 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1603 {
1604 /* The labels should never be the same rtx. If they really are the same,
1605 the jump tables are the same too. So disable crossjumping of blocks BB1
1606 and BB2 because when deleting the common insns at the end of BB1
1607 by delete_basic_block () the jump table would be deleted too. */
1608 /* If LABEL2 is referenced in BB1->END, do not do anything
1609 because we would lose information when replacing
1610 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1611 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1612 {
1613 /* Set IDENTICAL to true when the tables are identical. */
1614 bool identical = false;
1615 rtx p1, p2;
1616
1617 p1 = PATTERN (table1);
1618 p2 = PATTERN (table2);
1619 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1620 {
1621 identical = true;
1622 }
1623 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1624 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1625 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1626 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1627 {
1628 int i;
1629
1630 identical = true;
1631 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1632 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1633 identical = false;
1634 }
1635
1636 if (identical)
1637 {
1638 replace_label_data rr;
1639 bool match;
1640
1641 /* Temporarily replace references to LABEL1 with LABEL2
1642 in BB1->END so that we could compare the instructions. */
1643 rr.r1 = label1;
1644 rr.r2 = label2;
1645 rr.update_label_nuses = false;
1646 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1647
1648 match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1649 == dir_both);
1650 if (dump_file && match)
1651 fprintf (dump_file,
1652 "Tablejumps in bb %i and %i match.\n",
1653 bb1->index, bb2->index);
1654
1655 /* Set the original label in BB1->END because when deleting
1656 a block whose end is a tablejump, the tablejump referenced
1657 from the instruction is deleted too. */
1658 rr.r1 = label2;
1659 rr.r2 = label1;
1660 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1661
1662 return match;
1663 }
1664 }
1665 return false;
1666 }
1667 }
1668
1669 /* First ensure that the instructions match. There may be many outgoing
1670 edges so this test is generally cheaper. */
1671 if (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) != dir_both)
1672 return false;
1673
1674 /* Search the outgoing edges, ensure that the counts do match, and find
1675 possible fallthru and exception handling edges since these need more
1676 validation. */
1677 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1678 return false;
1679
1680 FOR_EACH_EDGE (e1, ei, bb1->succs)
1681 {
1682 e2 = EDGE_SUCC (bb2, ei.index);
1683
1684 if (e1->flags & EDGE_EH)
1685 nehedges1++;
1686
1687 if (e2->flags & EDGE_EH)
1688 nehedges2++;
1689
1690 if (e1->flags & EDGE_FALLTHRU)
1691 fallthru1 = e1;
1692 if (e2->flags & EDGE_FALLTHRU)
1693 fallthru2 = e2;
1694 }
1695
1696 /* If number of edges of various types does not match, fail. */
1697 if (nehedges1 != nehedges2
1698 || (fallthru1 != 0) != (fallthru2 != 0))
1699 return false;
1700
1701 /* Fallthru edges must be forwarded to the same destination. */
1702 if (fallthru1)
1703 {
1704 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1705 ? single_succ (fallthru1->dest): fallthru1->dest);
1706 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1707 ? single_succ (fallthru2->dest): fallthru2->dest);
1708
1709 if (d1 != d2)
1710 return false;
1711 }
1712
1713 /* Ensure the same EH region. */
1714 {
1715 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1716 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1717
1718 if (!n1 && n2)
1719 return false;
1720
1721 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1722 return false;
1723 }
1724
1725 /* The same checks as in try_crossjump_to_edge. They are required for the
1726 RTL version of sequence abstraction. */
1727 FOR_EACH_EDGE (e1, ei, bb2->succs)
1728 {
1729 edge e2;
1730 edge_iterator ei;
1731 basic_block d1 = e1->dest;
1732
1733 if (FORWARDER_BLOCK_P (d1))
1734 d1 = EDGE_SUCC (d1, 0)->dest;
1735
1736 FOR_EACH_EDGE (e2, ei, bb1->succs)
1737 {
1738 basic_block d2 = e2->dest;
1739 if (FORWARDER_BLOCK_P (d2))
1740 d2 = EDGE_SUCC (d2, 0)->dest;
1741 if (d1 == d2)
1742 break;
1743 }
1744
1745 if (!e2)
1746 return false;
1747 }
1748
1749 return true;
1750 }
1751
1752 /* Return true if basic block BB has a label that must be preserved. */
1753
1754 static bool
1755 block_has_preserve_label (basic_block bb)
1756 {
1757 return (bb
1758 && block_label (bb)
1759 && LABEL_PRESERVE_P (block_label (bb)));
1760 }
1761
1762 /* E1 and E2 are edges with the same destination block. Search their
1763 predecessors for common code. If found, redirect control flow from
1764 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1765 or the other way around (dir_backward). DIR specifies the allowed
1766 replacement direction. */
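/* An illustrative outcome: if the last few insns of E1->SRC and E2->SRC
   match, E2->SRC is split before the matched tail and the duplicate insns
   in E1->SRC are removed by redirecting its control flow to the split
   point.  */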
1767
1768 static bool
1769 try_crossjump_to_edge (int mode, edge e1, edge e2,
1770 enum replace_direction dir)
1771 {
1772 int nmatch;
1773 basic_block src1 = e1->src, src2 = e2->src;
1774 basic_block redirect_to, redirect_from, to_remove;
1775 basic_block osrc1, osrc2, redirect_edges_to, tmp;
1776 rtx newpos1, newpos2;
1777 edge s;
1778 edge_iterator ei;
1779
1780 newpos1 = newpos2 = NULL_RTX;
1781
1782 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1783 to try this optimization.
1784
1785 Basic block partitioning may result in some jumps that appear to
1786 be optimizable (or blocks that appear to be mergeable), but which really
1787 must be left untouched (they are required to make it safely across
1788 partition boundaries). See the comments at the top of
1789 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1790
1791 if (flag_reorder_blocks_and_partition && reload_completed)
1792 return false;
1793
1794 /* Search backward through forwarder blocks. We don't need to worry
1795 about multiple entry or chained forwarders, as they will be optimized
1796 away. We do this to look past the unconditional jump following a
1797 conditional jump that is required due to the current CFG shape. */
1798 if (single_pred_p (src1)
1799 && FORWARDER_BLOCK_P (src1))
1800 e1 = single_pred_edge (src1), src1 = e1->src;
1801
1802 if (single_pred_p (src2)
1803 && FORWARDER_BLOCK_P (src2))
1804 e2 = single_pred_edge (src2), src2 = e2->src;
1805
1806 /* Nothing to do if we reach ENTRY, or a common source block. */
1807 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1808 return false;
1809 if (src1 == src2)
1810 return false;
1811
1812 /* Seeing more than one forwarder block would confuse us later... */
1813 if (FORWARDER_BLOCK_P (e1->dest)
1814 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1815 return false;
1816
1817 if (FORWARDER_BLOCK_P (e2->dest)
1818 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1819 return false;
1820
1821 /* Likewise with dead code (possibly newly created by the other optimizations
1822 of cfg_cleanup). */
1823 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1824 return false;
1825
1826 /* Look for the common insn sequence, part the first ... */
1827 if (!outgoing_edges_match (mode, src1, src2))
1828 return false;
1829
1830 /* ... and part the second. */
1831 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
1832
1833 osrc1 = src1;
1834 osrc2 = src2;
1835 if (newpos1 != NULL_RTX)
1836 src1 = BLOCK_FOR_INSN (newpos1);
1837 if (newpos2 != NULL_RTX)
1838 src2 = BLOCK_FOR_INSN (newpos2);
1839
1840 if (dir == dir_backward)
1841 {
1842 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1843 SWAP (basic_block, osrc1, osrc2);
1844 SWAP (basic_block, src1, src2);
1845 SWAP (edge, e1, e2);
1846 SWAP (rtx, newpos1, newpos2);
1847 #undef SWAP
1848 }
1849
1850 /* Don't proceed with the crossjump unless we found a sufficient number
1851 of matching instructions or the 'from' block was totally matched
1852 (such that its predecessors will hopefully be redirected and the
1853 block removed). */
1854 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1855 && (newpos1 != BB_HEAD (src1)))
1856 return false;
1857
1858 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1859 if (block_has_preserve_label (e1->dest)
1860 && (e1->flags & EDGE_ABNORMAL))
1861 return false;
1862
1863 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1864 will be deleted.
1865 If we have tablejumps at the end of SRC1 and SRC2,
1866 they have already been compared for equivalence in outgoing_edges_match (),
1867 so replace the references to TABLE1 by references to TABLE2. */
1868 {
1869 rtx label1, label2;
1870 rtx table1, table2;
1871
1872 if (tablejump_p (BB_END (osrc1), &label1, &table1)
1873 && tablejump_p (BB_END (osrc2), &label2, &table2)
1874 && label1 != label2)
1875 {
1876 replace_label_data rr;
1877 rtx insn;
1878
1879 /* Replace references to LABEL1 with LABEL2. */
1880 rr.r1 = label1;
1881 rr.r2 = label2;
1882 rr.update_label_nuses = true;
1883 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1884 {
1885 /* Do not replace the label in SRC1->END because when deleting
1886 a block whose end is a tablejump, the tablejump referenced
1887 from the instruction is deleted too. */
1888 if (insn != BB_END (osrc1))
1889 for_each_rtx (&insn, replace_label, &rr);
1890 }
1891 }
1892 }
1893
1894 /* Avoid splitting if possible. We must always split when SRC2 has
1895 EH predecessor edges, or we may end up with basic blocks with both
1896 normal and EH predecessor edges. */
1897 if (newpos2 == BB_HEAD (src2)
1898 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1899 redirect_to = src2;
1900 else
1901 {
1902 if (newpos2 == BB_HEAD (src2))
1903 {
1904 /* Skip possible basic block header. */
1905 if (LABEL_P (newpos2))
1906 newpos2 = NEXT_INSN (newpos2);
1907 while (DEBUG_INSN_P (newpos2))
1908 newpos2 = NEXT_INSN (newpos2);
1909 if (NOTE_P (newpos2))
1910 newpos2 = NEXT_INSN (newpos2);
1911 while (DEBUG_INSN_P (newpos2))
1912 newpos2 = NEXT_INSN (newpos2);
1913 }
1914
1915 if (dump_file)
1916 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1917 src2->index, nmatch);
1918 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1919 }
1920
1921 if (dump_file)
1922 fprintf (dump_file,
1923 "Cross jumping from bb %i to bb %i; %i common insns\n",
1924 src1->index, src2->index, nmatch);
1925
1926 /* We may have some registers visible through the block. */
1927 df_set_bb_dirty (redirect_to);
1928
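/* Recompute edge data on the block that ends with the actual jump:
   the split-off REDIRECT_TO if the matched sequence stayed within
   SRC2, and OSRC2 otherwise.  */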
1929 if (osrc2 == src2)
1930 redirect_edges_to = redirect_to;
1931 else
1932 redirect_edges_to = osrc2;
1933
1934 /* Recompute the frequencies and counts of outgoing edges. */
1935 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
1936 {
1937 edge s2;
1938 edge_iterator ei;
1939 basic_block d = s->dest;
1940
1941 if (FORWARDER_BLOCK_P (d))
1942 d = single_succ (d);
1943
1944 FOR_EACH_EDGE (s2, ei, src1->succs)
1945 {
1946 basic_block d2 = s2->dest;
1947 if (FORWARDER_BLOCK_P (d2))
1948 d2 = single_succ (d2);
1949 if (d == d2)
1950 break;
1951 }
1952
1953 s->count += s2->count;
1954
1955 /* Take care to update possible forwarder blocks. We verified
1956 that there is no more than one in the chain, so we can't run
1957 into an infinite loop. */
1958 if (FORWARDER_BLOCK_P (s->dest))
1959 {
1960 single_succ_edge (s->dest)->count += s2->count;
1961 s->dest->count += s2->count;
1962 s->dest->frequency += EDGE_FREQUENCY (s);
1963 }
1964
1965 if (FORWARDER_BLOCK_P (s2->dest))
1966 {
1967 single_succ_edge (s2->dest)->count -= s2->count;
1968 if (single_succ_edge (s2->dest)->count < 0)
1969 single_succ_edge (s2->dest)->count = 0;
1970 s2->dest->count -= s2->count;
1971 s2->dest->frequency -= EDGE_FREQUENCY (s);
1972 if (s2->dest->frequency < 0)
1973 s2->dest->frequency = 0;
1974 if (s2->dest->count < 0)
1975 s2->dest->count = 0;
1976 }
1977
1978 if (!redirect_edges_to->frequency && !src1->frequency)
1979 s->probability = (s->probability + s2->probability) / 2;
1980 else
1981 s->probability
1982 = ((s->probability * redirect_edges_to->frequency +
1983 s2->probability * src1->frequency)
1984 / (redirect_edges_to->frequency + src1->frequency));
1985 }
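/* A worked instance of the weighted average above, in percent for
   readability (the code itself works in REG_BR_PROB_BASE units):
   merging an edge of probability 40% from a block with frequency 300
   with a matching edge of probability 80% from a block with frequency
   100 gives (40 * 300 + 80 * 100) / (300 + 100) = 50%.  */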
1986
1987 /* Adjust count and frequency for the block, walking the fallthru
1988 chain from REDIRECT_TO to REDIRECT_EDGES_TO. An earlier jump threading
1989 pass may have left the profile in an inconsistent state (see
1990 update_bb_profile_for_threading) so we must be prepared for overflows. */
1991 tmp = redirect_to;
1992 do
1993 {
1994 tmp->count += src1->count;
1995 tmp->frequency += src1->frequency;
1996 if (tmp->frequency > BB_FREQ_MAX)
1997 tmp->frequency = BB_FREQ_MAX;
1998 if (tmp == redirect_edges_to)
1999 break;
2000 tmp = find_fallthru_edge (tmp->succs)->dest;
2001 }
2002 while (true);
2003 update_br_prob_note (redirect_edges_to);
2004
2005 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2006
2007 /* Skip possible basic block header. */
2008 if (LABEL_P (newpos1))
2009 newpos1 = NEXT_INSN (newpos1);
2010
2011 while (DEBUG_INSN_P (newpos1))
2012 newpos1 = NEXT_INSN (newpos1);
2013
2014 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2015 newpos1 = NEXT_INSN (newpos1);
2016
2017 while (DEBUG_INSN_P (newpos1))
2018 newpos1 = NEXT_INSN (newpos1);
2019
2020 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2021 to_remove = single_succ (redirect_from);
2022
2023 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2024 delete_basic_block (to_remove);
2025
2026 update_forwarder_flag (redirect_from);
2027 if (redirect_to != src2)
2028 update_forwarder_flag (src2);
2029
2030 return true;
2031 }
2032
2033 /* Search the predecessors of BB for common insn sequences. When found,
2034 share code between them by redirecting control flow. Return true if
2035 any changes were made. */
2036
2037 static bool
2038 try_crossjump_bb (int mode, basic_block bb)
2039 {
2040 edge e, e2, fallthru;
2041 bool changed;
2042 unsigned max, ix, ix2;
2043
2044 /* Nothing to do if there are not at least two incoming edges. */
2045 if (EDGE_COUNT (bb->preds) < 2)
2046 return false;
2047
2048 /* When optimizing this block for size, don't crossjump if it ends
2049 in a computed jump. */
2050 if (optimize_bb_for_size_p (bb)
2051 && bb != EXIT_BLOCK_PTR
2052 && computed_jump_p (BB_END (bb)))
2053 return false;
2054
2055 /* If we are partitioning hot/cold basic blocks, we don't want to
2056 mess up unconditional or indirect jumps that cross between hot
2057 and cold sections.
2058
2059 Basic block partitioning may result in some jumps that appear to
2060 be optimizable (or blocks that appear to be mergeable), but which really
2061 must be left untouched (they are required to make it safely across
2062 partition boundaries). See the comments at the top of
2063 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2064
2065 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2066 BB_PARTITION (EDGE_PRED (bb, 1)->src)
2067 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2068 return false;
2069
2070 /* It is always cheapest to redirect a block that ends in a branch to
2071 a block that falls through into BB, as that adds no branches to the
2072 program. We'll try that combination first. */
2073 fallthru = NULL;
2074 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2075
2076 if (EDGE_COUNT (bb->preds) > max)
2077 return false;
2078
2079 fallthru = find_fallthru_edge (bb->preds);
2080
2081 changed = false;
2082 for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2083 {
2084 e = EDGE_PRED (bb, ix);
2085 ix++;
2086
2087 /* As noted above, first try with the fallthru predecessor (or, in
2088 cfglayout mode, with a fallthru predecessor). */
2089 if (fallthru)
2090 {
2091 /* Don't combine the fallthru edge into anything else.
2092 If there is a match, we'll do it the other way around. */
2093 if (e == fallthru)
2094 continue;
2095 /* If nothing changed since the last attempt, there is nothing
2096 we can do. */
2097 if (!first_pass
2098 && !((e->src->flags & BB_MODIFIED)
2099 || (fallthru->src->flags & BB_MODIFIED)))
2100 continue;
2101
2102 if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2103 {
2104 changed = true;
2105 ix = 0;
2106 continue;
2107 }
2108 }
2109
2110 /* Non-obvious work limiting check: Recognize that we're going
2111 to call try_crossjump_bb on every basic block. So if we have
2112 two blocks with lots of outgoing edges (a switch) and they
2113 share lots of common destinations, then we would do the
2114 cross-jump check once for each common destination.
2115
2116 Now, if the blocks actually are cross-jump candidates, then
2117 all of their destinations will be shared, which means that
2118 we only need to check them for cross-jump candidacy once. We
2119 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2120 choosing to do the check only when the edge in question is the
2121 first successor of its source block A. */
2122 if (EDGE_SUCC (e->src, 0) != e)
2123 continue;
2124
2125 for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2126 {
2127 e2 = EDGE_PRED (bb, ix2);
2128
2129 if (e2 == e)
2130 continue;
2131
2132 /* We've already checked the fallthru edge above. */
2133 if (e2 == fallthru)
2134 continue;
2135
2136 /* The "first successor" check above only prevents multiple
2137 checks of crossjump(A,B). In order to prevent redundant
2138 checks of crossjump(B,A), require that A be the block
2139 with the lowest index. */
2140 if (e->src->index > e2->src->index)
2141 continue;
2142
2143 /* If nothing changed since the last attempt, there is nothing
2144 we can do. */
2145 if (!first_pass
2146 && !((e->src->flags & BB_MODIFIED)
2147 || (e2->src->flags & BB_MODIFIED)))
2148 continue;
2149
2150 /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2151 direction. */
2152 if (try_crossjump_to_edge (mode, e, e2, dir_both))
2153 {
2154 changed = true;
2155 ix = 0;
2156 break;
2157 }
2158 }
2159 }
2160
2161 if (changed)
2162 crossjumps_occured = true;
2163
2164 return changed;
2165 }
2166
2167 /* Search the successors of BB for common insn sequences. When found,
2168 share code between them by moving it across the basic block
2169 boundary. Return true if any changes were made. */
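/* A sketch of the transformation (hypothetical code, not from a
   testcase): when every successor of BB begins with the same insns

	BB: if (cond) goto B;  else fall through to C
	B: x = a + b; ...		C: x = a + b; ...

   the common head "x = a + b" may be moved up into BB ahead of the
   conditional jump, provided the liveness checks below allow it.  */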
2170
2171 static bool
2172 try_head_merge_bb (basic_block bb)
2173 {
2174 basic_block final_dest_bb = NULL;
2175 int max_match = INT_MAX;
2176 edge e0;
2177 rtx *headptr, *currptr, *nextptr;
2178 bool changed, moveall;
2179 unsigned ix;
2180 rtx e0_last_head, cond, move_before;
2181 unsigned nedges = EDGE_COUNT (bb->succs);
2182 rtx jump = BB_END (bb);
2183 regset live, live_union;
2184
2185 /* Nothing to do if there are not at least two outgoing edges. */
2186 if (nedges < 2)
2187 return false;
2188
2189 /* When optimizing this block for size, don't crossjump if it ends
2190 in a computed jump. */
2191 if (optimize_bb_for_size_p (bb)
2192 && bb != EXIT_BLOCK_PTR
2193 && computed_jump_p (BB_END (bb)))
2194 return false;
2195
2196 cond = get_condition (jump, &move_before, true, false);
2197 if (cond == NULL_RTX)
2198 move_before = jump;
2199
2200 for (ix = 0; ix < nedges; ix++)
2201 if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
2202 return false;
2203
2204 for (ix = 0; ix < nedges; ix++)
2205 {
2206 edge e = EDGE_SUCC (bb, ix);
2207 basic_block other_bb = e->dest;
2208
2209 if (df_get_bb_dirty (other_bb))
2210 {
2211 block_was_dirty = true;
2212 return false;
2213 }
2214
2215 if (e->flags & EDGE_ABNORMAL)
2216 return false;
2217
2218 /* Normally, all destination blocks must only be reachable from this
2219 block, i.e. they must have one incoming edge.
2220
2221 There is one special case we can handle, that of multiple consecutive
2222 jumps where the first jumps to one of the targets of the second jump.
2223 This happens frequently in switch statements for default labels.
2224 The structure is as follows:
2225 FINAL_DEST_BB
2226 ....
2227 if (cond) jump A;
2228 fall through
2229 BB
2230 jump with targets A, B, C, D...
2231 A
2232 has two incoming edges, from FINAL_DEST_BB and BB
2233
2234 In this case, we can try to move the insns through BB and into
2235 FINAL_DEST_BB. */
2236 if (EDGE_COUNT (other_bb->preds) != 1)
2237 {
2238 edge incoming_edge, incoming_bb_other_edge;
2239 edge_iterator ei;
2240
2241 if (final_dest_bb != NULL
2242 || EDGE_COUNT (other_bb->preds) != 2)
2243 return false;
2244
2245 /* We must be able to move the insns across the whole block. */
2246 move_before = BB_HEAD (bb);
2247 while (!NONDEBUG_INSN_P (move_before))
2248 move_before = NEXT_INSN (move_before);
2249
2250 if (EDGE_COUNT (bb->preds) != 1)
2251 return false;
2252 incoming_edge = EDGE_PRED (bb, 0);
2253 final_dest_bb = incoming_edge->src;
2254 if (EDGE_COUNT (final_dest_bb->succs) != 2)
2255 return false;
2256 FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2257 if (incoming_bb_other_edge != incoming_edge)
2258 break;
2259 if (incoming_bb_other_edge->dest != other_bb)
2260 return false;
2261 }
2262 }
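/* At this point either every successor of BB has a single
   predecessor, or FINAL_DEST_BB is the block the common insns must
   be moved into, through BB.  */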
2263
2264 e0 = EDGE_SUCC (bb, 0);
2265 e0_last_head = NULL_RTX;
2266 changed = false;
2267
2268 for (ix = 1; ix < nedges; ix++)
2269 {
2270 edge e = EDGE_SUCC (bb, ix);
2271 rtx e0_last, e_last;
2272 int nmatch;
2273
2274 nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2275 &e0_last, &e_last, 0);
2276 if (nmatch == 0)
2277 return false;
2278
2279 if (nmatch < max_match)
2280 {
2281 max_match = nmatch;
2282 e0_last_head = e0_last;
2283 }
2284 }
2285
2286 /* If we matched an entire block, we probably have to avoid moving the
2287 last insn. */
2288 if (max_match > 0
2289 && e0_last_head == BB_END (e0->dest)
2290 && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2291 || control_flow_insn_p (e0_last_head)))
2292 {
2293 max_match--;
2294 if (max_match == 0)
2295 return false;
2296 do
2297 e0_last_head = prev_real_insn (e0_last_head);
2298 while (DEBUG_INSN_P (e0_last_head));
2299 }
2300
2301 if (max_match == 0)
2302 return false;
2303
2304 /* We must compute the union of the live registers at each of the end points. */
2305 live = BITMAP_ALLOC (NULL);
2306 live_union = BITMAP_ALLOC (NULL);
2307
2308 currptr = XNEWVEC (rtx, nedges);
2309 headptr = XNEWVEC (rtx, nedges);
2310 nextptr = XNEWVEC (rtx, nedges);
2311
2312 for (ix = 0; ix < nedges; ix++)
2313 {
2314 int j;
2315 basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2316 rtx head = BB_HEAD (merge_bb);
2317
2318 while (!NONDEBUG_INSN_P (head))
2319 head = NEXT_INSN (head);
2320 headptr[ix] = head;
2321 currptr[ix] = head;
2322
2323 /* Compute the end point and the live information at it. */
2324 for (j = 1; j < max_match; j++)
2325 do
2326 head = NEXT_INSN (head);
2327 while (!NONDEBUG_INSN_P (head));
2328 simulate_backwards_to_point (merge_bb, live, head);
2329 IOR_REG_SET (live_union, live);
2330 }
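/* LIVE_UNION now holds, over all successors, the union of the
   registers live at the end of the matched head; the motion checks
   below consult it via can_move_insns_across.  */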
2331
2332 /* If we're moving across two blocks, verify the validity of the
2333 first move, then adjust the target and let the loop below deal
2334 with the final move. */
2335 if (final_dest_bb != NULL)
2336 {
2337 rtx move_upto;
2338
2339 moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2340 jump, e0->dest, live_union,
2341 NULL, &move_upto);
2342 if (!moveall)
2343 {
2344 if (move_upto == NULL_RTX)
2345 goto out;
2346
2347 while (e0_last_head != move_upto)
2348 {
2349 df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2350 live_union);
2351 e0_last_head = PREV_INSN (e0_last_head);
2352 }
2353 }
2354 if (e0_last_head == NULL_RTX)
2355 goto out;
2356
2357 jump = BB_END (final_dest_bb);
2358 cond = get_condition (jump, &move_before, true, false);
2359 if (cond == NULL_RTX)
2360 move_before = jump;
2361 }
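/* From here on, JUMP and MOVE_BEFORE refer to FINAL_DEST_BB when we
   are moving the insns across two blocks, and to BB otherwise.  */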
2362
2363 do
2364 {
2365 rtx move_upto;
2366 moveall = can_move_insns_across (currptr[0], e0_last_head,
2367 move_before, jump, e0->dest, live_union,
2368 NULL, &move_upto);
2369 if (!moveall && move_upto == NULL_RTX)
2370 {
2371 if (jump == move_before)
2372 break;
2373
2374 /* Try again, using a different insertion point. */
2375 move_before = jump;
2376
2377 #ifdef HAVE_cc0
2378 /* Don't try moving before a cc0 user, as that may invalidate
2379 the cc0. */
2380 if (reg_mentioned_p (cc0_rtx, jump))
2381 break;
2382 #endif
2383
2384 continue;
2385 }
2386
2387 if (final_dest_bb && !moveall)
2388 /* We haven't checked whether a partial move would be OK for the first
2389 move, so we have to fail this case. */
2390 break;
2391
2392 changed = true;
2393 for (;;)
2394 {
2395 if (currptr[0] == move_upto)
2396 break;
2397 for (ix = 0; ix < nedges; ix++)
2398 {
2399 rtx curr = currptr[ix];
2400 do
2401 curr = NEXT_INSN (curr);
2402 while (!NONDEBUG_INSN_P (curr));
2403 currptr[ix] = curr;
2404 }
2405 }
2406
2407 /* If we can't currently move all of the identical insns, remember
2408 each insn after the range that we'll merge. */
2409 if (!moveall)
2410 for (ix = 0; ix < nedges; ix++)
2411 {
2412 rtx curr = currptr[ix];
2413 do
2414 curr = NEXT_INSN (curr);
2415 while (!NONDEBUG_INSN_P (curr));
2416 nextptr[ix] = curr;
2417 }
2418
2419 reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2420 df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2421 if (final_dest_bb != NULL)
2422 df_set_bb_dirty (final_dest_bb);
2423 df_set_bb_dirty (bb);
2424 for (ix = 1; ix < nedges; ix++)
2425 {
2426 df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2427 delete_insn_chain (headptr[ix], currptr[ix], false);
2428 }
2429 if (!moveall)
2430 {
2431 if (jump == move_before)
2432 break;
2433
2434 /* For the unmerged insns, try a different insertion point. */
2435 move_before = jump;
2436
2437 #ifdef HAVE_cc0
2438 /* Don't try moving before a cc0 user, as that may invalidate
2439 the cc0. */
2440 if (reg_mentioned_p (cc0_rtx, jump))
2441 break;
2442 #endif
2443
2444 for (ix = 0; ix < nedges; ix++)
2445 currptr[ix] = headptr[ix] = nextptr[ix];
2446 }
2447 }
2448 while (!moveall);
2449
2450 out:
2451 free (currptr);
2452 free (headptr);
2453 free (nextptr);
2454
2455 crossjumps_occured |= changed;
2456
2457 return changed;
2458 }
2459
2460 /* Return true if BB contains just a bb note, or a bb note followed
2461 only by DEBUG_INSNs. */
2462
2463 static bool
2464 trivially_empty_bb_p (basic_block bb)
2465 {
2466 rtx insn = BB_END (bb);
2467
2468 while (1)
2469 {
2470 if (insn == BB_HEAD (bb))
2471 return true;
2472 if (!DEBUG_INSN_P (insn))
2473 return false;
2474 insn = PREV_INSN (insn);
2475 }
2476 }
2477
2478 /* Do simple CFG optimizations - basic block merging, simplification of
2479 jump instructions, etc. Return nonzero if changes were made. */
2480
2481 static bool
2482 try_optimize_cfg (int mode)
2483 {
2484 bool changed_overall = false;
2485 bool changed;
2486 int iterations = 0;
2487 basic_block bb, b, next;
2488
2489 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2490 clear_bb_flags ();
2491
2492 crossjumps_occured = false;
2493
2494 FOR_EACH_BB (bb)
2495 update_forwarder_flag (bb);
2496
2497 if (! targetm.cannot_modify_jumps_p ())
2498 {
2499 first_pass = true;
2500 /* Attempt to merge blocks as made possible by edge removal. If
2501 a block has only one successor, and the successor has only
2502 one predecessor, they may be combined. */
2503 do
2504 {
2505 block_was_dirty = false;
2506 changed = false;
2507 iterations++;
2508
2509 if (dump_file)
2510 fprintf (dump_file,
2511 "\n\ntry_optimize_cfg iteration %i\n\n",
2512 iterations);
2513
2514 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
2515 {
2516 basic_block c;
2517 edge s;
2518 bool changed_here = false;
2519
2520 /* Delete trivially dead basic blocks. These are either
2521 blocks with no predecessors, or empty blocks with no
2522 successors. However, if an empty block with no
2523 successors is the successor of the ENTRY_BLOCK, it is
2524 kept. This ensures that the ENTRY_BLOCK will have a
2525 successor, which is a precondition for many RTL
2526 passes. Empty blocks may result from expanding
2527 __builtin_unreachable (). */
2528 if (EDGE_COUNT (b->preds) == 0
2529 || (EDGE_COUNT (b->succs) == 0
2530 && trivially_empty_bb_p (b)
2531 && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
2532 {
2533 c = b->prev_bb;
2534 if (EDGE_COUNT (b->preds) > 0)
2535 {
2536 edge e;
2537 edge_iterator ei;
2538
2539 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2540 {
2541 if (b->il.rtl->footer
2542 && BARRIER_P (b->il.rtl->footer))
2543 FOR_EACH_EDGE (e, ei, b->preds)
2544 if ((e->flags & EDGE_FALLTHRU)
2545 && e->src->il.rtl->footer == NULL)
2546 {
2547 if (b->il.rtl->footer)
2548 {
2549 e->src->il.rtl->footer = b->il.rtl->footer;
2550 b->il.rtl->footer = NULL;
2551 }
2552 else
2553 {
2554 start_sequence ();
2555 e->src->il.rtl->footer = emit_barrier ();
2556 end_sequence ();
2557 }
2558 }
2559 }
2560 else
2561 {
2562 rtx last = get_last_bb_insn (b);
2563 if (last && BARRIER_P (last))
2564 FOR_EACH_EDGE (e, ei, b->preds)
2565 if ((e->flags & EDGE_FALLTHRU))
2566 emit_barrier_after (BB_END (e->src));
2567 }
2568 }
2569 delete_basic_block (b);
2570 changed = true;
2571 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
2572 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
2573 continue;
2574 }
2575
2576 /* Remove code labels no longer used. */
2577 if (single_pred_p (b)
2578 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2579 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2580 && LABEL_P (BB_HEAD (b))
2581 /* If the previous block ends with a branch to this
2582 block, we can't delete the label. Normally this
2583 is a condjump that is yet to be simplified, but
2584 if CASE_DROPS_THRU, this can be a tablejump with
2585 some element going to the same place as the
2586 default (fallthru). */
2587 && (single_pred (b) == ENTRY_BLOCK_PTR
2588 || !JUMP_P (BB_END (single_pred (b)))
2589 || ! label_is_jump_target_p (BB_HEAD (b),
2590 BB_END (single_pred (b)))))
2591 {
2592 rtx label = BB_HEAD (b);
2593
2594 delete_insn_chain (label, label, false);
2595 /* If the case label is undeletable, move it after the
2596 BASIC_BLOCK note. */
2597 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2598 {
2599 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2600
2601 reorder_insns_nobb (label, label, bb_note);
2602 BB_HEAD (b) = bb_note;
2603 if (BB_END (b) == bb_note)
2604 BB_END (b) = label;
2605 }
2606 if (dump_file)
2607 fprintf (dump_file, "Deleted label in block %i.\n",
2608 b->index);
2609 }
2610
2611 /* If we fall through an empty block, we can remove it. */
2612 if (!(mode & CLEANUP_CFGLAYOUT)
2613 && single_pred_p (b)
2614 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2615 && !LABEL_P (BB_HEAD (b))
2616 && FORWARDER_BLOCK_P (b)
2617 /* Note that forwarder_block_p true ensures that
2618 there is a successor for this block. */
2619 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2620 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2621 {
2622 if (dump_file)
2623 fprintf (dump_file,
2624 "Deleting fallthru block %i.\n",
2625 b->index);
2626
2627 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2628 redirect_edge_succ_nodup (single_pred_edge (b),
2629 single_succ (b));
2630 delete_basic_block (b);
2631 changed = true;
2632 b = c;
2633 continue;
2634 }
2635
2636 /* Merge B with its single successor, if any. */
2637 if (single_succ_p (b)
2638 && (s = single_succ_edge (b))
2639 && !(s->flags & EDGE_COMPLEX)
2640 && (c = s->dest) != EXIT_BLOCK_PTR
2641 && single_pred_p (c)
2642 && b != c)
2643 {
2644 /* When not in cfg_layout mode use code aware of reordering
2645 INSNs. This code possibly creates new basic blocks, so it
2646 does not fit the merge_blocks interface and is kept here in
2647 the hope that it will become useless once more of the compiler
2648 is transformed to use cfg_layout mode. */
2649
2650 if ((mode & CLEANUP_CFGLAYOUT)
2651 && can_merge_blocks_p (b, c))
2652 {
2653 merge_blocks (b, c);
2654 update_forwarder_flag (b);
2655 changed_here = true;
2656 }
2657 else if (!(mode & CLEANUP_CFGLAYOUT)
2658 /* If the jump insn has side effects,
2659 we can't kill the edge. */
2660 && (!JUMP_P (BB_END (b))
2661 || (reload_completed
2662 ? simplejump_p (BB_END (b))
2663 : (onlyjump_p (BB_END (b))
2664 && !tablejump_p (BB_END (b),
2665 NULL, NULL))))
2666 && (next = merge_blocks_move (s, b, c, mode)))
2667 {
2668 b = next;
2669 changed_here = true;
2670 }
2671 }
2672
2673 /* Simplify branch over branch. */
2674 if ((mode & CLEANUP_EXPENSIVE)
2675 && !(mode & CLEANUP_CFGLAYOUT)
2676 && try_simplify_condjump (b))
2677 changed_here = true;
2678
2679 /* If B has a single outgoing edge, but uses a
2680 non-trivial jump instruction without side-effects, we
2681 can either delete the jump entirely, or replace it
2682 with a simple unconditional jump. */
2683 if (single_succ_p (b)
2684 && single_succ (b) != EXIT_BLOCK_PTR
2685 && onlyjump_p (BB_END (b))
2686 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2687 && try_redirect_by_replacing_jump (single_succ_edge (b),
2688 single_succ (b),
2689 (mode & CLEANUP_CFGLAYOUT) != 0))
2690 {
2691 update_forwarder_flag (b);
2692 changed_here = true;
2693 }
2694
2695 /* Simplify branch to branch. */
2696 if (try_forward_edges (mode, b))
2697 {
2698 update_forwarder_flag (b);
2699 changed_here = true;
2700 }
2701
2702 /* Look for shared code between blocks. */
2703 if ((mode & CLEANUP_CROSSJUMP)
2704 && try_crossjump_bb (mode, b))
2705 changed_here = true;
2706
2707 if ((mode & CLEANUP_CROSSJUMP)
2708 /* This can lengthen register lifetimes. Do it only after
2709 reload. */
2710 && reload_completed
2711 && try_head_merge_bb (b))
2712 changed_here = true;
2713
2714 /* Don't get confused by the index shift caused by
2715 deleting blocks. */
2716 if (!changed_here)
2717 b = b->next_bb;
2718 else
2719 changed = true;
2720 }
2721
2722 if ((mode & CLEANUP_CROSSJUMP)
2723 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2724 changed = true;
2725
2726 if (block_was_dirty)
2727 {
2728 /* This should only be set by head-merging. */
2729 gcc_assert (mode & CLEANUP_CROSSJUMP);
2730 df_analyze ();
2731 }
2732
2733 #ifdef ENABLE_CHECKING
2734 if (changed)
2735 verify_flow_info ();
2736 #endif
2737
2738 changed_overall |= changed;
2739 first_pass = false;
2740 }
2741 while (changed);
2742 }
2743
2744 FOR_ALL_BB (b)
2745 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2746
2747 return changed_overall;
2748 }
2749 \f
2750 /* Delete all unreachable basic blocks. */
2751
2752 bool
2753 delete_unreachable_blocks (void)
2754 {
2755 bool changed = false;
2756 basic_block b, prev_bb;
2757
2758 find_unreachable_blocks ();
2759
2760 /* When we're in GIMPLE mode and there may be debug insns, we should
2761 delete blocks in reverse dominator order, so as to get a chance
2762 to substitute all released DEFs into debug stmts. If we don't
2763 have dominator information, walking blocks backward still gives
2764 us a better chance of retaining most debug information than
2765 walking forward would. */
2766 if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2767 && dom_info_available_p (CDI_DOMINATORS))
2768 {
2769 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2770 {
2771 prev_bb = b->prev_bb;
2772
2773 if (!(b->flags & BB_REACHABLE))
2774 {
2775 /* Speed up the removal of blocks that don't dominate
2776 others. Walking backwards, this should be the common
2777 case. */
2778 if (!first_dom_son (CDI_DOMINATORS, b))
2779 delete_basic_block (b);
2780 else
2781 {
2782 VEC (basic_block, heap) *h
2783 = get_all_dominated_blocks (CDI_DOMINATORS, b);
2784
2785 while (VEC_length (basic_block, h))
2786 {
2787 b = VEC_pop (basic_block, h);
2788
2789 prev_bb = b->prev_bb;
2790
2791 gcc_assert (!(b->flags & BB_REACHABLE));
2792
2793 delete_basic_block (b);
2794 }
2795
2796 VEC_free (basic_block, heap, h);
2797 }
2798
2799 changed = true;
2800 }
2801 }
2802 }
2803 else
2804 {
2805 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2806 {
2807 prev_bb = b->prev_bb;
2808
2809 if (!(b->flags & BB_REACHABLE))
2810 {
2811 delete_basic_block (b);
2812 changed = true;
2813 }
2814 }
2815 }
2816
2817 if (changed)
2818 tidy_fallthru_edges ();
2819 return changed;
2820 }
2821
2822 /* Delete any jump tables never referenced. We can't delete them at the
2823 time of removing the tablejump insn, as they are referenced by the
2824 preceding insns computing the destination, so we delay deletion and
2825 garbage-collect them once life information is computed. */
2826 void
2827 delete_dead_jumptables (void)
2828 {
2829 basic_block bb;
2830
2831 /* A dead jump table does not belong to any basic block. Scan insns
2832 between two adjacent basic blocks. */
2833 FOR_EACH_BB (bb)
2834 {
2835 rtx insn, next;
2836
2837 for (insn = NEXT_INSN (BB_END (bb));
2838 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2839 insn = next)
2840 {
2841 next = NEXT_INSN (insn);
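/* The label before a dead jump table has no real users: its use
   count is accounted for entirely by the preserve flag, if set.  */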
2842 if (LABEL_P (insn)
2843 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2844 && JUMP_TABLE_DATA_P (next))
2845 {
2846 rtx label = insn, jump = next;
2847
2848 if (dump_file)
2849 fprintf (dump_file, "Dead jumptable %i removed\n",
2850 INSN_UID (insn));
2851
2852 next = NEXT_INSN (next);
2853 delete_insn (jump);
2854 delete_insn (label);
2855 }
2856 }
2857 }
2858 }
2859
2860 \f
2861 /* Tidy the CFG by deleting unreachable code and whatnot. */
2862
2863 bool
2864 cleanup_cfg (int mode)
2865 {
2866 bool changed = false;
2867
2868 /* Set the cfglayout mode flag here. We could update all the callers
2869 but that is just inconvenient, especially given that we eventually
2870 want to have cfglayout mode as the default. */
2871 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2872 mode |= CLEANUP_CFGLAYOUT;
2873
2874 timevar_push (TV_CLEANUP_CFG);
2875 if (delete_unreachable_blocks ())
2876 {
2877 changed = true;
2878 /* We've possibly created trivially dead code. Clean it up right
2879 now to introduce more opportunities for try_optimize_cfg. */
2880 if (!(mode & (CLEANUP_NO_INSN_DEL))
2881 && !reload_completed)
2882 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2883 }
2884
2885 compact_blocks ();
2886
2887 /* To tail-merge blocks ending in the same noreturn function (e.g.
2888 a call to abort) we have to insert fake edges to exit. Do this
2889 here once. The fake edges do not interfere with any other CFG
2890 cleanups. */
2891 if (mode & CLEANUP_CROSSJUMP)
2892 add_noreturn_fake_exit_edges ();
2893
2894 if (!dbg_cnt (cfg_cleanup))
2895 return changed;
2896
2897 while (try_optimize_cfg (mode))
2898 {
2899 delete_unreachable_blocks (), changed = true;
2900 if (!(mode & CLEANUP_NO_INSN_DEL))
2901 {
2902 /* Try to remove some trivially dead insns when doing an expensive
2903 cleanup. But delete_trivially_dead_insns doesn't work after
2904 reload (it only handles pseudos) and run_fast_dce is too costly
2905 to run in every iteration.
2906
2907 For effective cross jumping, we really want to run a fast DCE to
2908 clean up any dead conditions, or they get in the way of performing
2909 useful tail merges.
2910
2911 Other transformations in cleanup_cfg are not so sensitive to dead
2912 code, so delete_trivially_dead_insns or even doing nothing at all
2913 is good enough. */
2914 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2915 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2916 break;
2917 if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
2918 run_fast_dce ();
2919 }
2920 else
2921 break;
2922 }
2923
2924 if (mode & CLEANUP_CROSSJUMP)
2925 remove_fake_exit_edges ();
2926
2927 /* Don't call delete_dead_jumptables in cfglayout mode, because
2928 that function assumes that jump tables are in the insns stream.
2929 But we also don't _have_ to delete dead jumptables in cfglayout
2930 mode because we shouldn't even be looking at things that are
2931 not in a basic block. Dead jumptables are cleaned up when
2932 going out of cfglayout mode. */
2933 if (!(mode & CLEANUP_CFGLAYOUT))
2934 delete_dead_jumptables ();
2935
2936 timevar_pop (TV_CLEANUP_CFG);
2937
2938 return changed;
2939 }
2940 \f
2941 static unsigned int
2942 rest_of_handle_jump (void)
2943 {
2944 if (crtl->tail_call_emit)
2945 fixup_tail_calls ();
2946 return 0;
2947 }
2948
2949 struct rtl_opt_pass pass_jump =
2950 {
2951 {
2952 RTL_PASS,
2953 "sibling", /* name */
2954 NULL, /* gate */
2955 rest_of_handle_jump, /* execute */
2956 NULL, /* sub */
2957 NULL, /* next */
2958 0, /* static_pass_number */
2959 TV_JUMP, /* tv_id */
2960 0, /* properties_required */
2961 0, /* properties_provided */
2962 0, /* properties_destroyed */
2963 TODO_ggc_collect, /* todo_flags_start */
2964 TODO_verify_flow, /* todo_flags_finish */
2965 }
2966 };
2967
2968
2969 static unsigned int
2970 rest_of_handle_jump2 (void)
2971 {
2972 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2973 if (dump_file)
2974 dump_flow_info (dump_file, dump_flags);
2975 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2976 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2977 return 0;
2978 }
2979
2980
2981 struct rtl_opt_pass pass_jump2 =
2982 {
2983 {
2984 RTL_PASS,
2985 "jump", /* name */
2986 NULL, /* gate */
2987 rest_of_handle_jump2, /* execute */
2988 NULL, /* sub */
2989 NULL, /* next */
2990 0, /* static_pass_number */
2991 TV_JUMP, /* tv_id */
2992 0, /* properties_required */
2993 0, /* properties_provided */
2994 0, /* properties_destroyed */
2995 TODO_ggc_collect, /* todo_flags_start */
2996 TODO_verify_rtl_sharing, /* todo_flags_finish */
2997 }
2998 };