[gcc.git] / gcc / cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to the
27 block's successor. Simplification of the branch instruction is performed
28 by the underlying infrastructure, so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
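
/* For illustration only: a pass would typically invoke the entry point with
   some combination of the CLEANUP_* mode flags used throughout this file,
   for example

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   to request the expensive transformations together with cross jumping;
   the exact combination depends on the calling pass.  */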
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
54
55 /* cleanup_cfg maintains the following flags for each basic block. */
56
57 enum bb_flags
58 {
59 /* Set if BB is a forwarder block, to avoid repeated
60 forwarder_block_p calls. */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
63 };
64
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
70
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
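
/* Roughly speaking, a forwarder block contains no active insns of its own
   (at most a label, notes and an unconditional jump) and has exactly one
   successor, so any edge entering it can simply be redirected to that
   successor.  The edge forwarding code below relies on this property.  */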
72
73 /* Set to true while we are running the first pass of the try_optimize_cfg loop. */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
80
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
92 \f
93 /* Set flags for a newly created block. */
94
95 static void
96 notice_new_block (basic_block bb)
97 {
98 if (!bb)
99 return;
100
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
103 }
104
105 /* Recompute the forwarder flag after the block has been modified. */
106
107 static void
108 update_forwarder_flag (basic_block bb)
109 {
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
114 }
115 \f
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
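
/* A rough sketch of the transformation, assuming the usual layout:

       if (cond) goto L1;            if (!cond) goto L2;
       goto L2;                =>  L1:
     L1:                              ...
       ...

   i.e. the conditional branch is inverted so that it targets the
   destination of the unconditional jump, the jump-only block is deleted,
   and L1 is reached by falling through.  */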
118
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
121 {
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
125 rtx insn, next;
126 rtx end;
127
128 /* Verify that there are exactly two successors. */
129 if (!cbranch_block->succ
130 || !cbranch_block->succ->succ_next
131 || cbranch_block->succ->succ_next->succ_next)
132 return false;
133
134 /* Verify that we've got a normal conditional branch at the end
135 of the block. */
136 cbranch_insn = BB_END (cbranch_block);
137 if (!any_condjump_p (cbranch_insn))
138 return false;
139
140 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
141 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
142
143 /* The next block must not have multiple predecessors, must not
144 be the last block in the function, and must contain just the
145 unconditional jump. */
146 jump_block = cbranch_fallthru_edge->dest;
147 if (jump_block->pred->pred_next
148 || jump_block->next_bb == EXIT_BLOCK_PTR
149 || !FORWARDER_BLOCK_P (jump_block))
150 return false;
151 jump_dest_block = jump_block->succ->dest;
152
153 /* If we are partitioning hot/cold basic blocks, we don't want to
154 mess up unconditional or indirect jumps that cross between hot
155 and cold sections. */
156
157 if (flag_reorder_blocks_and_partition
158 && (jump_block->partition != jump_dest_block->partition
159 || cbranch_jump_edge->crossing_edge))
160 return false;
161
162 /* The conditional branch must target the block after the
163 unconditional branch. */
164 cbranch_dest_block = cbranch_jump_edge->dest;
165
166 if (!can_fallthru (jump_block, cbranch_dest_block))
167 return false;
168
169 /* Invert the conditional branch. */
170 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
171 return false;
172
173 if (dump_file)
174 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
175 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
176
177 /* Success. Update the CFG to match. Note that after this point
178 the edge variable names appear backwards; the redirection is done
179 this way to preserve edge profile data. */
180 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
181 cbranch_dest_block);
182 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
183 jump_dest_block);
184 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
185 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
186 update_br_prob_note (cbranch_block);
187
188 end = BB_END (jump_block);
189 /* Deleting a block may produce an unreachable code warning even when we are
190 not deleting anything live. Suppress it by moving all the line number
191 notes out of the block. */
192 for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
193 insn = next)
194 {
195 next = NEXT_INSN (insn);
196 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
197 {
198 if (insn == BB_END (jump_block))
199 {
200 BB_END (jump_block) = PREV_INSN (insn);
201 if (insn == end)
202 break;
203 }
204 reorder_insns_nobb (insn, insn, end);
205 end = insn;
206 }
207 }
208 /* Delete the block with the unconditional jump, and clean up the mess. */
209 delete_basic_block (jump_block);
210 tidy_fallthru_edge (cbranch_jump_edge);
211
212 return true;
213 }
214 \f
215 /* Attempt to prove that the operation is a no-op using CSElib, or mark the
216 effect on the register. Used by jump threading. */
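
/* For example, a (clobber (reg X)) clears X from NONEQUAL because the old
   value of X is dead and need not match, while a (set (reg X) ...) whose
   source is not already known by cselib to equal X marks X (and, for a
   hard register, all registers it overlaps) as nonequal.  */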
217
218 static bool
219 mark_effect (rtx exp, regset nonequal)
220 {
221 int regno;
222 rtx dest;
223 switch (GET_CODE (exp))
224 {
225 /* In case we do clobber the register, mark it as equal, as we know the
226 value is dead so it doesn't have to match. */
227 case CLOBBER:
228 if (REG_P (XEXP (exp, 0)))
229 {
230 dest = XEXP (exp, 0);
231 regno = REGNO (dest);
232 CLEAR_REGNO_REG_SET (nonequal, regno);
233 if (regno < FIRST_PSEUDO_REGISTER)
234 {
235 int n = hard_regno_nregs[regno][GET_MODE (dest)];
236 while (--n > 0)
237 CLEAR_REGNO_REG_SET (nonequal, regno + n);
238 }
239 }
240 return false;
241
242 case SET:
243 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
244 return false;
245 dest = SET_DEST (exp);
246 if (dest == pc_rtx)
247 return false;
248 if (!REG_P (dest))
249 return true;
250 regno = REGNO (dest);
251 SET_REGNO_REG_SET (nonequal, regno);
252 if (regno < FIRST_PSEUDO_REGISTER)
253 {
254 int n = hard_regno_nregs[regno][GET_MODE (dest)];
255 while (--n > 0)
256 SET_REGNO_REG_SET (nonequal, regno + n);
257 }
258 return false;
259
260 default:
261 return false;
262 }
263 }
264
265 /* Return nonzero if X is a register set in regset DATA.
266 Called via for_each_rtx. */
267 static int
268 mentions_nonequal_regs (rtx *x, void *data)
269 {
270 regset nonequal = (regset) data;
271 if (REG_P (*x))
272 {
273 int regno;
274
275 regno = REGNO (*x);
276 if (REGNO_REG_SET_P (nonequal, regno))
277 return 1;
278 if (regno < FIRST_PSEUDO_REGISTER)
279 {
280 int n = hard_regno_nregs[regno][GET_MODE (*x)];
281 while (--n > 0)
282 if (REGNO_REG_SET_P (nonequal, regno + n))
283 return 1;
284 }
285 }
286 return 0;
287 }
288 /* Attempt to prove that the basic block B will have no side effects and
289 will always continue along the same edge if reached via E. Return that
290 edge if it exists, NULL otherwise. */
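
/* As a sketch of the idea: if the block at the end of edge E tests the same
   condition that was just tested in E->src (say both end in
   "if (a == 0) ..."), and B contains no other side effects, then the
   outcome of B's test is already known when B is entered via E, so the
   jump in B can be bypassed and E forwarded directly to the appropriate
   successor of B.  */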
291
292 static edge
293 thread_jump (int mode, edge e, basic_block b)
294 {
295 rtx set1, set2, cond1, cond2, insn;
296 enum rtx_code code1, code2, reversed_code2;
297 bool reverse1 = false;
298 int i;
299 regset nonequal;
300 bool failed = false;
301
302 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
303 return NULL;
304
305 /* At the moment, we handle only conditional jumps, but later we may
306 want to extend this code to tablejumps and others. */
307 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
308 return NULL;
309 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
310 {
311 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
312 return NULL;
313 }
314
315 /* Second branch must end with onlyjump, as we will eliminate the jump. */
316 if (!any_condjump_p (BB_END (e->src)))
317 return NULL;
318
319 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
320 {
321 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
322 return NULL;
323 }
324
325 set1 = pc_set (BB_END (e->src));
326 set2 = pc_set (BB_END (b));
327 if (((e->flags & EDGE_FALLTHRU) != 0)
328 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
329 reverse1 = true;
330
331 cond1 = XEXP (SET_SRC (set1), 0);
332 cond2 = XEXP (SET_SRC (set2), 0);
333 if (reverse1)
334 code1 = reversed_comparison_code (cond1, BB_END (e->src));
335 else
336 code1 = GET_CODE (cond1);
337
338 code2 = GET_CODE (cond2);
339 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
340
341 if (!comparison_dominates_p (code1, code2)
342 && !comparison_dominates_p (code1, reversed_code2))
343 return NULL;
344
345 /* Ensure that the comparison operators are equivalent.
346 ??? This is far too pessimistic. We should allow swapped operands,
347 different CCmodes, or for example interval comparisons that
348 dominate even when the operands are not equivalent. */
349 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
350 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
351 return NULL;
352
353 /* Short circuit cases where block B contains some side effects, as we can't
354 safely bypass it. */
355 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
356 insn = NEXT_INSN (insn))
357 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
358 {
359 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
360 return NULL;
361 }
362
363 cselib_init (false);
364
365 /* First process all values computed in the source basic block. */
366 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
367 insn = NEXT_INSN (insn))
368 if (INSN_P (insn))
369 cselib_process_insn (insn);
370
371 nonequal = BITMAP_XMALLOC();
372 CLEAR_REG_SET (nonequal);
373
374 /* Now assume that we've continued along the edge E to B and continue
375 processing as if it were the same basic block.
376 Our goal is to prove that the whole block is a no-op. */
377
378 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
379 insn = NEXT_INSN (insn))
380 {
381 if (INSN_P (insn))
382 {
383 rtx pat = PATTERN (insn);
384
385 if (GET_CODE (pat) == PARALLEL)
386 {
387 for (i = 0; i < XVECLEN (pat, 0); i++)
388 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
389 }
390 else
391 failed |= mark_effect (pat, nonequal);
392 }
393
394 cselib_process_insn (insn);
395 }
396
397 /* Later we should clear NONEQUAL of dead registers. So far we don't
398 have liveness information in cfg_cleanup. */
399 if (failed)
400 {
401 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
402 goto failed_exit;
403 }
404
405 /* cond2 must not mention any register that is not known to be equal
406 to its value in the former block. */
407 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
408 goto failed_exit;
409
410 /* In case liveness information is available, we need to prove equivalence
411 only of the live values. */
412 if (mode & CLEANUP_UPDATE_LIFE)
413 AND_REG_SET (nonequal, b->global_live_at_end);
414
415 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
416
417 BITMAP_XFREE (nonequal);
418 cselib_finish ();
419 if ((comparison_dominates_p (code1, code2) != 0)
420 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
421 return BRANCH_EDGE (b);
422 else
423 return FALLTHRU_EDGE (b);
424
425 failed_exit:
426 BITMAP_XFREE (nonequal);
427 cselib_finish ();
428 return NULL;
429 }
430 \f
431 /* Attempt to forward edges leaving basic block B.
432 Return true if successful. */
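
/* As a sketch: if B branches to a forwarder block F1 which jumps to F2,
   which in turn jumps to D, the edge out of B is redirected straight to D
   and the now-unneeded forwarders are cleaned up later.  When jump
   threading is enabled, an edge may likewise be forwarded across a block
   whose conditional jump is proven redundant by thread_jump above.  */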
433
434 static bool
435 try_forward_edges (int mode, basic_block b)
436 {
437 bool changed = false;
438 edge e, next, *threaded_edges = NULL;
439
440 /* If we are partitioning hot/cold basic blocks, we don't want to
441 mess up unconditional or indirect jumps that cross between hot
442 and cold sections. */
443
444 if (flag_reorder_blocks_and_partition
445 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
446 return false;
447
448 for (e = b->succ; e; e = next)
449 {
450 basic_block target, first;
451 int counter;
452 bool threaded = false;
453 int nthreaded_edges = 0;
454 bool may_thread = first_pass | (b->flags & BB_DIRTY);
455
456 next = e->succ_next;
457
458 /* Skip complex edges because we don't know how to update them.
459
460 Still handle fallthru edges, as we may succeed in forwarding the fallthru
461 edge to the same place as the branch edge of a conditional branch
462 and thus turn the conditional branch into an unconditional branch. */
463 if (e->flags & EDGE_COMPLEX)
464 continue;
465
466 target = first = e->dest;
467 counter = 0;
468
469 while (counter < n_basic_blocks)
470 {
471 basic_block new_target = NULL;
472 bool new_target_threaded = false;
473 may_thread |= target->flags & BB_DIRTY;
474
475 if (FORWARDER_BLOCK_P (target)
476 && target->succ->dest != EXIT_BLOCK_PTR)
477 {
478 /* Bypass trivial infinite loops. */
479 if (target == target->succ->dest)
480 counter = n_basic_blocks;
481 new_target = target->succ->dest;
482 }
483
484 /* Allow threading only over one edge at a time, to simplify updating
485 of probabilities. */
486 else if ((mode & CLEANUP_THREADING) && may_thread)
487 {
488 edge t = thread_jump (mode, e, target);
489 if (t)
490 {
491 if (!threaded_edges)
492 threaded_edges = xmalloc (sizeof (*threaded_edges)
493 * n_basic_blocks);
494 else
495 {
496 int i;
497
498 /* Detect an infinite loop across blocks not
499 including the start block. */
500 for (i = 0; i < nthreaded_edges; ++i)
501 if (threaded_edges[i] == t)
502 break;
503 if (i < nthreaded_edges)
504 {
505 counter = n_basic_blocks;
506 break;
507 }
508 }
509
510 /* Detect an infinite loop across the start block. */
511 if (t->dest == b)
512 break;
513
514 if (nthreaded_edges >= n_basic_blocks)
515 abort ();
516 threaded_edges[nthreaded_edges++] = t;
517
518 new_target = t->dest;
519 new_target_threaded = true;
520 }
521 }
522
523 if (!new_target)
524 break;
525
526 /* Avoid killing loop pre-headers, as they are the place the loop
527 optimizer wants to hoist code to.
528 
529 For fallthru forwarders, the LOOP_BEG note must appear between
530 the header of the block and the CODE_LABEL of the loop; for non-forwarders
531 it must appear before the JUMP_INSN. */
532 if ((mode & CLEANUP_PRE_LOOP) && optimize)
533 {
534 rtx insn = (target->succ->flags & EDGE_FALLTHRU
535 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
536
537 if (GET_CODE (insn) != NOTE)
538 insn = NEXT_INSN (insn);
539
540 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
541 insn = NEXT_INSN (insn))
542 if (GET_CODE (insn) == NOTE
543 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
544 break;
545
546 if (GET_CODE (insn) == NOTE)
547 break;
548
549 /* Do not clean up branches to just past the end of a loop
550 at this time; it can mess up the loop optimizer's
551 recognition of some patterns. */
552
553 insn = PREV_INSN (BB_HEAD (target));
554 if (insn && GET_CODE (insn) == NOTE
555 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
556 break;
557 }
558
559 counter++;
560 target = new_target;
561 threaded |= new_target_threaded;
562 }
563
564 if (counter >= n_basic_blocks)
565 {
566 if (dump_file)
567 fprintf (dump_file, "Infinite loop in BB %i.\n",
568 target->index);
569 }
570 else if (target == first)
571 ; /* We didn't do anything. */
572 else
573 {
574 /* Save the values now, as the edge may get removed. */
575 gcov_type edge_count = e->count;
576 int edge_probability = e->probability;
577 int edge_frequency;
578 int n = 0;
579
580 /* Don't force if target is exit block. */
581 if (threaded && target != EXIT_BLOCK_PTR)
582 {
583 notice_new_block (redirect_edge_and_branch_force (e, target));
584 if (dump_file)
585 fprintf (dump_file, "Conditionals threaded.\n");
586 }
587 else if (!redirect_edge_and_branch (e, target))
588 {
589 if (dump_file)
590 fprintf (dump_file,
591 "Forwarding edge %i->%i to %i failed.\n",
592 b->index, e->dest->index, target->index);
593 continue;
594 }
595
596 /* We successfully forwarded the edge. Now update profile
597 data: for each edge we traversed in the chain, remove
598 the original edge's execution count. */
599 edge_frequency = ((edge_probability * b->frequency
600 + REG_BR_PROB_BASE / 2)
601 / REG_BR_PROB_BASE);
602
603 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
604 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
605
606 do
607 {
608 edge t;
609
610 first->count -= edge_count;
611 if (first->count < 0)
612 first->count = 0;
613 first->frequency -= edge_frequency;
614 if (first->frequency < 0)
615 first->frequency = 0;
616 if (first->succ->succ_next)
617 {
618 edge e;
619 int prob;
620 if (n >= nthreaded_edges)
621 abort ();
622 t = threaded_edges [n++];
623 if (t->src != first)
624 abort ();
625 if (first->frequency)
626 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
627 else
628 prob = 0;
629 if (prob > t->probability)
630 prob = t->probability;
631 t->probability -= prob;
632 prob = REG_BR_PROB_BASE - prob;
633 if (prob <= 0)
634 {
635 first->succ->probability = REG_BR_PROB_BASE;
636 first->succ->succ_next->probability = 0;
637 }
638 else
639 for (e = first->succ; e; e = e->succ_next)
640 e->probability = ((e->probability * REG_BR_PROB_BASE)
641 / (double) prob);
642 update_br_prob_note (first);
643 }
644 else
645 {
646 /* It is possible that as a result of
647 threading we've removed the edge, as it is
648 threaded to the fallthru edge. Avoid
649 getting out of sync. */
650 if (n < nthreaded_edges
651 && first == threaded_edges [n]->src)
652 n++;
653 t = first->succ;
654 }
655
656 t->count -= edge_count;
657 if (t->count < 0)
658 t->count = 0;
659 first = t->dest;
660 }
661 while (first != target);
662
663 changed = true;
664 }
665 }
666
667 if (threaded_edges)
668 free (threaded_edges);
669 return changed;
670 }
671 \f
672
673 /* Blocks A and B are to be merged into a single block. A has no incoming
674 fallthru edge, so it can be moved before B without adding or modifying
675 any jumps (aside from the jump from A to B). */
676
677 static void
678 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
679 {
680 rtx barrier;
681
682 /* If we are partitioning hot/cold basic blocks, we don't want to
683 mess up unconditional or indirect jumps that cross between hot
684 and cold sections. */
685
686 if (flag_reorder_blocks_and_partition
687 && (a->partition != b->partition
688 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
689 return;
690
691 barrier = next_nonnote_insn (BB_END (a));
692 if (GET_CODE (barrier) != BARRIER)
693 abort ();
694 delete_insn (barrier);
695
696 /* Move block and loop notes out of the chain so that we do not
697 disturb their order.
698
699 ??? A better solution would be to squeeze out all the non-nested notes
700 and adjust the block trees appropriately. Even better would be to have
701 a tighter connection between block trees and rtl so that this is not
702 necessary. */
703 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
704 abort ();
705
706 /* Scramble the insn chain. */
707 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
708 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
709 a->flags |= BB_DIRTY;
710
711 if (dump_file)
712 fprintf (dump_file, "Moved block %d before %d and merged.\n",
713 a->index, b->index);
714
715 /* Swap the records for the two blocks around. */
716
717 unlink_block (a);
718 link_block (a, b->prev_bb);
719
720 /* Now blocks A and B are contiguous. Merge them. */
721 merge_blocks (a, b);
722 }
723
724 /* Blocks A and B are to be merged into a single block. B has no outgoing
725 fallthru edge, so it can be moved after A without adding or modifying
726 any jumps (aside from the jump from A to B). */
727
728 static void
729 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
730 {
731 rtx barrier, real_b_end;
732 rtx label, table;
733
734 /* If we are partitioning hot/cold basic blocks, we don't want to
735 mess up unconditional or indirect jumps that cross between hot
736 and cold sections. */
737
738 if (flag_reorder_blocks_and_partition
739 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
740 || a->partition != b->partition))
741 return;
742
743 real_b_end = BB_END (b);
744
745 /* If there is a jump table following block B temporarily add the jump table
746 to block B so that it will also be moved to the correct location. */
747 if (tablejump_p (BB_END (b), &label, &table)
748 && prev_active_insn (label) == BB_END (b))
749 {
750 BB_END (b) = table;
751 }
752
753 /* There had better have been a barrier there. Delete it. */
754 barrier = NEXT_INSN (BB_END (b));
755 if (barrier && GET_CODE (barrier) == BARRIER)
756 delete_insn (barrier);
757
758 /* Move block and loop notes out of the chain so that we do not
759 disturb their order.
760
761 ??? A better solution would be to squeeze out all the non-nested notes
762 and adjust the block trees appropriately. Even better would be to have
763 a tighter connection between block trees and rtl so that this is not
764 necessary. */
765 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
766 abort ();
767
768 /* Scramble the insn chain. */
769 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
770
771 /* Restore the real end of b. */
772 BB_END (b) = real_b_end;
773
774 if (dump_file)
775 fprintf (dump_file, "Moved block %d after %d and merged.\n",
776 b->index, a->index);
777
778 /* Now blocks A and B are contiguous. Merge them. */
779 merge_blocks (a, b);
780 }
781
782 /* Attempt to merge basic blocks that are potentially non-adjacent.
783 Return NULL iff the attempt failed, otherwise return the basic block
784 where cleanup_cfg should continue. Because the merging commonly
785 moves a basic block away or introduces another optimization
786 possibility, return the basic block just before B so that cleanup_cfg
787 doesn't need to iterate.
788 
789 It might be a good idea to return the basic block before C in case
790 C has been moved after B and originally appeared earlier in the
791 insn sequence, but we have no information available about the
792 relative ordering of these two. Hopefully it is not too common. */
793
794 static basic_block
795 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
796 {
797 basic_block next;
798
799 /* If we are partitioning hot/cold basic blocks, we don't want to
800 mess up unconditional or indirect jumps that cross between hot
801 and cold sections. */
802
803 if (flag_reorder_blocks_and_partition
804 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
805 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
806 || b->partition != c->partition))
807 return NULL;
808
809
810
811 /* If B has a fallthru edge to C, no need to move anything. */
812 if (e->flags & EDGE_FALLTHRU)
813 {
814 int b_index = b->index, c_index = c->index;
815 merge_blocks (b, c);
816 update_forwarder_flag (b);
817
818 if (dump_file)
819 fprintf (dump_file, "Merged %d and %d without moving.\n",
820 b_index, c_index);
821
822 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
823 }
824
825 /* Otherwise we will need to move code around. Do that only if expensive
826 transformations are allowed. */
827 else if (mode & CLEANUP_EXPENSIVE)
828 {
829 edge tmp_edge, b_fallthru_edge;
830 bool c_has_outgoing_fallthru;
831 bool b_has_incoming_fallthru;
832
833 /* Avoid overactive code motion, as the forwarder blocks should be
834 eliminated by edge redirection instead. One exception might have
835 been if B is a forwarder block and C has no fallthru edge, but
836 that should be cleaned up by bb-reorder instead. */
837 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
838 return NULL;
839
840 /* We must make sure to not munge nesting of lexical blocks,
841 and loop notes. This is done by squeezing out all the notes
842 and leaving them there to lie. Not ideal, but functional. */
843
844 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
845 if (tmp_edge->flags & EDGE_FALLTHRU)
846 break;
847
848 c_has_outgoing_fallthru = (tmp_edge != NULL);
849
850 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
851 if (tmp_edge->flags & EDGE_FALLTHRU)
852 break;
853
854 b_has_incoming_fallthru = (tmp_edge != NULL);
855 b_fallthru_edge = tmp_edge;
856 next = b->prev_bb;
857 if (next == c)
858 next = next->prev_bb;
859
860 /* Otherwise, we're going to try to move C after B. If C does
861 not have an outgoing fallthru, then it can be moved
862 immediately after B without introducing or modifying jumps. */
863 if (! c_has_outgoing_fallthru)
864 {
865 merge_blocks_move_successor_nojumps (b, c);
866 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
867 }
868
869 /* If B does not have an incoming fallthru, then it can be moved
870 immediately before C without introducing or modifying jumps.
871 C cannot be the first block, so we do not have to worry about
872 accessing a non-existent block. */
873
874 if (b_has_incoming_fallthru)
875 {
876 basic_block bb;
877
878 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
879 return NULL;
880 bb = force_nonfallthru (b_fallthru_edge);
881 if (bb)
882 notice_new_block (bb);
883 }
884
885 merge_blocks_move_predecessor_nojumps (b, c);
886 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
887 }
888
889 return NULL;
890 }
891 \f
892
893 /* Remove the memory attributes of corresponding MEM expressions in X
894 and Y if they are not equal. */
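
/* For instance, if corresponding MEMs in X and Y carry different alias
   sets, both are reset to alias set 0 (the conservative "may alias
   anything" set); differing MEM_EXPR or MEM_OFFSET attributes are simply
   dropped, and the sizes and alignments are merged conservatively.  */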
895
896 static void
897 merge_memattrs (rtx x, rtx y)
898 {
899 int i;
900 int j;
901 enum rtx_code code;
902 const char *fmt;
903
904 if (x == y)
905 return;
906 if (x == 0 || y == 0)
907 return;
908
909 code = GET_CODE (x);
910
911 if (code != GET_CODE (y))
912 return;
913
914 if (GET_MODE (x) != GET_MODE (y))
915 return;
916
917 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
918 {
919 if (! MEM_ATTRS (x))
920 MEM_ATTRS (y) = 0;
921 else if (! MEM_ATTRS (y))
922 MEM_ATTRS (x) = 0;
923 else
924 {
925 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
926 {
927 set_mem_alias_set (x, 0);
928 set_mem_alias_set (y, 0);
929 }
930
931 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
932 {
933 set_mem_expr (x, 0);
934 set_mem_expr (y, 0);
935 set_mem_offset (x, 0);
936 set_mem_offset (y, 0);
937 }
938 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
939 {
940 set_mem_offset (x, 0);
941 set_mem_offset (y, 0);
942 }
943
944 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
945 set_mem_size (y, MEM_SIZE (x));
946
947 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
948 set_mem_align (y, MEM_ALIGN (x));
949 }
950 }
951
952 fmt = GET_RTX_FORMAT (code);
953 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
954 {
955 switch (fmt[i])
956 {
957 case 'E':
958 /* Two vectors must have the same length. */
959 if (XVECLEN (x, i) != XVECLEN (y, i))
960 return;
961
962 for (j = 0; j < XVECLEN (x, i); j++)
963 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
964
965 break;
966
967 case 'e':
968 merge_memattrs (XEXP (x, i), XEXP (y, i));
969 }
970 }
971 return;
972 }
973
974
975 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
976
977 static bool
978 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
979 {
980 rtx p1, p2;
981
982 /* Verify that I1 and I2 are equivalent. */
983 if (GET_CODE (i1) != GET_CODE (i2))
984 return false;
985
986 p1 = PATTERN (i1);
987 p2 = PATTERN (i2);
988
989 if (GET_CODE (p1) != GET_CODE (p2))
990 return false;
991
992 /* If this is a CALL_INSN, compare register usage information.
993 If we don't check this on stack register machines, the two
994 CALL_INSNs might be merged leaving reg-stack.c with mismatching
995 numbers of stack registers in the same basic block.
996 If we don't check this on machines with delay slots, a delay slot may
997 be filled that clobbers a parameter expected by the subroutine.
998
999 ??? We take the simple route for now and assume that if they're
1000 equal, they were constructed identically. */
1001
1002 if (GET_CODE (i1) == CALL_INSN
1003 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1004 CALL_INSN_FUNCTION_USAGE (i2))
1005 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1006 return false;
1007
1008 #ifdef STACK_REGS
1009 /* If cross_jump_death_matters is not 0, the insn's mode
1010 indicates whether or not the insn contains any stack-like
1011 regs. */
1012
1013 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1014 {
1015 /* If register stack conversion has already been done, then
1016 death notes must also be compared before it is certain that
1017 the two instruction streams match. */
1018
1019 rtx note;
1020 HARD_REG_SET i1_regset, i2_regset;
1021
1022 CLEAR_HARD_REG_SET (i1_regset);
1023 CLEAR_HARD_REG_SET (i2_regset);
1024
1025 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1026 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1027 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1028
1029 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1030 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1031 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1032
1033 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1034
1035 return false;
1036
1037 done:
1038 ;
1039 }
1040 #endif
1041
1042 if (reload_completed
1043 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1044 return true;
1045
1046 /* Do not do EQUIV substitution after reload. First, we're undoing the
1047 work of reload_cse. Second, we may be undoing the work of the post-
1048 reload splitting pass. */
1049 /* ??? Possibly add a new phase switch variable that can be used by
1050 targets to disallow the troublesome insns after splitting. */
1051 if (!reload_completed)
1052 {
1053 /* The following code helps take care of G++ cleanups. */
1054 rtx equiv1 = find_reg_equal_equiv_note (i1);
1055 rtx equiv2 = find_reg_equal_equiv_note (i2);
1056
1057 if (equiv1 && equiv2
1058 /* If the equivalences are not to a constant, they may
1059 reference pseudos that no longer exist, so we can't
1060 use them. */
1061 && (! reload_completed
1062 || (CONSTANT_P (XEXP (equiv1, 0))
1063 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1064 {
1065 rtx s1 = single_set (i1);
1066 rtx s2 = single_set (i2);
1067 if (s1 != 0 && s2 != 0
1068 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1069 {
1070 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1071 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1072 if (! rtx_renumbered_equal_p (p1, p2))
1073 cancel_changes (0);
1074 else if (apply_change_group ())
1075 return true;
1076 }
1077 }
1078 }
1079
1080 return false;
1081 }
1082 \f
1083 /* Look through the insns at the end of BB1 and BB2 and find the longest
1084 sequence that are equivalent. Store the first insns for that sequence
1085 in *F1 and *F2 and return the sequence length.
1086
1087 To simplify callers of this function, if the blocks match exactly,
1088 store the head of the blocks in *F1 and *F2. */
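
/* In other words, this walks backwards from the ends of BB1 and BB2 in
   lockstep, using insns_match_p on each pair of insns, and stops at the
   first pair that differs; the matching tail found here is what
   try_crossjump_to_edge later shares between the two blocks.  */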
1089
1090 static int
1091 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1092 basic_block bb2, rtx *f1, rtx *f2)
1093 {
1094 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1095 int ninsns = 0;
1096
1097 /* Skip simple jumps at the end of the blocks. Complex jumps still
1098 need to be compared for equivalence, which we'll do below. */
1099
1100 i1 = BB_END (bb1);
1101 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1102 if (onlyjump_p (i1)
1103 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1104 {
1105 last1 = i1;
1106 i1 = PREV_INSN (i1);
1107 }
1108
1109 i2 = BB_END (bb2);
1110 if (onlyjump_p (i2)
1111 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1112 {
1113 last2 = i2;
1114 /* Count everything except an unconditional jump as an insn. */
1115 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1116 ninsns++;
1117 i2 = PREV_INSN (i2);
1118 }
1119
1120 while (true)
1121 {
1122 /* Ignore notes. */
1123 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1124 i1 = PREV_INSN (i1);
1125
1126 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1127 i2 = PREV_INSN (i2);
1128
1129 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1130 break;
1131
1132 if (!insns_match_p (mode, i1, i2))
1133 break;
1134
1135 merge_memattrs (i1, i2);
1136
1137 /* Don't begin a cross-jump with a NOTE insn. */
1138 if (INSN_P (i1))
1139 {
1140 /* If the merged insns have different REG_EQUAL notes, then
1141 remove them. */
1142 rtx equiv1 = find_reg_equal_equiv_note (i1);
1143 rtx equiv2 = find_reg_equal_equiv_note (i2);
1144
1145 if (equiv1 && !equiv2)
1146 remove_note (i1, equiv1);
1147 else if (!equiv1 && equiv2)
1148 remove_note (i2, equiv2);
1149 else if (equiv1 && equiv2
1150 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1151 {
1152 remove_note (i1, equiv1);
1153 remove_note (i2, equiv2);
1154 }
1155
1156 afterlast1 = last1, afterlast2 = last2;
1157 last1 = i1, last2 = i2;
1158 ninsns++;
1159 }
1160
1161 i1 = PREV_INSN (i1);
1162 i2 = PREV_INSN (i2);
1163 }
1164
1165 #ifdef HAVE_cc0
1166 /* Don't allow the insn after a compare to be shared by
1167 cross-jumping unless the compare is also shared. */
1168 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1169 last1 = afterlast1, last2 = afterlast2, ninsns--;
1170 #endif
1171
1172 /* Include preceding notes and labels in the cross-jump. One,
1173 this may bring us to the head of the blocks as requested above.
1174 Two, it keeps line number notes as matched as may be. */
1175 if (ninsns)
1176 {
1177 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1178 last1 = PREV_INSN (last1);
1179
1180 if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1181 last1 = PREV_INSN (last1);
1182
1183 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1184 last2 = PREV_INSN (last2);
1185
1186 if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1187 last2 = PREV_INSN (last2);
1188
1189 *f1 = last1;
1190 *f2 = last2;
1191 }
1192
1193 return ninsns;
1194 }
1195
1196 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1197 the branch instruction. This means that if we commonize the control
1198 flow before the end of the basic block, the semantics remain unchanged.
1199
1200 We may assume that there exists one edge with a common destination. */
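
/* Very roughly: single-successor blocks match if both end in a simple (or
   no) jump; conditional jumps match if their conditions are equivalent,
   possibly with the branch and fallthru roles swapped; otherwise the final
   instructions themselves must match, as must the edge counts, fallthru
   destinations and EH regions.  */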
1201
1202 static bool
1203 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1204 {
1205 int nehedges1 = 0, nehedges2 = 0;
1206 edge fallthru1 = 0, fallthru2 = 0;
1207 edge e1, e2;
1208
1209 /* If BB1 has only one successor, we may be looking at either an
1210 unconditional jump, or a fake edge to exit. */
1211 if (bb1->succ && !bb1->succ->succ_next
1212 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1213 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
1214 return (bb2->succ && !bb2->succ->succ_next
1215 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1216 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
1217
1218 /* Match conditional jumps - this may get tricky when fallthru and branch
1219 edges are crossed. */
1220 if (bb1->succ
1221 && bb1->succ->succ_next
1222 && !bb1->succ->succ_next->succ_next
1223 && any_condjump_p (BB_END (bb1))
1224 && onlyjump_p (BB_END (bb1)))
1225 {
1226 edge b1, f1, b2, f2;
1227 bool reverse, match;
1228 rtx set1, set2, cond1, cond2;
1229 enum rtx_code code1, code2;
1230
1231 if (!bb2->succ
1232 || !bb2->succ->succ_next
1233 || bb2->succ->succ_next->succ_next
1234 || !any_condjump_p (BB_END (bb2))
1235 || !onlyjump_p (BB_END (bb2)))
1236 return false;
1237
1238 b1 = BRANCH_EDGE (bb1);
1239 b2 = BRANCH_EDGE (bb2);
1240 f1 = FALLTHRU_EDGE (bb1);
1241 f2 = FALLTHRU_EDGE (bb2);
1242
1243 /* Get around possible forwarders on fallthru edges. Other cases
1244 should be optimized out already. */
1245 if (FORWARDER_BLOCK_P (f1->dest))
1246 f1 = f1->dest->succ;
1247
1248 if (FORWARDER_BLOCK_P (f2->dest))
1249 f2 = f2->dest->succ;
1250
1251 /* To simplify use of this function, return false if there are
1252 unneeded forwarder blocks. These will get eliminated later
1253 during cleanup_cfg. */
1254 if (FORWARDER_BLOCK_P (f1->dest)
1255 || FORWARDER_BLOCK_P (f2->dest)
1256 || FORWARDER_BLOCK_P (b1->dest)
1257 || FORWARDER_BLOCK_P (b2->dest))
1258 return false;
1259
1260 if (f1->dest == f2->dest && b1->dest == b2->dest)
1261 reverse = false;
1262 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1263 reverse = true;
1264 else
1265 return false;
1266
1267 set1 = pc_set (BB_END (bb1));
1268 set2 = pc_set (BB_END (bb2));
1269 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1270 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1271 reverse = !reverse;
1272
1273 cond1 = XEXP (SET_SRC (set1), 0);
1274 cond2 = XEXP (SET_SRC (set2), 0);
1275 code1 = GET_CODE (cond1);
1276 if (reverse)
1277 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1278 else
1279 code2 = GET_CODE (cond2);
1280
1281 if (code2 == UNKNOWN)
1282 return false;
1283
1284 /* Verify codes and operands match. */
1285 match = ((code1 == code2
1286 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1287 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1288 || (code1 == swap_condition (code2)
1289 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1290 XEXP (cond2, 0))
1291 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1292 XEXP (cond2, 1))));
1293
1294 /* If we return true, we will join the blocks, which means that
1295 we will only have one branch prediction bit to work with. Thus
1296 we require the existing branches to have probabilities that are
1297 roughly similar. */
1298 if (match
1299 && !optimize_size
1300 && maybe_hot_bb_p (bb1)
1301 && maybe_hot_bb_p (bb2))
1302 {
1303 int prob2;
1304
1305 if (b1->dest == b2->dest)
1306 prob2 = b2->probability;
1307 else
1308 /* Do not use f2 probability as f2 may be forwarded. */
1309 prob2 = REG_BR_PROB_BASE - b2->probability;
1310
1311 /* Fail if the difference in probabilities is greater than 50%.
1312 This rules out two well-predicted branches with opposite
1313 outcomes. */
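/* For example, with REG_BR_PROB_BASE of 10000, a branch taken 90% of the
   time (9000) and one taken 30% of the time (3000) differ by 6000 > 5000,
   so the match would be rejected here.  */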
1314 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1315 {
1316 if (dump_file)
1317 fprintf (dump_file,
1318 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1319 bb1->index, bb2->index, b1->probability, prob2);
1320
1321 return false;
1322 }
1323 }
1324
1325 if (dump_file && match)
1326 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1327 bb1->index, bb2->index);
1328
1329 return match;
1330 }
1331
1332 /* Generic case - we are seeing a computed jump, table jump or trapping
1333 instruction. */
1334
1335 #ifndef CASE_DROPS_THROUGH
1336 /* Check whether there are tablejumps in the end of BB1 and BB2.
1337 Return true if they are identical. */
1338 {
1339 rtx label1, label2;
1340 rtx table1, table2;
1341
1342 if (tablejump_p (BB_END (bb1), &label1, &table1)
1343 && tablejump_p (BB_END (bb2), &label2, &table2)
1344 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1345 {
1346 /* The labels should never be the same rtx. If they really are the same,
1347 the jump tables are the same too. So disable crossjumping of blocks BB1
1348 and BB2 because when deleting the common insns at the end of BB1
1349 with delete_basic_block () the jump table would be deleted too. */
1350 /* If LABEL2 is referenced in BB1->END, do not do anything
1351 because we would lose information when replacing
1352 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1353 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1354 {
1355 /* Set IDENTICAL to true when the tables are identical. */
1356 bool identical = false;
1357 rtx p1, p2;
1358
1359 p1 = PATTERN (table1);
1360 p2 = PATTERN (table2);
1361 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1362 {
1363 identical = true;
1364 }
1365 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1366 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1367 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1368 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1369 {
1370 int i;
1371
1372 identical = true;
1373 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1374 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1375 identical = false;
1376 }
1377
1378 if (identical)
1379 {
1380 replace_label_data rr;
1381 bool match;
1382
1383 /* Temporarily replace references to LABEL1 with LABEL2
1384 in BB1->END so that we could compare the instructions. */
1385 rr.r1 = label1;
1386 rr.r2 = label2;
1387 rr.update_label_nuses = false;
1388 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1389
1390 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1391 if (dump_file && match)
1392 fprintf (dump_file,
1393 "Tablejumps in bb %i and %i match.\n",
1394 bb1->index, bb2->index);
1395
1396 /* Set the original label in BB1->END because when deleting
1397 a block whose end is a tablejump, the tablejump referenced
1398 from the instruction is deleted too. */
1399 rr.r1 = label2;
1400 rr.r2 = label1;
1401 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1402
1403 return match;
1404 }
1405 }
1406 return false;
1407 }
1408 }
1409 #endif
1410
1411 /* First ensure that the instructions match. There may be many outgoing
1412 edges so this test is generally cheaper. */
1413 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1414 return false;
1415
1416 /* Search the outgoing edges, ensure that the counts match, and find possible
1417 fallthru and exception handling edges, since these need more
1418 validation. */
1419 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1420 e1 = e1->succ_next, e2 = e2->succ_next)
1421 {
1422 if (e1->flags & EDGE_EH)
1423 nehedges1++;
1424
1425 if (e2->flags & EDGE_EH)
1426 nehedges2++;
1427
1428 if (e1->flags & EDGE_FALLTHRU)
1429 fallthru1 = e1;
1430 if (e2->flags & EDGE_FALLTHRU)
1431 fallthru2 = e2;
1432 }
1433
1434 /* If number of edges of various types does not match, fail. */
1435 if (e1 || e2
1436 || nehedges1 != nehedges2
1437 || (fallthru1 != 0) != (fallthru2 != 0))
1438 return false;
1439
1440 /* fallthru edges must be forwarded to the same destination. */
1441 if (fallthru1)
1442 {
1443 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1444 ? fallthru1->dest->succ->dest: fallthru1->dest);
1445 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1446 ? fallthru2->dest->succ->dest: fallthru2->dest);
1447
1448 if (d1 != d2)
1449 return false;
1450 }
1451
1452 /* Ensure the same EH region. */
1453 {
1454 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1455 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1456
1457 if (!n1 && n2)
1458 return false;
1459
1460 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1461 return false;
1462 }
1463
1464 /* We don't need to match the rest of the edges as the above checks should
1465 be enough to ensure that they are equivalent. */
1466 return true;
1467 }
1468
1469 /* E1 and E2 are edges with the same destination block. Search their
1470 predecessors for common code. If found, redirect control flow from
1471 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
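
/* A rough sketch: if both predecessors of the common destination end with
   the same insn sequence, one of them (SRC2) keeps the shared tail
   (splitting the block first if only part of it matches), and the other
   (SRC1) is split just before its copy of the sequence and redirected into
   the shared tail, so the duplicate code is deleted.  */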
1472
1473 static bool
1474 try_crossjump_to_edge (int mode, edge e1, edge e2)
1475 {
1476 int nmatch;
1477 basic_block src1 = e1->src, src2 = e2->src;
1478 basic_block redirect_to, redirect_from, to_remove;
1479 rtx newpos1, newpos2;
1480 edge s;
1481
1482 newpos1 = newpos2 = NULL_RTX;
1483
1484 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1485 to try this optimization. */
1486
1487 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1488 return false;
1489
1490 /* Search backward through forwarder blocks. We don't need to worry
1491 about multiple entry or chained forwarders, as they will be optimized
1492 away. We do this to look past the unconditional jump following a
1493 conditional jump that is required due to the current CFG shape. */
1494 if (src1->pred
1495 && !src1->pred->pred_next
1496 && FORWARDER_BLOCK_P (src1))
1497 e1 = src1->pred, src1 = e1->src;
1498
1499 if (src2->pred
1500 && !src2->pred->pred_next
1501 && FORWARDER_BLOCK_P (src2))
1502 e2 = src2->pred, src2 = e2->src;
1503
1504 /* Nothing to do if we reach ENTRY, or a common source block. */
1505 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1506 return false;
1507 if (src1 == src2)
1508 return false;
1509
1510 /* Seeing more than one forwarder block would confuse us later... */
1511 if (FORWARDER_BLOCK_P (e1->dest)
1512 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1513 return false;
1514
1515 if (FORWARDER_BLOCK_P (e2->dest)
1516 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1517 return false;
1518
1519 /* Likewise with dead code (possibly newly created by the other optimizations
1520 of cfg_cleanup). */
1521 if (!src1->pred || !src2->pred)
1522 return false;
1523
1524 /* Look for the common insn sequence, part the first ... */
1525 if (!outgoing_edges_match (mode, src1, src2))
1526 return false;
1527
1528 /* ... and part the second. */
1529 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1530 if (!nmatch)
1531 return false;
1532
1533 #ifndef CASE_DROPS_THROUGH
1534 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1535 will be deleted.
1536 If we have tablejumps at the end of SRC1 and SRC2,
1537 they have already been compared for equivalence in outgoing_edges_match (),
1538 so replace the references to TABLE1 by references to TABLE2. */
1539 {
1540 rtx label1, label2;
1541 rtx table1, table2;
1542
1543 if (tablejump_p (BB_END (src1), &label1, &table1)
1544 && tablejump_p (BB_END (src2), &label2, &table2)
1545 && label1 != label2)
1546 {
1547 replace_label_data rr;
1548 rtx insn;
1549
1550 /* Replace references to LABEL1 with LABEL2. */
1551 rr.r1 = label1;
1552 rr.r2 = label2;
1553 rr.update_label_nuses = true;
1554 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1555 {
1556 /* Do not replace the label in SRC1->END because when deleting
1557 a block whose end is a tablejump, the tablejump referenced
1558 from the instruction is deleted too. */
1559 if (insn != BB_END (src1))
1560 for_each_rtx (&insn, replace_label, &rr);
1561 }
1562 }
1563 }
1564 #endif
1565
1566 /* Avoid splitting if possible. */
1567 if (newpos2 == BB_HEAD (src2))
1568 redirect_to = src2;
1569 else
1570 {
1571 if (dump_file)
1572 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1573 src2->index, nmatch);
1574 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1575 }
1576
1577 if (dump_file)
1578 fprintf (dump_file,
1579 "Cross jumping from bb %i to bb %i; %i common insns\n",
1580 src1->index, src2->index, nmatch);
1581
1582 redirect_to->count += src1->count;
1583 redirect_to->frequency += src1->frequency;
1584 /* We may have some registers visible through the block. */
1585 redirect_to->flags |= BB_DIRTY;
1586
1587 /* Recompute the frequencies and counts of outgoing edges. */
1588 for (s = redirect_to->succ; s; s = s->succ_next)
1589 {
1590 edge s2;
1591 basic_block d = s->dest;
1592
1593 if (FORWARDER_BLOCK_P (d))
1594 d = d->succ->dest;
1595
1596 for (s2 = src1->succ; ; s2 = s2->succ_next)
1597 {
1598 basic_block d2 = s2->dest;
1599 if (FORWARDER_BLOCK_P (d2))
1600 d2 = d2->succ->dest;
1601 if (d == d2)
1602 break;
1603 }
1604
1605 s->count += s2->count;
1606
1607 /* Take care to update possible forwarder blocks. We verified
1608 that there is no more than one in the chain, so we can't run
1609 into an infinite loop. */
1610 if (FORWARDER_BLOCK_P (s->dest))
1611 {
1612 s->dest->succ->count += s2->count;
1613 s->dest->count += s2->count;
1614 s->dest->frequency += EDGE_FREQUENCY (s);
1615 }
1616
1617 if (FORWARDER_BLOCK_P (s2->dest))
1618 {
1619 s2->dest->succ->count -= s2->count;
1620 if (s2->dest->succ->count < 0)
1621 s2->dest->succ->count = 0;
1622 s2->dest->count -= s2->count;
1623 s2->dest->frequency -= EDGE_FREQUENCY (s);
1624 if (s2->dest->frequency < 0)
1625 s2->dest->frequency = 0;
1626 if (s2->dest->count < 0)
1627 s2->dest->count = 0;
1628 }
1629
1630 if (!redirect_to->frequency && !src1->frequency)
1631 s->probability = (s->probability + s2->probability) / 2;
1632 else
1633 s->probability
1634 = ((s->probability * redirect_to->frequency +
1635 s2->probability * src1->frequency)
1636 / (redirect_to->frequency + src1->frequency));
1637 }
1638
1639 update_br_prob_note (redirect_to);
1640
1641 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1642
1643 /* Skip possible basic block header. */
1644 if (GET_CODE (newpos1) == CODE_LABEL)
1645 newpos1 = NEXT_INSN (newpos1);
1646
1647 if (GET_CODE (newpos1) == NOTE)
1648 newpos1 = NEXT_INSN (newpos1);
1649
1650 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1651 to_remove = redirect_from->succ->dest;
1652
1653 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1654 delete_basic_block (to_remove);
1655
1656 update_forwarder_flag (redirect_from);
1657
1658 return true;
1659 }
1660
1661 /* Search the predecessors of BB for common insn sequences. When found,
1662 share code between them by redirecting control flow. Return true if
1663 any changes were made. */
1664
1665 static bool
1666 try_crossjump_bb (int mode, basic_block bb)
1667 {
1668 edge e, e2, nexte2, nexte, fallthru;
1669 bool changed;
1670 int n = 0, max;
1671
1672 /* Nothing to do if there are not at least two incoming edges. */
1673 if (!bb->pred || !bb->pred->pred_next)
1674 return false;
1675
1676 /* If we are partitioning hot/cold basic blocks, we don't want to
1677 mess up unconditional or indirect jumps that cross between hot
1678 and cold sections. */
1679
1680 if (flag_reorder_blocks_and_partition
1681 && (bb->pred->src->partition != bb->pred->pred_next->src->partition
1682 || bb->pred->crossing_edge))
1683 return false;
1684
1685 /* It is always cheapest to redirect a block that ends in a branch to
1686 a block that falls through into BB, as that adds no branches to the
1687 program. We'll try that combination first. */
1688 fallthru = NULL;
1689 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1690 for (e = bb->pred; e ; e = e->pred_next, n++)
1691 {
1692 if (e->flags & EDGE_FALLTHRU)
1693 fallthru = e;
1694 if (n > max)
1695 return false;
1696 }
1697
1698 changed = false;
1699 for (e = bb->pred; e; e = nexte)
1700 {
1701 nexte = e->pred_next;
1702
1703 /* As noted above, first try with the fallthru predecessor. */
1704 if (fallthru)
1705 {
1706 /* Don't combine the fallthru edge into anything else.
1707 If there is a match, we'll do it the other way around. */
1708 if (e == fallthru)
1709 continue;
1710 /* If nothing changed since the last attempt, there is nothing
1711 we can do. */
1712 if (!first_pass
1713 && (!(e->src->flags & BB_DIRTY)
1714 && !(fallthru->src->flags & BB_DIRTY)))
1715 continue;
1716
1717 if (try_crossjump_to_edge (mode, e, fallthru))
1718 {
1719 changed = true;
1720 nexte = bb->pred;
1721 continue;
1722 }
1723 }
1724
1725 /* Non-obvious work limiting check: Recognize that we're going
1726 to call try_crossjump_bb on every basic block. So if we have
1727 two blocks with lots of outgoing edges (a switch) and they
1728 share lots of common destinations, then we would do the
1729 cross-jump check once for each common destination.
1730
1731 Now, if the blocks actually are cross-jump candidates, then
1732 all of their destinations will be shared. Which means that
1733 we only need check them for cross-jump candidacy once. We
1734 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1735 choosing to do the check from the block for which the edge
1736 in question is the first successor of A. */
1737 if (e->src->succ != e)
1738 continue;
1739
1740 for (e2 = bb->pred; e2; e2 = nexte2)
1741 {
1742 nexte2 = e2->pred_next;
1743
1744 if (e2 == e)
1745 continue;
1746
1747 /* We've already checked the fallthru edge above. */
1748 if (e2 == fallthru)
1749 continue;
1750
1751 /* The "first successor" check above only prevents multiple
1752 checks of crossjump(A,B). In order to prevent redundant
1753 checks of crossjump(B,A), require that A be the block
1754 with the lowest index. */
1755 if (e->src->index > e2->src->index)
1756 continue;
1757
1758 /* If nothing changed since the last attempt, there is nothing
1759 we can do. */
1760 if (!first_pass
1761 && (!(e->src->flags & BB_DIRTY)
1762 && !(e2->src->flags & BB_DIRTY)))
1763 continue;
1764
1765 if (try_crossjump_to_edge (mode, e, e2))
1766 {
1767 changed = true;
1768 nexte = bb->pred;
1769 break;
1770 }
1771 }
1772 }
1773
1774 return changed;
1775 }
1776
1777 /* Do simple CFG optimizations - basic block merging, simplification of jump
1778 instructions, etc. Return nonzero if changes were made. */
1779
1780 static bool
1781 try_optimize_cfg (int mode)
1782 {
1783 bool changed_overall = false;
1784 bool changed;
1785 int iterations = 0;
1786 basic_block bb, b, next;
1787
1788 if (mode & CLEANUP_CROSSJUMP)
1789 add_noreturn_fake_exit_edges ();
1790
1791 FOR_EACH_BB (bb)
1792 update_forwarder_flag (bb);
1793
1794 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1795 clear_bb_flags ();
1796
1797 if (! targetm.cannot_modify_jumps_p ())
1798 {
1799 first_pass = true;
1800 /* Attempt to merge blocks as made possible by edge removal. If
1801 a block has only one successor, and the successor has only
1802 one predecessor, they may be combined. */
1803 do
1804 {
1805 changed = false;
1806 iterations++;
1807
1808 if (dump_file)
1809 fprintf (dump_file,
1810 "\n\ntry_optimize_cfg iteration %i\n\n",
1811 iterations);
1812
1813 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1814 {
1815 basic_block c;
1816 edge s;
1817 bool changed_here = false;
1818
1819 /* Delete trivially dead basic blocks. */
1820 while (b->pred == NULL)
1821 {
1822 c = b->prev_bb;
1823 if (dump_file)
1824 fprintf (dump_file, "Deleting block %i.\n",
1825 b->index);
1826
1827 delete_basic_block (b);
1828 if (!(mode & CLEANUP_CFGLAYOUT))
1829 changed = true;
1830 b = c;
1831 }
1832
1833 /* Remove code labels no longer used. */
1834 if (b->pred->pred_next == NULL
1835 && (b->pred->flags & EDGE_FALLTHRU)
1836 && !(b->pred->flags & EDGE_COMPLEX)
1837 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1838 /* If the previous block ends with a branch to this
1839 block, we can't delete the label. Normally this
1840 is a condjump that is yet to be simplified, but
1841 if CASE_DROPS_THRU, this can be a tablejump with
1842 some element going to the same place as the
1843 default (fallthru). */
1844 && (b->pred->src == ENTRY_BLOCK_PTR
1845 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1846 || ! label_is_jump_target_p (BB_HEAD (b),
1847 BB_END (b->pred->src))))
1848 {
1849 rtx label = BB_HEAD (b);
1850
1851 delete_insn_chain (label, label);
1852 /* In case the label is undeletable, move it after the
1853 BASIC_BLOCK note. */
1854 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1855 {
1856 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1857
1858 reorder_insns_nobb (label, label, bb_note);
1859 BB_HEAD (b) = bb_note;
1860 }
1861 if (dump_file)
1862 fprintf (dump_file, "Deleted label in block %i.\n",
1863 b->index);
1864 }
1865
1866 /* If we fall through an empty block, we can remove it. */
1867 if (!(mode & CLEANUP_CFGLAYOUT)
1868 && b->pred->pred_next == NULL
1869 && (b->pred->flags & EDGE_FALLTHRU)
1870 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1871 && FORWARDER_BLOCK_P (b)
1872 /* Note that forwarder_block_p true ensures that
1873 there is a successor for this block. */
1874 && (b->succ->flags & EDGE_FALLTHRU)
1875 && n_basic_blocks > 1)
1876 {
1877 if (dump_file)
1878 fprintf (dump_file,
1879 "Deleting fallthru block %i.\n",
1880 b->index);
1881
1882 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1883 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1884 delete_basic_block (b);
1885 changed = true;
1886 b = c;
1887 }
1888
1889 if ((s = b->succ) != NULL
1890 && s->succ_next == NULL
1891 && !(s->flags & EDGE_COMPLEX)
1892 && (c = s->dest) != EXIT_BLOCK_PTR
1893 && c->pred->pred_next == NULL
1894 && b != c)
1895 {
1896 /* When not in cfg_layout mode, use code aware of reordering
1897 INSNs. This code possibly creates new basic blocks, so it
1898 does not fit the merge_blocks interface and is kept here in
1899 the hope that it will become useless once more of the compiler
1900 is transformed to use cfg_layout mode. */
1901
1902 if ((mode & CLEANUP_CFGLAYOUT)
1903 && can_merge_blocks_p (b, c))
1904 {
1905 merge_blocks (b, c);
1906 update_forwarder_flag (b);
1907 changed_here = true;
1908 }
1909 else if (!(mode & CLEANUP_CFGLAYOUT)
1910 /* If the jump insn has side effects,
1911 we can't kill the edge. */
1912 && (GET_CODE (BB_END (b)) != JUMP_INSN
1913 || (reload_completed
1914 ? simplejump_p (BB_END (b))
1915 : (onlyjump_p (BB_END (b))
1916 && !tablejump_p (BB_END (b),
1917 NULL, NULL))))
1918 && (next = merge_blocks_move (s, b, c, mode)))
1919 {
1920 b = next;
1921 changed_here = true;
1922 }
1923 }
1924
1925 /* Simplify branch over branch. */
1926 if ((mode & CLEANUP_EXPENSIVE)
1927 && !(mode & CLEANUP_CFGLAYOUT)
1928 && try_simplify_condjump (b))
1929 changed_here = true;
1930
1931 /* If B has a single outgoing edge, but uses a
1932 non-trivial jump instruction without side-effects, we
1933 can either delete the jump entirely, or replace it
1934 with a simple unconditional jump. */
1935 if (b->succ
1936 && ! b->succ->succ_next
1937 && b->succ->dest != EXIT_BLOCK_PTR
1938 && onlyjump_p (BB_END (b))
1939 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1940 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1941 (mode & CLEANUP_CFGLAYOUT) != 0))
1942 {
1943 update_forwarder_flag (b);
1944 changed_here = true;
1945 }
1946
1947 /* Simplify branch to branch. */
1948 if (try_forward_edges (mode, b))
1949 changed_here = true;
1950
1951 /* Look for shared code between blocks. */
1952 if ((mode & CLEANUP_CROSSJUMP)
1953 && try_crossjump_bb (mode, b))
1954 changed_here = true;
1955
1956 /* Don't get confused by the index shift caused by
1957 deleting blocks. */
1958 if (!changed_here)
1959 b = b->next_bb;
1960 else
1961 changed = true;
1962 }
1963
1964 if ((mode & CLEANUP_CROSSJUMP)
1965 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1966 changed = true;
1967
1968 #ifdef ENABLE_CHECKING
1969 if (changed)
1970 verify_flow_info ();
1971 #endif
1972
1973 changed_overall |= changed;
1974 first_pass = false;
1975 }
1976 while (changed);
1977 }
1978
1979 if (mode & CLEANUP_CROSSJUMP)
1980 remove_fake_edges ();
1981
1982 clear_aux_for_blocks ();
1983
1984 return changed_overall;
1985 }
1986 \f
1987 /* Delete all unreachable basic blocks. */
1988
1989 bool
1990 delete_unreachable_blocks (void)
1991 {
1992 bool changed = false;
1993 basic_block b, next_bb;
1994
1995 find_unreachable_blocks ();
1996
1997 /* Delete all unreachable basic blocks. */
1998
1999 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2000 {
2001 next_bb = b->next_bb;
2002
2003 if (!(b->flags & BB_REACHABLE))
2004 {
2005 delete_basic_block (b);
2006 changed = true;
2007 }
2008 }
2009
2010 if (changed)
2011 tidy_fallthru_edges ();
2012 return changed;
2013 }
2014
2015 /* Merge sequential blocks if possible. */
2016
2017 bool
2018 merge_seq_blocks (void)
2019 {
2020 basic_block bb;
2021 bool changed = false;
2022
2023 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2024 {
2025 if (bb->succ
2026 && !bb->succ->succ_next
2027 && can_merge_blocks_p (bb, bb->succ->dest))
2028 {
2029 /* Merge the blocks and retry. */
2030 merge_blocks (bb, bb->succ->dest);
2031 changed = true;
2032 continue;
2033 }
2034
2035 bb = bb->next_bb;
2036 }
2037
2038 return changed;
2039 }
2040 \f
2041 /* Tidy the CFG by deleting unreachable code and whatnot. */
2042
2043 bool
2044 cleanup_cfg (int mode)
2045 {
2046 bool changed = false;
2047
2048 timevar_push (TV_CLEANUP_CFG);
2049 if (delete_unreachable_blocks ())
2050 {
2051 changed = true;
2052 /* We've possibly created trivially dead code. Clean it up right
2053 now to introduce more opportunities for try_optimize_cfg. */
2054 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2055 && !reload_completed)
2056 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2057 }
2058
2059 compact_blocks ();
2060
2061 while (try_optimize_cfg (mode))
2062 {
2063 delete_unreachable_blocks (), changed = true;
2064 if (mode & CLEANUP_UPDATE_LIFE)
2065 {
2066 /* Cleaning up CFG introduces more opportunities for dead code
2067 removal that in turn may introduce more opportunities for
2068 cleaning up the CFG. */
2069 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2070 PROP_DEATH_NOTES
2071 | PROP_SCAN_DEAD_CODE
2072 | PROP_KILL_DEAD_CODE
2073 | ((mode & CLEANUP_LOG_LINKS)
2074 ? PROP_LOG_LINKS : 0)))
2075 break;
2076 }
2077 else if (!(mode & CLEANUP_NO_INSN_DEL)
2078 && (mode & CLEANUP_EXPENSIVE)
2079 && !reload_completed)
2080 {
2081 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2082 break;
2083 }
2084 else
2085 break;
2086 delete_dead_jumptables ();
2087 }
2088
2089 /* Kill the data we won't maintain. */
2090 free_EXPR_LIST_list (&label_value_list);
2091 timevar_pop (TV_CLEANUP_CFG);
2092
2093 return changed;
2094 }