1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer.  The main entry point is
23    cleanup_cfg.  The following optimizations are performed:
24
25 - Unreachable blocks removal
26    - Edge forwarding (an edge to a forwarder block is forwarded to its
27      successor.  Simplification of the branch instruction is performed by
28      the underlying infrastructure, so the branch can be converted to a
29      simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
54
55 /* cleanup_cfg maintains the following flags for each basic block.  */
56
57 enum bb_flags
58 {
59   /* Set if BB is a forwarder block, to avoid too many
60      forwarder_block_p calls.  */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
63 };
64
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
70
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
72
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
80
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
92 \f
93 /* Set flags for newly created block. */
94
95 static void
96 notice_new_block (basic_block bb)
97 {
98 if (!bb)
99 return;
100
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
103 }
104
105 /* Recompute forwarder flag after block has been modified. */
106
107 static void
108 update_forwarder_flag (basic_block bb)
109 {
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
114 }
115 \f
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
118
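/* A sketch of the shape handled here, using illustrative labels only:

       if (cond) goto L1;     <- cbranch_block ends with the conditional branch
       goto L2;               <- jump_block, a forwarder
     L1: ...                  <- cbranch_dest_block

   The conditional branch is inverted to "if (!cond) goto L2" and falls
   through to L1, after which the forwarder block holding "goto L2" is
   deleted.  */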
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
121 {
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
125 rtx insn, next;
126 rtx end;
127
128 /* Verify that there are exactly two successors. */
129 if (!cbranch_block->succ
130 || !cbranch_block->succ->succ_next
131 || cbranch_block->succ->succ_next->succ_next)
132 return false;
133
134 /* Verify that we've got a normal conditional branch at the end
135 of the block. */
136 cbranch_insn = BB_END (cbranch_block);
137 if (!any_condjump_p (cbranch_insn))
138 return false;
139
140 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
141 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
142
143 /* The next block must not have multiple predecessors, must not
144 be the last block in the function, and must contain just the
145 unconditional jump. */
146 jump_block = cbranch_fallthru_edge->dest;
147 if (jump_block->pred->pred_next
148 || jump_block->next_bb == EXIT_BLOCK_PTR
149 || !FORWARDER_BLOCK_P (jump_block))
150 return false;
151 jump_dest_block = jump_block->succ->dest;
152
153 /* If we are partitioning hot/cold basic blocks, we don't want to
154 mess up unconditional or indirect jumps that cross between hot
155 and cold sections. */
156
157 if (flag_reorder_blocks_and_partition
158 && (jump_block->partition != jump_dest_block->partition
159 || cbranch_jump_edge->crossing_edge))
160 return false;
161
162 /* The conditional branch must target the block after the
163 unconditional branch. */
164 cbranch_dest_block = cbranch_jump_edge->dest;
165
166 if (cbranch_dest_block == EXIT_BLOCK_PTR
167 || !can_fallthru (jump_block, cbranch_dest_block))
168 return false;
169
170 /* Invert the conditional branch. */
171 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
172 return false;
173
174 if (dump_file)
175 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
176 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
177
178 /* Success. Update the CFG to match. Note that after this point
179 the edge variable names appear backwards; the redirection is done
180 this way to preserve edge profile data. */
181 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
182 cbranch_dest_block);
183 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
184 jump_dest_block);
185 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
186 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
187 update_br_prob_note (cbranch_block);
188
189 end = BB_END (jump_block);
190   /* Deleting a block may produce an unreachable-code warning even when we are
191      not deleting anything live.  Suppress it by moving all the line number
192      notes out of the block.  */
193 for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
194 insn = next)
195 {
196 next = NEXT_INSN (insn);
197 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
198 {
199 if (insn == BB_END (jump_block))
200 {
201 BB_END (jump_block) = PREV_INSN (insn);
202 if (insn == end)
203 break;
204 }
205 reorder_insns_nobb (insn, insn, end);
206 end = insn;
207 }
208 }
209 /* Delete the block with the unconditional jump, and clean up the mess. */
210 delete_basic_block (jump_block);
211 tidy_fallthru_edge (cbranch_jump_edge);
212
213 return true;
214 }
215 \f
216 /* Attempt to prove that the operation is a no-op using CSElib, or mark its
217    effect on the register.  Used by jump threading.  */
218
219 static bool
220 mark_effect (rtx exp, regset nonequal)
221 {
222 int regno;
223 rtx dest;
224 switch (GET_CODE (exp))
225 {
226     /* In case we do clobber the register, mark it as equal, as we know the
227        value is dead, so it doesn't have to match.  */
228 case CLOBBER:
229 if (REG_P (XEXP (exp, 0)))
230 {
231 dest = XEXP (exp, 0);
232 regno = REGNO (dest);
233 CLEAR_REGNO_REG_SET (nonequal, regno);
234 if (regno < FIRST_PSEUDO_REGISTER)
235 {
236 int n = hard_regno_nregs[regno][GET_MODE (dest)];
237 while (--n > 0)
238 CLEAR_REGNO_REG_SET (nonequal, regno + n);
239 }
240 }
241 return false;
242
243 case SET:
244 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
245 return false;
246 dest = SET_DEST (exp);
247 if (dest == pc_rtx)
248 return false;
249 if (!REG_P (dest))
250 return true;
251 regno = REGNO (dest);
252 SET_REGNO_REG_SET (nonequal, regno);
253 if (regno < FIRST_PSEUDO_REGISTER)
254 {
255 int n = hard_regno_nregs[regno][GET_MODE (dest)];
256 while (--n > 0)
257 SET_REGNO_REG_SET (nonequal, regno + n);
258 }
259 return false;
260
261 default:
262 return false;
263 }
264 }
265
266 /* Return nonzero if X is a register set in regset DATA.
267 Called via for_each_rtx. */
268 static int
269 mentions_nonequal_regs (rtx *x, void *data)
270 {
271 regset nonequal = (regset) data;
272 if (REG_P (*x))
273 {
274 int regno;
275
276 regno = REGNO (*x);
277 if (REGNO_REG_SET_P (nonequal, regno))
278 return 1;
279 if (regno < FIRST_PSEUDO_REGISTER)
280 {
281 int n = hard_regno_nregs[regno][GET_MODE (*x)];
282 while (--n > 0)
283 if (REGNO_REG_SET_P (nonequal, regno + n))
284 return 1;
285 }
286 }
287 return 0;
288 }
289 /* Attempt to prove that the basic block B will have no side effects and
290    always continues along the same edge if reached via E.  Return that edge
291    if it exists, NULL otherwise.  */
292
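/* An illustrative case (hypothetical conditions, not from this file):

       bb A:  if (x > 0) goto B;  ...
       bb B:  if (x != 0) goto C; else goto D;

   On the edge A->B the condition "x > 0" dominates "x != 0", so if B has
   no side effects the edge can be threaded directly to C.  */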
293 static edge
294 thread_jump (int mode, edge e, basic_block b)
295 {
296 rtx set1, set2, cond1, cond2, insn;
297 enum rtx_code code1, code2, reversed_code2;
298 bool reverse1 = false;
299 int i;
300 regset nonequal;
301 bool failed = false;
302
303 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
304 return NULL;
305
306   /* At the moment we handle only conditional jumps, but later we may
307      want to extend this code to tablejumps and others.  */
308 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
309 return NULL;
310 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
311 {
312 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
313 return NULL;
314 }
315
316 /* Second branch must end with onlyjump, as we will eliminate the jump. */
317 if (!any_condjump_p (BB_END (e->src)))
318 return NULL;
319
320 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
321 {
322 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
323 return NULL;
324 }
325
326 set1 = pc_set (BB_END (e->src));
327 set2 = pc_set (BB_END (b));
328 if (((e->flags & EDGE_FALLTHRU) != 0)
329 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
330 reverse1 = true;
331
332 cond1 = XEXP (SET_SRC (set1), 0);
333 cond2 = XEXP (SET_SRC (set2), 0);
334 if (reverse1)
335 code1 = reversed_comparison_code (cond1, BB_END (e->src));
336 else
337 code1 = GET_CODE (cond1);
338
339 code2 = GET_CODE (cond2);
340 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
341
342 if (!comparison_dominates_p (code1, code2)
343 && !comparison_dominates_p (code1, reversed_code2))
344 return NULL;
345
346   /* Ensure that the comparison operators are equivalent.
347      ??? This is far too pessimistic.  We should allow swapped operands,
348      different CCmodes, or, for example, interval comparisons that
349      dominate even when the operands are not equivalent.  */
350 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
351 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
352 return NULL;
353
354 /* Short circuit cases where block B contains some side effects, as we can't
355 safely bypass it. */
356 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
357 insn = NEXT_INSN (insn))
358 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
359 {
360 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
361 return NULL;
362 }
363
364 cselib_init (false);
365
366 /* First process all values computed in the source basic block. */
367 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
368 insn = NEXT_INSN (insn))
369 if (INSN_P (insn))
370 cselib_process_insn (insn);
371
372 nonequal = BITMAP_XMALLOC();
373 CLEAR_REG_SET (nonequal);
374
375   /* Now assume that we have continued along the edge E to B and continue
376      processing as if it were the same basic block.
377      Our goal is to prove that the whole block is a no-op.  */
378
379 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
380 insn = NEXT_INSN (insn))
381 {
382 if (INSN_P (insn))
383 {
384 rtx pat = PATTERN (insn);
385
386 if (GET_CODE (pat) == PARALLEL)
387 {
388 for (i = 0; i < XVECLEN (pat, 0); i++)
389 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
390 }
391 else
392 failed |= mark_effect (pat, nonequal);
393 }
394
395 cselib_process_insn (insn);
396 }
397
398   /* Later we should clear nonequal of dead registers.  So far we don't
399      have liveness information in cfg_cleanup.  */
400 if (failed)
401 {
402 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
403 goto failed_exit;
404 }
405
406   /* cond2 must not mention any register that is not equal to its value
407      in the former block.  */
408 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
409 goto failed_exit;
410
411 /* In case liveness information is available, we need to prove equivalence
412 only of the live values. */
413 if (mode & CLEANUP_UPDATE_LIFE)
414 AND_REG_SET (nonequal, b->global_live_at_end);
415
416 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
417
418 BITMAP_XFREE (nonequal);
419 cselib_finish ();
420 if ((comparison_dominates_p (code1, code2) != 0)
421 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
422 return BRANCH_EDGE (b);
423 else
424 return FALLTHRU_EDGE (b);
425
426 failed_exit:
427 BITMAP_XFREE (nonequal);
428 cselib_finish ();
429 return NULL;
430 }
431 \f
432 /* Attempt to forward edges leaving basic block B.
433 Return true if successful. */
434
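/* A sketch of the forwarding performed here: an edge B -> F, where F is a
   forwarder block containing nothing but a jump to L, is redirected
   straight to L; chains of forwarders are followed, and with
   CLEANUP_THREADING the edge may additionally be threaded through blocks
   that thread_jump proves to be no-ops.  */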
435 static bool
436 try_forward_edges (int mode, basic_block b)
437 {
438 bool changed = false;
439 edge e, next, *threaded_edges = NULL;
440
441 /* If we are partitioning hot/cold basic blocks, we don't want to
442 mess up unconditional or indirect jumps that cross between hot
443 and cold sections. */
444
445 if (flag_reorder_blocks_and_partition
446 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
447 return false;
448
449 for (e = b->succ; e; e = next)
450 {
451 basic_block target, first;
452 int counter;
453 bool threaded = false;
454 int nthreaded_edges = 0;
455 bool may_thread = first_pass | (b->flags & BB_DIRTY);
456
457 next = e->succ_next;
458
459 /* Skip complex edges because we don't know how to update them.
460
461 	 Still handle fallthru edges, as we may succeed in forwarding the
462 	 fallthru edge to the same place as the branch edge of a conditional
463 	 branch, turning the conditional branch into an unconditional one.  */
464 if (e->flags & EDGE_COMPLEX)
465 continue;
466
467 target = first = e->dest;
468 counter = 0;
469
470 while (counter < n_basic_blocks)
471 {
472 basic_block new_target = NULL;
473 bool new_target_threaded = false;
474 may_thread |= target->flags & BB_DIRTY;
475
476 if (FORWARDER_BLOCK_P (target)
477 && target->succ->dest != EXIT_BLOCK_PTR)
478 {
479 /* Bypass trivial infinite loops. */
480 if (target == target->succ->dest)
481 counter = n_basic_blocks;
482 new_target = target->succ->dest;
483 }
484
485 	  /* Allow threading over only one edge at a time, to simplify updating
486 	     of probabilities.  */
487 else if ((mode & CLEANUP_THREADING) && may_thread)
488 {
489 edge t = thread_jump (mode, e, target);
490 if (t)
491 {
492 if (!threaded_edges)
493 threaded_edges = xmalloc (sizeof (*threaded_edges)
494 * n_basic_blocks);
495 else
496 {
497 int i;
498
499 /* Detect an infinite loop across blocks not
500 including the start block. */
501 for (i = 0; i < nthreaded_edges; ++i)
502 if (threaded_edges[i] == t)
503 break;
504 if (i < nthreaded_edges)
505 {
506 counter = n_basic_blocks;
507 break;
508 }
509 }
510
511 /* Detect an infinite loop across the start block. */
512 if (t->dest == b)
513 break;
514
515 if (nthreaded_edges >= n_basic_blocks)
516 abort ();
517 threaded_edges[nthreaded_edges++] = t;
518
519 new_target = t->dest;
520 new_target_threaded = true;
521 }
522 }
523
524 if (!new_target)
525 break;
526
527 	  /* Avoid killing loop pre-headers, as they are the place the loop
528 	     optimizer wants to hoist code to.
529
530 	     For fallthru forwarders, the LOOP_BEG note must appear between
531 	     the header of the block and the CODE_LABEL of the loop; for
532 	     non-forwarders it must appear before the JUMP_INSN.  */
533 if ((mode & CLEANUP_PRE_LOOP) && optimize)
534 {
535 rtx insn = (target->succ->flags & EDGE_FALLTHRU
536 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
537
538 if (GET_CODE (insn) != NOTE)
539 insn = NEXT_INSN (insn);
540
541 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
542 insn = NEXT_INSN (insn))
543 if (GET_CODE (insn) == NOTE
544 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
545 break;
546
547 if (GET_CODE (insn) == NOTE)
548 break;
549
550 /* Do not clean up branches to just past the end of a loop
551 at this time; it can mess up the loop optimizer's
552 recognition of some patterns. */
553
554 insn = PREV_INSN (BB_HEAD (target));
555 if (insn && GET_CODE (insn) == NOTE
556 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
557 break;
558 }
559
560 counter++;
561 target = new_target;
562 threaded |= new_target_threaded;
563 }
564
565 if (counter >= n_basic_blocks)
566 {
567 if (dump_file)
568 fprintf (dump_file, "Infinite loop in BB %i.\n",
569 target->index);
570 }
571 else if (target == first)
572 ; /* We didn't do anything. */
573 else
574 {
575 /* Save the values now, as the edge may get removed. */
576 gcov_type edge_count = e->count;
577 int edge_probability = e->probability;
578 int edge_frequency;
579 int n = 0;
580
581 /* Don't force if target is exit block. */
582 if (threaded && target != EXIT_BLOCK_PTR)
583 {
584 notice_new_block (redirect_edge_and_branch_force (e, target));
585 if (dump_file)
586 fprintf (dump_file, "Conditionals threaded.\n");
587 }
588 else if (!redirect_edge_and_branch (e, target))
589 {
590 if (dump_file)
591 fprintf (dump_file,
592 "Forwarding edge %i->%i to %i failed.\n",
593 b->index, e->dest->index, target->index);
594 continue;
595 }
596
597 /* We successfully forwarded the edge. Now update profile
598 data: for each edge we traversed in the chain, remove
599 the original edge's execution count. */
600 edge_frequency = ((edge_probability * b->frequency
601 + REG_BR_PROB_BASE / 2)
602 / REG_BR_PROB_BASE);
603
604 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
605 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
606
607 do
608 {
609 edge t;
610
611 first->count -= edge_count;
612 if (first->count < 0)
613 first->count = 0;
614 first->frequency -= edge_frequency;
615 if (first->frequency < 0)
616 first->frequency = 0;
617 if (first->succ->succ_next)
618 {
619 edge e;
620 int prob;
621 if (n >= nthreaded_edges)
622 abort ();
623 t = threaded_edges [n++];
624 if (t->src != first)
625 abort ();
626 if (first->frequency)
627 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
628 else
629 prob = 0;
630 if (prob > t->probability)
631 prob = t->probability;
632 t->probability -= prob;
633 prob = REG_BR_PROB_BASE - prob;
634 if (prob <= 0)
635 {
636 first->succ->probability = REG_BR_PROB_BASE;
637 first->succ->succ_next->probability = 0;
638 }
639 else
640 for (e = first->succ; e; e = e->succ_next)
641 e->probability = ((e->probability * REG_BR_PROB_BASE)
642 / (double) prob);
643 update_br_prob_note (first);
644 }
645 else
646 {
647 		  /* It is possible that, as a result of
648 		     threading, we have removed the edge because
649 		     it was threaded to the fallthru edge.  Avoid
650 		     getting out of sync.  */
651 if (n < nthreaded_edges
652 && first == threaded_edges [n]->src)
653 n++;
654 t = first->succ;
655 }
656
657 t->count -= edge_count;
658 if (t->count < 0)
659 t->count = 0;
660 first = t->dest;
661 }
662 while (first != target);
663
664 changed = true;
665 }
666 }
667
668 if (threaded_edges)
669 free (threaded_edges);
670 return changed;
671 }
672 \f
673
674 /* Blocks A and B are to be merged into a single block. A has no incoming
675 fallthru edge, so it can be moved before B without adding or modifying
676 any jumps (aside from the jump from A to B). */
677
678 static void
679 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
680 {
681 rtx barrier;
682
683 /* If we are partitioning hot/cold basic blocks, we don't want to
684 mess up unconditional or indirect jumps that cross between hot
685 and cold sections. */
686
687 if (flag_reorder_blocks_and_partition
688 && (a->partition != b->partition
689 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
690 return;
691
692 barrier = next_nonnote_insn (BB_END (a));
693 if (GET_CODE (barrier) != BARRIER)
694 abort ();
695 delete_insn (barrier);
696
697 /* Move block and loop notes out of the chain so that we do not
698 disturb their order.
699
700 ??? A better solution would be to squeeze out all the non-nested notes
701 and adjust the block trees appropriately. Even better would be to have
702 a tighter connection between block trees and rtl so that this is not
703 necessary. */
704 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
705 abort ();
706
707 /* Scramble the insn chain. */
708 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
709 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
710 a->flags |= BB_DIRTY;
711
712 if (dump_file)
713 fprintf (dump_file, "Moved block %d before %d and merged.\n",
714 a->index, b->index);
715
716 /* Swap the records for the two blocks around. */
717
718 unlink_block (a);
719 link_block (a, b->prev_bb);
720
721 /* Now blocks A and B are contiguous. Merge them. */
722 merge_blocks (a, b);
723 }
724
725 /* Blocks A and B are to be merged into a single block. B has no outgoing
726 fallthru edge, so it can be moved after A without adding or modifying
727 any jumps (aside from the jump from A to B). */
728
729 static void
730 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
731 {
732 rtx barrier, real_b_end;
733 rtx label, table;
734
735 /* If we are partitioning hot/cold basic blocks, we don't want to
736 mess up unconditional or indirect jumps that cross between hot
737 and cold sections. */
738
739 if (flag_reorder_blocks_and_partition
740 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
741 || a->partition != b->partition))
742 return;
743
744 real_b_end = BB_END (b);
745
746 /* If there is a jump table following block B temporarily add the jump table
747 to block B so that it will also be moved to the correct location. */
748 if (tablejump_p (BB_END (b), &label, &table)
749 && prev_active_insn (label) == BB_END (b))
750 {
751 BB_END (b) = table;
752 }
753
754 /* There had better have been a barrier there. Delete it. */
755 barrier = NEXT_INSN (BB_END (b));
756 if (barrier && GET_CODE (barrier) == BARRIER)
757 delete_insn (barrier);
758
759 /* Move block and loop notes out of the chain so that we do not
760 disturb their order.
761
762 ??? A better solution would be to squeeze out all the non-nested notes
763 and adjust the block trees appropriately. Even better would be to have
764 a tighter connection between block trees and rtl so that this is not
765 necessary. */
766 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
767 abort ();
768
769 /* Scramble the insn chain. */
770 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
771
772 /* Restore the real end of b. */
773 BB_END (b) = real_b_end;
774
775 if (dump_file)
776 fprintf (dump_file, "Moved block %d after %d and merged.\n",
777 b->index, a->index);
778
779 /* Now blocks A and B are contiguous. Merge them. */
780 merge_blocks (a, b);
781 }
782
783 /* Attempt to merge basic blocks that are potentially non-adjacent.
784    Return NULL iff the attempt failed, otherwise return the basic block
785    where cleanup_cfg should continue.  Because the merging commonly
786    moves a basic block away or introduces another optimization
787    possibility, return the basic block just before B so that cleanup_cfg
788    does not need to iterate.
789
790    It may be a good idea to return the basic block before C in the case
791    where C has been moved after B and originally appeared earlier in the
792    insn sequence, but we have no information available about the
793    relative ordering of these two.  Hopefully it is not too common.  */
794
795 static basic_block
796 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
797 {
798 basic_block next;
799
800 /* If we are partitioning hot/cold basic blocks, we don't want to
801 mess up unconditional or indirect jumps that cross between hot
802 and cold sections. */
803
804 if (flag_reorder_blocks_and_partition
805 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
806 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
807 || b->partition != c->partition))
808 return NULL;
809
810
811
812 /* If B has a fallthru edge to C, no need to move anything. */
813 if (e->flags & EDGE_FALLTHRU)
814 {
815 int b_index = b->index, c_index = c->index;
816 merge_blocks (b, c);
817 update_forwarder_flag (b);
818
819 if (dump_file)
820 fprintf (dump_file, "Merged %d and %d without moving.\n",
821 b_index, c_index);
822
823 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
824 }
825
826 /* Otherwise we will need to move code around. Do that only if expensive
827 transformations are allowed. */
828 else if (mode & CLEANUP_EXPENSIVE)
829 {
830 edge tmp_edge, b_fallthru_edge;
831 bool c_has_outgoing_fallthru;
832 bool b_has_incoming_fallthru;
833
834 /* Avoid overactive code motion, as the forwarder blocks should be
835 eliminated by edge redirection instead. One exception might have
836 been if B is a forwarder block and C has no fallthru edge, but
837 that should be cleaned up by bb-reorder instead. */
838 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
839 return NULL;
840
841 /* We must make sure to not munge nesting of lexical blocks,
842 and loop notes. This is done by squeezing out all the notes
843 and leaving them there to lie. Not ideal, but functional. */
844
845 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
846 if (tmp_edge->flags & EDGE_FALLTHRU)
847 break;
848
849 c_has_outgoing_fallthru = (tmp_edge != NULL);
850
851 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
852 if (tmp_edge->flags & EDGE_FALLTHRU)
853 break;
854
855 b_has_incoming_fallthru = (tmp_edge != NULL);
856 b_fallthru_edge = tmp_edge;
857 next = b->prev_bb;
858 if (next == c)
859 next = next->prev_bb;
860
861 /* Otherwise, we're going to try to move C after B. If C does
862 not have an outgoing fallthru, then it can be moved
863 immediately after B without introducing or modifying jumps. */
864 if (! c_has_outgoing_fallthru)
865 {
866 merge_blocks_move_successor_nojumps (b, c);
867 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
868 }
869
870 /* If B does not have an incoming fallthru, then it can be moved
871 immediately before C without introducing or modifying jumps.
872 C cannot be the first block, so we do not have to worry about
873 accessing a non-existent block. */
874
875 if (b_has_incoming_fallthru)
876 {
877 basic_block bb;
878
879 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
880 return NULL;
881 bb = force_nonfallthru (b_fallthru_edge);
882 if (bb)
883 notice_new_block (bb);
884 }
885
886 merge_blocks_move_predecessor_nojumps (b, c);
887 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
888 }
889
890 return NULL;
891 }
892 \f
893
894 /* Remove the memory attributes of a MEM expression
895    if they are not equal.  */
896
897 void
898 merge_memattrs (rtx x, rtx y)
899 {
900 int i;
901 int j;
902 enum rtx_code code;
903 const char *fmt;
904
905 if (x == y)
906 return;
907 if (x == 0 || y == 0)
908 return;
909
910 code = GET_CODE (x);
911
912 if (code != GET_CODE (y))
913 return;
914
915 if (GET_MODE (x) != GET_MODE (y))
916 return;
917
918 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
919 {
920 if (! MEM_ATTRS (x))
921 MEM_ATTRS (y) = 0;
922 else if (! MEM_ATTRS (y))
923 MEM_ATTRS (x) = 0;
924 else
925 {
926 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
927 {
928 set_mem_alias_set (x, 0);
929 set_mem_alias_set (y, 0);
930 }
931
932 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
933 {
934 set_mem_expr (x, 0);
935 set_mem_expr (y, 0);
936 set_mem_offset (x, 0);
937 set_mem_offset (y, 0);
938 }
939 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
940 {
941 set_mem_offset (x, 0);
942 set_mem_offset (y, 0);
943 }
944
945 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
946 set_mem_size (y, MEM_SIZE (x));
947
948 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
949 set_mem_align (y, MEM_ALIGN (x));
950 }
951 }
952
953 fmt = GET_RTX_FORMAT (code);
954 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
955 {
956 switch (fmt[i])
957 {
958 case 'E':
959 /* Two vectors must have the same length. */
960 if (XVECLEN (x, i) != XVECLEN (y, i))
961 return;
962
963 for (j = 0; j < XVECLEN (x, i); j++)
964 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
965
966 break;
967
968 case 'e':
969 merge_memattrs (XEXP (x, i), XEXP (y, i));
970 }
971 }
972 return;
973 }
974
975
976 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
977
978 static bool
979 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
980 {
981 rtx p1, p2;
982
983 /* Verify that I1 and I2 are equivalent. */
984 if (GET_CODE (i1) != GET_CODE (i2))
985 return false;
986
987 p1 = PATTERN (i1);
988 p2 = PATTERN (i2);
989
990 if (GET_CODE (p1) != GET_CODE (p2))
991 return false;
992
993 /* If this is a CALL_INSN, compare register usage information.
994 If we don't check this on stack register machines, the two
995 CALL_INSNs might be merged leaving reg-stack.c with mismatching
996 numbers of stack registers in the same basic block.
997 If we don't check this on machines with delay slots, a delay slot may
998 be filled that clobbers a parameter expected by the subroutine.
999
1000 ??? We take the simple route for now and assume that if they're
1001 equal, they were constructed identically. */
1002
1003 if (GET_CODE (i1) == CALL_INSN
1004 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1005 CALL_INSN_FUNCTION_USAGE (i2))
1006 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1007 return false;
1008
1009 #ifdef STACK_REGS
1010 /* If cross_jump_death_matters is not 0, the insn's mode
1011 indicates whether or not the insn contains any stack-like
1012 regs. */
1013
1014 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1015 {
1016 /* If register stack conversion has already been done, then
1017 death notes must also be compared before it is certain that
1018 the two instruction streams match. */
1019
1020 rtx note;
1021 HARD_REG_SET i1_regset, i2_regset;
1022
1023 CLEAR_HARD_REG_SET (i1_regset);
1024 CLEAR_HARD_REG_SET (i2_regset);
1025
1026 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1027 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1028 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1029
1030 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1031 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1032 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1033
1034 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1035
1036 return false;
1037
1038 done:
1039 ;
1040 }
1041 #endif
1042
1043 if (reload_completed
1044 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1045 return true;
1046
1047 /* Do not do EQUIV substitution after reload. First, we're undoing the
1048 work of reload_cse. Second, we may be undoing the work of the post-
1049 reload splitting pass. */
1050 /* ??? Possibly add a new phase switch variable that can be used by
1051 targets to disallow the troublesome insns after splitting. */
1052 if (!reload_completed)
1053 {
1054 /* The following code helps take care of G++ cleanups. */
1055 rtx equiv1 = find_reg_equal_equiv_note (i1);
1056 rtx equiv2 = find_reg_equal_equiv_note (i2);
1057
1058 if (equiv1 && equiv2
1059 /* If the equivalences are not to a constant, they may
1060 reference pseudos that no longer exist, so we can't
1061 use them. */
1062 && (! reload_completed
1063 || (CONSTANT_P (XEXP (equiv1, 0))
1064 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1065 {
1066 rtx s1 = single_set (i1);
1067 rtx s2 = single_set (i2);
1068 if (s1 != 0 && s2 != 0
1069 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1070 {
1071 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1072 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1073 if (! rtx_renumbered_equal_p (p1, p2))
1074 cancel_changes (0);
1075 else if (apply_change_group ())
1076 return true;
1077 }
1078 }
1079 }
1080
1081 return false;
1082 }
1083 \f
1084 /* Look through the insns at the end of BB1 and BB2 and find the longest
1085 sequence that are equivalent. Store the first insns for that sequence
1086 in *F1 and *F2 and return the sequence length.
1087
1088 To simplify callers of this function, if the blocks match exactly,
1089 store the head of the blocks in *F1 and *F2. */
1090
1091 static int
1092 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1093 basic_block bb2, rtx *f1, rtx *f2)
1094 {
1095 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1096 int ninsns = 0;
1097
1098 /* Skip simple jumps at the end of the blocks. Complex jumps still
1099 need to be compared for equivalence, which we'll do below. */
1100
1101 i1 = BB_END (bb1);
1102 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1103 if (onlyjump_p (i1)
1104 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1105 {
1106 last1 = i1;
1107 i1 = PREV_INSN (i1);
1108 }
1109
1110 i2 = BB_END (bb2);
1111 if (onlyjump_p (i2)
1112 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1113 {
1114 last2 = i2;
1115 	  /* Count everything except an unconditional jump as an insn.  */
1116 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1117 ninsns++;
1118 i2 = PREV_INSN (i2);
1119 }
1120
1121 while (true)
1122 {
1123 /* Ignore notes. */
1124 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1125 i1 = PREV_INSN (i1);
1126
1127 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1128 i2 = PREV_INSN (i2);
1129
1130 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1131 break;
1132
1133 if (!insns_match_p (mode, i1, i2))
1134 break;
1135
1136 merge_memattrs (i1, i2);
1137
1138 /* Don't begin a cross-jump with a NOTE insn. */
1139 if (INSN_P (i1))
1140 {
1141 /* If the merged insns have different REG_EQUAL notes, then
1142 remove them. */
1143 rtx equiv1 = find_reg_equal_equiv_note (i1);
1144 rtx equiv2 = find_reg_equal_equiv_note (i2);
1145
1146 if (equiv1 && !equiv2)
1147 remove_note (i1, equiv1);
1148 else if (!equiv1 && equiv2)
1149 remove_note (i2, equiv2);
1150 else if (equiv1 && equiv2
1151 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1152 {
1153 remove_note (i1, equiv1);
1154 remove_note (i2, equiv2);
1155 }
1156
1157 afterlast1 = last1, afterlast2 = last2;
1158 last1 = i1, last2 = i2;
1159 ninsns++;
1160 }
1161
1162 i1 = PREV_INSN (i1);
1163 i2 = PREV_INSN (i2);
1164 }
1165
1166 #ifdef HAVE_cc0
1167 /* Don't allow the insn after a compare to be shared by
1168 cross-jumping unless the compare is also shared. */
1169 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1170 last1 = afterlast1, last2 = afterlast2, ninsns--;
1171 #endif
1172
1173   /* Include preceding notes and labels in the cross-jump.  One,
1174      this may bring us to the head of the blocks as requested above.
1175      Two, it keeps line number notes as well matched as possible.  */
1176 if (ninsns)
1177 {
1178 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1179 last1 = PREV_INSN (last1);
1180
1181 if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1182 last1 = PREV_INSN (last1);
1183
1184 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1185 last2 = PREV_INSN (last2);
1186
1187 if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1188 last2 = PREV_INSN (last2);
1189
1190 *f1 = last1;
1191 *f2 = last2;
1192 }
1193
1194 return ninsns;
1195 }
1196
1197 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1198    the branch instruction.  This means that if we commonize the control
1199    flow before the end of the basic block, the semantics remain unchanged.
1200
1201    We may assume that there exists one edge with a common destination.  */
1202
1203 static bool
1204 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1205 {
1206 int nehedges1 = 0, nehedges2 = 0;
1207 edge fallthru1 = 0, fallthru2 = 0;
1208 edge e1, e2;
1209
1210 /* If BB1 has only one successor, we may be looking at either an
1211 unconditional jump, or a fake edge to exit. */
1212 if (bb1->succ && !bb1->succ->succ_next
1213 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1214 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
1215 return (bb2->succ && !bb2->succ->succ_next
1216 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1217 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
1218
1219 /* Match conditional jumps - this may get tricky when fallthru and branch
1220 edges are crossed. */
1221 if (bb1->succ
1222 && bb1->succ->succ_next
1223 && !bb1->succ->succ_next->succ_next
1224 && any_condjump_p (BB_END (bb1))
1225 && onlyjump_p (BB_END (bb1)))
1226 {
1227 edge b1, f1, b2, f2;
1228 bool reverse, match;
1229 rtx set1, set2, cond1, cond2;
1230 enum rtx_code code1, code2;
1231
1232 if (!bb2->succ
1233 || !bb2->succ->succ_next
1234 || bb2->succ->succ_next->succ_next
1235 || !any_condjump_p (BB_END (bb2))
1236 || !onlyjump_p (BB_END (bb2)))
1237 return false;
1238
1239 b1 = BRANCH_EDGE (bb1);
1240 b2 = BRANCH_EDGE (bb2);
1241 f1 = FALLTHRU_EDGE (bb1);
1242 f2 = FALLTHRU_EDGE (bb2);
1243
1244 /* Get around possible forwarders on fallthru edges. Other cases
1245 should be optimized out already. */
1246 if (FORWARDER_BLOCK_P (f1->dest))
1247 f1 = f1->dest->succ;
1248
1249 if (FORWARDER_BLOCK_P (f2->dest))
1250 f2 = f2->dest->succ;
1251
1252 /* To simplify use of this function, return false if there are
1253 unneeded forwarder blocks. These will get eliminated later
1254 during cleanup_cfg. */
1255 if (FORWARDER_BLOCK_P (f1->dest)
1256 || FORWARDER_BLOCK_P (f2->dest)
1257 || FORWARDER_BLOCK_P (b1->dest)
1258 || FORWARDER_BLOCK_P (b2->dest))
1259 return false;
1260
1261 if (f1->dest == f2->dest && b1->dest == b2->dest)
1262 reverse = false;
1263 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1264 reverse = true;
1265 else
1266 return false;
1267
1268 set1 = pc_set (BB_END (bb1));
1269 set2 = pc_set (BB_END (bb2));
1270 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1271 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1272 reverse = !reverse;
1273
1274 cond1 = XEXP (SET_SRC (set1), 0);
1275 cond2 = XEXP (SET_SRC (set2), 0);
1276 code1 = GET_CODE (cond1);
1277 if (reverse)
1278 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1279 else
1280 code2 = GET_CODE (cond2);
1281
1282 if (code2 == UNKNOWN)
1283 return false;
1284
1285 /* Verify codes and operands match. */
1286 match = ((code1 == code2
1287 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1288 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1289 || (code1 == swap_condition (code2)
1290 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1291 XEXP (cond2, 0))
1292 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1293 XEXP (cond2, 1))));
1294
1295 /* If we return true, we will join the blocks. Which means that
1296 we will only have one branch prediction bit to work with. Thus
1297 we require the existing branches to have probabilities that are
1298 roughly similar. */
1299 if (match
1300 && !optimize_size
1301 && maybe_hot_bb_p (bb1)
1302 && maybe_hot_bb_p (bb2))
1303 {
1304 int prob2;
1305
1306 if (b1->dest == b2->dest)
1307 prob2 = b2->probability;
1308 else
1309 /* Do not use f2 probability as f2 may be forwarded. */
1310 prob2 = REG_BR_PROB_BASE - b2->probability;
1311
1312 /* Fail if the difference in probabilities is greater than 50%.
1313 This rules out two well-predicted branches with opposite
1314 outcomes. */
1315 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1316 {
1317 if (dump_file)
1318 fprintf (dump_file,
1319 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1320 bb1->index, bb2->index, b1->probability, prob2);
1321
1322 return false;
1323 }
1324 }
1325
1326 if (dump_file && match)
1327 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1328 bb1->index, bb2->index);
1329
1330 return match;
1331 }
1332
1333 /* Generic case - we are seeing a computed jump, table jump or trapping
1334 instruction. */
1335
1336 #ifndef CASE_DROPS_THROUGH
1337 /* Check whether there are tablejumps in the end of BB1 and BB2.
1338 Return true if they are identical. */
1339 {
1340 rtx label1, label2;
1341 rtx table1, table2;
1342
1343 if (tablejump_p (BB_END (bb1), &label1, &table1)
1344 && tablejump_p (BB_END (bb2), &label2, &table2)
1345 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1346 {
1347 	/* The labels should never be the same rtx.  If they really are the same,
1348 	   the jump tables are the same too.  So disable crossjumping of blocks BB1
1349 	   and BB2 because when deleting the common insns at the end of BB1
1350 	   by delete_basic_block () the jump table would be deleted too.  */
1351 	/* If LABEL2 is referenced in BB1->END, do not do anything
1352 	   because we would lose information when replacing
1353 	   LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1354 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1355 {
1356 /* Set IDENTICAL to true when the tables are identical. */
1357 bool identical = false;
1358 rtx p1, p2;
1359
1360 p1 = PATTERN (table1);
1361 p2 = PATTERN (table2);
1362 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1363 {
1364 identical = true;
1365 }
1366 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1367 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1368 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1369 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1370 {
1371 int i;
1372
1373 identical = true;
1374 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1375 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1376 identical = false;
1377 }
1378
1379 if (identical)
1380 {
1381 replace_label_data rr;
1382 bool match;
1383
1384 /* Temporarily replace references to LABEL1 with LABEL2
1385 in BB1->END so that we could compare the instructions. */
1386 rr.r1 = label1;
1387 rr.r2 = label2;
1388 rr.update_label_nuses = false;
1389 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1390
1391 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1392 if (dump_file && match)
1393 fprintf (dump_file,
1394 "Tablejumps in bb %i and %i match.\n",
1395 bb1->index, bb2->index);
1396
1397 /* Set the original label in BB1->END because when deleting
1398 a block whose end is a tablejump, the tablejump referenced
1399 from the instruction is deleted too. */
1400 rr.r1 = label2;
1401 rr.r2 = label1;
1402 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1403
1404 return match;
1405 }
1406 }
1407 return false;
1408 }
1409 }
1410 #endif
1411
1412 /* First ensure that the instructions match. There may be many outgoing
1413 edges so this test is generally cheaper. */
1414 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1415 return false;
1416
1417   /* Search the outgoing edges, ensure that the counts do match, and find
1418      possible fallthru and exception handling edges, since these need more
1419      validation.  */
1420 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1421 e1 = e1->succ_next, e2 = e2->succ_next)
1422 {
1423 if (e1->flags & EDGE_EH)
1424 nehedges1++;
1425
1426 if (e2->flags & EDGE_EH)
1427 nehedges2++;
1428
1429 if (e1->flags & EDGE_FALLTHRU)
1430 fallthru1 = e1;
1431 if (e2->flags & EDGE_FALLTHRU)
1432 fallthru2 = e2;
1433 }
1434
1435 /* If number of edges of various types does not match, fail. */
1436 if (e1 || e2
1437 || nehedges1 != nehedges2
1438 || (fallthru1 != 0) != (fallthru2 != 0))
1439 return false;
1440
1441 /* fallthru edges must be forwarded to the same destination. */
1442 if (fallthru1)
1443 {
1444 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1445 ? fallthru1->dest->succ->dest: fallthru1->dest);
1446 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1447 ? fallthru2->dest->succ->dest: fallthru2->dest);
1448
1449 if (d1 != d2)
1450 return false;
1451 }
1452
1453 /* Ensure the same EH region. */
1454 {
1455 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1456 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1457
1458 if (!n1 && n2)
1459 return false;
1460
1461 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1462 return false;
1463 }
1464
1465   /* We don't need to match the rest of the edges, as the above checks should
1466      be enough to ensure that they are equivalent.  */
1467 return true;
1468 }
1469
1470 /* E1 and E2 are edges with the same destination block. Search their
1471 predecessors for common code. If found, redirect control flow from
1472 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1473
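/* A sketch of the transformation, using illustrative statements only:

       bb1:  ...  a = b + c;  d = a * 2;  goto L;
       bb2:  ...  a = b + c;  d = a * 2;  goto L;

   The common tail is kept in only one of the two predecessors; the other
   is split before the matched sequence and redirected into the block that
   still holds the shared copy.  */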
1474 static bool
1475 try_crossjump_to_edge (int mode, edge e1, edge e2)
1476 {
1477 int nmatch;
1478 basic_block src1 = e1->src, src2 = e2->src;
1479 basic_block redirect_to, redirect_from, to_remove;
1480 rtx newpos1, newpos2;
1481 edge s;
1482
1483 newpos1 = newpos2 = NULL_RTX;
1484
1485 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1486 to try this optimization. */
1487
1488 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1489 return false;
1490
1491 /* Search backward through forwarder blocks. We don't need to worry
1492 about multiple entry or chained forwarders, as they will be optimized
1493 away. We do this to look past the unconditional jump following a
1494 conditional jump that is required due to the current CFG shape. */
1495 if (src1->pred
1496 && !src1->pred->pred_next
1497 && FORWARDER_BLOCK_P (src1))
1498 e1 = src1->pred, src1 = e1->src;
1499
1500 if (src2->pred
1501 && !src2->pred->pred_next
1502 && FORWARDER_BLOCK_P (src2))
1503 e2 = src2->pred, src2 = e2->src;
1504
1505 /* Nothing to do if we reach ENTRY, or a common source block. */
1506 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1507 return false;
1508 if (src1 == src2)
1509 return false;
1510
1511   /* Seeing more than one forwarder block would confuse us later...  */
1512 if (FORWARDER_BLOCK_P (e1->dest)
1513 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1514 return false;
1515
1516 if (FORWARDER_BLOCK_P (e2->dest)
1517 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1518 return false;
1519
1520 /* Likewise with dead code (possibly newly created by the other optimizations
1521 of cfg_cleanup). */
1522 if (!src1->pred || !src2->pred)
1523 return false;
1524
1525 /* Look for the common insn sequence, part the first ... */
1526 if (!outgoing_edges_match (mode, src1, src2))
1527 return false;
1528
1529 /* ... and part the second. */
1530 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1531 if (!nmatch)
1532 return false;
1533
1534 #ifndef CASE_DROPS_THROUGH
1535   /* Here we know that the insns at the end of SRC1 which are common with SRC2
1536      will be deleted.
1537      If there are tablejumps at the end of SRC1 and SRC2,
1538      they have already been compared for equivalence in outgoing_edges_match (),
1539      so replace the references to TABLE1 with references to TABLE2.  */
1540 {
1541 rtx label1, label2;
1542 rtx table1, table2;
1543
1544 if (tablejump_p (BB_END (src1), &label1, &table1)
1545 && tablejump_p (BB_END (src2), &label2, &table2)
1546 && label1 != label2)
1547 {
1548 replace_label_data rr;
1549 rtx insn;
1550
1551 /* Replace references to LABEL1 with LABEL2. */
1552 rr.r1 = label1;
1553 rr.r2 = label2;
1554 rr.update_label_nuses = true;
1555 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1556 {
1557 /* Do not replace the label in SRC1->END because when deleting
1558 a block whose end is a tablejump, the tablejump referenced
1559 from the instruction is deleted too. */
1560 if (insn != BB_END (src1))
1561 for_each_rtx (&insn, replace_label, &rr);
1562 }
1563 }
1564 }
1565 #endif
1566
1567 /* Avoid splitting if possible. */
1568 if (newpos2 == BB_HEAD (src2))
1569 redirect_to = src2;
1570 else
1571 {
1572 if (dump_file)
1573 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1574 src2->index, nmatch);
1575 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1576 }
1577
1578 if (dump_file)
1579 fprintf (dump_file,
1580 "Cross jumping from bb %i to bb %i; %i common insns\n",
1581 src1->index, src2->index, nmatch);
1582
1583 redirect_to->count += src1->count;
1584 redirect_to->frequency += src1->frequency;
1585   /* We may have some registers visible through the block.  */
1586 redirect_to->flags |= BB_DIRTY;
1587
1588 /* Recompute the frequencies and counts of outgoing edges. */
1589 for (s = redirect_to->succ; s; s = s->succ_next)
1590 {
1591 edge s2;
1592 basic_block d = s->dest;
1593
1594 if (FORWARDER_BLOCK_P (d))
1595 d = d->succ->dest;
1596
1597 for (s2 = src1->succ; ; s2 = s2->succ_next)
1598 {
1599 basic_block d2 = s2->dest;
1600 if (FORWARDER_BLOCK_P (d2))
1601 d2 = d2->succ->dest;
1602 if (d == d2)
1603 break;
1604 }
1605
1606 s->count += s2->count;
1607
1608       /* Take care to update possible forwarder blocks.  We verified
1609 	 that there is no more than one in the chain, so we can't run
1610 	 into an infinite loop.  */
1611 if (FORWARDER_BLOCK_P (s->dest))
1612 {
1613 s->dest->succ->count += s2->count;
1614 s->dest->count += s2->count;
1615 s->dest->frequency += EDGE_FREQUENCY (s);
1616 }
1617
1618 if (FORWARDER_BLOCK_P (s2->dest))
1619 {
1620 s2->dest->succ->count -= s2->count;
1621 if (s2->dest->succ->count < 0)
1622 s2->dest->succ->count = 0;
1623 s2->dest->count -= s2->count;
1624 s2->dest->frequency -= EDGE_FREQUENCY (s);
1625 if (s2->dest->frequency < 0)
1626 s2->dest->frequency = 0;
1627 if (s2->dest->count < 0)
1628 s2->dest->count = 0;
1629 }
1630
1631 if (!redirect_to->frequency && !src1->frequency)
1632 s->probability = (s->probability + s2->probability) / 2;
1633 else
1634 s->probability
1635 = ((s->probability * redirect_to->frequency +
1636 s2->probability * src1->frequency)
1637 / (redirect_to->frequency + src1->frequency));
1638 }
1639
1640 update_br_prob_note (redirect_to);
1641
1642 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1643
1644 /* Skip possible basic block header. */
1645 if (GET_CODE (newpos1) == CODE_LABEL)
1646 newpos1 = NEXT_INSN (newpos1);
1647
1648 if (GET_CODE (newpos1) == NOTE)
1649 newpos1 = NEXT_INSN (newpos1);
1650
1651 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1652 to_remove = redirect_from->succ->dest;
1653
1654 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1655 delete_basic_block (to_remove);
1656
1657 update_forwarder_flag (redirect_from);
1658
1659 return true;
1660 }
1661
1662 /* Search the predecessors of BB for common insn sequences. When found,
1663 share code between them by redirecting control flow. Return true if
1664 any changes made. */
1665
1666 static bool
1667 try_crossjump_bb (int mode, basic_block bb)
1668 {
1669 edge e, e2, nexte2, nexte, fallthru;
1670 bool changed;
1671 int n = 0, max;
1672
1673   /* Nothing to do if there are not at least two incoming edges.  */
1674 if (!bb->pred || !bb->pred->pred_next)
1675 return false;
1676
1677 /* If we are partitioning hot/cold basic blocks, we don't want to
1678 mess up unconditional or indirect jumps that cross between hot
1679 and cold sections. */
1680
1681 if (flag_reorder_blocks_and_partition
1682 && (bb->pred->src->partition != bb->pred->pred_next->src->partition
1683 || bb->pred->crossing_edge))
1684 return false;
1685
1686 /* It is always cheapest to redirect a block that ends in a branch to
1687 a block that falls through into BB, as that adds no branches to the
1688 program. We'll try that combination first. */
1689 fallthru = NULL;
1690 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1691 for (e = bb->pred; e ; e = e->pred_next, n++)
1692 {
1693 if (e->flags & EDGE_FALLTHRU)
1694 fallthru = e;
1695 if (n > max)
1696 return false;
1697 }
1698
1699 changed = false;
1700 for (e = bb->pred; e; e = nexte)
1701 {
1702 nexte = e->pred_next;
1703
1704 /* As noted above, first try with the fallthru predecessor. */
1705 if (fallthru)
1706 {
1707 /* Don't combine the fallthru edge into anything else.
1708 If there is a match, we'll do it the other way around. */
1709 if (e == fallthru)
1710 continue;
1711 /* If nothing changed since the last attempt, there is nothing
1712 we can do. */
1713 if (!first_pass
1714 && (!(e->src->flags & BB_DIRTY)
1715 && !(fallthru->src->flags & BB_DIRTY)))
1716 continue;
1717
1718 if (try_crossjump_to_edge (mode, e, fallthru))
1719 {
1720 changed = true;
1721 nexte = bb->pred;
1722 continue;
1723 }
1724 }
1725
1726 /* Non-obvious work limiting check: Recognize that we're going
1727 to call try_crossjump_bb on every basic block. So if we have
1728 two blocks with lots of outgoing edges (a switch) and they
1729 share lots of common destinations, then we would do the
1730 cross-jump check once for each common destination.
1731
1732 Now, if the blocks actually are cross-jump candidates, then
1733 all of their destinations will be shared. Which means that
1734 we only need check them for cross-jump candidacy once. We
1735 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1736 choosing to do the check from the block for which the edge
1737 in question is the first successor of A. */
1738 if (e->src->succ != e)
1739 continue;
1740
1741 for (e2 = bb->pred; e2; e2 = nexte2)
1742 {
1743 nexte2 = e2->pred_next;
1744
1745 if (e2 == e)
1746 continue;
1747
1748 /* We've already checked the fallthru edge above. */
1749 if (e2 == fallthru)
1750 continue;
1751
1752 /* The "first successor" check above only prevents multiple
1753 checks of crossjump(A,B). In order to prevent redundant
1754 checks of crossjump(B,A), require that A be the block
1755 with the lowest index. */
1756 if (e->src->index > e2->src->index)
1757 continue;
1758
1759 /* If nothing changed since the last attempt, there is nothing
1760 we can do. */
1761 if (!first_pass
1762 && (!(e->src->flags & BB_DIRTY)
1763 && !(e2->src->flags & BB_DIRTY)))
1764 continue;
1765
1766 if (try_crossjump_to_edge (mode, e, e2))
1767 {
1768 changed = true;
1769 nexte = bb->pred;
1770 break;
1771 }
1772 }
1773 }
1774
1775 return changed;
1776 }
1777
1778 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1779 instructions etc. Return nonzero if changes were made. */
1780
1781 static bool
1782 try_optimize_cfg (int mode)
1783 {
1784 bool changed_overall = false;
1785 bool changed;
1786 int iterations = 0;
1787 basic_block bb, b, next;
1788
1789 if (mode & CLEANUP_CROSSJUMP)
1790 add_noreturn_fake_exit_edges ();
1791
1792 FOR_EACH_BB (bb)
1793 update_forwarder_flag (bb);
1794
1795 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1796 clear_bb_flags ();
1797
1798 if (! targetm.cannot_modify_jumps_p ())
1799 {
1800 first_pass = true;
1801 /* Attempt to merge blocks as made possible by edge removal. If
1802 a block has only one successor, and the successor has only
1803 one predecessor, they may be combined. */
1804 do
1805 {
1806 changed = false;
1807 iterations++;
1808
1809 if (dump_file)
1810 fprintf (dump_file,
1811 "\n\ntry_optimize_cfg iteration %i\n\n",
1812 iterations);
1813
1814 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1815 {
1816 basic_block c;
1817 edge s;
1818 bool changed_here = false;
1819
1820 /* Delete trivially dead basic blocks. */
1821 while (b->pred == NULL)
1822 {
1823 c = b->prev_bb;
1824 if (dump_file)
1825 fprintf (dump_file, "Deleting block %i.\n",
1826 b->index);
1827
1828 delete_basic_block (b);
1829 if (!(mode & CLEANUP_CFGLAYOUT))
1830 changed = true;
1831 b = c;
1832 }
1833
1834 /* Remove code labels no longer used. */
1835 if (b->pred->pred_next == NULL
1836 && (b->pred->flags & EDGE_FALLTHRU)
1837 && !(b->pred->flags & EDGE_COMPLEX)
1838 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1839 /* If the previous block ends with a branch to this
1840 block, we can't delete the label. Normally this
1841 is a condjump that is yet to be simplified, but
1842 if CASE_DROPS_THRU, this can be a tablejump with
1843 some element going to the same place as the
1844 default (fallthru). */
1845 && (b->pred->src == ENTRY_BLOCK_PTR
1846 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1847 || ! label_is_jump_target_p (BB_HEAD (b),
1848 BB_END (b->pred->src))))
1849 {
1850 rtx label = BB_HEAD (b);
1851
1852 delete_insn_chain (label, label);
1853 		  /* If the label is undeletable, move it after the
1854 		     BASIC_BLOCK note.  */
1855 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1856 {
1857 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1858
1859 reorder_insns_nobb (label, label, bb_note);
1860 BB_HEAD (b) = bb_note;
1861 }
1862 if (dump_file)
1863 fprintf (dump_file, "Deleted label in block %i.\n",
1864 b->index);
1865 }
1866
1867 /* If we fall through an empty block, we can remove it. */
1868 if (!(mode & CLEANUP_CFGLAYOUT)
1869 && b->pred->pred_next == NULL
1870 && (b->pred->flags & EDGE_FALLTHRU)
1871 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1872 && FORWARDER_BLOCK_P (b)
1873 /* Note that forwarder_block_p true ensures that
1874 there is a successor for this block. */
1875 && (b->succ->flags & EDGE_FALLTHRU)
1876 && n_basic_blocks > 1)
1877 {
1878 if (dump_file)
1879 fprintf (dump_file,
1880 "Deleting fallthru block %i.\n",
1881 b->index);
1882
1883 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1884 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1885 delete_basic_block (b);
1886 changed = true;
1887 b = c;
1888 }
1889
1890 if ((s = b->succ) != NULL
1891 && s->succ_next == NULL
1892 && !(s->flags & EDGE_COMPLEX)
1893 && (c = s->dest) != EXIT_BLOCK_PTR
1894 && c->pred->pred_next == NULL
1895 && b != c)
1896 {
1897 	      /* When not in cfg_layout mode, use code aware of reordering
1898 		 insns.  This code possibly creates new basic blocks, so it
1899 		 does not fit the merge_blocks interface and is kept here in
1900 		 the hope that it will become useless once more of the compiler
1901 		 is transformed to use cfg_layout mode.  */
1902
1903 if ((mode & CLEANUP_CFGLAYOUT)
1904 && can_merge_blocks_p (b, c))
1905 {
1906 merge_blocks (b, c);
1907 update_forwarder_flag (b);
1908 changed_here = true;
1909 }
1910 else if (!(mode & CLEANUP_CFGLAYOUT)
1911 /* If the jump insn has side effects,
1912 we can't kill the edge. */
1913 && (GET_CODE (BB_END (b)) != JUMP_INSN
1914 || (reload_completed
1915 ? simplejump_p (BB_END (b))
1916 : (onlyjump_p (BB_END (b))
1917 && !tablejump_p (BB_END (b),
1918 NULL, NULL))))
1919 && (next = merge_blocks_move (s, b, c, mode)))
1920 {
1921 b = next;
1922 changed_here = true;
1923 }
1924 }
1925
1926 /* Simplify branch over branch. */
1927 if ((mode & CLEANUP_EXPENSIVE)
1928 && !(mode & CLEANUP_CFGLAYOUT)
1929 && try_simplify_condjump (b))
1930 changed_here = true;
1931
1932 /* If B has a single outgoing edge, but uses a
1933 non-trivial jump instruction without side-effects, we
1934 can either delete the jump entirely, or replace it
1935 with a simple unconditional jump. */
1936 if (b->succ
1937 && ! b->succ->succ_next
1938 && b->succ->dest != EXIT_BLOCK_PTR
1939 && onlyjump_p (BB_END (b))
1940 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1941 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1942 (mode & CLEANUP_CFGLAYOUT) != 0))
1943 {
1944 update_forwarder_flag (b);
1945 changed_here = true;
1946 }
1947
1948 /* Simplify branch to branch. */
1949 if (try_forward_edges (mode, b))
1950 changed_here = true;
1951
1952 /* Look for shared code between blocks. */
1953 if ((mode & CLEANUP_CROSSJUMP)
1954 && try_crossjump_bb (mode, b))
1955 changed_here = true;
1956
1957 /* Don't get confused by the index shift caused by
1958 deleting blocks. */
1959 if (!changed_here)
1960 b = b->next_bb;
1961 else
1962 changed = true;
1963 }
1964
1965 if ((mode & CLEANUP_CROSSJUMP)
1966 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1967 changed = true;
1968
1969 #ifdef ENABLE_CHECKING
1970 if (changed)
1971 verify_flow_info ();
1972 #endif
1973
1974 changed_overall |= changed;
1975 first_pass = false;
1976 }
1977 while (changed);
1978 }
1979
1980 if (mode & CLEANUP_CROSSJUMP)
1981 remove_fake_edges ();
1982
1983 clear_aux_for_blocks ();
1984
1985 return changed_overall;
1986 }
1987 \f
1988 /* Delete all unreachable basic blocks. */
1989
1990 bool
1991 delete_unreachable_blocks (void)
1992 {
1993 bool changed = false;
1994 basic_block b, next_bb;
1995
1996 find_unreachable_blocks ();
1997
1998 /* Delete all unreachable basic blocks. */
1999
2000 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2001 {
2002 next_bb = b->next_bb;
2003
2004 if (!(b->flags & BB_REACHABLE))
2005 {
2006 delete_basic_block (b);
2007 changed = true;
2008 }
2009 }
2010
2011 if (changed)
2012 tidy_fallthru_edges ();
2013 return changed;
2014 }
2015
2016 /* Merges sequential blocks if possible. */
2017
2018 bool
2019 merge_seq_blocks (void)
2020 {
2021 basic_block bb;
2022 bool changed = false;
2023
2024 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2025 {
2026 if (bb->succ
2027 && !bb->succ->succ_next
2028 && can_merge_blocks_p (bb, bb->succ->dest))
2029 {
2030 /* Merge the blocks and retry. */
2031 merge_blocks (bb, bb->succ->dest);
2032 changed = true;
2033 continue;
2034 }
2035
2036 bb = bb->next_bb;
2037 }
2038
2039 return changed;
2040 }
2041 \f
2042 /* Tidy the CFG by deleting unreachable code and whatnot. */
2043
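/* For example, a caller wanting an aggressive RTL cleanup might invoke
   cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP); the exact flag
   combination depends on the pass (see the CLEANUP_* flags tested in
   try_optimize_cfg above).  */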
2044 bool
2045 cleanup_cfg (int mode)
2046 {
2047 bool changed = false;
2048
2049 timevar_push (TV_CLEANUP_CFG);
2050 if (delete_unreachable_blocks ())
2051 {
2052 changed = true;
2053       /* We've possibly created trivially dead code.  Clean it up right
2054 	 now to introduce more opportunities for try_optimize_cfg.  */
2055 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2056 && !reload_completed)
2057 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2058 }
2059
2060 compact_blocks ();
2061
2062 while (try_optimize_cfg (mode))
2063 {
2064 delete_unreachable_blocks (), changed = true;
2065 if (mode & CLEANUP_UPDATE_LIFE)
2066 {
2067 /* Cleaning up CFG introduces more opportunities for dead code
2068 removal that in turn may introduce more opportunities for
2069 cleaning up the CFG. */
2070 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2071 PROP_DEATH_NOTES
2072 | PROP_SCAN_DEAD_CODE
2073 | PROP_KILL_DEAD_CODE
2074 | ((mode & CLEANUP_LOG_LINKS)
2075 ? PROP_LOG_LINKS : 0)))
2076 break;
2077 }
2078 else if (!(mode & CLEANUP_NO_INSN_DEL)
2079 && (mode & CLEANUP_EXPENSIVE)
2080 && !reload_completed)
2081 {
2082 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2083 break;
2084 }
2085 else
2086 break;
2087 delete_dead_jumptables ();
2088 }
2089
2090 /* Kill the data we won't maintain. */
2091 free_EXPR_LIST_list (&label_value_list);
2092 timevar_pop (TV_CLEANUP_CFG);
2093
2094 return changed;
2095 }