[gcc.git] / gcc / cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to the
27 block's successor. Simplification of the branch instruction is performed
28 by the underlying infrastructure so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53
54 /* cleanup_cfg maintains the following flags for each basic block. */
55
56 enum bb_flags
57 {
58 /* Set if BB is a forwarder block, to avoid too many
59 forwarder_block_p calls. */
60 BB_FORWARDER_BLOCK = 1,
61 BB_NONTHREADABLE_BLOCK = 2
62 };
63
64 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
65 #define BB_SET_FLAG(BB, FLAG) \
66 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
67 #define BB_CLEAR_FLAG(BB, FLAG) \
68 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
69
70 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
71
72 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
73 static bool first_pass;
74 static bool try_crossjump_to_edge (int, edge, edge);
75 static bool try_crossjump_bb (int, basic_block);
76 static bool outgoing_edges_match (int, basic_block, basic_block);
77 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
78 static bool insns_match_p (int, rtx, rtx);
79
80 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
81 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
82 static bool try_optimize_cfg (int);
83 static bool try_simplify_condjump (basic_block);
84 static bool try_forward_edges (int, basic_block);
85 static edge thread_jump (int, edge, basic_block);
86 static bool mark_effect (rtx, bitmap);
87 static void notice_new_block (basic_block);
88 static void update_forwarder_flag (basic_block);
89 static int mentions_nonequal_regs (rtx *, void *);
90 static void merge_memattrs (rtx, rtx);
91 \f
92 /* Set flags for a newly created block. */
93
94 static void
95 notice_new_block (basic_block bb)
96 {
97 if (!bb)
98 return;
99
100 if (forwarder_block_p (bb))
101 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
102 }
103
104 /* Recompute the forwarder flag after the block has been modified. */
105
106 static void
107 update_forwarder_flag (basic_block bb)
108 {
109 if (forwarder_block_p (bb))
110 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
111 else
112 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
113 }
114 \f
115 /* Simplify a conditional jump around an unconditional jump.
116 Return true if something changed. */
117
118 static bool
119 try_simplify_condjump (basic_block cbranch_block)
120 {
121 basic_block jump_block, jump_dest_block, cbranch_dest_block;
122 edge cbranch_jump_edge, cbranch_fallthru_edge;
123 rtx cbranch_insn;
124
125 /* Verify that there are exactly two successors. */
126 if (EDGE_COUNT (cbranch_block->succs) != 2)
127 return false;
128
129 /* Verify that we've got a normal conditional branch at the end
130 of the block. */
131 cbranch_insn = BB_END (cbranch_block);
132 if (!any_condjump_p (cbranch_insn))
133 return false;
134
135 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
136 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
137
138 /* The next block must not have multiple predecessors, must not
139 be the last block in the function, and must contain just the
140 unconditional jump. */
141 jump_block = cbranch_fallthru_edge->dest;
142 if (!single_pred_p (jump_block)
143 || jump_block->next_bb == EXIT_BLOCK_PTR
144 || !FORWARDER_BLOCK_P (jump_block))
145 return false;
146 jump_dest_block = single_succ (jump_block);
147
148 /* If we are partitioning hot/cold basic blocks, we don't want to
149 mess up unconditional or indirect jumps that cross between hot
150 and cold sections.
151
152 Basic block partitioning may result in some jumps that appear to
153 be optimizable (or blocks that appear to be mergeable), but which really
154 must be left untouched (they are required to make it safely across
155 partition boundaries). See the comments at the top of
156 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
157
158 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
159 || (cbranch_jump_edge->flags & EDGE_CROSSING))
160 return false;
161
162 /* The conditional branch must target the block after the
163 unconditional branch. */
164 cbranch_dest_block = cbranch_jump_edge->dest;
165
166 if (cbranch_dest_block == EXIT_BLOCK_PTR
167 || !can_fallthru (jump_block, cbranch_dest_block))
168 return false;
169
170 /* Invert the conditional branch. */
171 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
172 return false;
173
174 if (dump_file)
175 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
176 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
177
178 /* Success. Update the CFG to match. Note that after this point
179 the edge variable names appear backwards; the redirection is done
180 this way to preserve edge profile data. */
181 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
182 cbranch_dest_block);
183 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
184 jump_dest_block);
185 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
186 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
187 update_br_prob_note (cbranch_block);
188
189 /* Delete the block with the unconditional jump, and clean up the mess. */
190 delete_basic_block (jump_block);
191 tidy_fallthru_edge (cbranch_jump_edge);
192 update_forwarder_flag (cbranch_block);
193
194 return true;
195 }
196 \f
197 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
198 effect on the register. Used by jump threading. */
199
200 static bool
201 mark_effect (rtx exp, regset nonequal)
202 {
203 int regno;
204 rtx dest;
205 switch (GET_CODE (exp))
206 {
207 /* In case we do clobber the register, mark it as equal, as we know the
208 value is dead so it doesn't have to match. */
209 case CLOBBER:
210 if (REG_P (XEXP (exp, 0)))
211 {
212 dest = XEXP (exp, 0);
213 regno = REGNO (dest);
214 CLEAR_REGNO_REG_SET (nonequal, regno);
215 if (regno < FIRST_PSEUDO_REGISTER)
216 {
217 int n = hard_regno_nregs[regno][GET_MODE (dest)];
218 while (--n > 0)
219 CLEAR_REGNO_REG_SET (nonequal, regno + n);
220 }
221 }
222 return false;
223
224 case SET:
225 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
226 return false;
227 dest = SET_DEST (exp);
228 if (dest == pc_rtx)
229 return false;
230 if (!REG_P (dest))
231 return true;
232 regno = REGNO (dest);
233 SET_REGNO_REG_SET (nonequal, regno);
234 if (regno < FIRST_PSEUDO_REGISTER)
235 {
236 int n = hard_regno_nregs[regno][GET_MODE (dest)];
237 while (--n > 0)
238 SET_REGNO_REG_SET (nonequal, regno + n);
239 }
240 return false;
241
242 default:
243 return false;
244 }
245 }
246
247 /* Return nonzero if X is a register set in regset DATA.
248 Called via for_each_rtx. */
249 static int
250 mentions_nonequal_regs (rtx *x, void *data)
251 {
252 regset nonequal = (regset) data;
253 if (REG_P (*x))
254 {
255 int regno;
256
257 regno = REGNO (*x);
258 if (REGNO_REG_SET_P (nonequal, regno))
259 return 1;
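/* For a multi-word hard register, any of its constituent hard registers
   being marked nonequal disqualifies the whole register.  */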
260 if (regno < FIRST_PSEUDO_REGISTER)
261 {
262 int n = hard_regno_nregs[regno][GET_MODE (*x)];
263 while (--n > 0)
264 if (REGNO_REG_SET_P (nonequal, regno + n))
265 return 1;
266 }
267 }
268 return 0;
269 }
270 /* Attempt to prove that the basic block B will have no side effects and
271 will always continue via the same edge if reached via E. Return that edge
272 if it exists, NULL otherwise. */
273
274 static edge
275 thread_jump (int mode, edge e, basic_block b)
276 {
277 rtx set1, set2, cond1, cond2, insn;
278 enum rtx_code code1, code2, reversed_code2;
279 bool reverse1 = false;
280 unsigned i;
281 regset nonequal;
282 bool failed = false;
283 reg_set_iterator rsi;
284
285 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
286 return NULL;
287
288 /* At the moment, we handle only conditional jumps, but later we may
289 want to extend this code to tablejumps and others. */
290 if (EDGE_COUNT (e->src->succs) != 2)
291 return NULL;
292 if (EDGE_COUNT (b->succs) != 2)
293 {
294 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
295 return NULL;
296 }
297
298 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
299 if (!any_condjump_p (BB_END (e->src)))
300 return NULL;
301
302 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
303 {
304 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
305 return NULL;
306 }
307
308 set1 = pc_set (BB_END (e->src));
309 set2 = pc_set (BB_END (b));
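/* If E is not the edge taken when the condition of the jump in E->SRC
   is true, then the reversed condition is what holds along E.  */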
310 if (((e->flags & EDGE_FALLTHRU) != 0)
311 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
312 reverse1 = true;
313
314 cond1 = XEXP (SET_SRC (set1), 0);
315 cond2 = XEXP (SET_SRC (set2), 0);
316 if (reverse1)
317 code1 = reversed_comparison_code (cond1, BB_END (e->src));
318 else
319 code1 = GET_CODE (cond1);
320
321 code2 = GET_CODE (cond2);
322 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
323
324 if (!comparison_dominates_p (code1, code2)
325 && !comparison_dominates_p (code1, reversed_code2))
326 return NULL;
327
328 /* Ensure that the comparison operators are equivalent.
329 ??? This is far too pessimistic. We should allow swapped operands,
330 different CCmodes, or, for example, comparisons for intervals that
331 dominate even when the operands are not equivalent. */
332 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
333 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
334 return NULL;
335
336 /* Short circuit cases where block B contains some side effects, as we can't
337 safely bypass it. */
338 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
339 insn = NEXT_INSN (insn))
340 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
341 {
342 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
343 return NULL;
344 }
345
346 cselib_init (false);
347
348 /* First process all values computed in the source basic block. */
349 for (insn = NEXT_INSN (BB_HEAD (e->src));
350 insn != NEXT_INSN (BB_END (e->src));
351 insn = NEXT_INSN (insn))
352 if (INSN_P (insn))
353 cselib_process_insn (insn);
354
355 nonequal = BITMAP_ALLOC (NULL);
356 CLEAR_REG_SET (nonequal);
357
358 /* Now assume that we've continued along the edge E to B and continue
359 processing as if it were the same basic block.
360 Our goal is to prove that the whole block is a NOOP. */
361
362 for (insn = NEXT_INSN (BB_HEAD (b));
363 insn != NEXT_INSN (BB_END (b)) && !failed;
364 insn = NEXT_INSN (insn))
365 {
366 if (INSN_P (insn))
367 {
368 rtx pat = PATTERN (insn);
369
370 if (GET_CODE (pat) == PARALLEL)
371 {
372 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
373 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
374 }
375 else
376 failed |= mark_effect (pat, nonequal);
377 }
378
379 cselib_process_insn (insn);
380 }
381
382 /* Later we should clear nonequal of dead registers. So far we don't
383 have liveness information in cfg_cleanup. */
384 if (failed)
385 {
386 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
387 goto failed_exit;
388 }
389
390 /* cond2 must not mention any register whose value is not known to be equal
391 to its value in the former block. */
392 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
393 goto failed_exit;
394
395 /* If liveness information is available, we need to prove equivalence
396 only for the live values. */
397 if (mode & CLEANUP_UPDATE_LIFE)
398 AND_REG_SET (nonequal, b->global_live_at_end);
399
400 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
401 goto failed_exit;
402
403 BITMAP_FREE (nonequal);
404 cselib_finish ();
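/* Every value the condition depends on is known to match, so we know
   statically which way the conditional jump in B goes; return the
   outgoing edge it will take.  */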
405 if ((comparison_dominates_p (code1, code2) != 0)
406 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
407 return BRANCH_EDGE (b);
408 else
409 return FALLTHRU_EDGE (b);
410
411 failed_exit:
412 BITMAP_FREE (nonequal);
413 cselib_finish ();
414 return NULL;
415 }
416 \f
417 /* Attempt to forward edges leaving basic block B.
418 Return true if successful. */
419
420 static bool
421 try_forward_edges (int mode, basic_block b)
422 {
423 bool changed = false;
424 edge_iterator ei;
425 edge e, *threaded_edges = NULL;
426
427 /* If we are partitioning hot/cold basic blocks, we don't want to
428 mess up unconditional or indirect jumps that cross between hot
429 and cold sections.
430
431 Basic block partitioning may result in some jumps that appear to
432 be optimizable (or blocks that appear to be mergeable), but which really
433 must be left untouched (they are required to make it safely across
434 partition boundaries). See the comments at the top of
435 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
436
437 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
438 return false;
439
440 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
441 {
442 basic_block target, first;
443 int counter;
444 bool threaded = false;
445 int nthreaded_edges = 0;
446 bool may_thread = first_pass | (b->flags & BB_DIRTY);
447
448 /* Skip complex edges because we don't know how to update them.
449
450 Still handle fallthru edges, as we may succeed in forwarding the fallthru
451 edge to the same place as the branch edge of a conditional branch
452 and turn the conditional branch into an unconditional branch. */
453 if (e->flags & EDGE_COMPLEX)
454 {
455 ei_next (&ei);
456 continue;
457 }
458
459 target = first = e->dest;
460 counter = 0;
461
462 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
463 up jumps that cross between hot/cold sections.
464
465 Basic block partitioning may result in some jumps that appear
466 to be optimizable (or blocks that appear to be mergeable), but which
467 really must be left untouched (they are required to make it safely
468 across partition boundaries). See the comments at the top of
469 bb-reorder.c:partition_hot_cold_basic_blocks for complete
470 details. */
471
472 if (first != EXIT_BLOCK_PTR
473 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
474 return false;
475
476 while (counter < n_basic_blocks)
477 {
478 basic_block new_target = NULL;
479 bool new_target_threaded = false;
480 may_thread |= target->flags & BB_DIRTY;
481
482 if (FORWARDER_BLOCK_P (target)
483 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
484 && single_succ (target) != EXIT_BLOCK_PTR)
485 {
486 /* Bypass trivial infinite loops. */
487 new_target = single_succ (target);
488 if (target == new_target)
489 counter = n_basic_blocks;
490 }
491
492 /* Allow threading only over one edge at a time to simplify updating
493 of probabilities. */
494 else if ((mode & CLEANUP_THREADING) && may_thread)
495 {
496 edge t = thread_jump (mode, e, target);
497 if (t)
498 {
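/* Record the edges we thread through so that cycles can be detected
   and the profile updated afterwards.  The forwarding loop runs at
   most n_basic_blocks times, so that many entries suffice.  */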
499 if (!threaded_edges)
500 threaded_edges = xmalloc (sizeof (*threaded_edges)
501 * n_basic_blocks);
502 else
503 {
504 int i;
505
506 /* Detect an infinite loop across blocks not
507 including the start block. */
508 for (i = 0; i < nthreaded_edges; ++i)
509 if (threaded_edges[i] == t)
510 break;
511 if (i < nthreaded_edges)
512 {
513 counter = n_basic_blocks;
514 break;
515 }
516 }
517
518 /* Detect an infinite loop across the start block. */
519 if (t->dest == b)
520 break;
521
522 gcc_assert (nthreaded_edges < n_basic_blocks);
523 threaded_edges[nthreaded_edges++] = t;
524
525 new_target = t->dest;
526 new_target_threaded = true;
527 }
528 }
529
530 if (!new_target)
531 break;
532
533 /* Avoid killing loop pre-headers, as they are the place the loop
534 optimizer wants to hoist code to.
535
536 For fallthru forwarders, the LOOP_BEG note must appear between
537 the header of the block and the CODE_LABEL of the loop; for non-forwarders
538 it must appear before the JUMP_INSN. */
539 if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
540 {
541 rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
542 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
543
544 if (!NOTE_P (insn))
545 insn = NEXT_INSN (insn);
546
547 for (; insn && !LABEL_P (insn) && !INSN_P (insn);
548 insn = NEXT_INSN (insn))
549 if (NOTE_P (insn)
550 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
551 break;
552
553 if (NOTE_P (insn))
554 break;
555
556 /* Do not clean up branches to just past the end of a loop
557 at this time; it can mess up the loop optimizer's
558 recognition of some patterns. */
559
560 insn = PREV_INSN (BB_HEAD (target));
561 if (insn && NOTE_P (insn)
562 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
563 break;
564 }
565
566 counter++;
567 target = new_target;
568 threaded |= new_target_threaded;
569 }
570
571 if (counter >= n_basic_blocks)
572 {
573 if (dump_file)
574 fprintf (dump_file, "Infinite loop in BB %i.\n",
575 target->index);
576 }
577 else if (target == first)
578 ; /* We didn't do anything. */
579 else
580 {
581 /* Save the values now, as the edge may get removed. */
582 gcov_type edge_count = e->count;
583 int edge_probability = e->probability;
584 int edge_frequency;
585 int n = 0;
586
587 /* Don't force if target is exit block. */
588 if (threaded && target != EXIT_BLOCK_PTR)
589 {
590 notice_new_block (redirect_edge_and_branch_force (e, target));
591 if (dump_file)
592 fprintf (dump_file, "Conditionals threaded.\n");
593 }
594 else if (!redirect_edge_and_branch (e, target))
595 {
596 if (dump_file)
597 fprintf (dump_file,
598 "Forwarding edge %i->%i to %i failed.\n",
599 b->index, e->dest->index, target->index);
600 ei_next (&ei);
601 continue;
602 }
603
604 /* We successfully forwarded the edge. Now update profile
605 data: for each edge we traversed in the chain, remove
606 the original edge's execution count. */
607 edge_frequency = ((edge_probability * b->frequency
608 + REG_BR_PROB_BASE / 2)
609 / REG_BR_PROB_BASE);
610
611 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
612 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
613
614 do
615 {
616 edge t;
617
618 if (!single_succ_p (first))
619 {
620 gcc_assert (n < nthreaded_edges);
621 t = threaded_edges [n++];
622 gcc_assert (t->src == first);
623 update_bb_profile_for_threading (first, edge_frequency,
624 edge_count, t);
625 update_br_prob_note (first);
626 }
627 else
628 {
629 first->count -= edge_count;
630 if (first->count < 0)
631 first->count = 0;
632 first->frequency -= edge_frequency;
633 if (first->frequency < 0)
634 first->frequency = 0;
635 /* It is possible that as the result of
636 threading we've removed the edge, as it is
637 threaded to the fallthru edge. Avoid
638 getting out of sync. */
639 if (n < nthreaded_edges
640 && first == threaded_edges [n]->src)
641 n++;
642 t = single_succ_edge (first);
643 }
644
645 t->count -= edge_count;
646 if (t->count < 0)
647 t->count = 0;
648 first = t->dest;
649 }
650 while (first != target);
651
652 changed = true;
653 continue;
654 }
655 ei_next (&ei);
656 }
657
658 if (threaded_edges)
659 free (threaded_edges);
660 return changed;
661 }
662 \f
663
664 /* Blocks A and B are to be merged into a single block. A has no incoming
665 fallthru edge, so it can be moved before B without adding or modifying
666 any jumps (aside from the jump from A to B). */
667
668 static void
669 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
670 {
671 rtx barrier;
672 bool only_notes;
673
674 /* If we are partitioning hot/cold basic blocks, we don't want to
675 mess up unconditional or indirect jumps that cross between hot
676 and cold sections.
677
678 Basic block partitioning may result in some jumps that appear to
679 be optimizable (or blocks that appear to be mergeable), but which really
680 must be left untouched (they are required to make it safely across
681 partition boundaries). See the comments at the top of
682 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
683
684 if (BB_PARTITION (a) != BB_PARTITION (b))
685 return;
686
687 barrier = next_nonnote_insn (BB_END (a));
688 gcc_assert (BARRIER_P (barrier));
689 delete_insn (barrier);
690
691 /* Move block and loop notes out of the chain so that we do not
692 disturb their order.
693
694 ??? A better solution would be to squeeze out all the non-nested notes
695 and adjust the block trees appropriately. Even better would be to have
696 a tighter connection between block trees and rtl so that this is not
697 necessary. */
698 only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
699 gcc_assert (!only_notes);
700
701 /* Scramble the insn chain. */
702 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
703 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
704 a->flags |= BB_DIRTY;
705
706 if (dump_file)
707 fprintf (dump_file, "Moved block %d before %d and merged.\n",
708 a->index, b->index);
709
710 /* Swap the records for the two blocks around. */
711
712 unlink_block (a);
713 link_block (a, b->prev_bb);
714
715 /* Now blocks A and B are contiguous. Merge them. */
716 merge_blocks (a, b);
717 }
718
719 /* Blocks A and B are to be merged into a single block. B has no outgoing
720 fallthru edge, so it can be moved after A without adding or modifying
721 any jumps (aside from the jump from A to B). */
722
723 static void
724 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
725 {
726 rtx barrier, real_b_end;
727 rtx label, table;
728 bool only_notes;
729
730 /* If we are partitioning hot/cold basic blocks, we don't want to
731 mess up unconditional or indirect jumps that cross between hot
732 and cold sections.
733
734 Basic block partitioning may result in some jumps that appear to
735 be optimizable (or blocks that appear to be mergeable), but which really
736 must be left untouched (they are required to make it safely across
737 partition boundaries). See the comments at the top of
738 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
739
740 if (BB_PARTITION (a) != BB_PARTITION (b))
741 return;
742
743 real_b_end = BB_END (b);
744
745 /* If there is a jump table following block B, temporarily add the jump table
746 to block B so that it will also be moved to the correct location. */
747 if (tablejump_p (BB_END (b), &label, &table)
748 && prev_active_insn (label) == BB_END (b))
749 {
750 BB_END (b) = table;
751 }
752
753 /* There had better have been a barrier there. Delete it. */
754 barrier = NEXT_INSN (BB_END (b));
755 if (barrier && BARRIER_P (barrier))
756 delete_insn (barrier);
757
758 /* Move block and loop notes out of the chain so that we do not
759 disturb their order.
760
761 ??? A better solution would be to squeeze out all the non-nested notes
762 and adjust the block trees appropriately. Even better would be to have
763 a tighter connection between block trees and rtl so that this is not
764 necessary. */
765 only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
766 gcc_assert (!only_notes);
767
768
769 /* Scramble the insn chain. */
770 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
771
772 /* Restore the real end of b. */
773 BB_END (b) = real_b_end;
774
775 if (dump_file)
776 fprintf (dump_file, "Moved block %d after %d and merged.\n",
777 b->index, a->index);
778
779 /* Now blocks A and B are contiguous. Merge them. */
780 merge_blocks (a, b);
781 }
782
783 /* Attempt to merge basic blocks that are potentially non-adjacent.
784 Return NULL iff the attempt failed, otherwise return the basic block
785 where cleanup_cfg should continue. Because the merging commonly
786 moves a basic block away or introduces another optimization
787 possibility, return the basic block just before B so cleanup_cfg
788 doesn't need to iterate.
789
790 It may be a good idea to return the basic block before C in the case
791 C has been moved after B and originally appeared earlier in the
792 insn sequence, but we have no information available about the
793 relative ordering of these two. Hopefully it is not too common. */
794
795 static basic_block
796 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
797 {
798 basic_block next;
799
800 /* If we are partitioning hot/cold basic blocks, we don't want to
801 mess up unconditional or indirect jumps that cross between hot
802 and cold sections.
803
804 Basic block partitioning may result in some jumps that appear to
805 be optimizable (or blocks that appear to be mergeable), but which really
806 must be left untouched (they are required to make it safely across
807 partition boundaries). See the comments at the top of
808 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
809
810 if (BB_PARTITION (b) != BB_PARTITION (c))
811 return NULL;
812
813
814
815 /* If B has a fallthru edge to C, no need to move anything. */
816 if (e->flags & EDGE_FALLTHRU)
817 {
818 int b_index = b->index, c_index = c->index;
819 merge_blocks (b, c);
820 update_forwarder_flag (b);
821
822 if (dump_file)
823 fprintf (dump_file, "Merged %d and %d without moving.\n",
824 b_index, c_index);
825
826 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
827 }
828
829 /* Otherwise we will need to move code around. Do that only if expensive
830 transformations are allowed. */
831 else if (mode & CLEANUP_EXPENSIVE)
832 {
833 edge tmp_edge, b_fallthru_edge;
834 bool c_has_outgoing_fallthru;
835 bool b_has_incoming_fallthru;
836 edge_iterator ei;
837
838 /* Avoid overactive code motion, as the forwarder blocks should be
839 eliminated by edge redirection instead. One exception might be the
840 case where B is a forwarder block and C has no fallthru edge, but
841 that should be cleaned up by bb-reorder instead. */
842 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
843 return NULL;
844
845 /* We must make sure not to munge the nesting of lexical blocks
846 and loop notes. This is done by squeezing out all the notes
847 and leaving them there to lie. Not ideal, but functional. */
848
849 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
850 if (tmp_edge->flags & EDGE_FALLTHRU)
851 break;
852
853 c_has_outgoing_fallthru = (tmp_edge != NULL);
854
855 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
856 if (tmp_edge->flags & EDGE_FALLTHRU)
857 break;
858
859 b_has_incoming_fallthru = (tmp_edge != NULL);
860 b_fallthru_edge = tmp_edge;
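/* The continuation point to return is the block before B, but skip C
   if it immediately precedes B, since C is about to be moved.  */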
861 next = b->prev_bb;
862 if (next == c)
863 next = next->prev_bb;
864
865 /* Otherwise, we're going to try to move C after B. If C does
866 not have an outgoing fallthru, then it can be moved
867 immediately after B without introducing or modifying jumps. */
868 if (! c_has_outgoing_fallthru)
869 {
870 merge_blocks_move_successor_nojumps (b, c);
871 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
872 }
873
874 /* If B does not have an incoming fallthru, then it can be moved
875 immediately before C without introducing or modifying jumps.
876 C cannot be the first block, so we do not have to worry about
877 accessing a non-existent block. */
878
879 if (b_has_incoming_fallthru)
880 {
881 basic_block bb;
882
883 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
884 return NULL;
885 bb = force_nonfallthru (b_fallthru_edge);
886 if (bb)
887 notice_new_block (bb);
888 }
889
890 merge_blocks_move_predecessor_nojumps (b, c);
891 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
892 }
893
894 return NULL;
895 }
896 \f
897
898 /* Remove the memory attributes of the MEM expressions X and Y
899 where they are not equal. */
900
901 void
902 merge_memattrs (rtx x, rtx y)
903 {
904 int i;
905 int j;
906 enum rtx_code code;
907 const char *fmt;
908
909 if (x == y)
910 return;
911 if (x == 0 || y == 0)
912 return;
913
914 code = GET_CODE (x);
915
916 if (code != GET_CODE (y))
917 return;
918
919 if (GET_MODE (x) != GET_MODE (y))
920 return;
921
922 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
923 {
924 if (! MEM_ATTRS (x))
925 MEM_ATTRS (y) = 0;
926 else if (! MEM_ATTRS (y))
927 MEM_ATTRS (x) = 0;
928 else
929 {
930 rtx mem_size;
931
932 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
933 {
934 set_mem_alias_set (x, 0);
935 set_mem_alias_set (y, 0);
936 }
937
938 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
939 {
940 set_mem_expr (x, 0);
941 set_mem_expr (y, 0);
942 set_mem_offset (x, 0);
943 set_mem_offset (y, 0);
944 }
945 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
946 {
947 set_mem_offset (x, 0);
948 set_mem_offset (y, 0);
949 }
950
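/* If either size is unknown, the merged size is unknown; otherwise
   keep the larger of the two.  */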
951 if (!MEM_SIZE (x))
952 mem_size = NULL_RTX;
953 else if (!MEM_SIZE (y))
954 mem_size = NULL_RTX;
955 else
956 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
957 INTVAL (MEM_SIZE (y))));
958 set_mem_size (x, mem_size);
959 set_mem_size (y, mem_size);
960
961 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
962 set_mem_align (y, MEM_ALIGN (x));
963 }
964 }
965
966 fmt = GET_RTX_FORMAT (code);
967 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
968 {
969 switch (fmt[i])
970 {
971 case 'E':
972 /* Two vectors must have the same length. */
973 if (XVECLEN (x, i) != XVECLEN (y, i))
974 return;
975
976 for (j = 0; j < XVECLEN (x, i); j++)
977 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
978
979 break;
980
981 case 'e':
982 merge_memattrs (XEXP (x, i), XEXP (y, i));
983 }
984 }
985 return;
986 }
987
988
989 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
990
991 static bool
992 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
993 {
994 rtx p1, p2;
995
996 /* Verify that I1 and I2 are equivalent. */
997 if (GET_CODE (i1) != GET_CODE (i2))
998 return false;
999
1000 p1 = PATTERN (i1);
1001 p2 = PATTERN (i2);
1002
1003 if (GET_CODE (p1) != GET_CODE (p2))
1004 return false;
1005
1006 /* If this is a CALL_INSN, compare register usage information.
1007 If we don't check this on stack register machines, the two
1008 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1009 numbers of stack registers in the same basic block.
1010 If we don't check this on machines with delay slots, a delay slot may
1011 be filled that clobbers a parameter expected by the subroutine.
1012
1013 ??? We take the simple route for now and assume that if they're
1014 equal, they were constructed identically. */
1015
1016 if (CALL_P (i1)
1017 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1018 CALL_INSN_FUNCTION_USAGE (i2))
1019 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1020 return false;
1021
1022 #ifdef STACK_REGS
1023 /* If cross_jump_death_matters is not 0, the insn's mode
1024 indicates whether or not the insn contains any stack-like
1025 regs. */
1026
1027 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1028 {
1029 /* If register stack conversion has already been done, then
1030 death notes must also be compared before it is certain that
1031 the two instruction streams match. */
1032
1033 rtx note;
1034 HARD_REG_SET i1_regset, i2_regset;
1035
1036 CLEAR_HARD_REG_SET (i1_regset);
1037 CLEAR_HARD_REG_SET (i2_regset);
1038
1039 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1040 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1041 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1042
1043 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1044 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1045 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1046
1047 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1048
1049 return false;
1050
1051 done:
1052 ;
1053 }
1054 #endif
1055
1056 if (reload_completed
1057 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1058 return true;
1059
1060 /* Do not do EQUIV substitution after reload. First, we're undoing the
1061 work of reload_cse. Second, we may be undoing the work of the post-
1062 reload splitting pass. */
1063 /* ??? Possibly add a new phase switch variable that can be used by
1064 targets to disallow the troublesome insns after splitting. */
1065 if (!reload_completed)
1066 {
1067 /* The following code helps take care of G++ cleanups. */
1068 rtx equiv1 = find_reg_equal_equiv_note (i1);
1069 rtx equiv2 = find_reg_equal_equiv_note (i2);
1070
1071 if (equiv1 && equiv2
1072 /* If the equivalences are not to a constant, they may
1073 reference pseudos that no longer exist, so we can't
1074 use them. */
1075 && (! reload_completed
1076 || (CONSTANT_P (XEXP (equiv1, 0))
1077 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1078 {
1079 rtx s1 = single_set (i1);
1080 rtx s2 = single_set (i2);
1081 if (s1 != 0 && s2 != 0
1082 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1083 {
1084 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1085 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1086 if (! rtx_renumbered_equal_p (p1, p2))
1087 cancel_changes (0);
1088 else if (apply_change_group ())
1089 return true;
1090 }
1091 }
1092 }
1093
1094 return false;
1095 }
1096 \f
1097 /* Look through the insns at the end of BB1 and BB2 and find the longest
1098 sequence of insns that are equivalent. Store the first insn of that
1099 sequence in *F1 and *F2 and return the sequence length.
1100
1101 To simplify callers of this function, if the blocks match exactly,
1102 store the head of the blocks in *F1 and *F2. */
1103
1104 static int
1105 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1106 basic_block bb2, rtx *f1, rtx *f2)
1107 {
1108 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1109 int ninsns = 0;
1110
1111 /* Skip simple jumps at the end of the blocks. Complex jumps still
1112 need to be compared for equivalence, which we'll do below. */
1113
1114 i1 = BB_END (bb1);
1115 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1116 if (onlyjump_p (i1)
1117 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1118 {
1119 last1 = i1;
1120 i1 = PREV_INSN (i1);
1121 }
1122
1123 i2 = BB_END (bb2);
1124 if (onlyjump_p (i2)
1125 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1126 {
1127 last2 = i2;
1128 /* Count everything except an unconditional jump as an insn. */
1129 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1130 ninsns++;
1131 i2 = PREV_INSN (i2);
1132 }
1133
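/* Walk backwards through both blocks in lockstep, extending the
   matched sequence one insn at a time.  */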
1134 while (true)
1135 {
1136 /* Ignore notes. */
1137 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1138 i1 = PREV_INSN (i1);
1139
1140 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1141 i2 = PREV_INSN (i2);
1142
1143 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1144 break;
1145
1146 if (!insns_match_p (mode, i1, i2))
1147 break;
1148
1149 merge_memattrs (i1, i2);
1150
1151 /* Don't begin a cross-jump with a NOTE insn. */
1152 if (INSN_P (i1))
1153 {
1154 /* If the merged insns have different REG_EQUAL notes, then
1155 remove them. */
1156 rtx equiv1 = find_reg_equal_equiv_note (i1);
1157 rtx equiv2 = find_reg_equal_equiv_note (i2);
1158
1159 if (equiv1 && !equiv2)
1160 remove_note (i1, equiv1);
1161 else if (!equiv1 && equiv2)
1162 remove_note (i2, equiv2);
1163 else if (equiv1 && equiv2
1164 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1165 {
1166 remove_note (i1, equiv1);
1167 remove_note (i2, equiv2);
1168 }
1169
1170 afterlast1 = last1, afterlast2 = last2;
1171 last1 = i1, last2 = i2;
1172 ninsns++;
1173 }
1174
1175 i1 = PREV_INSN (i1);
1176 i2 = PREV_INSN (i2);
1177 }
1178
1179 #ifdef HAVE_cc0
1180 /* Don't allow the insn after a compare to be shared by
1181 cross-jumping unless the compare is also shared. */
1182 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1183 last1 = afterlast1, last2 = afterlast2, ninsns--;
1184 #endif
1185
1186 /* Include preceding notes and labels in the cross-jump. One,
1187 this may bring us to the head of the blocks as requested above.
1188 Two, it keeps line number notes as closely matched as possible. */
1189 if (ninsns)
1190 {
1191 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1192 last1 = PREV_INSN (last1);
1193
1194 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1195 last1 = PREV_INSN (last1);
1196
1197 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1198 last2 = PREV_INSN (last2);
1199
1200 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1201 last2 = PREV_INSN (last2);
1202
1203 *f1 = last1;
1204 *f2 = last2;
1205 }
1206
1207 return ninsns;
1208 }
1209
1210 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1211 the branch instruction. This means that if we commonize the control
1212 flow before the end of the basic block, the semantics remain unchanged.
1213
1214 We may assume that there exists one edge with a common destination. */
1215
1216 static bool
1217 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1218 {
1219 int nehedges1 = 0, nehedges2 = 0;
1220 edge fallthru1 = 0, fallthru2 = 0;
1221 edge e1, e2;
1222 edge_iterator ei;
1223
1224 /* If BB1 has only one successor, we may be looking at either an
1225 unconditional jump, or a fake edge to exit. */
1226 if (single_succ_p (bb1)
1227 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1228 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1229 return (single_succ_p (bb2)
1230 && (single_succ_edge (bb2)->flags
1231 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1232 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1233
1234 /* Match conditional jumps - this may get tricky when fallthru and branch
1235 edges are crossed. */
1236 if (EDGE_COUNT (bb1->succs) == 2
1237 && any_condjump_p (BB_END (bb1))
1238 && onlyjump_p (BB_END (bb1)))
1239 {
1240 edge b1, f1, b2, f2;
1241 bool reverse, match;
1242 rtx set1, set2, cond1, cond2;
1243 enum rtx_code code1, code2;
1244
1245 if (EDGE_COUNT (bb2->succs) != 2
1246 || !any_condjump_p (BB_END (bb2))
1247 || !onlyjump_p (BB_END (bb2)))
1248 return false;
1249
1250 b1 = BRANCH_EDGE (bb1);
1251 b2 = BRANCH_EDGE (bb2);
1252 f1 = FALLTHRU_EDGE (bb1);
1253 f2 = FALLTHRU_EDGE (bb2);
1254
1255 /* Get around possible forwarders on fallthru edges. Other cases
1256 should be optimized out already. */
1257 if (FORWARDER_BLOCK_P (f1->dest))
1258 f1 = single_succ_edge (f1->dest);
1259
1260 if (FORWARDER_BLOCK_P (f2->dest))
1261 f2 = single_succ_edge (f2->dest);
1262
1263 /* To simplify use of this function, return false if there are
1264 unneeded forwarder blocks. These will get eliminated later
1265 during cleanup_cfg. */
1266 if (FORWARDER_BLOCK_P (f1->dest)
1267 || FORWARDER_BLOCK_P (f2->dest)
1268 || FORWARDER_BLOCK_P (b1->dest)
1269 || FORWARDER_BLOCK_P (b2->dest))
1270 return false;
1271
1272 if (f1->dest == f2->dest && b1->dest == b2->dest)
1273 reverse = false;
1274 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1275 reverse = true;
1276 else
1277 return false;
1278
1279 set1 = pc_set (BB_END (bb1));
1280 set2 = pc_set (BB_END (bb2));
1281 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1282 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1283 reverse = !reverse;
1284
1285 cond1 = XEXP (SET_SRC (set1), 0);
1286 cond2 = XEXP (SET_SRC (set2), 0);
1287 code1 = GET_CODE (cond1);
1288 if (reverse)
1289 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1290 else
1291 code2 = GET_CODE (cond2);
1292
1293 if (code2 == UNKNOWN)
1294 return false;
1295
1296 /* Verify codes and operands match. */
1297 match = ((code1 == code2
1298 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1299 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1300 || (code1 == swap_condition (code2)
1301 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1302 XEXP (cond2, 0))
1303 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1304 XEXP (cond2, 1))));
1305
1306 /* If we return true, we will join the blocks, which means that
1307 we will have only one branch prediction bit to work with. Thus
1308 we require the existing branches to have probabilities that are
1309 roughly similar. */
1310 if (match
1311 && !optimize_size
1312 && maybe_hot_bb_p (bb1)
1313 && maybe_hot_bb_p (bb2))
1314 {
1315 int prob2;
1316
1317 if (b1->dest == b2->dest)
1318 prob2 = b2->probability;
1319 else
1320 /* Do not use f2 probability as f2 may be forwarded. */
1321 prob2 = REG_BR_PROB_BASE - b2->probability;
1322
1323 /* Fail if the difference in probabilities is greater than 50%.
1324 This rules out two well-predicted branches with opposite
1325 outcomes. */
1326 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1327 {
1328 if (dump_file)
1329 fprintf (dump_file,
1330 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1331 bb1->index, bb2->index, b1->probability, prob2);
1332
1333 return false;
1334 }
1335 }
1336
1337 if (dump_file && match)
1338 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1339 bb1->index, bb2->index);
1340
1341 return match;
1342 }
1343
1344 /* Generic case - we are seeing a computed jump, table jump or trapping
1345 instruction. */
1346
1347 /* Check whether there are tablejumps at the end of BB1 and BB2.
1348 Return true if they are identical. */
1349 {
1350 rtx label1, label2;
1351 rtx table1, table2;
1352
1353 if (tablejump_p (BB_END (bb1), &label1, &table1)
1354 && tablejump_p (BB_END (bb2), &label2, &table2)
1355 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1356 {
1357 /* The labels should never be the same rtx. If they really are the same,
1358 the jump tables are the same too. So disable crossjumping of blocks BB1
1359 and BB2 because when deleting the common insns at the end of BB1
1360 by delete_basic_block () the jump table would be deleted too. */
1361 /* If LABEL2 is referenced in BB1->END, do not do anything
1362 because we would lose information when replacing
1363 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1364 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1365 {
1366 /* Set IDENTICAL to true when the tables are identical. */
1367 bool identical = false;
1368 rtx p1, p2;
1369
1370 p1 = PATTERN (table1);
1371 p2 = PATTERN (table2);
1372 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1373 {
1374 identical = true;
1375 }
1376 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1377 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1378 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1379 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1380 {
1381 int i;
1382
1383 identical = true;
1384 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1385 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1386 identical = false;
1387 }
1388
1389 if (identical)
1390 {
1391 replace_label_data rr;
1392 bool match;
1393
1394 /* Temporarily replace references to LABEL1 with LABEL2
1395 in BB1->END so that we can compare the instructions. */
1396 rr.r1 = label1;
1397 rr.r2 = label2;
1398 rr.update_label_nuses = false;
1399 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1400
1401 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1402 if (dump_file && match)
1403 fprintf (dump_file,
1404 "Tablejumps in bb %i and %i match.\n",
1405 bb1->index, bb2->index);
1406
1407 /* Set the original label in BB1->END because when deleting
1408 a block whose end is a tablejump, the tablejump referenced
1409 from the instruction is deleted too. */
1410 rr.r1 = label2;
1411 rr.r2 = label1;
1412 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1413
1414 return match;
1415 }
1416 }
1417 return false;
1418 }
1419 }
1420
1421 /* First ensure that the instructions match. There may be many outgoing
1422 edges so this test is generally cheaper. */
1423 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1424 return false;
1425
1426 /* Search the outgoing edges, ensure that the counts match, and find
1427 possible fallthru and exception handling edges since these need more
1428 validation. */
1429 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1430 return false;
1431
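/* Walk the two successor lists in parallel, pairing the edges
   positionally.  */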
1432 FOR_EACH_EDGE (e1, ei, bb1->succs)
1433 {
1434 e2 = EDGE_SUCC (bb2, ei.index);
1435
1436 if (e1->flags & EDGE_EH)
1437 nehedges1++;
1438
1439 if (e2->flags & EDGE_EH)
1440 nehedges2++;
1441
1442 if (e1->flags & EDGE_FALLTHRU)
1443 fallthru1 = e1;
1444 if (e2->flags & EDGE_FALLTHRU)
1445 fallthru2 = e2;
1446 }
1447
1448 /* If the number of edges of various types does not match, fail. */
1449 if (nehedges1 != nehedges2
1450 || (fallthru1 != 0) != (fallthru2 != 0))
1451 return false;
1452
1453 /* Fallthru edges must be forwarded to the same destination. */
1454 if (fallthru1)
1455 {
1456 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1457 ? single_succ (fallthru1->dest): fallthru1->dest);
1458 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1459 ? single_succ (fallthru2->dest): fallthru2->dest);
1460
1461 if (d1 != d2)
1462 return false;
1463 }
1464
1465 /* Ensure the same EH region. */
1466 {
1467 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1468 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1469
1470 if (!n1 && n2)
1471 return false;
1472
1473 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1474 return false;
1475 }
1476
1477 /* We don't need to match the rest of the edges as the above checks should
1478 be enough to ensure that they are equivalent. */
1479 return true;
1480 }
1481
1482 /* E1 and E2 are edges with the same destination block. Search their
1483 predecessors for common code. If found, redirect control flow from
1484 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1485
1486 static bool
1487 try_crossjump_to_edge (int mode, edge e1, edge e2)
1488 {
1489 int nmatch;
1490 basic_block src1 = e1->src, src2 = e2->src;
1491 basic_block redirect_to, redirect_from, to_remove;
1492 rtx newpos1, newpos2;
1493 edge s;
1494 edge_iterator ei;
1495
1496 newpos1 = newpos2 = NULL_RTX;
1497
1498 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1499 to try this optimization.
1500
1501 Basic block partitioning may result in some jumps that appear to
1502 be optimizable (or blocks that appear to be mergeable), but which really
1503 must be left untouched (they are required to make it safely across
1504 partition boundaries). See the comments at the top of
1505 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1506
1507 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1508 return false;
1509
1510 /* Search backward through forwarder blocks. We don't need to worry
1511 about multiple entry or chained forwarders, as they will be optimized
1512 away. We do this to look past the unconditional jump following a
1513 conditional jump that is required due to the current CFG shape. */
1514 if (single_pred_p (src1)
1515 && FORWARDER_BLOCK_P (src1))
1516 e1 = single_pred_edge (src1), src1 = e1->src;
1517
1518 if (single_pred_p (src2)
1519 && FORWARDER_BLOCK_P (src2))
1520 e2 = single_pred_edge (src2), src2 = e2->src;
1521
1522 /* Nothing to do if we reach ENTRY, or a common source block. */
1523 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1524 return false;
1525 if (src1 == src2)
1526 return false;
1527
1528 /* Seeing more than one forwarder block would confuse us later... */
1529 if (FORWARDER_BLOCK_P (e1->dest)
1530 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1531 return false;
1532
1533 if (FORWARDER_BLOCK_P (e2->dest)
1534 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1535 return false;
1536
1537 /* Likewise with dead code (possibly newly created by the other optimizations
1538 of cfg_cleanup). */
1539 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1540 return false;
1541
1542 /* Look for the common insn sequence, part the first ... */
1543 if (!outgoing_edges_match (mode, src1, src2))
1544 return false;
1545
1546 /* ... and part the second. */
1547 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1548
1549 /* Don't proceed with the crossjump unless we found a sufficient number
1550 of matching instructions or the 'from' block was totally matched
1551 (such that its predecessors will hopefully be redirected and the
1552 block removed). */
1553 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1554 && (newpos1 != BB_HEAD (src1)))
1555 return false;
1556
1557 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1558 will be deleted.
1559 If we have tablejumps at the end of SRC1 and SRC2,
1560 they have already been compared for equivalence in outgoing_edges_match (),
1561 so replace the references to TABLE1 by references to TABLE2. */
1562 {
1563 rtx label1, label2;
1564 rtx table1, table2;
1565
1566 if (tablejump_p (BB_END (src1), &label1, &table1)
1567 && tablejump_p (BB_END (src2), &label2, &table2)
1568 && label1 != label2)
1569 {
1570 replace_label_data rr;
1571 rtx insn;
1572
1573 /* Replace references to LABEL1 with LABEL2. */
1574 rr.r1 = label1;
1575 rr.r2 = label2;
1576 rr.update_label_nuses = true;
1577 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1578 {
1579 /* Do not replace the label in SRC1->END because when deleting
1580 a block whose end is a tablejump, the tablejump referenced
1581 from the instruction is deleted too. */
1582 if (insn != BB_END (src1))
1583 for_each_rtx (&insn, replace_label, &rr);
1584 }
1585 }
1586 }
1587
1588 /* Avoid splitting if possible. */
1589 if (newpos2 == BB_HEAD (src2))
1590 redirect_to = src2;
1591 else
1592 {
1593 if (dump_file)
1594 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1595 src2->index, nmatch);
1596 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1597 }
1598
1599 if (dump_file)
1600 fprintf (dump_file,
1601 "Cross jumping from bb %i to bb %i; %i common insns\n",
1602 src1->index, src2->index, nmatch);
1603
1604 redirect_to->count += src1->count;
1605 redirect_to->frequency += src1->frequency;
1606 /* We may have some registers visible through the block. */
1607 redirect_to->flags |= BB_DIRTY;
1608
1609 /* Recompute the frequencies and counts of outgoing edges. */
1610 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1611 {
1612 edge s2;
1613 edge_iterator ei;
1614 basic_block d = s->dest;
1615
1616 if (FORWARDER_BLOCK_P (d))
1617 d = single_succ (d);
1618
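/* Find the edge of SRC1 that reaches the same final destination as S,
   possibly through a forwarder block.  */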
1619 FOR_EACH_EDGE (s2, ei, src1->succs)
1620 {
1621 basic_block d2 = s2->dest;
1622 if (FORWARDER_BLOCK_P (d2))
1623 d2 = single_succ (d2);
1624 if (d == d2)
1625 break;
1626 }
1627
1628 s->count += s2->count;
1629
1630 /* Take care to update possible forwarder blocks. We verified
1631 that there is no more than one in the chain, so we can't run
1632 into an infinite loop. */
1633 if (FORWARDER_BLOCK_P (s->dest))
1634 {
1635 single_succ_edge (s->dest)->count += s2->count;
1636 s->dest->count += s2->count;
1637 s->dest->frequency += EDGE_FREQUENCY (s);
1638 }
1639
1640 if (FORWARDER_BLOCK_P (s2->dest))
1641 {
1642 single_succ_edge (s2->dest)->count -= s2->count;
1643 if (single_succ_edge (s2->dest)->count < 0)
1644 single_succ_edge (s2->dest)->count = 0;
1645 s2->dest->count -= s2->count;
1646 s2->dest->frequency -= EDGE_FREQUENCY (s);
1647 if (s2->dest->frequency < 0)
1648 s2->dest->frequency = 0;
1649 if (s2->dest->count < 0)
1650 s2->dest->count = 0;
1651 }
1652
1653 if (!redirect_to->frequency && !src1->frequency)
1654 s->probability = (s->probability + s2->probability) / 2;
1655 else
1656 s->probability
1657 = ((s->probability * redirect_to->frequency +
1658 s2->probability * src1->frequency)
1659 / (redirect_to->frequency + src1->frequency));
1660 }
1661
1662 update_br_prob_note (redirect_to);
1663
1664 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1665
1666 /* Skip possible basic block header. */
1667 if (LABEL_P (newpos1))
1668 newpos1 = NEXT_INSN (newpos1);
1669
1670 if (NOTE_P (newpos1))
1671 newpos1 = NEXT_INSN (newpos1);
1672
1673 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1674 to_remove = single_succ (redirect_from);
1675
1676 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1677 delete_basic_block (to_remove);
1678
1679 update_forwarder_flag (redirect_from);
1680
1681 return true;
1682 }
1683
1684 /* Search the predecessors of BB for common insn sequences. When found,
1685 share code between them by redirecting control flow. Return true if
1686 any changes were made. */
1687
1688 static bool
1689 try_crossjump_bb (int mode, basic_block bb)
1690 {
1691 edge e, e2, fallthru;
1692 bool changed;
1693 unsigned max, ix, ix2;
1694 basic_block ev, ev2;
1695 edge_iterator ei;
1696
1697 /* Nothing to do if there are not at least two incoming edges. */
1698 if (EDGE_COUNT (bb->preds) < 2)
1699 return false;
1700
1701 /* Don't crossjump if this block ends in a computed jump,
1702 unless we are optimizing for size. */
1703 if (!optimize_size
1704 && bb != EXIT_BLOCK_PTR
1705 && computed_jump_p (BB_END (bb)))
1706 return false;
1707
1708 /* If we are partitioning hot/cold basic blocks, we don't want to
1709 mess up unconditional or indirect jumps that cross between hot
1710 and cold sections.
1711
1712 Basic block partitioning may result in some jumps that appear to
1713 be optimizable (or blocks that appear to be mergeable), but which really
1714 must be left untouched (they are required to make it safely across
1715 partition boundaries). See the comments at the top of
1716 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1717
1718 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1719 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1720 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1721 return false;
1722
1723 /* It is always cheapest to redirect a block that ends in a branch to
1724 a block that falls through into BB, as that adds no branches to the
1725 program. We'll try that combination first. */
1726 fallthru = NULL;
1727 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1728
1729 if (EDGE_COUNT (bb->preds) > max)
1730 return false;
1731
1732 FOR_EACH_EDGE (e, ei, bb->preds)
1733 {
1734 if (e->flags & EDGE_FALLTHRU)
1735 fallthru = e;
1736 }
1737
1738 changed = false;
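/* Walk the predecessor edges by index; a successful crossjump may
   reshuffle the edge vector, in which case the scan restarts from the
   beginning.  */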
1739 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1740 {
1741 e = EDGE_PRED (ev, ix);
1742 ix++;
1743
1744 /* As noted above, first try with the fallthru predecessor. */
1745 if (fallthru)
1746 {
1747 /* Don't combine the fallthru edge into anything else.
1748 If there is a match, we'll do it the other way around. */
1749 if (e == fallthru)
1750 continue;
1751 /* If nothing changed since the last attempt, there is nothing
1752 we can do. */
1753 if (!first_pass
1754 && (!(e->src->flags & BB_DIRTY)
1755 && !(fallthru->src->flags & BB_DIRTY)))
1756 continue;
1757
1758 if (try_crossjump_to_edge (mode, e, fallthru))
1759 {
1760 changed = true;
1761 ix = 0;
1762 ev = bb;
1763 continue;
1764 }
1765 }
1766
1767 /* Non-obvious work limiting check: Recognize that we're going
1768 to call try_crossjump_bb on every basic block. So if we have
1769 two blocks with lots of outgoing edges (a switch) and they
1770 share lots of common destinations, then we would do the
1771 cross-jump check once for each common destination.
1772
1773 Now, if the blocks actually are cross-jump candidates, then
1774 all of their destinations will be shared, which means that
1775 we need to check them for cross-jump candidacy only once. We
1776 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1777 choosing to do the check from the block for which the edge
1778 in question is its first successor. */
1779 if (EDGE_SUCC (e->src, 0) != e)
1780 continue;
1781
1782 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1783 {
1784 e2 = EDGE_PRED (ev2, ix2);
1785 ix2++;
1786
1787 if (e2 == e)
1788 continue;
1789
1790 /* We've already checked the fallthru edge above. */
1791 if (e2 == fallthru)
1792 continue;
1793
1794 /* The "first successor" check above only prevents multiple
1795 checks of crossjump(A,B). In order to prevent redundant
1796 checks of crossjump(B,A), require that A be the block
1797 with the lowest index. */
1798 if (e->src->index > e2->src->index)
1799 continue;
1800
1801 /* If nothing changed since the last attempt, there is nothing
1802 we can do. */
1803 if (!first_pass
1804 && (!(e->src->flags & BB_DIRTY)
1805 && !(e2->src->flags & BB_DIRTY)))
1806 continue;
1807
1808 if (try_crossjump_to_edge (mode, e, e2))
1809 {
1810 changed = true;
1811 ev2 = bb;
1812 ix = 0;
1813 break;
1814 }
1815 }
1816 }
1817
1818 return changed;
1819 }
1820
1821 /* Do simple CFG optimizations - basic block merging, simplification of
1822 jump instructions, etc. Return nonzero if changes were made. */
1823
1824 static bool
1825 try_optimize_cfg (int mode)
1826 {
1827 bool changed_overall = false;
1828 bool changed;
1829 int iterations = 0;
1830 basic_block bb, b, next;
1831
1832 if (mode & CLEANUP_CROSSJUMP)
1833 add_noreturn_fake_exit_edges ();
1834
1835 FOR_EACH_BB (bb)
1836 update_forwarder_flag (bb);
1837
1838 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1839 clear_bb_flags ();
1840
1841 if (! targetm.cannot_modify_jumps_p ())
1842 {
1843 first_pass = true;
1844 /* Attempt to merge blocks as made possible by edge removal. If
1845 a block has only one successor, and the successor has only
1846 one predecessor, they may be combined. */
1847 do
1848 {
1849 changed = false;
1850 iterations++;
1851
1852 if (dump_file)
1853 fprintf (dump_file,
1854 "\n\ntry_optimize_cfg iteration %i\n\n",
1855 iterations);
1856
1857 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1858 {
1859 basic_block c;
1860 edge s;
1861 bool changed_here = false;
1862
1863 /* Delete trivially dead basic blocks. */
1864 while (EDGE_COUNT (b->preds) == 0)
1865 {
1866 c = b->prev_bb;
1867 if (dump_file)
1868 fprintf (dump_file, "Deleting block %i.\n",
1869 b->index);
1870
1871 delete_basic_block (b);
1872 if (!(mode & CLEANUP_CFGLAYOUT))
1873 changed = true;
1874 b = c;
1875 }
1876
1877 /* Remove code labels no longer used. */
1878 if (single_pred_p (b)
1879 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1880 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1881 && LABEL_P (BB_HEAD (b))
1882 /* If the previous block ends with a branch to this
1883 block, we can't delete the label. Normally this
1884 is a condjump that is yet to be simplified, but
1885 if CASE_DROPS_THRU, this can be a tablejump with
1886 some element going to the same place as the
1887 default (fallthru). */
1888 && (single_pred (b) == ENTRY_BLOCK_PTR
1889 || !JUMP_P (BB_END (single_pred (b)))
1890 || ! label_is_jump_target_p (BB_HEAD (b),
1891 BB_END (single_pred (b)))))
1892 {
1893 rtx label = BB_HEAD (b);
1894
1895 delete_insn_chain (label, label);
1896 /* If the label is undeletable, move it after the
1897 BASIC_BLOCK note. */
1898 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1899 {
1900 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1901
1902 reorder_insns_nobb (label, label, bb_note);
1903 BB_HEAD (b) = bb_note;
1904 }
1905 if (dump_file)
1906 fprintf (dump_file, "Deleted label in block %i.\n",
1907 b->index);
1908 }
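/* A sketch of the undeletable-label path above, for a hypothetical
block B whose label cannot be removed (e.g. its address is taken):
delete_insn_chain turns the label into a NOTE_INSN_DELETED_LABEL
instead of deleting it, leaving the stream as

(note NOTE_INSN_DELETED_LABEL) (note NOTE_INSN_BASIC_BLOCK) ...

and the reorder_insns_nobb call then swaps the two notes, so B's
head becomes the NOTE_INSN_BASIC_BLOCK note as usual. */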
1909
1910 /* If we fall through an empty block, we can remove it. */
1911 if (!(mode & CLEANUP_CFGLAYOUT)
1912 && single_pred_p (b)
1913 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1914 && !LABEL_P (BB_HEAD (b))
1915 && FORWARDER_BLOCK_P (b)
1916 /* Note that FORWARDER_BLOCK_P being true ensures that
1917 this block has a successor. */
1918 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
1919 && n_basic_blocks > 1)
1920 {
1921 if (dump_file)
1922 fprintf (dump_file,
1923 "Deleting fallthru block %i.\n",
1924 b->index);
1925
1926 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1927 redirect_edge_succ_nodup (single_pred_edge (b),
1928 single_succ (b));
1929 delete_basic_block (b);
1930 changed = true;
1931 b = c;
1932 }
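/* The deletion above, sketched on hypothetical blocks: given

A --(fallthru)--> B --(fallthru)--> C

where B is an empty, label-less forwarder block, the edge from A
is redirected straight to C by redirect_edge_succ_nodup and B is
deleted, so A now falls through directly into C. */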
1933
1934 if (single_succ_p (b)
1935 && (s = single_succ_edge (b))
1936 && !(s->flags & EDGE_COMPLEX)
1937 && (c = s->dest) != EXIT_BLOCK_PTR
1938 && single_pred_p (c)
1939 && b != c)
1940 {
1941 /* When not in cfg_layout mode, use code that is aware of insn
1942 reordering. This code may create new basic blocks, so it
1943 does not fit the merge_blocks interface and is kept here in
1944 the hope that it will become useless once more of the compiler
1945 is converted to cfg_layout mode. */
1946
1947 if ((mode & CLEANUP_CFGLAYOUT)
1948 && can_merge_blocks_p (b, c))
1949 {
1950 merge_blocks (b, c);
1951 update_forwarder_flag (b);
1952 changed_here = true;
1953 }
1954 else if (!(mode & CLEANUP_CFGLAYOUT)
1955 /* If the jump insn has side effects,
1956 we can't kill the edge. */
1957 && (!JUMP_P (BB_END (b))
1958 || (reload_completed
1959 ? simplejump_p (BB_END (b))
1960 : (onlyjump_p (BB_END (b))
1961 && !tablejump_p (BB_END (b),
1962 NULL, NULL))))
1963 && (next = merge_blocks_move (s, b, c, mode)))
1964 {
1965 b = next;
1966 changed_here = true;
1967 }
1968 }
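/* As a concrete instance of the second branch above: if B ends in
a plain unconditional jump to C and C has no other predecessors,
merge_blocks_move can drop the jump and splice the blocks
together. After reload the stricter simplejump_p test applies,
since anything more elaborate than a simple jump may carry
effects that cannot safely be thrown away. */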
1969
1970 /* Simplify branch over branch. */
1971 if ((mode & CLEANUP_EXPENSIVE)
1972 && !(mode & CLEANUP_CFGLAYOUT)
1973 && try_simplify_condjump (b))
1974 changed_here = true;
1975
1976 /* If B has a single outgoing edge, but uses a
1977 non-trivial jump instruction without side-effects, we
1978 can either delete the jump entirely, or replace it
1979 with a simple unconditional jump. */
1980 if (single_succ_p (b)
1981 && single_succ (b) != EXIT_BLOCK_PTR
1982 && onlyjump_p (BB_END (b))
1983 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1984 && try_redirect_by_replacing_jump (single_succ_edge (b),
1985 single_succ (b),
1986 (mode & CLEANUP_CFGLAYOUT) != 0))
1987 {
1988 update_forwarder_flag (b);
1989 changed_here = true;
1990 }
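/* For instance, if B ends in a conditional jump both of whose arms
were forwarded to the same block, B is left with a single
successor reached through a non-trivial jump;
try_redirect_by_replacing_jump then rewrites the condjump as a
simple unconditional jump, or deletes it altogether when the
target is the block that follows B. */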
1991
1992 /* Simplify branch to branch. */
1993 if (try_forward_edges (mode, b))
1994 changed_here = true;
1995
1996 /* Look for shared code between blocks. */
1997 if ((mode & CLEANUP_CROSSJUMP)
1998 && try_crossjump_bb (mode, b))
1999 changed_here = true;
2000
2001 /* Don't get confused by the index shift caused by
2002 deleting blocks. */
2003 if (!changed_here)
2004 b = b->next_bb;
2005 else
2006 changed = true;
2007 }
2008
2009 if ((mode & CLEANUP_CROSSJUMP)
2010 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2011 changed = true;
2012
2013 #ifdef ENABLE_CHECKING
2014 if (changed)
2015 verify_flow_info ();
2016 #endif
2017
2018 changed_overall |= changed;
2019 first_pass = false;
2020 }
2021 while (changed);
2022 }
2023
2024 if (mode & CLEANUP_CROSSJUMP)
2025 remove_fake_exit_edges ();
2026
2027 clear_aux_for_blocks ();
2028
2029 return changed_overall;
2030 }
2031 \f
2032 /* Delete all unreachable basic blocks. */
2033
2034 bool
2035 delete_unreachable_blocks (void)
2036 {
2037 bool changed = false;
2038 basic_block b, next_bb;
2039
2040 find_unreachable_blocks ();
2041
2042 /* Delete all unreachable basic blocks. */
2043
2044 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2045 {
2046 next_bb = b->next_bb;
2047
2048 if (!(b->flags & BB_REACHABLE))
2049 {
2050 delete_basic_block (b);
2051 changed = true;
2052 }
2053 }
2054
2055 if (changed)
2056 tidy_fallthru_edges ();
2057 return changed;
2058 }
2059
2060 /* Merge sequential blocks where possible. */
2061
2062 bool
2063 merge_seq_blocks (void)
2064 {
2065 basic_block bb;
2066 bool changed = false;
2067
2068 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2069 {
2070 if (single_succ_p (bb)
2071 && can_merge_blocks_p (bb, single_succ (bb)))
2072 {
2073 /* Merge the blocks and retry. */
2074 merge_blocks (bb, single_succ (bb));
2075 changed = true;
2076 continue;
2077 }
2078
2079 bb = bb->next_bb;
2080 }
2081
2082 return changed;
2083 }
2084 \f
2085 /* Tidy the CFG by deleting unreachable code and performing related cleanups. */
2086
2087 bool
2088 cleanup_cfg (int mode)
2089 {
2090 bool changed = false;
2091
2092 timevar_push (TV_CLEANUP_CFG);
2093 if (delete_unreachable_blocks ())
2094 {
2095 changed = true;
2096 /* We may have created trivially dead code. Clean it up right
2097 now to expose more opportunities for try_optimize_cfg. */
2098 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2099 && !reload_completed)
2100 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2101 }
2102
2103 compact_blocks ();
2104
2105 while (try_optimize_cfg (mode))
2106 {
2107 delete_unreachable_blocks (), changed = true;
2108 if (mode & CLEANUP_UPDATE_LIFE)
2109 {
2110 /* Cleaning up the CFG introduces more opportunities for dead code
2111 removal, which in turn may introduce more opportunities for
2112 cleaning up the CFG. */
2113 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2114 PROP_DEATH_NOTES
2115 | PROP_SCAN_DEAD_CODE
2116 | PROP_KILL_DEAD_CODE
2117 | ((mode & CLEANUP_LOG_LINKS)
2118 ? PROP_LOG_LINKS : 0)))
2119 break;
2120 }
2121 else if (!(mode & CLEANUP_NO_INSN_DEL)
2122 && (mode & CLEANUP_EXPENSIVE)
2123 && !reload_completed)
2124 {
2125 if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2126 break;
2127 }
2128 else
2129 break;
2130 delete_dead_jumptables ();
2131 }
2132
2133 /* Kill the data we won't maintain. */
2134 free_EXPR_LIST_list (&label_value_list);
2135 timevar_pop (TV_CLEANUP_CFG);
2136
2137 return changed;
2138 }
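/* A rough sketch of how an RTL pass might invoke the entry point
above. The flag combination is illustrative only and varies from
pass to pass, and rebuild_pass_specific_info stands in for
whatever CFG-dependent data the hypothetical caller maintains:

if (cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP))
rebuild_pass_specific_info ();

Passes that keep life information current add CLEANUP_UPDATE_LIFE,
while passes that must not have insns deleted behind their back
pass CLEANUP_NO_INSN_DEL. */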