* cfgcleanup.c (outgoing_edges_match): Fix typo in dump output.
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
24
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 the underlying infrastructure, so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
54
55 /* cleanup_cfg maintains the following flags for each basic block. */
56
57 enum bb_flags
58 {
59 /* Set if BB is a forwarder block, to avoid too many
60 forwarder_block_p calls. */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
63 };
64
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
70
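/* A forwarder block contains nothing but a jump to its single successor
   (see forwarder_block_p), so any edge entering it can be redirected
   straight to that successor. The flag below caches that property. */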
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
72
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
80
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
92 \f
93 /* Set flags for a newly created block. */
94
95 static void
96 notice_new_block (basic_block bb)
97 {
98 if (!bb)
99 return;
100
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
103 }
104
105 /* Recompute the forwarder flag after a block has been modified. */
106
107 static void
108 update_forwarder_flag (basic_block bb)
109 {
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
114 }
115 \f
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
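/* Roughly, the shape being simplified is:

       if (cond) goto B;             if (!cond) goto A;
       goto A;                =>  B: ...
   B:  ...

   where the block holding "goto A" is a forwarder; it is deleted and
   the conditional branch is inverted so that it falls through to B. */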
118
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
121 {
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
125
126 /* Verify that there are exactly two successors. */
127 if (EDGE_COUNT (cbranch_block->succs) != 2)
128 return false;
129
130 /* Verify that we've got a normal conditional branch at the end
131 of the block. */
132 cbranch_insn = BB_END (cbranch_block);
133 if (!any_condjump_p (cbranch_insn))
134 return false;
135
136 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
137 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
138
139 /* The next block must not have multiple predecessors, must not
140 be the last block in the function, and must contain just the
141 unconditional jump. */
142 jump_block = cbranch_fallthru_edge->dest;
143 if (EDGE_COUNT (jump_block->preds) >= 2
144 || jump_block->next_bb == EXIT_BLOCK_PTR
145 || !FORWARDER_BLOCK_P (jump_block))
146 return false;
147 jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
148
149 /* If we are partitioning hot/cold basic blocks, we don't want to
150 mess up unconditional or indirect jumps that cross between hot
151 and cold sections.
152
153 Basic block partitioning may result in some jumps that appear to
154 be optimizable (or blocks that appear to be mergeable), but which really
155 must be left untouched (they are required to make it safely across
156 partition boundaries). See the comments at the top of
157 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
158
159 if (flag_reorder_blocks_and_partition
160 && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
161 || (cbranch_jump_edge->flags & EDGE_CROSSING)))
162 return false;
163
164 /* The conditional branch must target the block after the
165 unconditional branch. */
166 cbranch_dest_block = cbranch_jump_edge->dest;
167
168 if (cbranch_dest_block == EXIT_BLOCK_PTR
169 || !can_fallthru (jump_block, cbranch_dest_block))
170 return false;
171
172 /* Invert the conditional branch. */
173 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
174 return false;
175
176 if (dump_file)
177 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
178 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
179
180 /* Success. Update the CFG to match. Note that after this point
181 the edge variable names appear backwards; the redirection is done
182 this way to preserve edge profile data. */
183 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
184 cbranch_dest_block);
185 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
186 jump_dest_block);
187 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
188 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
189 update_br_prob_note (cbranch_block);
190
191 /* Delete the block with the unconditional jump, and clean up the mess. */
192 delete_basic_block (jump_block);
193 tidy_fallthru_edge (cbranch_jump_edge);
194 update_forwarder_flag (cbranch_block);
195
196 return true;
197 }
198 \f
199 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
200 effect on a register. Used by jump threading. */
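/* For example, a SET whose source is cselib-equivalent to its destination
   is a NOOP, while a SET to a register marks that register (and any hard
   register aliases) as no longer provably equal. */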
201
202 static bool
203 mark_effect (rtx exp, regset nonequal)
204 {
205 int regno;
206 rtx dest;
207 switch (GET_CODE (exp))
208 {
209 /* In case we do clobber the register, mark it as equal, as we know the
210 value is dead so it doesn't have to match. */
211 case CLOBBER:
212 if (REG_P (XEXP (exp, 0)))
213 {
214 dest = XEXP (exp, 0);
215 regno = REGNO (dest);
216 CLEAR_REGNO_REG_SET (nonequal, regno);
217 if (regno < FIRST_PSEUDO_REGISTER)
218 {
219 int n = hard_regno_nregs[regno][GET_MODE (dest)];
220 while (--n > 0)
221 CLEAR_REGNO_REG_SET (nonequal, regno + n);
222 }
223 }
224 return false;
225
226 case SET:
227 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
228 return false;
229 dest = SET_DEST (exp);
230 if (dest == pc_rtx)
231 return false;
232 if (!REG_P (dest))
233 return true;
234 regno = REGNO (dest);
235 SET_REGNO_REG_SET (nonequal, regno);
236 if (regno < FIRST_PSEUDO_REGISTER)
237 {
238 int n = hard_regno_nregs[regno][GET_MODE (dest)];
239 while (--n > 0)
240 SET_REGNO_REG_SET (nonequal, regno + n);
241 }
242 return false;
243
244 default:
245 return false;
246 }
247 }
248
249 /* Return nonzero if X is a register set in regset DATA.
250 Called via for_each_rtx. */
251 static int
252 mentions_nonequal_regs (rtx *x, void *data)
253 {
254 regset nonequal = (regset) data;
255 if (REG_P (*x))
256 {
257 int regno;
258
259 regno = REGNO (*x);
260 if (REGNO_REG_SET_P (nonequal, regno))
261 return 1;
262 if (regno < FIRST_PSEUDO_REGISTER)
263 {
264 int n = hard_regno_nregs[regno][GET_MODE (*x)];
265 while (--n > 0)
266 if (REGNO_REG_SET_P (nonequal, regno + n))
267 return 1;
268 }
269 }
270 return 0;
271 }
272 /* Attempt to prove that the basic block B will have no side effects and
273 always continues along the same edge if reached via E. Return that edge
274 if it exists, NULL otherwise. */
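/* For instance, if the branch ending E->SRC establishes that x == 0 holds
   along E, and B merely re-tests x == 0 without side effects, then B's
   outcome is fully determined and E can be threaded to the matching
   successor of B. */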
275
276 static edge
277 thread_jump (int mode, edge e, basic_block b)
278 {
279 rtx set1, set2, cond1, cond2, insn;
280 enum rtx_code code1, code2, reversed_code2;
281 bool reverse1 = false;
282 int i;
283 regset nonequal;
284 bool failed = false;
285 reg_set_iterator rsi;
286
287 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
288 return NULL;
289
290 /* At the moment, we handle only conditional jumps, but later we may
291 want to extend this code to tablejumps and others. */
292 if (EDGE_COUNT (e->src->succs) != 2)
293 return NULL;
294 if (EDGE_COUNT (b->succs) != 2)
295 {
296 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
297 return NULL;
298 }
299
300 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
301 if (!any_condjump_p (BB_END (e->src)))
302 return NULL;
303
304 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
305 {
306 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
307 return NULL;
308 }
309
310 set1 = pc_set (BB_END (e->src));
311 set2 = pc_set (BB_END (b));
312 if (((e->flags & EDGE_FALLTHRU) != 0)
313 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
314 reverse1 = true;
315
316 cond1 = XEXP (SET_SRC (set1), 0);
317 cond2 = XEXP (SET_SRC (set2), 0);
318 if (reverse1)
319 code1 = reversed_comparison_code (cond1, BB_END (e->src));
320 else
321 code1 = GET_CODE (cond1);
322
323 code2 = GET_CODE (cond2);
324 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
325
326 if (!comparison_dominates_p (code1, code2)
327 && !comparison_dominates_p (code1, reversed_code2))
328 return NULL;
329
330 /* Ensure that the comparison operators are equivalent.
331 ??? This is far too pessimistic. We should allow swapped operands,
332 different CCmodes, or for example interval comparisons that
333 dominate even when the operands are not equivalent. */
334 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
335 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
336 return NULL;
337
338 /* Short circuit cases where block B contains some side effects, as we can't
339 safely bypass it. */
340 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
341 insn = NEXT_INSN (insn))
342 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
343 {
344 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
345 return NULL;
346 }
347
348 cselib_init (false);
349
350 /* First process all values computed in the source basic block. */
351 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
352 insn = NEXT_INSN (insn))
353 if (INSN_P (insn))
354 cselib_process_insn (insn);
355
356 nonequal = BITMAP_XMALLOC ();
357 CLEAR_REG_SET (nonequal);
358
359 /* Now assume that we've continued along the edge E to B and continue
360 processing as if it were the same basic block.
361 Our goal is to prove that the whole block is a NOOP. */
362
363 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
364 insn = NEXT_INSN (insn))
365 {
366 if (INSN_P (insn))
367 {
368 rtx pat = PATTERN (insn);
369
370 if (GET_CODE (pat) == PARALLEL)
371 {
372 for (i = 0; i < XVECLEN (pat, 0); i++)
373 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
374 }
375 else
376 failed |= mark_effect (pat, nonequal);
377 }
378
379 cselib_process_insn (insn);
380 }
381
382 /* Later we should clear nonequal of dead registers. So far we don't
383 have liveness information in cfg_cleanup. */
384 if (failed)
385 {
386 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
387 goto failed_exit;
388 }
389
390 /* cond2 must not mention any register that is not proven equal to its
391 value in the former block. */
392 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
393 goto failed_exit;
394
395 /* In case liveness information is available, we need to prove equivalence
396 only of the live values. */
397 if (mode & CLEANUP_UPDATE_LIFE)
398 AND_REG_SET (nonequal, b->global_live_at_end);
399
400 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
401 goto failed_exit;
402
403 BITMAP_XFREE (nonequal);
404 cselib_finish ();
405 if ((comparison_dominates_p (code1, code2) != 0)
406 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
407 return BRANCH_EDGE (b);
408 else
409 return FALLTHRU_EDGE (b);
410
411 failed_exit:
412 BITMAP_XFREE (nonequal);
413 cselib_finish ();
414 return NULL;
415 }
416 \f
417 /* Attempt to forward edges leaving basic block B.
418 Return true if successful. */
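/* For example, when an edge leaving B enters a chain of forwarder blocks
   F1 -> F2 -> ... -> T, the edge is redirected straight to T and the
   profile data of the bypassed blocks is adjusted accordingly. */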
419
420 static bool
421 try_forward_edges (int mode, basic_block b)
422 {
423 bool changed = false;
424 edge_iterator ei;
425 edge e, *threaded_edges = NULL;
426
427 /* If we are partitioning hot/cold basic blocks, we don't want to
428 mess up unconditional or indirect jumps that cross between hot
429 and cold sections.
430
431 Basic block partitioning may result in some jumps that appear to
432 be optimizable (or blocks that appear to be mergeable), but which really
433 must be left untouched (they are required to make it safely across
434 partition boundaries). See the comments at the top of
435 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
436
437 if (flag_reorder_blocks_and_partition
438 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
439 return false;
440
441 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
442 {
443 basic_block target, first;
444 int counter;
445 bool threaded = false;
446 int nthreaded_edges = 0;
447 bool may_thread = first_pass | (b->flags & BB_DIRTY);
448
449 /* Skip complex edges because we don't know how to update them.
450
451 Still handle fallthru edges, as we can succeed to forward fallthru
452 edge to the same place as the branch edge of conditional branch
453 and turn conditional branch to an unconditional branch. */
454 if (e->flags & EDGE_COMPLEX)
455 {
456 ei_next (&ei);
457 continue;
458 }
459
460 target = first = e->dest;
461 counter = 0;
462
463 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
464 up jumps that cross between hot/cold sections.
465
466 Basic block partitioning may result in some jumps that appear
467 to be optimizable (or blocks that appear to be mergeable), but which
468 really must be left untouched (they are required to make it safely
469 across partition boundaries). See the comments at the top of
470 bb-reorder.c:partition_hot_cold_basic_blocks for complete
471 details. */
472
473 if (flag_reorder_blocks_and_partition
474 && first != EXIT_BLOCK_PTR
475 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
476 return false;
477
478 while (counter < n_basic_blocks)
479 {
480 basic_block new_target = NULL;
481 bool new_target_threaded = false;
482 may_thread |= target->flags & BB_DIRTY;
483
484 if (FORWARDER_BLOCK_P (target)
485 && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
486 && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
487 {
488 /* Bypass trivial infinite loops. */
489 if (target == EDGE_SUCC (target, 0)->dest)
490 counter = n_basic_blocks;
491 new_target = EDGE_SUCC (target, 0)->dest;
492 }
493
494 /* Allow threading over only one edge at a time to simplify updating
495 of probabilities. */
496 else if ((mode & CLEANUP_THREADING) && may_thread)
497 {
498 edge t = thread_jump (mode, e, target);
499 if (t)
500 {
501 if (!threaded_edges)
502 threaded_edges = xmalloc (sizeof (*threaded_edges)
503 * n_basic_blocks);
504 else
505 {
506 int i;
507
508 /* Detect an infinite loop across blocks not
509 including the start block. */
510 for (i = 0; i < nthreaded_edges; ++i)
511 if (threaded_edges[i] == t)
512 break;
513 if (i < nthreaded_edges)
514 {
515 counter = n_basic_blocks;
516 break;
517 }
518 }
519
520 /* Detect an infinite loop across the start block. */
521 if (t->dest == b)
522 break;
523
524 gcc_assert (nthreaded_edges < n_basic_blocks);
525 threaded_edges[nthreaded_edges++] = t;
526
527 new_target = t->dest;
528 new_target_threaded = true;
529 }
530 }
531
532 if (!new_target)
533 break;
534
535 /* Avoid killing loop pre-headers, as they are the places the loop
536 optimizer wants to hoist code to.
537
538 For fallthru forwarders, the LOOP_BEG note must appear between
539 the header of the block and the CODE_LABEL of the loop; for
540 non-forwarders it must appear before the JUMP_INSN. */
541 if ((mode & CLEANUP_PRE_LOOP) && optimize)
542 {
543 rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
544 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
545
546 if (!NOTE_P (insn))
547 insn = NEXT_INSN (insn);
548
549 for (; insn && !LABEL_P (insn) && !INSN_P (insn);
550 insn = NEXT_INSN (insn))
551 if (NOTE_P (insn)
552 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
553 break;
554
555 if (NOTE_P (insn))
556 break;
557
558 /* Do not clean up branches to just past the end of a loop
559 at this time; it can mess up the loop optimizer's
560 recognition of some patterns. */
561
562 insn = PREV_INSN (BB_HEAD (target));
563 if (insn && NOTE_P (insn)
564 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
565 break;
566 }
567
568 counter++;
569 target = new_target;
570 threaded |= new_target_threaded;
571 }
572
573 if (counter >= n_basic_blocks)
574 {
575 if (dump_file)
576 fprintf (dump_file, "Infinite loop in BB %i.\n",
577 target->index);
578 }
579 else if (target == first)
580 ; /* We didn't do anything. */
581 else
582 {
583 /* Save the values now, as the edge may get removed. */
584 gcov_type edge_count = e->count;
585 int edge_probability = e->probability;
586 int edge_frequency;
587 int n = 0;
588
589 /* Don't force if target is exit block. */
590 if (threaded && target != EXIT_BLOCK_PTR)
591 {
592 notice_new_block (redirect_edge_and_branch_force (e, target));
593 if (dump_file)
594 fprintf (dump_file, "Conditionals threaded.\n");
595 }
596 else if (!redirect_edge_and_branch (e, target))
597 {
598 if (dump_file)
599 fprintf (dump_file,
600 "Forwarding edge %i->%i to %i failed.\n",
601 b->index, e->dest->index, target->index);
602 ei_next (&ei);
603 continue;
604 }
605
606 /* We successfully forwarded the edge. Now update profile
607 data: for each edge we traversed in the chain, remove
608 the original edge's execution count. */
609 edge_frequency = ((edge_probability * b->frequency
610 + REG_BR_PROB_BASE / 2)
611 / REG_BR_PROB_BASE);
612
613 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
614 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
615
616 do
617 {
618 edge t;
619
620 if (EDGE_COUNT (first->succs) > 1)
621 {
622 gcc_assert (n < nthreaded_edges);
623 t = threaded_edges [n++];
624 gcc_assert (t->src == first);
625 update_bb_profile_for_threading (first, edge_frequency,
626 edge_count, t);
627 update_br_prob_note (first);
628 }
629 else
630 {
631 first->count -= edge_count;
632 if (first->count < 0)
633 first->count = 0;
634 first->frequency -= edge_frequency;
635 if (first->frequency < 0)
636 first->frequency = 0;
637 /* It is possible that as the result of
638 threading we've removed the edge, as it is
639 threaded to the fallthru edge. Avoid
640 getting out of sync. */
641 if (n < nthreaded_edges
642 && first == threaded_edges [n]->src)
643 n++;
644 t = EDGE_SUCC (first, 0);
645 }
646
647 t->count -= edge_count;
648 if (t->count < 0)
649 t->count = 0;
650 first = t->dest;
651 }
652 while (first != target);
653
654 changed = true;
655 continue;
656 }
657 ei_next (&ei);
658 }
659
660 if (threaded_edges)
661 free (threaded_edges);
662 return changed;
663 }
664 \f
665
666 /* Blocks A and B are to be merged into a single block. A has no incoming
667 fallthru edge, so it can be moved before B without adding or modifying
668 any jumps (aside from the jump from A to B). */
669
670 static void
671 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
672 {
673 rtx barrier;
674 bool only_notes;
675
676 /* If we are partitioning hot/cold basic blocks, we don't want to
677 mess up unconditional or indirect jumps that cross between hot
678 and cold sections.
679
680 Basic block partitioning may result in some jumps that appear to
681 be optimizable (or blocks that appear to be mergeable), but which really
682 must be left untouched (they are required to make it safely across
683 partition boundaries). See the comments at the top of
684 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
685
686 if (flag_reorder_blocks_and_partition
687 && (BB_PARTITION (a) != BB_PARTITION (b)
688 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
689 return;
690
691 barrier = next_nonnote_insn (BB_END (a));
692 gcc_assert (BARRIER_P (barrier));
693 delete_insn (barrier);
694
695 /* Move block and loop notes out of the chain so that we do not
696 disturb their order.
697
698 ??? A better solution would be to squeeze out all the non-nested notes
699 and adjust the block trees appropriately. Even better would be to have
700 a tighter connection between block trees and rtl so that this is not
701 necessary. */
702 only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
703 gcc_assert (!only_notes);
704
705 /* Scramble the insn chain. */
706 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
707 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
708 a->flags |= BB_DIRTY;
709
710 if (dump_file)
711 fprintf (dump_file, "Moved block %d before %d and merged.\n",
712 a->index, b->index);
713
714 /* Swap the records for the two blocks around. */
715
716 unlink_block (a);
717 link_block (a, b->prev_bb);
718
719 /* Now blocks A and B are contiguous. Merge them. */
720 merge_blocks (a, b);
721 }
722
723 /* Blocks A and B are to be merged into a single block. B has no outgoing
724 fallthru edge, so it can be moved after A without adding or modifying
725 any jumps (aside from the jump from A to B). */
726
727 static void
728 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
729 {
730 rtx barrier, real_b_end;
731 rtx label, table;
732 bool only_notes;
733
734 /* If we are partitioning hot/cold basic blocks, we don't want to
735 mess up unconditional or indirect jumps that cross between hot
736 and cold sections.
737
738 Basic block partitioning may result in some jumps that appear to
739 be optimizable (or blocks that appear to be mergeable), but which really
740 must be left untouched (they are required to make it safely across
741 partition boundaries). See the comments at the top of
742 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
743
744 if (flag_reorder_blocks_and_partition
745 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
746 || BB_PARTITION (a) != BB_PARTITION (b)))
747 return;
748
749 real_b_end = BB_END (b);
750
751 /* If there is a jump table following block B temporarily add the jump table
752 to block B so that it will also be moved to the correct location. */
753 if (tablejump_p (BB_END (b), &label, &table)
754 && prev_active_insn (label) == BB_END (b))
755 {
756 BB_END (b) = table;
757 }
758
759 /* There had better have been a barrier there. Delete it. */
760 barrier = NEXT_INSN (BB_END (b));
761 if (barrier && BARRIER_P (barrier))
762 delete_insn (barrier);
763
764 /* Move block and loop notes out of the chain so that we do not
765 disturb their order.
766
767 ??? A better solution would be to squeeze out all the non-nested notes
768 and adjust the block trees appropriately. Even better would be to have
769 a tighter connection between block trees and rtl so that this is not
770 necessary. */
771 only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
772 gcc_assert (!only_notes);
773
774
775 /* Scramble the insn chain. */
776 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
777
778 /* Restore the real end of b. */
779 BB_END (b) = real_b_end;
780
781 if (dump_file)
782 fprintf (dump_file, "Moved block %d after %d and merged.\n",
783 b->index, a->index);
784
785 /* Now blocks A and B are contiguous. Merge them. */
786 merge_blocks (a, b);
787 }
788
789 /* Attempt to merge basic blocks that are potentially non-adjacent.
790 Return NULL iff the attempt failed, otherwise return the basic block
791 where cleanup_cfg should continue. Because the merging commonly
792 moves a basic block away or introduces another optimization
793 possibility, return the basic block just before B so that cleanup_cfg
794 doesn't need to iterate.
795
796 It might be a good idea to return the basic block before C in the case
797 where C has been moved after B and originally appeared earlier in the
798 insn sequence, but we have no information available about the
799 relative ordering of these two. Hopefully it is not too common. */
800
801 static basic_block
802 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
803 {
804 basic_block next;
805
806 /* If we are partitioning hot/cold basic blocks, we don't want to
807 mess up unconditional or indirect jumps that cross between hot
808 and cold sections.
809
810 Basic block partitioning may result in some jumps that appear to
811 be optimizable (or blocks that appear to be mergeable), but which really
812 must be left untouched (they are required to make it safely across
813 partition boundaries). See the comments at the top of
814 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
815
816 if (flag_reorder_blocks_and_partition
817 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
818 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
819 || BB_PARTITION (b) != BB_PARTITION (c)))
820 return NULL;
821
822
823
824 /* If B has a fallthru edge to C, no need to move anything. */
825 if (e->flags & EDGE_FALLTHRU)
826 {
827 int b_index = b->index, c_index = c->index;
828 merge_blocks (b, c);
829 update_forwarder_flag (b);
830
831 if (dump_file)
832 fprintf (dump_file, "Merged %d and %d without moving.\n",
833 b_index, c_index);
834
835 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
836 }
837
838 /* Otherwise we will need to move code around. Do that only if expensive
839 transformations are allowed. */
840 else if (mode & CLEANUP_EXPENSIVE)
841 {
842 edge tmp_edge, b_fallthru_edge;
843 bool c_has_outgoing_fallthru;
844 bool b_has_incoming_fallthru;
845 edge_iterator ei;
846
847 /* Avoid overactive code motion, as the forwarder blocks should be
848 eliminated by edge redirection instead. One exception might have
849 been if B is a forwarder block and C has no fallthru edge, but
850 that should be cleaned up by bb-reorder instead. */
851 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
852 return NULL;
853
854 /* We must make sure to not munge nesting of lexical blocks,
855 and loop notes. This is done by squeezing out all the notes
856 and leaving them there to lie. Not ideal, but functional. */
857
858 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
859 if (tmp_edge->flags & EDGE_FALLTHRU)
860 break;
861
862 c_has_outgoing_fallthru = (tmp_edge != NULL);
863
864 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
865 if (tmp_edge->flags & EDGE_FALLTHRU)
866 break;
867
868 b_has_incoming_fallthru = (tmp_edge != NULL);
869 b_fallthru_edge = tmp_edge;
870 next = b->prev_bb;
871 if (next == c)
872 next = next->prev_bb;
873
874 /* Otherwise, we're going to try to move C after B. If C does
875 not have an outgoing fallthru, then it can be moved
876 immediately after B without introducing or modifying jumps. */
877 if (! c_has_outgoing_fallthru)
878 {
879 merge_blocks_move_successor_nojumps (b, c);
880 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
881 }
882
883 /* If B does not have an incoming fallthru, then it can be moved
884 immediately before C without introducing or modifying jumps.
885 C cannot be the first block, so we do not have to worry about
886 accessing a non-existent block. */
887
888 if (b_has_incoming_fallthru)
889 {
890 basic_block bb;
891
892 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
893 return NULL;
894 bb = force_nonfallthru (b_fallthru_edge);
895 if (bb)
896 notice_new_block (bb);
897 }
898
899 merge_blocks_move_predecessor_nojumps (b, c);
900 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
901 }
902
903 return NULL;
904 }
905 \f
906
907 /* Remove the memory attributes of a MEM expression
908 if they are not equal. */
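/* For example, if X and Y have different MEM_EXPRs, both lose their
   MEM_EXPR and MEM_OFFSET; the sizes are merged to the maximum (or
   dropped if either is unknown) and the alignment to the minimum, so
   the result is conservatively valid for either insn. */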
909
910 static void
911 merge_memattrs (rtx x, rtx y)
912 {
913 int i;
914 int j;
915 enum rtx_code code;
916 const char *fmt;
917
918 if (x == y)
919 return;
920 if (x == 0 || y == 0)
921 return;
922
923 code = GET_CODE (x);
924
925 if (code != GET_CODE (y))
926 return;
927
928 if (GET_MODE (x) != GET_MODE (y))
929 return;
930
931 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
932 {
933 if (! MEM_ATTRS (x))
934 MEM_ATTRS (y) = 0;
935 else if (! MEM_ATTRS (y))
936 MEM_ATTRS (x) = 0;
937 else
938 {
939 rtx mem_size;
940
941 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
942 {
943 set_mem_alias_set (x, 0);
944 set_mem_alias_set (y, 0);
945 }
946
947 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
948 {
949 set_mem_expr (x, 0);
950 set_mem_expr (y, 0);
951 set_mem_offset (x, 0);
952 set_mem_offset (y, 0);
953 }
954 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
955 {
956 set_mem_offset (x, 0);
957 set_mem_offset (y, 0);
958 }
959
960 if (!MEM_SIZE (x))
961 mem_size = NULL_RTX;
962 else if (!MEM_SIZE (y))
963 mem_size = NULL_RTX;
964 else
965 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
966 INTVAL (MEM_SIZE (y))));
967 set_mem_size (x, mem_size);
968 set_mem_size (y, mem_size);
969
970 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
971 set_mem_align (y, MEM_ALIGN (x));
972 }
973 }
974
975 fmt = GET_RTX_FORMAT (code);
976 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
977 {
978 switch (fmt[i])
979 {
980 case 'E':
981 /* Two vectors must have the same length. */
982 if (XVECLEN (x, i) != XVECLEN (y, i))
983 return;
984
985 for (j = 0; j < XVECLEN (x, i); j++)
986 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
987
988 break;
989
990 case 'e':
991 merge_memattrs (XEXP (x, i), XEXP (y, i));
992 }
993 }
994 return;
995 }
996
997
998 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
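/* Patterns are compared with rtx_equal_p, or rtx_renumbered_equal_p after
   reload; CALL_INSNs must additionally agree on CALL_INSN_FUNCTION_USAGE
   and SIBLING_CALL_P, and with STACK_REGS the REG_DEAD notes for stack
   registers must match as well. */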
999
1000 static bool
1001 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
1002 {
1003 rtx p1, p2;
1004
1005 /* Verify that I1 and I2 are equivalent. */
1006 if (GET_CODE (i1) != GET_CODE (i2))
1007 return false;
1008
1009 p1 = PATTERN (i1);
1010 p2 = PATTERN (i2);
1011
1012 if (GET_CODE (p1) != GET_CODE (p2))
1013 return false;
1014
1015 /* If this is a CALL_INSN, compare register usage information.
1016 If we don't check this on stack register machines, the two
1017 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1018 numbers of stack registers in the same basic block.
1019 If we don't check this on machines with delay slots, a delay slot may
1020 be filled that clobbers a parameter expected by the subroutine.
1021
1022 ??? We take the simple route for now and assume that if they're
1023 equal, they were constructed identically. */
1024
1025 if (CALL_P (i1)
1026 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1027 CALL_INSN_FUNCTION_USAGE (i2))
1028 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1029 return false;
1030
1031 #ifdef STACK_REGS
1032 /* If cross_jump_death_matters is not 0, the insn's mode
1033 indicates whether or not the insn contains any stack-like
1034 regs. */
1035
1036 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1037 {
1038 /* If register stack conversion has already been done, then
1039 death notes must also be compared before it is certain that
1040 the two instruction streams match. */
1041
1042 rtx note;
1043 HARD_REG_SET i1_regset, i2_regset;
1044
1045 CLEAR_HARD_REG_SET (i1_regset);
1046 CLEAR_HARD_REG_SET (i2_regset);
1047
1048 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1049 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1050 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1051
1052 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1053 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1054 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1055
1056 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1057
1058 return false;
1059
1060 done:
1061 ;
1062 }
1063 #endif
1064
1065 if (reload_completed
1066 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1067 return true;
1068
1069 /* Do not do EQUIV substitution after reload. First, we're undoing the
1070 work of reload_cse. Second, we may be undoing the work of the post-
1071 reload splitting pass. */
1072 /* ??? Possibly add a new phase switch variable that can be used by
1073 targets to disallow the troublesome insns after splitting. */
1074 if (!reload_completed)
1075 {
1076 /* The following code helps take care of G++ cleanups. */
1077 rtx equiv1 = find_reg_equal_equiv_note (i1);
1078 rtx equiv2 = find_reg_equal_equiv_note (i2);
1079
1080 if (equiv1 && equiv2
1081 /* If the equivalences are not to a constant, they may
1082 reference pseudos that no longer exist, so we can't
1083 use them. */
1084 && (! reload_completed
1085 || (CONSTANT_P (XEXP (equiv1, 0))
1086 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1087 {
1088 rtx s1 = single_set (i1);
1089 rtx s2 = single_set (i2);
1090 if (s1 != 0 && s2 != 0
1091 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1092 {
1093 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1094 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1095 if (! rtx_renumbered_equal_p (p1, p2))
1096 cancel_changes (0);
1097 else if (apply_change_group ())
1098 return true;
1099 }
1100 }
1101 }
1102
1103 return false;
1104 }
1105 \f
1106 /* Look through the insns at the end of BB1 and BB2 and find the longest
1107 sequence that is equivalent. Store the first insns of that sequence
1108 in *F1 and *F2 and return the sequence length.
1109
1110 To simplify callers of this function, if the blocks match exactly,
1111 store the head of the blocks in *F1 and *F2. */
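/* The search walks backwards from the block ends, skipping notes and any
   trailing simple jump, comparing insn pairs with insns_match_p and
   merging their memory attributes, and stops at the first mismatch or at
   a block head. */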
1112
1113 static int
1114 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1115 basic_block bb2, rtx *f1, rtx *f2)
1116 {
1117 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1118 int ninsns = 0;
1119
1120 /* Skip simple jumps at the end of the blocks. Complex jumps still
1121 need to be compared for equivalence, which we'll do below. */
1122
1123 i1 = BB_END (bb1);
1124 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1125 if (onlyjump_p (i1)
1126 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1127 {
1128 last1 = i1;
1129 i1 = PREV_INSN (i1);
1130 }
1131
1132 i2 = BB_END (bb2);
1133 if (onlyjump_p (i2)
1134 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1135 {
1136 last2 = i2;
1137 /* Count everything except for the unconditional jump as an insn. */
1138 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1139 ninsns++;
1140 i2 = PREV_INSN (i2);
1141 }
1142
1143 while (true)
1144 {
1145 /* Ignore notes. */
1146 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1147 i1 = PREV_INSN (i1);
1148
1149 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1150 i2 = PREV_INSN (i2);
1151
1152 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1153 break;
1154
1155 if (!insns_match_p (mode, i1, i2))
1156 break;
1157
1158 merge_memattrs (i1, i2);
1159
1160 /* Don't begin a cross-jump with a NOTE insn. */
1161 if (INSN_P (i1))
1162 {
1163 /* If the merged insns have different REG_EQUAL notes, then
1164 remove them. */
1165 rtx equiv1 = find_reg_equal_equiv_note (i1);
1166 rtx equiv2 = find_reg_equal_equiv_note (i2);
1167
1168 if (equiv1 && !equiv2)
1169 remove_note (i1, equiv1);
1170 else if (!equiv1 && equiv2)
1171 remove_note (i2, equiv2);
1172 else if (equiv1 && equiv2
1173 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1174 {
1175 remove_note (i1, equiv1);
1176 remove_note (i2, equiv2);
1177 }
1178
1179 afterlast1 = last1, afterlast2 = last2;
1180 last1 = i1, last2 = i2;
1181 ninsns++;
1182 }
1183
1184 i1 = PREV_INSN (i1);
1185 i2 = PREV_INSN (i2);
1186 }
1187
1188 #ifdef HAVE_cc0
1189 /* Don't allow the insn after a compare to be shared by
1190 cross-jumping unless the compare is also shared. */
1191 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1192 last1 = afterlast1, last2 = afterlast2, ninsns--;
1193 #endif
1194
1195 /* Include preceding notes and labels in the cross-jump. One,
1196 this may bring us to the head of the blocks as requested above.
1197 Two, it keeps line number notes as matched as possible. */
1198 if (ninsns)
1199 {
1200 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1201 last1 = PREV_INSN (last1);
1202
1203 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1204 last1 = PREV_INSN (last1);
1205
1206 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1207 last2 = PREV_INSN (last2);
1208
1209 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1210 last2 = PREV_INSN (last2);
1211
1212 *f1 = last1;
1213 *f2 = last2;
1214 }
1215
1216 return ninsns;
1217 }
1218
1219 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1220 the branch instruction. This means that if we commonize the control
1221 flow before the end of the basic block, the semantics remain unchanged.
1222
1223 We may assume that there exists one edge with a common destination. */
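/* For example, two blocks each ending in a test such as "if (x < y)"
   with the same two destinations match directly; they also match when
   the branch and fallthru edges are crossed, or when the comparison
   operands are swapped together with the condition code. */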
1224
1225 static bool
1226 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1227 {
1228 int nehedges1 = 0, nehedges2 = 0;
1229 edge fallthru1 = 0, fallthru2 = 0;
1230 edge e1, e2;
1231 edge_iterator ei;
1232
1233 /* If BB1 has only one successor, we may be looking at either an
1234 unconditional jump, or a fake edge to exit. */
1235 if (EDGE_COUNT (bb1->succs) == 1
1236 && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1237 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1238 return (EDGE_COUNT (bb2->succs) == 1
1239 && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1240 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1241
1242 /* Match conditional jumps - this may get tricky when fallthru and branch
1243 edges are crossed. */
1244 if (EDGE_COUNT (bb1->succs) == 2
1245 && any_condjump_p (BB_END (bb1))
1246 && onlyjump_p (BB_END (bb1)))
1247 {
1248 edge b1, f1, b2, f2;
1249 bool reverse, match;
1250 rtx set1, set2, cond1, cond2;
1251 enum rtx_code code1, code2;
1252
1253 if (EDGE_COUNT (bb2->succs) != 2
1254 || !any_condjump_p (BB_END (bb2))
1255 || !onlyjump_p (BB_END (bb2)))
1256 return false;
1257
1258 b1 = BRANCH_EDGE (bb1);
1259 b2 = BRANCH_EDGE (bb2);
1260 f1 = FALLTHRU_EDGE (bb1);
1261 f2 = FALLTHRU_EDGE (bb2);
1262
1263 /* Get around possible forwarders on fallthru edges. Other cases
1264 should be optimized out already. */
1265 if (FORWARDER_BLOCK_P (f1->dest))
1266 f1 = EDGE_SUCC (f1->dest, 0);
1267
1268 if (FORWARDER_BLOCK_P (f2->dest))
1269 f2 = EDGE_SUCC (f2->dest, 0);
1270
1271 /* To simplify use of this function, return false if there are
1272 unneeded forwarder blocks. These will get eliminated later
1273 during cleanup_cfg. */
1274 if (FORWARDER_BLOCK_P (f1->dest)
1275 || FORWARDER_BLOCK_P (f2->dest)
1276 || FORWARDER_BLOCK_P (b1->dest)
1277 || FORWARDER_BLOCK_P (b2->dest))
1278 return false;
1279
1280 if (f1->dest == f2->dest && b1->dest == b2->dest)
1281 reverse = false;
1282 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1283 reverse = true;
1284 else
1285 return false;
1286
1287 set1 = pc_set (BB_END (bb1));
1288 set2 = pc_set (BB_END (bb2));
1289 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1290 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1291 reverse = !reverse;
1292
1293 cond1 = XEXP (SET_SRC (set1), 0);
1294 cond2 = XEXP (SET_SRC (set2), 0);
1295 code1 = GET_CODE (cond1);
1296 if (reverse)
1297 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1298 else
1299 code2 = GET_CODE (cond2);
1300
1301 if (code2 == UNKNOWN)
1302 return false;
1303
1304 /* Verify codes and operands match. */
1305 match = ((code1 == code2
1306 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1307 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1308 || (code1 == swap_condition (code2)
1309 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1310 XEXP (cond2, 0))
1311 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1312 XEXP (cond2, 1))));
1313
1314 /* If we return true, we will join the blocks. Which means that
1315 we will only have one branch prediction bit to work with. Thus
1316 we require the existing branches to have probabilities that are
1317 roughly similar. */
1318 if (match
1319 && !optimize_size
1320 && maybe_hot_bb_p (bb1)
1321 && maybe_hot_bb_p (bb2))
1322 {
1323 int prob2;
1324
1325 if (b1->dest == b2->dest)
1326 prob2 = b2->probability;
1327 else
1328 /* Do not use f2 probability as f2 may be forwarded. */
1329 prob2 = REG_BR_PROB_BASE - b2->probability;
1330
1331 /* Fail if the difference in probabilities is greater than 50%.
1332 This rules out two well-predicted branches with opposite
1333 outcomes. */
1334 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1335 {
1336 if (dump_file)
1337 fprintf (dump_file,
1338 "Outcomes of branch in bb %i and %i differs too much (%i %i)\n",
1339 bb1->index, bb2->index, b1->probability, prob2);
1340
1341 return false;
1342 }
1343 }
1344
1345 if (dump_file && match)
1346 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1347 bb1->index, bb2->index);
1348
1349 return match;
1350 }
1351
1352 /* Generic case - we are seeing a computed jump, table jump or trapping
1353 instruction. */
1354
1355 #ifndef CASE_DROPS_THROUGH
1356 /* Check whether there are tablejumps in the end of BB1 and BB2.
1357 Return true if they are identical. */
1358 {
1359 rtx label1, label2;
1360 rtx table1, table2;
1361
1362 if (tablejump_p (BB_END (bb1), &label1, &table1)
1363 && tablejump_p (BB_END (bb2), &label2, &table2)
1364 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1365 {
1366 /* The labels should never be the same rtx. If they really are the
1367 same, the jump tables are the same too. So disable crossjumping of
1368 blocks BB1 and BB2 because when deleting the common insns in the
1369 end of BB1 by delete_basic_block () the jump table would be deleted
1370 too. */
1371 /* If LABEL2 is referenced in BB1->END do not do anything
1372 because we would lose information when replacing
1373 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1373 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1374 {
1375 /* Set IDENTICAL to true when the tables are identical. */
1376 bool identical = false;
1377 rtx p1, p2;
1378
1379 p1 = PATTERN (table1);
1380 p2 = PATTERN (table2);
1381 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1382 {
1383 identical = true;
1384 }
1385 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1386 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1387 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1388 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1389 {
1390 int i;
1391
1392 identical = true;
1393 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1394 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1395 identical = false;
1396 }
1397
1398 if (identical)
1399 {
1400 replace_label_data rr;
1401 bool match;
1402
1403 /* Temporarily replace references to LABEL1 with LABEL2
1404 in BB1->END so that we can compare the instructions. */
1405 rr.r1 = label1;
1406 rr.r2 = label2;
1407 rr.update_label_nuses = false;
1408 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1409
1410 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1411 if (dump_file && match)
1412 fprintf (dump_file,
1413 "Tablejumps in bb %i and %i match.\n",
1414 bb1->index, bb2->index);
1415
1416 /* Set the original label in BB1->END because when deleting
1417 a block whose end is a tablejump, the tablejump referenced
1418 from the instruction is deleted too. */
1419 rr.r1 = label2;
1420 rr.r2 = label1;
1421 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1422
1423 return match;
1424 }
1425 }
1426 return false;
1427 }
1428 }
1429 #endif
1430
1431 /* First ensure that the instructions match. There may be many outgoing
1432 edges so this test is generally cheaper. */
1433 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1434 return false;
1435
1436 /* Search the outgoing edges, ensure that the counts do match, and find
1437 possible fallthru and exception handling edges since these need more
1438 validation. */
1439 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1440 return false;
1441
1442 FOR_EACH_EDGE (e1, ei, bb1->succs)
1443 {
1444 e2 = EDGE_SUCC (bb2, ei.index);
1445
1446 if (e1->flags & EDGE_EH)
1447 nehedges1++;
1448
1449 if (e2->flags & EDGE_EH)
1450 nehedges2++;
1451
1452 if (e1->flags & EDGE_FALLTHRU)
1453 fallthru1 = e1;
1454 if (e2->flags & EDGE_FALLTHRU)
1455 fallthru2 = e2;
1456 }
1457
1458 /* If number of edges of various types does not match, fail. */
1459 if (nehedges1 != nehedges2
1460 || (fallthru1 != 0) != (fallthru2 != 0))
1461 return false;
1462
1463 /* Fallthru edges must be forwarded to the same destination. */
1464 if (fallthru1)
1465 {
1466 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1467 ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
1468 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1469 ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
1470
1471 if (d1 != d2)
1472 return false;
1473 }
1474
1475 /* Ensure the same EH region. */
1476 {
1477 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1478 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1479
1480 if (!n1 && n2)
1481 return false;
1482
1483 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1484 return false;
1485 }
1486
1487 /* We don't need to match the rest of the edges as the above checks should
1488 be enough to ensure that they are equivalent. */
1489 return true;
1490 }
1491
1492 /* E1 and E2 are edges with the same destination block. Search their
1493 predecessors for common code. If found, redirect control flow from
1494 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
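/* A sketch of the transformation: when the tails of E1->SRC and E2->SRC
   end in the same insn sequence, E2->SRC is split (if needed) just before
   the matched sequence, the matched tail of E1->SRC is deleted, and
   control is redirected to the shared copy, so the sequence is emitted
   only once. */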
1495
1496 static bool
1497 try_crossjump_to_edge (int mode, edge e1, edge e2)
1498 {
1499 int nmatch;
1500 basic_block src1 = e1->src, src2 = e2->src;
1501 basic_block redirect_to, redirect_from, to_remove;
1502 rtx newpos1, newpos2;
1503 edge s;
1504 edge_iterator ei;
1505
1506 newpos1 = newpos2 = NULL_RTX;
1507
1508 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1509 to try this optimization.
1510
1511 Basic block partitioning may result in some jumps that appear to
1512 be optimizable (or blocks that appear to be mergeable), but which really
1513 must be left untouched (they are required to make it safely across
1514 partition boundaries). See the comments at the top of
1515 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1516
1517 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1518 return false;
1519
1520 /* Search backward through forwarder blocks. We don't need to worry
1521 about multiple entry or chained forwarders, as they will be optimized
1522 away. We do this to look past the unconditional jump following a
1523 conditional jump that is required due to the current CFG shape. */
1524 if (EDGE_COUNT (src1->preds) == 1
1525 && FORWARDER_BLOCK_P (src1))
1526 e1 = EDGE_PRED (src1, 0), src1 = e1->src;
1527
1528 if (EDGE_COUNT (src2->preds) == 1
1529 && FORWARDER_BLOCK_P (src2))
1530 e2 = EDGE_PRED (src2, 0), src2 = e2->src;
1531
1532 /* Nothing to do if we reach ENTRY, or a common source block. */
1533 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1534 return false;
1535 if (src1 == src2)
1536 return false;
1537
1538 /* Seeing more than one forwarder block would confuse us later... */
1539 if (FORWARDER_BLOCK_P (e1->dest)
1540 && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
1541 return false;
1542
1543 if (FORWARDER_BLOCK_P (e2->dest)
1544 && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
1545 return false;
1546
1547 /* Likewise with dead code (possibly newly created by the other optimizations
1548 of cfg_cleanup). */
1549 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1550 return false;
1551
1552 /* Look for the common insn sequence, part the first ... */
1553 if (!outgoing_edges_match (mode, src1, src2))
1554 return false;
1555
1556 /* ... and part the second. */
1557 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1558
1559 /* Don't proceed with the crossjump unless we found a sufficient number
1560 of matching instructions or the 'from' block was totally matched
1561 (such that its predecessors will hopefully be redirected and the
1562 block removed). */
1563 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1564 && (newpos1 != BB_HEAD (src1)))
1565 return false;
1566
1567 #ifndef CASE_DROPS_THROUGH
1568 /* Here we know that the insns in the end of SRC1 which are common with SRC2
1569 will be deleted.
1570 If we have tablejumps in the end of SRC1 and SRC2
1571 they have already been compared for equivalence in outgoing_edges_match ()
1572 so replace the references to TABLE1 with references to TABLE2. */
1573 {
1574 rtx label1, label2;
1575 rtx table1, table2;
1576
1577 if (tablejump_p (BB_END (src1), &label1, &table1)
1578 && tablejump_p (BB_END (src2), &label2, &table2)
1579 && label1 != label2)
1580 {
1581 replace_label_data rr;
1582 rtx insn;
1583
1584 /* Replace references to LABEL1 with LABEL2. */
1585 rr.r1 = label1;
1586 rr.r2 = label2;
1587 rr.update_label_nuses = true;
1588 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1589 {
1590 /* Do not replace the label in SRC1->END because when deleting
1591 a block whose end is a tablejump, the tablejump referenced
1592 from the instruction is deleted too. */
1593 if (insn != BB_END (src1))
1594 for_each_rtx (&insn, replace_label, &rr);
1595 }
1596 }
1597 }
1598 #endif
1599
1600 /* Avoid splitting if possible. */
1601 if (newpos2 == BB_HEAD (src2))
1602 redirect_to = src2;
1603 else
1604 {
1605 if (dump_file)
1606 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1607 src2->index, nmatch);
1608 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1609 }
1610
1611 if (dump_file)
1612 fprintf (dump_file,
1613 "Cross jumping from bb %i to bb %i; %i common insns\n",
1614 src1->index, src2->index, nmatch);
1615
1616 redirect_to->count += src1->count;
1617 redirect_to->frequency += src1->frequency;
1618 /* We may have some registers visible through the block. */
1619 redirect_to->flags |= BB_DIRTY;
1620
1621 /* Recompute the frequencies and counts of outgoing edges. */
1622 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1623 {
1624 edge s2;
1625 edge_iterator ei;
1626 basic_block d = s->dest;
1627
1628 if (FORWARDER_BLOCK_P (d))
1629 d = EDGE_SUCC (d, 0)->dest;
1630
1631 FOR_EACH_EDGE (s2, ei, src1->succs)
1632 {
1633 basic_block d2 = s2->dest;
1634 if (FORWARDER_BLOCK_P (d2))
1635 d2 = EDGE_SUCC (d2, 0)->dest;
1636 if (d == d2)
1637 break;
1638 }
1639
1640 s->count += s2->count;
1641
1642 /* Take care to update possible forwarder blocks. We verified
1643 that there is no more than one in the chain, so we can't run
1644 into an infinite loop. */
1645 if (FORWARDER_BLOCK_P (s->dest))
1646 {
1647 EDGE_SUCC (s->dest, 0)->count += s2->count;
1648 s->dest->count += s2->count;
1649 s->dest->frequency += EDGE_FREQUENCY (s);
1650 }
1651
1652 if (FORWARDER_BLOCK_P (s2->dest))
1653 {
1654 EDGE_SUCC (s2->dest, 0)->count -= s2->count;
1655 if (EDGE_SUCC (s2->dest, 0)->count < 0)
1656 EDGE_SUCC (s2->dest, 0)->count = 0;
1657 s2->dest->count -= s2->count;
1658 s2->dest->frequency -= EDGE_FREQUENCY (s);
1659 if (s2->dest->frequency < 0)
1660 s2->dest->frequency = 0;
1661 if (s2->dest->count < 0)
1662 s2->dest->count = 0;
1663 }
1664
1665 if (!redirect_to->frequency && !src1->frequency)
1666 s->probability = (s->probability + s2->probability) / 2;
1667 else
1668 s->probability
1669 = ((s->probability * redirect_to->frequency +
1670 s2->probability * src1->frequency)
1671 / (redirect_to->frequency + src1->frequency));
1672 }
1673
1674 update_br_prob_note (redirect_to);
1675
1676 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1677
1678 /* Skip possible basic block header. */
1679 if (LABEL_P (newpos1))
1680 newpos1 = NEXT_INSN (newpos1);
1681
1682 if (NOTE_P (newpos1))
1683 newpos1 = NEXT_INSN (newpos1);
1684
1685 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1686 to_remove = EDGE_SUCC (redirect_from, 0)->dest;
1687
1688 redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
1689 delete_basic_block (to_remove);
1690
1691 update_forwarder_flag (redirect_from);
1692
1693 return true;
1694 }
1695
1696 /* Search the predecessors of BB for common insn sequences. When found,
1697 share code between them by redirecting control flow. Return true if
1698 any changes were made. */
1699
1700 static bool
1701 try_crossjump_bb (int mode, basic_block bb)
1702 {
1703 edge e, e2, fallthru;
1704 bool changed;
1705 unsigned max, ix, ix2;
1706 basic_block ev, ev2;
1707 edge_iterator ei;
1708
1709 /* Nothing to do if there are not at least two incoming edges. */
1710 if (EDGE_COUNT (bb->preds) < 2)
1711 return false;
1712
1713 /* If we are partitioning hot/cold basic blocks, we don't want to
1714 mess up unconditional or indirect jumps that cross between hot
1715 and cold sections.
1716
1717 Basic block partitioning may result in some jumps that appear to
1718 be optimizable (or blocks that appear to be mergeable), but which really
1719 must be left untouched (they are required to make it safely across
1720 partition boundaries). See the comments at the top of
1721 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1722
1723 if (flag_reorder_blocks_and_partition
1724 && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
1725 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
1726 return false;
1727
1728 /* It is always cheapest to redirect a block that ends in a branch to
1729 a block that falls through into BB, as that adds no branches to the
1730 program. We'll try that combination first. */
1731 fallthru = NULL;
1732 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1733
1734 if (EDGE_COUNT (bb->preds) > max)
1735 return false;
1736
1737 FOR_EACH_EDGE (e, ei, bb->preds)
1738 {
1739 if (e->flags & EDGE_FALLTHRU)
1740 fallthru = e;
1741 }
1742
1743 changed = false;
1744 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1745 {
1746 e = EDGE_PRED (ev, ix);
1747 ix++;
1748
1749 /* As noted above, first try with the fallthru predecessor. */
1750 if (fallthru)
1751 {
1752 /* Don't combine the fallthru edge into anything else.
1753 If there is a match, we'll do it the other way around. */
1754 if (e == fallthru)
1755 continue;
1756 /* If nothing changed since the last attempt, there is nothing
1757 we can do. */
1758 if (!first_pass
1759 && (!(e->src->flags & BB_DIRTY)
1760 && !(fallthru->src->flags & BB_DIRTY)))
1761 continue;
1762
1763 if (try_crossjump_to_edge (mode, e, fallthru))
1764 {
1765 changed = true;
1766 ix = 0;
1767 ev = bb;
1768 continue;
1769 }
1770 }
1771
1772 /* Non-obvious work limiting check: Recognize that we're going
1773 to call try_crossjump_bb on every basic block. So if we have
1774 two blocks with lots of outgoing edges (a switch) and they
1775 share lots of common destinations, then we would do the
1776 cross-jump check once for each common destination.
1777
1778 Now, if the blocks actually are cross-jump candidates, then
1779 all of their destinations will be shared. Which means that
1780 we only need check them for cross-jump candidacy once. We
1781 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1782 choosing to do the check from the block for which the edge
1783 in question is the first successor of A. */
1784 if (EDGE_SUCC (e->src, 0) != e)
1785 continue;
1786
      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
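	  /* E.g. for predecessor blocks with indices 3 and 7, only
	     crossjump(3,7) is attempted here; crossjump(7,3) would
	     merely examine the same pair from the other side.  */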
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(e2->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
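      /* Sketch: if A falls into B, B is A's sole successor and A is
	 B's sole predecessor, then the pair

	     A: insn1; insn2;     B: insn3; insn4;

	 collapses into a single block containing insn1 through insn4.  */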
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  */
	      if (EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (EDGE_PRED (b, 0)->src))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label);
		  /* If the label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that FORWARDER_BLOCK_P being true ensures that
		     this block has a successor.  */
		  && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (EDGE_PRED (b, 0),
					    EDGE_SUCC (b, 0)->dest);
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (EDGE_COUNT (b->succs) == 1
		  && (s = EDGE_SUCC (b, 0))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && EDGE_COUNT (c->preds) == 1
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSNs.  This code possibly creates new basic blocks, so
		     it does not fit the merge_blocks interface and is kept
		     here in the hope that it will become useless once more
		     of the compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
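	      /* E.g. a conditional jump whose taken and not-taken arms
		 both reach the same destination can be rewritten as a
		 simple unconditional jump, or deleted outright when the
		 destination is the fallthru block.  */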
	      if (EDGE_COUNT (b->succs) == 1
		  && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0),
						     EDGE_SUCC (b, 0)->dest,
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Merge sequential blocks if possible.  Return true if any blocks
   were merged.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (EDGE_COUNT (bb->succs) == 1
	  && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
	{
	  /* Merge the blocks and retry.  */
	  merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
	  changed = true;
	  continue;
	}

      bb = bb->next_bb;
    }

  return changed;
}
\f
/* Tidy the CFG by deleting unreachable code and whatnot, iterating the
   optimizations selected by MODE until no further changes occur.
   Return true if any changes were made.  */
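/* Hypothetical call site (for illustration only): a pass that has just
   removed or redirected edges might invoke

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   to re-run the cleanups above until the CFG reaches a fixed point.  */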

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks ();
      changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | ((mode & CLEANUP_LOG_LINKS)
						    ? PROP_LOG_LINKS : 0)))
	    break;
	}
      else if (!(mode & CLEANUP_NO_INSN_DEL)
	       && (mode & CLEANUP_EXPENSIVE)
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}