/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to that
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
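
/* A hedged illustration of edge forwarding (an example of ours, not from
   the original sources).  If block B1 jumps to a forwarder block F whose
   only job is to jump on to B2, the edge B1->F is redirected straight to
   B2, after which F normally becomes unreachable and is removed:

       B1: ... goto F;           B1: ... goto B2;
       F:  goto B2;       ==>
       B2: ...                   B2: ...                                */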

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"

/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if BB is a forwarder block, to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 1,
  BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
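
/* A hedged usage sketch of the flag macros above (hypothetical fragment,
   not part of this file):

     if (forwarder_block_p (bb))
       BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
     ...
     if (FORWARDER_BLOCK_P (bb))      <- cheap cached test
       ...
     BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);

   The cached bit avoids rescanning the block on every query.  */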

/* Set to true when we are running the first pass of the
   try_optimize_cfg loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
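
/* A hedged before/after illustration (ours, not from the original
   sources) of the shape this function simplifies:

     cbranch_block:  if (cond) goto L_dest;
     jump_block:     goto L_other;            <- forwarder
     L_dest:         ...

   becomes

     cbranch_block:  if (!cond) goto L_other;
     L_dest:         ...

   i.e. the condition is inverted and the jump block deleted.  */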

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
	  || (cbranch_jump_edge->flags & EDGE_CROSSING)))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
\f
/* Attempt to prove that the operation is a NOOP using CSElib; otherwise
   mark the effect on a register.  Used by jump threading.  */

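/* A hedged sketch of how mark_effect is used by the threading code
   below (hypothetical fragment):

     regset nonequal = BITMAP_ALLOC (NULL);
     ...
     failed |= mark_effect (PATTERN (insn), nonequal);

   A (clobber (reg)) clears the register from NONEQUAL (the value is
   dead, so it need not match); a (set (reg) ...) that cselib cannot
   prove to be a no-op marks the register as nonequal; a set of anything
   other than a register or the PC makes the block unthreadable.  */
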
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know
	 the value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}

/* Return nonzero if *X is a register that is a member of regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}

/* Attempt to prove that the basic block B will have no side effects and
   will always continue along the same edge when reached via E.  Return
   that edge if it exists, NULL otherwise.  */

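/* A hedged source-level example of a threadable jump (ours, not from
   the original sources): given

     if (a > 0) goto L1;  ...  L1: if (a != 0) goto L2;

   the first comparison dominates the second, so an execution that
   enters L1 via the taken edge of the first branch must also take the
   branch to L2; provided the block at L1 has no side effects, the edge
   can be redirected straight to L2.  */
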
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we only handle conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* COND2 must not mention any register that is not known to be equal
     to the value it had in the former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove
     equivalence only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

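/* A hedged sketch of what the walk below accomplishes (ours, not from
   the original sources).  A chain

     B -> F1 -> F2 -> T

   where F1 and F2 are forwarder blocks is collapsed by redirecting B's
   outgoing edge directly to T.  The COUNTER walk bounds the chain
   length by n_basic_blocks, so cyclic forwarder chains cannot loop
   forever.  */
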
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which
     really must be left untouched (they are required to make it safely
     across partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a
	 conditional branch and turn the conditional branch into an
	 unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (flag_reorder_blocks_and_partition
	  && first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing loop pre-headers, as they are where the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if ((mode & CLEANUP_PRE_LOOP) && optimize)
	    {
	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
			  ? BB_HEAD (target)
			  : prev_nonnote_insn (BB_END (target)));

	      if (!NOTE_P (insn))
		insn = NEXT_INSN (insn);

	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (NOTE_P (insn)
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (NOTE_P (insn))
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (BB_HEAD (target));
	      if (insn && NOTE_P (insn)
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is the exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed the edge, as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

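/* A hedged illustration (ours, not from the original sources): if A
   jumps to B and nothing falls through into A, the insn chain

     ... | B | ... | A | ...

   can be reordered to

     ... | A | B | ...

   after which A falls through into B and the two can be merged.  */
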
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which
     really must be left untouched (they are required to make it safely
     across partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (a) != BB_PARTITION (b)
	  || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which
     really must be left untouched (they are required to make it safely
     across partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (a) != BB_PARTITION (b)))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which
     really must be left untouched (they are required to make it safely
     across partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
	  || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (b) != BB_PARTITION (c)))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
\f

/* Remove the memory attributes of MEM expressions X and Y where they
   are not equal.  */

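/* A hedged example of the merging rules below (hypothetical values):
   given two matching MEMs where one has alias set 2 and the other 3,
   both alias sets are reset to 0; differing MEM_EXPRs or offsets are
   dropped; the merged size is the maximum and the merged alignment the
   minimum of the two.  */
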
static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}

/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
\f
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

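/* A hedged sketch of the matching walk below (ours, not from the
   original sources): starting from the ends of BB1 and BB2,

     BB1: ... i3 i2 i1 [jump]
     BB2: ... k3 k2 k1 [jump]

   insns are compared pairwise backwards (i1 vs k1, i2 vs k2, ...) with
   insns_match_p until a mismatch or a block head is reached; the
   earliest matched pair is returned through *F1/*F2.  */
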
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as closely matched as possible.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

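/* A hedged example of the "reverse" case handled below: BB1 ends with
   "if (x == 0) goto A; else fall to B" while BB2 ends with
   "if (x != 0) goto B; else fall to A".  The outgoing edges match once
   one condition is treated as the reversal of the other, which is why
   the codes are compared via reversed_comparison_code.  */
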
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use the f2 probability, as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps at the end of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
	&& tablejump_p (BB_END (bb2), &label2, &table2)
	&& GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
	/* The labels should never be the same rtx.  If they really are
	   the same, the jump tables are the same too.  So disable
	   crossjumping of blocks BB1 and BB2 because when deleting the
	   common insns at the end of BB1 by delete_basic_block () the
	   jump table would be deleted too.  */
	/* If LABEL2 is referenced in BB1->END do not do anything
	   because we would lose information when replacing
	   LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	  {
	    /* Set IDENTICAL to true when the tables are identical.  */
	    bool identical = false;
	    rtx p1, p2;

	    p1 = PATTERN (table1);
	    p2 = PATTERN (table2);
	    if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
	      {
		identical = true;
	      }
	    else if (GET_CODE (p1) == ADDR_DIFF_VEC
		     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
	      {
		int i;

		identical = true;
		for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		    identical = false;
	      }

	    if (identical)
	      {
		replace_label_data rr;
		bool match;

		/* Temporarily replace references to LABEL1 with LABEL2
		   in BB1->END so that we could compare the instructions.  */
		rr.r1 = label1;
		rr.r2 = label2;
		rr.update_label_nuses = false;
		for_each_rtx (&BB_END (bb1), replace_label, &rr);

		match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		if (dump_file && match)
		  fprintf (dump_file,
			   "Tablejumps in bb %i and %i match.\n",
			   bb1->index, bb2->index);

		/* Set the original label in BB1->END because when deleting
		   a block whose end is a tablejump, the tablejump referenced
		   from the instruction is deleted too.  */
		rr.r1 = label2;
		rr.r2 = label1;
		for_each_rtx (&BB_END (bb1), replace_label, &rr);

		return match;
	      }
	  }
	return false;
      }
  }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges, since these need
     more validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of the edges, as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

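/* A hedged source-level illustration of crossjumping (ours, not from
   the original sources): two predecessors of a common destination that
   end with the same insn sequence,

     SRC1: a = f (x); goto L;        SRC2: a = f (x); goto L;

   are merged by redirecting SRC1's control flow into SRC2's copy of the
   common tail, so the sequence is emitted only once.  */
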
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which
     really must be left untouched (they are required to make it safely
     across partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Here we know that the insns at the end of SRC1 which are common with
     SRC2 will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2, they have already
     been compared for equivalence in outgoing_edges_match (), so replace
     the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
	&& tablejump_p (BB_END (src2), &label2, &table2)
	&& label1 != label2)
      {
	replace_label_data rr;
	rtx insn;

	/* Replace references to LABEL1 with LABEL2.  */
	rr.r1 = label1;
	rr.r2 = label2;
	rr.update_label_nuses = true;
	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	  {
	    /* Do not replace the label in SRC1->END because when deleting
	       a block whose end is a tablejump, the tablejump referenced
	       from the instruction is deleted too.  */
	    if (insn != BB_END (src1))
	      for_each_rtx (&insn, replace_label, &rr);
	  }
      }
  }

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from),
				  redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

1698 static bool
1699 try_crossjump_bb (int mode, basic_block bb)
1700 {
1701 edge e, e2, fallthru;
1702 bool changed;
1703 unsigned max, ix, ix2;
1704 basic_block ev, ev2;
1705 edge_iterator ei;
1706
1707 /* Nothing to do if there is not at least two incoming edges. */
1708 if (EDGE_COUNT (bb->preds) < 2)
1709 return false;
1710
1711 /* Don't crossjump if this block ends in a computed jump,
1712 unless we are optimizing for size. */
1713 if (!optimize_size
1714 && bb != EXIT_BLOCK_PTR
1715 && computed_jump_p (BB_END (bb)))
1716 return false;
1717
1718 /* If we are partitioning hot/cold basic blocks, we don't want to
1719 mess up unconditional or indirect jumps that cross between hot
1720 and cold sections.
1721
1722 Basic block partitioning may result in some jumps that appear to
1723 be optimizable (or blocks that appear to be mergeable), but which really
1724 must be left untouched (they are required to make it safely across
1725 partition boundaries). See the comments at the top of
1726 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1727
1728 if (flag_reorder_blocks_and_partition
1729 && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
1730 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
1731 return false;
1732
1733 /* It is always cheapest to redirect a block that ends in a branch to
1734 a block that falls through into BB, as that adds no branches to the
1735 program. We'll try that combination first. */
1736 fallthru = NULL;
1737 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1738
1739 if (EDGE_COUNT (bb->preds) > max)
1740 return false;
1741
1742 FOR_EACH_EDGE (e, ei, bb->preds)
1743 {
1744 if (e->flags & EDGE_FALLTHRU)
1745 fallthru = e;
1746 }
1747
1748 changed = false;
1749 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1750 {
1751 e = EDGE_PRED (ev, ix);
1752 ix++;
1753
1754 /* As noted above, first try with the fallthru predecessor. */
1755 if (fallthru)
1756 {
1757 /* Don't combine the fallthru edge into anything else.
1758 If there is a match, we'll do it the other way around. */
1759 if (e == fallthru)
1760 continue;
1761 /* If nothing changed since the last attempt, there is nothing
1762 we can do. */
1763 if (!first_pass
1764 && (!(e->src->flags & BB_DIRTY)
1765 && !(fallthru->src->flags & BB_DIRTY)))
1766 continue;
1767
1768 if (try_crossjump_to_edge (mode, e, fallthru))
1769 {
1770 changed = true;
1771 ix = 0;
1772 ev = bb;
1773 continue;
1774 }
1775 }
1776
1777 /* Non-obvious work limiting check: Recognize that we're going
1778 to call try_crossjump_bb on every basic block. So if we have
1779 two blocks with lots of outgoing edges (a switch) and they
1780 share lots of common destinations, then we would do the
1781 cross-jump check once for each common destination.
1782
1783 Now, if the blocks actually are cross-jump candidates, then
1784 all of their destinations will be shared. Which means that
1785 we only need check them for cross-jump candidacy once. We
1786 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1787 choosing to do the check from the block for which the edge
1788 in question is the first successor of A. */
1789 if (EDGE_SUCC (e->src, 0) != e)
1790 continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
        {
          e2 = EDGE_PRED (ev2, ix2);
          ix2++;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              ev2 = bb;
              ix = 0;
              break;
            }
        }
    }

  return changed;
}
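
/* An illustrative sketch (hypothetical block names, not code from this
   file): cross-jumping shares an identical tail between two
   predecessors of BB.  Roughly,

     A: ...; x = f (); jump BB          A: ...; jump B2
     B: ...; x = f (); fallthru BB  =>  B: ...
                                        B2: x = f (); fallthru BB

   try_crossjump_to_edge splits the matched tail out of the fallthru
   predecessor and redirects the other predecessor to it.  */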

/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);

          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;

              /* Delete trivially dead basic blocks.  */
              while (EDGE_COUNT (b->preds) == 0)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }

              /* Remove code labels no longer used.  */
              if (single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
                  && LABEL_P (BB_HEAD (b))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THRU, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (single_pred (b) == ENTRY_BLOCK_PTR
                      || !JUMP_P (BB_END (single_pred (b)))
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (single_pred (b)))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* In case the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }
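
              /* A label can be undeletable because its address is
                 still referenced, e.g. from a jump table or a computed
                 goto; in that case delete_insn_chain leaves it behind
                 as a NOTE_INSN_DELETED_LABEL note, which the code
                 above tucks away after the basic block note.  */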

              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that FORWARDER_BLOCK_P being true ensures that
                     there is a successor for this block.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (single_pred_edge (b),
                                            single_succ (b));
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }
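
              /* E.g. with hypothetical blocks P -> B -> S, where the
                 empty forwarder B is both entered and left by fallthru
                 edges, the edge P->B is redirected straight to S and B
                 is deleted, so P now falls through to S.  */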

              if (single_succ_p (b)
                  && (s = single_succ_edge (b))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && single_pred_p (c)
                  && b != c)
                {
                  /* When not in cfg_layout mode use code aware of
                     reordering INSNs.  This code can create new basic
                     blocks, so it does not fit the merge_blocks
                     interface and is kept here in the hope that it
                     will become useless once more of the compiler is
                     converted to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }
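
              /* E.g. if B ends in an unconditional jump to C and C has
                 no other predecessors, merge_blocks_move can drop the
                 jump and splice the two blocks together, moving one of
                 them when they are not adjacent in the insn chain.  */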

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (single_succ_p (b)
                  && single_succ (b) != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
                                                     single_succ (b),
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }
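
              /* A non-trivial jump here is typically a conditional
                 jump both of whose arms now lead to the same place;
                 try_redirect_by_replacing_jump replaces it with a
                 simplejump, or removes it entirely when the target
                 can be reached by fallthru.  */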

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
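
/* find_unreachable_blocks marks everything reachable from the entry
   block with BB_REACHABLE; the loop above deletes the remainder, and
   tidy_fallthru_edges then cleans up jumps that have become jumps to
   the immediately following block.  */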

/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (single_succ_p (bb)
          && can_merge_blocks_p (bb, single_succ (bb)))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, single_succ (bb));
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}
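
/* E.g. a chain A -> B -> C in which every edge is its source's only
   successor and its destination's only predecessor collapses into a
   single block; the `continue' above retries the merged block, so a
   whole chain folds in one pass.  (Hypothetical block names.)  */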
\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks ();
      changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead
             code removal, which in turn may introduce more
             opportunities for cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & CLEANUP_NO_INSN_DEL)
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
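
/* Illustrative use only (not code from this file): a pass that has
   simplified or removed edges might invoke

     if (cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP))
       ...;

   with mode bits chosen from the CLEANUP_* flags in basic-block.h;
   the particular combination shown is a hypothetical example.  */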