/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification (see the sketch
     following this comment)
   - Basic block merging.  */
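
/* Illustrative sketch (not part of the original sources): the
   jump-around-jump case handled by try_simplify_condjump below.
   Given a conditional branch whose fallthru block contains nothing
   but an unconditional jump,

	if (cond) goto L2;	      if (!cond) goto L1;
	goto L1;		 =>   L2: ...
     L2: ...

   the condition is inverted with invert_jump, the forwarder block
   holding "goto L1" is deleted, and the edges are redirected so that
   the old branch edge becomes the fallthru edge.  */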

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
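
/* Illustrative sketch (not part of the original sources): these helpers
   keep BB_FORWARDER_BLOCK in sync whenever the CFG is edited, e.g.

     basic_block bb = redirect_edge_and_branch_force (e, target);
     notice_new_block (bb);	   /+ NULL-safe; sets the flag if needed.  +/
     update_forwarder_flag (e->src);

   FORWARDER_BLOCK_P then gives a cheap flag test instead of re-running
   the forwarder_block_p predicate on every query.  */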
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
\f
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
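
/* Illustrative sketch (not part of the original sources): thread_jump
   below drives mark_effect over every side effect of an insn pattern,
   handling multi-effect PARALLELs element by element:

     if (GET_CODE (pat) == PARALLEL)
       for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
	 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
     else
       failed |= mark_effect (pat, nonequal);

   A true result from any element means the effect could not be modeled
   and the block cannot be proven to be a NOOP.  */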

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
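
/* Illustrative sketch (not part of the original sources): as a
   for_each_rtx callback, the function above is applied to every
   subexpression of an rtx, as thread_jump does for the second branch
   condition:

     if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
       goto failed_exit;

   A nonzero return from the callback stops the walk and is propagated
   by for_each_rtx.  */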

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
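
/* Illustrative sketch (not part of the original sources): the CFG shape
   that thread_jump recognizes.  If E->src ends in "if (x == 0)" and
   block B also ends in "if (x == 0)", with no side effects in B, then
   the outcome of B's branch is already decided when B is entered via E:

	if (x == 0) goto B; ...		E taken means x == 0,
     B: if (x == 0) goto C; ...		so B always continues to C.

   The caller (try_forward_edges) can then redirect E straight to C.
   comparison_dominates_p also handles conditions that merely imply one
   another rather than being identical.  */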
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional
	 branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    b->flags |= BB_FORWARDER_BLOCK;

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed the edge, as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
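
/* Illustrative sketch (not part of the original sources): a forwarder
   chain as walked by the loop above.  Each forwarder block contains at
   most a label and a jump, so an edge into B1 can be retargeted to B3
   directly:

     B0 --e--> B1 (goto B2) --> B2 (goto B3) --> B3

   The counter bounded by n_basic_blocks guarantees termination even if
   the forwarders happen to form a cycle.  */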
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure not to munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
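
/* Illustrative sketch (not part of the original sources): the decision
   order in merge_blocks_move.  Given B -> C with no fallthru between
   them,

     1. if C has no outgoing fallthru, move C up after B;
     2. otherwise, if B's incoming fallthru can be turned into an
	explicit jump with force_nonfallthru, move B down before C;
     3. otherwise give up and return NULL.

   Either move leaves the blocks adjacent, so plain merge_blocks can
   combine them.  */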
\f

/* Remove the memory attributes of the MEM expressions X and Y
   if they are not equal.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* The two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
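
/* Illustrative sketch (not part of the original sources): when two MEMs
   about to be unified by crossjumping disagree on their attributes, the
   function above keeps only what is provably safe for both.  E.g. for

     MEM x: alias set 2, size 8, align 64
     MEM y: alias set 2, size 4, align 32

   both end up with alias set 2, size 8 (the maximum, a conservative
   bound) and align 32 (the minimum), so the shared insn is valid on
   either original path.  */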

/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
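
/* Illustrative sketch (not part of the original sources): the REG_EQUAL
   path above lets two insns match even when their SET_SRCs differ
   textually.  For instance,

     (set (reg 100) (plus (reg 101) (const_int 4)))  ;; REG_EQUAL (symbol_ref "a")
     (set (reg 100) (reg 102))			     ;; REG_EQUAL (symbol_ref "a")

   both compute the same value, so validate_change tentatively replaces
   each source with the note's value and the patterns are re-compared;
   cancel_changes undoes the substitution if they still differ.  */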
\f
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that is equivalent.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
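
/* Illustrative sketch (not part of the original sources): the caller,
   try_crossjump_to_edge, uses the returned length and start positions
   to decide whether the shared tail is worth extracting:

     nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
     if (nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS)
	 && newpos1 != BB_HEAD (src1))
       return false;

   i.e. short matches are accepted only when SRC1 matched in its
   entirety, since then SRC1 itself can be removed.  */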

/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks, which means that we will
     only have one branch prediction bit to work with.  Thus we require the
     existing branches to have probabilities that are roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
	prob2 = b2->probability;
      else
	/* Do not use f2 probability as f2 may be forwarded.  */
	prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
	 This rules out two well-predicted branches with opposite
	 outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
		     bb1->index, bb2->index, b1->probability, prob2);

	  match = false;
	}
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
	     bb1->index, bb2->index);

  if (!match)
    cancel_changes (0);
  return match;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain
   unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks, which means that we
	 will only have one branch prediction bit to work with.  Thus we
	 require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps at the end of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
	&& tablejump_p (BB_END (bb2), &label2, &table2)
	&& GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
	/* The labels should never be the same rtx.  If they really are the
	   same, the jump tables are the same too.  So disable crossjumping
	   of blocks BB1 and BB2 because when deleting the common insns in
	   the end of BB1 by delete_basic_block () the jump table would be
	   deleted too.  */
	/* If LABEL2 is referenced in BB1->END do not do anything
	   because we would lose information when replacing
	   LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	  {
	    /* Set IDENTICAL to true when the tables are identical.  */
	    bool identical = false;
	    rtx p1, p2;

	    p1 = PATTERN (table1);
	    p2 = PATTERN (table2);
	    if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
	      {
		identical = true;
	      }
	    else if (GET_CODE (p1) == ADDR_DIFF_VEC
		     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
	      {
		int i;

		identical = true;
		for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		    identical = false;
	      }

	    if (identical)
	      {
		replace_label_data rr;
		bool match;

		/* Temporarily replace references to LABEL1 with LABEL2
		   in BB1->END so that we could compare the instructions.  */
		rr.r1 = label1;
		rr.r2 = label2;
		rr.update_label_nuses = false;
		for_each_rtx (&BB_END (bb1), replace_label, &rr);

		match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		if (dump_file && match)
		  fprintf (dump_file,
			   "Tablejumps in bb %i and %i match.\n",
			   bb1->index, bb2->index);

		/* Set the original label in BB1->END because when deleting
		   a block whose end is a tablejump, the tablejump referenced
		   from the instruction is deleted too.  */
		rr.r1 = label2;
		rr.r2 = label1;
		for_each_rtx (&BB_END (bb1), replace_label, &rr);

		return match;
	      }
	  }
	return false;
      }
  }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  They are required for
     the RTL version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
	d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
	{
	  basic_block d2 = e2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d1 == d2)
	    break;
	}

      if (!e2)
	return false;
    }

  return true;
}

/* Return true if basic block BB has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting a preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
	&& tablejump_p (BB_END (src2), &label2, &table2)
	&& label1 != label2)
      {
	replace_label_data rr;
	rtx insn;

	/* Replace references to LABEL1 with LABEL2.  */
	rr.r1 = label1;
	rr.r2 = label2;
	rr.update_label_nuses = true;
	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	  {
	    /* Do not replace the label in SRC1->END because when deleting
	       a block whose end is a tablejump, the tablejump referenced
	       from the instruction is deleted too.  */
	    if (insn != BB_END (src1))
	      for_each_rtx (&insn, replace_label, &rr);
	  }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from),
				  redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
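
/* Illustrative sketch (not part of the original sources): the net effect
   of a successful crossjump.  With SRC1 and SRC2 both ending in the same
   three insns and branching to DEST,

     SRC1: a; X; Y; Z; goto DEST	 SRC1: a; goto REDIRECT_TO
     SRC2: b; X; Y; Z; goto DEST    =>	 SRC2: b	(falls into)
					 REDIRECT_TO: X; Y; Z; goto DEST

   SRC2 is split before the matched tail, SRC1's tail is deleted, and
   SRC1 is redirected to the block holding the single shared copy.  */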
1813
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
1817
1818 static bool
1819 try_crossjump_bb (int mode, basic_block bb)
1820 {
1821 edge e, e2, fallthru;
1822 bool changed;
1823 unsigned max, ix, ix2;
1824 basic_block ev, ev2;
1825 edge_iterator ei;
1826
  /* Nothing to do if there are not at least two incoming edges.  */
1828 if (EDGE_COUNT (bb->preds) < 2)
1829 return false;
1830
1831 /* Don't crossjump if this block ends in a computed jump,
1832 unless we are optimizing for size. */
1833 if (!optimize_size
1834 && bb != EXIT_BLOCK_PTR
1835 && computed_jump_p (BB_END (bb)))
1836 return false;
1837
1838 /* If we are partitioning hot/cold basic blocks, we don't want to
1839 mess up unconditional or indirect jumps that cross between hot
1840 and cold sections.
1841
1842 Basic block partitioning may result in some jumps that appear to
1843 be optimizable (or blocks that appear to be mergeable), but which really
1844 must be left untouched (they are required to make it safely across
1845 partition boundaries). See the comments at the top of
1846 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1847
  if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
      != BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1851 return false;
1852
1853 /* It is always cheapest to redirect a block that ends in a branch to
1854 a block that falls through into BB, as that adds no branches to the
1855 program. We'll try that combination first. */
1856 fallthru = NULL;
1857 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1858
1859 if (EDGE_COUNT (bb->preds) > max)
1860 return false;
1861
1862 FOR_EACH_EDGE (e, ei, bb->preds)
1863 {
1864 if (e->flags & EDGE_FALLTHRU)
1865 fallthru = e;
1866 }
1867
1868 changed = false;
1869 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1870 {
1871 e = EDGE_PRED (ev, ix);
1872 ix++;
1873
1874 /* As noted above, first try with the fallthru predecessor. */
1875 if (fallthru)
1876 {
1877 /* Don't combine the fallthru edge into anything else.
1878 If there is a match, we'll do it the other way around. */
1879 if (e == fallthru)
1880 continue;
1881 /* If nothing changed since the last attempt, there is nothing
1882 we can do. */
1883 if (!first_pass
1884 && (!(e->src->flags & BB_DIRTY)
1885 && !(fallthru->src->flags & BB_DIRTY)))
1886 continue;
1887
1888 if (try_crossjump_to_edge (mode, e, fallthru))
1889 {
1890 changed = true;
1891 ix = 0;
1892 ev = bb;
1893 continue;
1894 }
1895 }
1896
1897 /* Non-obvious work limiting check: Recognize that we're going
1898 to call try_crossjump_bb on every basic block. So if we have
1899 two blocks with lots of outgoing edges (a switch) and they
1900 share lots of common destinations, then we would do the
1901 cross-jump check once for each common destination.
1902
	 Now, if the blocks actually are cross-jump candidates, then all
	 of their destinations will be shared, which means that we only
	 need to check them for cross-jump candidacy once.  We can
	 eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check only when the edge in question is the
	 first successor of its source block A.  */
1909 if (EDGE_SUCC (e->src, 0) != e)
1910 continue;
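
      /* A hypothetical illustration of the above: suppose blocks A and B
	 both end in switches and all of their destinations X, Y and Z are
	 shared.  try_crossjump_bb runs on X, Y and Z in turn, so without
	 this check the pair (A, B) would be examined from each of the
	 three destinations.  Requiring E to be A's first successor edge
	 means the pair is examined only from the destination that edge
	 reaches.  */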
1911
1912 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1913 {
1914 e2 = EDGE_PRED (ev2, ix2);
1915 ix2++;
1916
1917 if (e2 == e)
1918 continue;
1919
1920 /* We've already checked the fallthru edge above. */
1921 if (e2 == fallthru)
1922 continue;
1923
1924 /* The "first successor" check above only prevents multiple
1925 checks of crossjump(A,B). In order to prevent redundant
1926 checks of crossjump(B,A), require that A be the block
1927 with the lowest index. */
1928 if (e->src->index > e2->src->index)
1929 continue;
1930
1931 /* If nothing changed since the last attempt, there is nothing
1932 we can do. */
1933 if (!first_pass
1934 && (!(e->src->flags & BB_DIRTY)
1935 && !(e2->src->flags & BB_DIRTY)))
1936 continue;
1937
1938 if (try_crossjump_to_edge (mode, e, e2))
1939 {
1940 changed = true;
1941 ev2 = bb;
1942 ix = 0;
1943 break;
1944 }
1945 }
1946 }
1947
1948 return changed;
1949 }
1950
/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return true if any changes were made.  */
1953
1954 static bool
1955 try_optimize_cfg (int mode)
1956 {
1957 bool changed_overall = false;
1958 bool changed;
1959 int iterations = 0;
1960 basic_block bb, b, next;
1961
1962 if (mode & CLEANUP_CROSSJUMP)
1963 add_noreturn_fake_exit_edges ();
1964
1965 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1966 clear_bb_flags ();
1967
1968 FOR_EACH_BB (bb)
1969 update_forwarder_flag (bb);
1970
1971 if (! targetm.cannot_modify_jumps_p ())
1972 {
1973 first_pass = true;
1974 /* Attempt to merge blocks as made possible by edge removal. If
1975 a block has only one successor, and the successor has only
1976 one predecessor, they may be combined. */
1977 do
1978 {
1979 changed = false;
1980 iterations++;
1981
1982 if (dump_file)
1983 fprintf (dump_file,
1984 "\n\ntry_optimize_cfg iteration %i\n\n",
1985 iterations);
1986
1987 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1988 {
1989 basic_block c;
1990 edge s;
1991 bool changed_here = false;
1992
1993 /* Delete trivially dead basic blocks. */
1994 while (EDGE_COUNT (b->preds) == 0)
1995 {
1996 c = b->prev_bb;
1997 if (dump_file)
1998 fprintf (dump_file, "Deleting block %i.\n",
1999 b->index);
2000
2001 delete_basic_block (b);
2002 if (!(mode & CLEANUP_CFGLAYOUT))
2003 changed = true;
2004 b = c;
2005 }
2006
2007 /* Remove code labels no longer used. */
2008 if (single_pred_p (b)
2009 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2010 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2011 && LABEL_P (BB_HEAD (b))
2012 /* If the previous block ends with a branch to this
2013 block, we can't delete the label. Normally this
2014 is a condjump that is yet to be simplified, but
2015 if CASE_DROPS_THRU, this can be a tablejump with
2016 some element going to the same place as the
2017 default (fallthru). */
2018 && (single_pred (b) == ENTRY_BLOCK_PTR
2019 || !JUMP_P (BB_END (single_pred (b)))
2020 || ! label_is_jump_target_p (BB_HEAD (b),
2021 BB_END (single_pred (b)))))
2022 {
2023 rtx label = BB_HEAD (b);
2024
2025 delete_insn_chain (label, label);
	      /* In case the label is undeletable, move it after the
		 BASIC_BLOCK note.  */
2028 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2029 {
2030 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2031
2032 reorder_insns_nobb (label, label, bb_note);
2033 BB_HEAD (b) = bb_note;
2034 }
2035 if (dump_file)
2036 fprintf (dump_file, "Deleted label in block %i.\n",
2037 b->index);
2038 }
2039
2040 /* If we fall through an empty block, we can remove it. */
2041 if (!(mode & CLEANUP_CFGLAYOUT)
2042 && single_pred_p (b)
2043 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2044 && !LABEL_P (BB_HEAD (b))
2045 && FORWARDER_BLOCK_P (b)
	      /* Note that forwarder_block_p being true ensures that
		 there is a successor for this block.  */
2048 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2049 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2050 {
2051 if (dump_file)
2052 fprintf (dump_file,
2053 "Deleting fallthru block %i.\n",
2054 b->index);
2055
2056 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2057 redirect_edge_succ_nodup (single_pred_edge (b),
2058 single_succ (b));
2059 delete_basic_block (b);
2060 changed = true;
2061 b = c;
2062 }
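
	  /* A sketch of the transformation above, with hypothetical
	     blocks: if B1 falls through into an empty forwarder B2,
	     which falls through into B3, the edge B1->B2 is redirected
	     straight to B3 and B2 is deleted, leaving a plain fallthru
	     B1->B3.  */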
2063
2064 if (single_succ_p (b)
2065 && (s = single_succ_edge (b))
2066 && !(s->flags & EDGE_COMPLEX)
2067 && (c = s->dest) != EXIT_BLOCK_PTR
2068 && single_pred_p (c)
2069 && b != c)
2070 {
	      /* When not in cfg_layout mode, use code aware of reordering
		 INSNs.  This code possibly creates new basic blocks, so it
		 does not fit the merge_blocks interface and is kept here in
		 the hope that it will become useless once more of the
		 compiler is transformed to use cfg_layout mode.  */
2076
2077 if ((mode & CLEANUP_CFGLAYOUT)
2078 && can_merge_blocks_p (b, c))
2079 {
2080 merge_blocks (b, c);
2081 update_forwarder_flag (b);
2082 changed_here = true;
2083 }
2084 else if (!(mode & CLEANUP_CFGLAYOUT)
2085 /* If the jump insn has side effects,
2086 we can't kill the edge. */
2087 && (!JUMP_P (BB_END (b))
2088 || (reload_completed
2089 ? simplejump_p (BB_END (b))
2090 : (onlyjump_p (BB_END (b))
2091 && !tablejump_p (BB_END (b),
2092 NULL, NULL))))
2093 && (next = merge_blocks_move (s, b, c, mode)))
2094 {
2095 b = next;
2096 changed_here = true;
2097 }
2098 }
2099
2100 /* Simplify branch over branch. */
2101 if ((mode & CLEANUP_EXPENSIVE)
2102 && !(mode & CLEANUP_CFGLAYOUT)
2103 && try_simplify_condjump (b))
2104 changed_here = true;
2105
2106 /* If B has a single outgoing edge, but uses a
2107 non-trivial jump instruction without side-effects, we
2108 can either delete the jump entirely, or replace it
2109 with a simple unconditional jump. */
2110 if (single_succ_p (b)
2111 && single_succ (b) != EXIT_BLOCK_PTR
2112 && onlyjump_p (BB_END (b))
2113 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2114 && try_redirect_by_replacing_jump (single_succ_edge (b),
2115 single_succ (b),
2116 (mode & CLEANUP_CFGLAYOUT) != 0))
2117 {
2118 update_forwarder_flag (b);
2119 changed_here = true;
2120 }
2121
2122 /* Simplify branch to branch. */
2123 if (try_forward_edges (mode, b))
2124 changed_here = true;
2125
2126 /* Look for shared code between blocks. */
2127 if ((mode & CLEANUP_CROSSJUMP)
2128 && try_crossjump_bb (mode, b))
2129 changed_here = true;
2130
2131 /* Don't get confused by the index shift caused by
2132 deleting blocks. */
2133 if (!changed_here)
2134 b = b->next_bb;
2135 else
2136 changed = true;
2137 }
2138
2139 if ((mode & CLEANUP_CROSSJUMP)
2140 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2141 changed = true;
2142
2143 #ifdef ENABLE_CHECKING
2144 if (changed)
2145 verify_flow_info ();
2146 #endif
2147
2148 changed_overall |= changed;
2149 first_pass = false;
2150 }
2151 while (changed);
2152 }
2153
2154 if (mode & CLEANUP_CROSSJUMP)
2155 remove_fake_exit_edges ();
2156
2157 FOR_ALL_BB (b)
2158 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2159
2160 return changed_overall;
2161 }
2162 \f
2163 /* Delete all unreachable basic blocks. */
2164
2165 bool
2166 delete_unreachable_blocks (void)
2167 {
2168 bool changed = false;
2169 basic_block b, next_bb;
2170
2171 find_unreachable_blocks ();
2172
2173 /* Delete all unreachable basic blocks. */
2174
2175 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2176 {
2177 next_bb = b->next_bb;
2178
2179 if (!(b->flags & BB_REACHABLE))
2180 {
2181 delete_basic_block (b);
2182 changed = true;
2183 }
2184 }
2185
2186 if (changed)
2187 tidy_fallthru_edges ();
2188 return changed;
2189 }
2190
/* Merge sequential blocks if possible.  */
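/* For instance (with hypothetical blocks), a chain B1 -> B2 -> B3 in which
   each edge is both its source's only successor and its destination's only
   predecessor collapses into a single block in one pass, because the loop
   below retries from the merged block instead of advancing.  */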
2192
2193 bool
2194 merge_seq_blocks (void)
2195 {
2196 basic_block bb;
2197 bool changed = false;
2198
2199 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2200 {
2201 if (single_succ_p (bb)
2202 && can_merge_blocks_p (bb, single_succ (bb)))
2203 {
2204 /* Merge the blocks and retry. */
2205 merge_blocks (bb, single_succ (bb));
2206 changed = true;
2207 continue;
2208 }
2209
2210 bb = bb->next_bb;
2211 }
2212
2213 return changed;
2214 }
2215 \f
/* Tidy the CFG by deleting unreachable code and performing the cleanups
   selected by MODE.  */
2217
2218 bool
2219 cleanup_cfg (int mode)
2220 {
2221 bool changed = false;
2222
2223 /* Set the cfglayout mode flag here. We could update all the callers
2224 but that is just inconvenient, especially given that we eventually
2225 want to have cfglayout mode as the default. */
2226 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2227 mode |= CLEANUP_CFGLAYOUT;
2228
2229 timevar_push (TV_CLEANUP_CFG);
2230 if (delete_unreachable_blocks ())
2231 {
2232 changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
2235 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2236 && !reload_completed)
2237 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2238 }
2239
2240 compact_blocks ();
2241
2242 while (try_optimize_cfg (mode))
2243 {
      delete_unreachable_blocks ();
      changed = true;
2245 if (mode & CLEANUP_UPDATE_LIFE)
2246 {
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal, which in turn may introduce more opportunities for
	     cleaning up the CFG.  */
2250 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2251 PROP_DEATH_NOTES
2252 | PROP_SCAN_DEAD_CODE
2253 | PROP_KILL_DEAD_CODE
2254 | ((mode & CLEANUP_LOG_LINKS)
2255 ? PROP_LOG_LINKS : 0)))
2256 break;
2257 }
2258 else if (!(mode & CLEANUP_NO_INSN_DEL)
2259 && (mode & CLEANUP_EXPENSIVE)
2260 && !reload_completed)
2261 {
2262 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2263 break;
2264 }
2265 else
2266 break;
2267
2268 /* Don't call delete_dead_jumptables in cfglayout mode, because
2269 that function assumes that jump tables are in the insns stream.
2270 But we also don't _have_ to delete dead jumptables in cfglayout
2271 mode because we shouldn't even be looking at things that are
2272 not in a basic block. Dead jumptables are cleaned up when
2273 going out of cfglayout mode. */
2274 if (!(mode & CLEANUP_CFGLAYOUT))
2275 delete_dead_jumptables ();
2276 }
2277
2278 timevar_pop (TV_CLEANUP_CFG);
2279
2280 return changed;
2281 }
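
/* A minimal usage sketch, with a flag combination chosen purely for
   illustration: a pass that has just removed or redirected edges might call

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   to merge the newly mergeable blocks and share any identical tails the
   change exposed.  The CLEANUP_* flags are declared in basic-block.h.  */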
2282 \f
2283 static unsigned int
2284 rest_of_handle_jump (void)
2285 {
2286 delete_unreachable_blocks ();
2287
2288 if (cfun->tail_call_emit)
2289 fixup_tail_calls ();
2290 return 0;
2291 }
2292
2293 struct tree_opt_pass pass_jump =
2294 {
2295 "sibling", /* name */
2296 NULL, /* gate */
2297 rest_of_handle_jump, /* execute */
2298 NULL, /* sub */
2299 NULL, /* next */
2300 0, /* static_pass_number */
2301 TV_JUMP, /* tv_id */
2302 0, /* properties_required */
2303 0, /* properties_provided */
2304 0, /* properties_destroyed */
2305 TODO_ggc_collect, /* todo_flags_start */
2306 TODO_dump_func |
2307 TODO_verify_flow, /* todo_flags_finish */
2308 'i' /* letter */
2309 };
2310
2311
2312 static unsigned int
2313 rest_of_handle_jump2 (void)
2314 {
2315 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2316 reg_scan (get_insns (), max_reg_num ());
2317 if (dump_file)
2318 dump_flow_info (dump_file, dump_flags);
2319 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2320 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2321
2322 if (optimize)
2323 cleanup_cfg (CLEANUP_EXPENSIVE);
2324
2325 /* Jump optimization, and the removal of NULL pointer checks, may
2326 have reduced the number of instructions substantially. CSE, and
2327 future passes, allocate arrays whose dimensions involve the
2328 maximum instruction UID, so if we can reduce the maximum UID
2329 we'll save big on memory. */
2330 renumber_insns ();
2331 return 0;
2332 }
2333
2334
2335 struct tree_opt_pass pass_jump2 =
2336 {
2337 "jump", /* name */
2338 NULL, /* gate */
2339 rest_of_handle_jump2, /* execute */
2340 NULL, /* sub */
2341 NULL, /* next */
2342 0, /* static_pass_number */
2343 TV_JUMP, /* tv_id */
2344 0, /* properties_required */
2345 0, /* properties_provided */
2346 0, /* properties_destroyed */
2347 TODO_ggc_collect, /* todo_flags_start */
2348 TODO_dump_func, /* todo_flags_finish */
2349 'j' /* letter */
2350 };
2351
2352