1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This file contains the control flow optimizer.  The main entry point is
23    cleanup_cfg.  The following optimizations are performed:
24
25 - Unreachable blocks removal
26    - Edge forwarding (an edge to a forwarder block is forwarded to its
27      successor.  Simplification of the branch instruction is performed by
28      the underlying infrastructure so the branch can be converted to a
29      simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
33
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
58 #include "dbgcnt.h"
59
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
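/* Editorial note (not in the original sources): a "forwarder block" is
   an otherwise empty basic block that only transfers control, e.g.

       L1:
         goto L2;

   forwarder_block_p recognizes such blocks; the result is cached in
   the BB_FORWARDER_BLOCK flag so the test stays cheap inside the
   optimization loops below.  */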
61
62 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
63 static bool first_pass;
64
65 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
66 static bool crossjumps_occured;
67
68 static bool try_crossjump_to_edge (int, edge, edge);
69 static bool try_crossjump_bb (int, basic_block);
70 static bool outgoing_edges_match (int, basic_block, basic_block);
71 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
72 static bool old_insns_match_p (int, rtx, rtx);
73
74 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
75 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
76 static bool try_optimize_cfg (int);
77 static bool try_simplify_condjump (basic_block);
78 static bool try_forward_edges (int, basic_block);
79 static edge thread_jump (edge, basic_block);
80 static bool mark_effect (rtx, bitmap);
81 static void notice_new_block (basic_block);
82 static void update_forwarder_flag (basic_block);
83 static int mentions_nonequal_regs (rtx *, void *);
84 static void merge_memattrs (rtx, rtx);
85 \f
86 /* Set flags for a newly created block. */
87
88 static void
89 notice_new_block (basic_block bb)
90 {
91 if (!bb)
92 return;
93
94 if (forwarder_block_p (bb))
95 bb->flags |= BB_FORWARDER_BLOCK;
96 }
97
98 /* Recompute the forwarder flag after a block has been modified. */
99
100 static void
101 update_forwarder_flag (basic_block bb)
102 {
103 if (forwarder_block_p (bb))
104 bb->flags |= BB_FORWARDER_BLOCK;
105 else
106 bb->flags &= ~BB_FORWARDER_BLOCK;
107 }
108 \f
109 /* Simplify a conditional jump around an unconditional jump.
110 Return true if something changed. */
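/* Editorial sketch of the transformation (not part of the original
   comment):

         if (cond) goto L2;            if (!cond) goto L3;
         goto L3;               =>   L2: ...
       L2: ...

   The conditional branch is inverted to target L3 directly, and the
   forwarder block holding "goto L3" is deleted.  */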
111
112 static bool
113 try_simplify_condjump (basic_block cbranch_block)
114 {
115 basic_block jump_block, jump_dest_block, cbranch_dest_block;
116 edge cbranch_jump_edge, cbranch_fallthru_edge;
117 rtx cbranch_insn;
118
119 /* Verify that there are exactly two successors. */
120 if (EDGE_COUNT (cbranch_block->succs) != 2)
121 return false;
122
123 /* Verify that we've got a normal conditional branch at the end
124 of the block. */
125 cbranch_insn = BB_END (cbranch_block);
126 if (!any_condjump_p (cbranch_insn))
127 return false;
128
129 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131
132 /* The next block must not have multiple predecessors, must not
133 be the last block in the function, and must contain just the
134 unconditional jump. */
135 jump_block = cbranch_fallthru_edge->dest;
136 if (!single_pred_p (jump_block)
137 || jump_block->next_bb == EXIT_BLOCK_PTR
138 || !FORWARDER_BLOCK_P (jump_block))
139 return false;
140 jump_dest_block = single_succ (jump_block);
141
142 /* If we are partitioning hot/cold basic blocks, we don't want to
143 mess up unconditional or indirect jumps that cross between hot
144 and cold sections.
145
146 Basic block partitioning may result in some jumps that appear to
147 be optimizable (or blocks that appear to be mergeable), but which really
148 must be left untouched (they are required to make it safely across
149 partition boundaries). See the comments at the top of
150 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
151
152 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153 || (cbranch_jump_edge->flags & EDGE_CROSSING))
154 return false;
155
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block = cbranch_jump_edge->dest;
159
160 if (cbranch_dest_block == EXIT_BLOCK_PTR
161 || !can_fallthru (jump_block, cbranch_dest_block))
162 return false;
163
164 /* Invert the conditional branch. */
165 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
166 return false;
167
168 if (dump_file)
169 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
170 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
171
172 /* Success. Update the CFG to match. Note that after this point
173 the edge variable names appear backwards; the redirection is done
174 this way to preserve edge profile data. */
175 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
176 cbranch_dest_block);
177 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
178 jump_dest_block);
179 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
180 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
181 update_br_prob_note (cbranch_block);
182
183 /* Delete the block with the unconditional jump, and clean up the mess. */
184 delete_basic_block (jump_block);
185 tidy_fallthru_edge (cbranch_jump_edge);
186 update_forwarder_flag (cbranch_block);
187
188 return true;
189 }
190 \f
191 /* Attempt to prove that the operation is a NOOP using CSElib, or mark the
192    effect on the register.  Used by jump threading. */
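/* Hedged example (editorial): for (set (reg A) (reg B)) where cselib
   has proved that both registers currently hold the same value, the
   SET is a NOOP and nothing is marked.  A SET whose destination is
   not a register makes us give up (return true), since e.g. a store
   to memory could change something the final condition depends on.  */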
193
194 static bool
195 mark_effect (rtx exp, regset nonequal)
196 {
197 int regno;
198 rtx dest;
199 switch (GET_CODE (exp))
200 {
201 /* In case we do clobber the register, mark it as equal, as we know the
202    value is dead so it doesn't have to match. */
203 case CLOBBER:
204 if (REG_P (XEXP (exp, 0)))
205 {
206 dest = XEXP (exp, 0);
207 regno = REGNO (dest);
208 CLEAR_REGNO_REG_SET (nonequal, regno);
209 if (regno < FIRST_PSEUDO_REGISTER)
210 {
211 int n = hard_regno_nregs[regno][GET_MODE (dest)];
212 while (--n > 0)
213 CLEAR_REGNO_REG_SET (nonequal, regno + n);
214 }
215 }
216 return false;
217
218 case SET:
219 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
220 return false;
221 dest = SET_DEST (exp);
222 if (dest == pc_rtx)
223 return false;
224 if (!REG_P (dest))
225 return true;
226 regno = REGNO (dest);
227 SET_REGNO_REG_SET (nonequal, regno);
228 if (regno < FIRST_PSEUDO_REGISTER)
229 {
230 int n = hard_regno_nregs[regno][GET_MODE (dest)];
231 while (--n > 0)
232 SET_REGNO_REG_SET (nonequal, regno + n);
233 }
234 return false;
235
236 default:
237 return false;
238 }
239 }
240
241 /* Return nonzero if X is a register contained in regset DATA.
242    Called via for_each_rtx. */
243 static int
244 mentions_nonequal_regs (rtx *x, void *data)
245 {
246 regset nonequal = (regset) data;
247 if (REG_P (*x))
248 {
249 int regno;
250
251 regno = REGNO (*x);
252 if (REGNO_REG_SET_P (nonequal, regno))
253 return 1;
254 if (regno < FIRST_PSEUDO_REGISTER)
255 {
256 int n = hard_regno_nregs[regno][GET_MODE (*x)];
257 while (--n > 0)
258 if (REGNO_REG_SET_P (nonequal, regno + n))
259 return 1;
260 }
261 }
262 return 0;
263 }
264 /* Attempt to prove that the basic block B will have no side effects and
265    will always continue along the same edge if reached via E.  Return that
266    edge if it exists, NULL otherwise. */
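/* Editorial example of a provable thread: if E->SRC ends in
   "if (a > 0) ..." and B ends in "if (a >= 0) ...", then along E the
   second test is already decided (GT dominates GE, see
   comparison_dominates_p), so control entering B via E always leaves
   through the same edge and B may be bypassed, provided B computes
   nothing with side effects.  */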
267
268 static edge
269 thread_jump (edge e, basic_block b)
270 {
271 rtx set1, set2, cond1, cond2, insn;
272 enum rtx_code code1, code2, reversed_code2;
273 bool reverse1 = false;
274 unsigned i;
275 regset nonequal;
276 bool failed = false;
277 reg_set_iterator rsi;
278
279 if (b->flags & BB_NONTHREADABLE_BLOCK)
280 return NULL;
281
282 /* At the moment we handle only conditional jumps, but later we may
283    want to extend this code to tablejumps and others. */
284 if (EDGE_COUNT (e->src->succs) != 2)
285 return NULL;
286 if (EDGE_COUNT (b->succs) != 2)
287 {
288 b->flags |= BB_NONTHREADABLE_BLOCK;
289 return NULL;
290 }
291
292 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
293 if (!any_condjump_p (BB_END (e->src)))
294 return NULL;
295
296 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
297 {
298 b->flags |= BB_NONTHREADABLE_BLOCK;
299 return NULL;
300 }
301
302 set1 = pc_set (BB_END (e->src));
303 set2 = pc_set (BB_END (b));
304 if (((e->flags & EDGE_FALLTHRU) != 0)
305 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
306 reverse1 = true;
307
308 cond1 = XEXP (SET_SRC (set1), 0);
309 cond2 = XEXP (SET_SRC (set2), 0);
310 if (reverse1)
311 code1 = reversed_comparison_code (cond1, BB_END (e->src));
312 else
313 code1 = GET_CODE (cond1);
314
315 code2 = GET_CODE (cond2);
316 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
317
318 if (!comparison_dominates_p (code1, code2)
319 && !comparison_dominates_p (code1, reversed_code2))
320 return NULL;
321
322 /* Ensure that the comparison operators are equivalent.
323    ??? This is far too pessimistic.  We should allow swapped operands,
324    different CCmodes, or, for example, interval comparisons that
325    dominate even when the operands are not equivalent. */
326 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
327 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
328 return NULL;
329
330 /* Short-circuit cases where block B contains some side effects, as we can't
331    safely bypass it. */
332 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
333 insn = NEXT_INSN (insn))
334 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
335 {
336 b->flags |= BB_NONTHREADABLE_BLOCK;
337 return NULL;
338 }
339
340 cselib_init (false);
341
342 /* First process all values computed in the source basic block. */
343 for (insn = NEXT_INSN (BB_HEAD (e->src));
344 insn != NEXT_INSN (BB_END (e->src));
345 insn = NEXT_INSN (insn))
346 if (INSN_P (insn))
347 cselib_process_insn (insn);
348
349 nonequal = BITMAP_ALLOC (NULL);
350 CLEAR_REG_SET (nonequal);
351
352 /* Now assume that we've continued via the edge E to B and continue
353    processing as if it were the same basic block.
354    Our goal is to prove that the whole block is a NOOP. */
355
356 for (insn = NEXT_INSN (BB_HEAD (b));
357 insn != NEXT_INSN (BB_END (b)) && !failed;
358 insn = NEXT_INSN (insn))
359 {
360 if (INSN_P (insn))
361 {
362 rtx pat = PATTERN (insn);
363
364 if (GET_CODE (pat) == PARALLEL)
365 {
366 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
367 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
368 }
369 else
370 failed |= mark_effect (pat, nonequal);
371 }
372
373 cselib_process_insn (insn);
374 }
375
376 /* Later we should clear nonequal of dead registers.  So far we don't
377    have liveness information in cfg_cleanup. */
378 if (failed)
379 {
380 b->flags |= BB_NONTHREADABLE_BLOCK;
381 goto failed_exit;
382 }
383
384 /* cond2 must not mention any register whose value is not equal to
385    that in the former block. */
386 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
387 goto failed_exit;
388
389 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
390 goto failed_exit;
391
392 BITMAP_FREE (nonequal);
393 cselib_finish ();
394 if ((comparison_dominates_p (code1, code2) != 0)
395 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
396 return BRANCH_EDGE (b);
397 else
398 return FALLTHRU_EDGE (b);
399
400 failed_exit:
401 BITMAP_FREE (nonequal);
402 cselib_finish ();
403 return NULL;
404 }
405 \f
406 /* Attempt to forward edges leaving basic block B.
407 Return true if successful. */
408
409 static bool
410 try_forward_edges (int mode, basic_block b)
411 {
412 bool changed = false;
413 edge_iterator ei;
414 edge e, *threaded_edges = NULL;
415
416 /* If we are partitioning hot/cold basic blocks, we don't want to
417 mess up unconditional or indirect jumps that cross between hot
418 and cold sections.
419
420 Basic block partitioning may result in some jumps that appear to
421 be optimizable (or blocks that appear to be mergeable), but which really
422 must be left untouched (they are required to make it safely across
423 partition boundaries). See the comments at the top of
424 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
425
426 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
427 return false;
428
429 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
430 {
431 basic_block target, first;
432 int counter, goto_locus;
433 bool threaded = false;
434 int nthreaded_edges = 0;
435 bool may_thread = first_pass | df_get_bb_dirty (b);
436
437 /* Skip complex edges because we don't know how to update them.
438
439    Still handle fallthru edges, as we may succeed in forwarding the
440    fallthru edge to the same place as the branch edge of a conditional
441    branch, turning the conditional branch into an unconditional one. */
442 if (e->flags & EDGE_COMPLEX)
443 {
444 ei_next (&ei);
445 continue;
446 }
447
448 target = first = e->dest;
449 counter = NUM_FIXED_BLOCKS;
450 goto_locus = e->goto_locus;
451
452 /* If we are partitioning hot/cold basic blocks, we don't want to mess
453    up jumps that cross between hot/cold sections.
454
455 Basic block partitioning may result in some jumps that appear
456 to be optimizable (or blocks that appear to be mergeable), but which
457 really must be left untouched (they are required to make it safely
458 across partition boundaries). See the comments at the top of
459 bb-reorder.c:partition_hot_cold_basic_blocks for complete
460 details. */
461
462 if (first != EXIT_BLOCK_PTR
463 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
464 return false;
465
466 while (counter < n_basic_blocks)
467 {
468 basic_block new_target = NULL;
469 bool new_target_threaded = false;
470 may_thread |= df_get_bb_dirty (target);
471
472 if (FORWARDER_BLOCK_P (target)
473 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
474 && single_succ (target) != EXIT_BLOCK_PTR)
475 {
476 /* Bypass trivial infinite loops. */
477 new_target = single_succ (target);
478 if (target == new_target)
479 counter = n_basic_blocks;
480 else if (!optimize)
481 {
482 /* When not optimizing, ensure that edges or forwarder
483 blocks with different locus are not optimized out. */
484 int locus = single_succ_edge (target)->goto_locus;
485
486 if (locus && goto_locus && !locator_eq (locus, goto_locus))
487 counter = n_basic_blocks;
488 else if (locus)
489 goto_locus = locus;
490
491 if (INSN_P (BB_END (target)))
492 {
493 locus = INSN_LOCATOR (BB_END (target));
494
495 if (locus && goto_locus
496 && !locator_eq (locus, goto_locus))
497 counter = n_basic_blocks;
498 else if (locus)
499 goto_locus = locus;
500 }
501 }
502 }
503
504 /* Allow threading over at most one edge at a time to simplify
505    updating of probabilities. */
506 else if ((mode & CLEANUP_THREADING) && may_thread)
507 {
508 edge t = thread_jump (e, target);
509 if (t)
510 {
511 if (!threaded_edges)
512 threaded_edges = XNEWVEC (edge, n_basic_blocks);
513 else
514 {
515 int i;
516
517 /* Detect an infinite loop across blocks not
518 including the start block. */
519 for (i = 0; i < nthreaded_edges; ++i)
520 if (threaded_edges[i] == t)
521 break;
522 if (i < nthreaded_edges)
523 {
524 counter = n_basic_blocks;
525 break;
526 }
527 }
528
529 /* Detect an infinite loop across the start block. */
530 if (t->dest == b)
531 break;
532
533 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
534 threaded_edges[nthreaded_edges++] = t;
535
536 new_target = t->dest;
537 new_target_threaded = true;
538 }
539 }
540
541 if (!new_target)
542 break;
543
544 counter++;
545 target = new_target;
546 threaded |= new_target_threaded;
547 }
548
549 if (counter >= n_basic_blocks)
550 {
551 if (dump_file)
552 fprintf (dump_file, "Infinite loop in BB %i.\n",
553 target->index);
554 }
555 else if (target == first)
556 ; /* We didn't do anything. */
557 else
558 {
559 /* Save the values now, as the edge may get removed. */
560 gcov_type edge_count = e->count;
561 int edge_probability = e->probability;
562 int edge_frequency;
563 int n = 0;
564
565 e->goto_locus = goto_locus;
566
567 /* Don't force if target is exit block. */
568 if (threaded && target != EXIT_BLOCK_PTR)
569 {
570 notice_new_block (redirect_edge_and_branch_force (e, target));
571 if (dump_file)
572 fprintf (dump_file, "Conditionals threaded.\n");
573 }
574 else if (!redirect_edge_and_branch (e, target))
575 {
576 if (dump_file)
577 fprintf (dump_file,
578 "Forwarding edge %i->%i to %i failed.\n",
579 b->index, e->dest->index, target->index);
580 ei_next (&ei);
581 continue;
582 }
583
584 /* We successfully forwarded the edge. Now update profile
585 data: for each edge we traversed in the chain, remove
586 the original edge's execution count. */
587 edge_frequency = ((edge_probability * b->frequency
588 + REG_BR_PROB_BASE / 2)
589 / REG_BR_PROB_BASE);
590
591 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
592 b->flags |= BB_FORWARDER_BLOCK;
593
594 do
595 {
596 edge t;
597
598 if (!single_succ_p (first))
599 {
600 gcc_assert (n < nthreaded_edges);
601 t = threaded_edges [n++];
602 gcc_assert (t->src == first);
603 update_bb_profile_for_threading (first, edge_frequency,
604 edge_count, t);
605 update_br_prob_note (first);
606 }
607 else
608 {
609 first->count -= edge_count;
610 if (first->count < 0)
611 first->count = 0;
612 first->frequency -= edge_frequency;
613 if (first->frequency < 0)
614 first->frequency = 0;
615 /* It is possible that, as the result of
616    threading, we've removed the edge because it was
617    threaded to the fallthru edge.  Avoid
618    getting out of sync. */
619 if (n < nthreaded_edges
620 && first == threaded_edges [n]->src)
621 n++;
622 t = single_succ_edge (first);
623 }
624
625 t->count -= edge_count;
626 if (t->count < 0)
627 t->count = 0;
628 first = t->dest;
629 }
630 while (first != target);
631
632 changed = true;
633 continue;
634 }
635 ei_next (&ei);
636 }
637
638 if (threaded_edges)
639 free (threaded_edges);
640 return changed;
641 }
642 \f
643
644 /* Blocks A and B are to be merged into a single block. A has no incoming
645 fallthru edge, so it can be moved before B without adding or modifying
646 any jumps (aside from the jump from A to B). */
647
648 static void
649 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
650 {
651 rtx barrier;
652
653 /* If we are partitioning hot/cold basic blocks, we don't want to
654 mess up unconditional or indirect jumps that cross between hot
655 and cold sections.
656
657 Basic block partitioning may result in some jumps that appear to
658 be optimizable (or blocks that appear to be mergeable), but which really
659 must be left untouched (they are required to make it safely across
660 partition boundaries). See the comments at the top of
661 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
662
663 if (BB_PARTITION (a) != BB_PARTITION (b))
664 return;
665
666 barrier = next_nonnote_insn (BB_END (a));
667 gcc_assert (BARRIER_P (barrier));
668 delete_insn (barrier);
669
670 /* Scramble the insn chain. */
671 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
672 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
673 df_set_bb_dirty (a);
674
675 if (dump_file)
676 fprintf (dump_file, "Moved block %d before %d and merged.\n",
677 a->index, b->index);
678
679 /* Swap the records for the two blocks around. */
680
681 unlink_block (a);
682 link_block (a, b->prev_bb);
683
684 /* Now blocks A and B are contiguous. Merge them. */
685 merge_blocks (a, b);
686 }
687
688 /* Blocks A and B are to be merged into a single block. B has no outgoing
689 fallthru edge, so it can be moved after A without adding or modifying
690 any jumps (aside from the jump from A to B). */
691
692 static void
693 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
694 {
695 rtx barrier, real_b_end;
696 rtx label, table;
697
698 /* If we are partitioning hot/cold basic blocks, we don't want to
699 mess up unconditional or indirect jumps that cross between hot
700 and cold sections.
701
702 Basic block partitioning may result in some jumps that appear to
703 be optimizable (or blocks that appear to be mergeable), but which really
704 must be left untouched (they are required to make it safely across
705 partition boundaries). See the comments at the top of
706 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
707
708 if (BB_PARTITION (a) != BB_PARTITION (b))
709 return;
710
711 real_b_end = BB_END (b);
712
713 /* If there is a jump table following block B, temporarily add the jump
714    table to block B so that it will also be moved to the correct location. */
715 if (tablejump_p (BB_END (b), &label, &table)
716 && prev_active_insn (label) == BB_END (b))
717 {
718 BB_END (b) = table;
719 }
720
721 /* There had better have been a barrier there. Delete it. */
722 barrier = NEXT_INSN (BB_END (b));
723 if (barrier && BARRIER_P (barrier))
724 delete_insn (barrier);
725
726
727 /* Scramble the insn chain. */
728 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
729
730 /* Restore the real end of b. */
731 BB_END (b) = real_b_end;
732
733 if (dump_file)
734 fprintf (dump_file, "Moved block %d after %d and merged.\n",
735 b->index, a->index);
736
737 /* Now blocks A and B are contiguous. Merge them. */
738 merge_blocks (a, b);
739 }
740
741 /* Attempt to merge basic blocks that are potentially non-adjacent.
742    Return NULL iff the attempt failed, otherwise return the basic block
743    where cleanup_cfg should continue.  Because the merging commonly
744    moves a basic block away or introduces another optimization
745    possibility, return the basic block just before B so cleanup_cfg
746    doesn't need to iterate.
747
748    It might be a good idea to return the basic block before C in the
749    case where C has been moved after B and originally appeared earlier
750    in the insn sequence, but we have no information available about the
751    relative ordering of these two.  Hopefully it is not too common. */
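/* Editorial summary of the cases handled below (a sketch, not part
   of the original comment):

     - B falls through to C:        merge in place, no code motion;
     - C has no outgoing fallthru:  move C directly after B;
     - otherwise:                   force B's incoming fallthru edge
                                    (if any) onto an explicit jump,
                                    then move B before C.

   The latter two cases run only under CLEANUP_EXPENSIVE.  */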
752
753 static basic_block
754 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
755 {
756 basic_block next;
757
758 /* If we are partitioning hot/cold basic blocks, we don't want to
759 mess up unconditional or indirect jumps that cross between hot
760 and cold sections.
761
762 Basic block partitioning may result in some jumps that appear to
763 be optimizable (or blocks that appear to be mergeable), but which really
764 must be left untouched (they are required to make it safely across
765 partition boundaries). See the comments at the top of
766 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
767
768 if (BB_PARTITION (b) != BB_PARTITION (c))
769 return NULL;
770
771 /* If B has a fallthru edge to C, no need to move anything. */
772 if (e->flags & EDGE_FALLTHRU)
773 {
774 int b_index = b->index, c_index = c->index;
775 merge_blocks (b, c);
776 update_forwarder_flag (b);
777
778 if (dump_file)
779 fprintf (dump_file, "Merged %d and %d without moving.\n",
780 b_index, c_index);
781
782 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
783 }
784
785 /* Otherwise we will need to move code around. Do that only if expensive
786 transformations are allowed. */
787 else if (mode & CLEANUP_EXPENSIVE)
788 {
789 edge tmp_edge, b_fallthru_edge;
790 bool c_has_outgoing_fallthru;
791 bool b_has_incoming_fallthru;
792 edge_iterator ei;
793
794 /* Avoid overactive code motion, as the forwarder blocks should be
795 eliminated by edge redirection instead. One exception might have
796 been if B is a forwarder block and C has no fallthru edge, but
797 that should be cleaned up by bb-reorder instead. */
798 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
799 return NULL;
800
801 /* We must make sure not to munge the nesting of lexical blocks
802    and loop notes.  This is done by squeezing out all the notes
803    and leaving them there to lie.  Not ideal, but functional. */
804
805 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
806 if (tmp_edge->flags & EDGE_FALLTHRU)
807 break;
808
809 c_has_outgoing_fallthru = (tmp_edge != NULL);
810
811 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
812 if (tmp_edge->flags & EDGE_FALLTHRU)
813 break;
814
815 b_has_incoming_fallthru = (tmp_edge != NULL);
816 b_fallthru_edge = tmp_edge;
817 next = b->prev_bb;
818 if (next == c)
819 next = next->prev_bb;
820
821 /* Otherwise, we're going to try to move C after B. If C does
822 not have an outgoing fallthru, then it can be moved
823 immediately after B without introducing or modifying jumps. */
824 if (! c_has_outgoing_fallthru)
825 {
826 merge_blocks_move_successor_nojumps (b, c);
827 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
828 }
829
830 /* If B does not have an incoming fallthru, then it can be moved
831 immediately before C without introducing or modifying jumps.
832 C cannot be the first block, so we do not have to worry about
833 accessing a non-existent block. */
834
835 if (b_has_incoming_fallthru)
836 {
837 basic_block bb;
838
839 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
840 return NULL;
841 bb = force_nonfallthru (b_fallthru_edge);
842 if (bb)
843 notice_new_block (bb);
844 }
845
846 merge_blocks_move_predecessor_nojumps (b, c);
847 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
848 }
849
850 return NULL;
851 }
852 \f
853
854 /* Remove the memory attributes of a MEM expression
855    if they are not equal. */
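/* Editorial example (illustrative): given two MEMs for the same
   address where one carries alias set 3 and the other alias set 4,
   merging clears both alias sets to 0; differing MEM_EXPRs or offsets
   are dropped; the size is widened to the maximum and the alignment
   narrowed to the minimum, so the merged attributes remain valid for
   both of the original accesses.  */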
856
857 void
858 merge_memattrs (rtx x, rtx y)
859 {
860 int i;
861 int j;
862 enum rtx_code code;
863 const char *fmt;
864
865 if (x == y)
866 return;
867 if (x == 0 || y == 0)
868 return;
869
870 code = GET_CODE (x);
871
872 if (code != GET_CODE (y))
873 return;
874
875 if (GET_MODE (x) != GET_MODE (y))
876 return;
877
878 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
879 {
880 if (! MEM_ATTRS (x))
881 MEM_ATTRS (y) = 0;
882 else if (! MEM_ATTRS (y))
883 MEM_ATTRS (x) = 0;
884 else
885 {
886 rtx mem_size;
887
888 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
889 {
890 set_mem_alias_set (x, 0);
891 set_mem_alias_set (y, 0);
892 }
893
894 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
895 {
896 set_mem_expr (x, 0);
897 set_mem_expr (y, 0);
898 set_mem_offset (x, 0);
899 set_mem_offset (y, 0);
900 }
901 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
902 {
903 set_mem_offset (x, 0);
904 set_mem_offset (y, 0);
905 }
906
907 if (!MEM_SIZE (x))
908 mem_size = NULL_RTX;
909 else if (!MEM_SIZE (y))
910 mem_size = NULL_RTX;
911 else
912 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
913 INTVAL (MEM_SIZE (y))));
914 set_mem_size (x, mem_size);
915 set_mem_size (y, mem_size);
916
917 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
918 set_mem_align (y, MEM_ALIGN (x));
919 }
920 }
921
922 fmt = GET_RTX_FORMAT (code);
923 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
924 {
925 switch (fmt[i])
926 {
927 case 'E':
928 /* Two vectors must have the same length. */
929 if (XVECLEN (x, i) != XVECLEN (y, i))
930 return;
931
932 for (j = 0; j < XVECLEN (x, i); j++)
933 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
934
935 break;
936
937 case 'e':
938 merge_memattrs (XEXP (x, i), XEXP (y, i));
939 }
940 }
941 return;
942 }
943
944
945 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
946
947 static bool
948 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
949 {
950 rtx p1, p2;
951
952 /* Verify that I1 and I2 are equivalent. */
953 if (GET_CODE (i1) != GET_CODE (i2))
954 return false;
955
956 /* __builtin_unreachable() may lead to empty blocks (ending with
957 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
958 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
959 return true;
960
961 p1 = PATTERN (i1);
962 p2 = PATTERN (i2);
963
964 if (GET_CODE (p1) != GET_CODE (p2))
965 return false;
966
967 /* If this is a CALL_INSN, compare register usage information.
968 If we don't check this on stack register machines, the two
969 CALL_INSNs might be merged leaving reg-stack.c with mismatching
970 numbers of stack registers in the same basic block.
971 If we don't check this on machines with delay slots, a delay slot may
972 be filled that clobbers a parameter expected by the subroutine.
973
974 ??? We take the simple route for now and assume that if they're
975 equal, they were constructed identically. */
976
977 if (CALL_P (i1)
978 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
979 CALL_INSN_FUNCTION_USAGE (i2))
980 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
981 return false;
982
983 #ifdef STACK_REGS
984 /* If cross_jump_death_matters is not 0, the insn's mode
985 indicates whether or not the insn contains any stack-like
986 regs. */
987
988 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
989 {
990 /* If register stack conversion has already been done, then
991 death notes must also be compared before it is certain that
992 the two instruction streams match. */
993
994 rtx note;
995 HARD_REG_SET i1_regset, i2_regset;
996
997 CLEAR_HARD_REG_SET (i1_regset);
998 CLEAR_HARD_REG_SET (i2_regset);
999
1000 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1001 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1002 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1003
1004 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1005 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1006 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1007
1008 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1009 return false;
1010 }
1011 #endif
1012
1013 if (reload_completed
1014 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1015 return true;
1016
1017 return false;
1018 }
1019 \f
1020 /* Look through the insns at the end of BB1 and BB2 and find the longest
1021    sequence of equivalent insns.  Store the first insns of that sequence
1022    in *F1 and *F2 and return the sequence length.
1023
1024 To simplify callers of this function, if the blocks match exactly,
1025 store the head of the blocks in *F1 and *F2. */
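/* Hedged usage sketch (editorial): a caller typically does

     rtx f1, f2;
     int nmatch = flow_find_cross_jump (mode, bb1, bb2, &f1, &f2);

   and, if nmatch is large enough, splits bb1 just before f1 and
   redirects the split-off tail to the matching tail of bb2; see
   try_crossjump_to_edge below.  */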
1026
1027 static int
1028 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1029 basic_block bb2, rtx *f1, rtx *f2)
1030 {
1031 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1032 int ninsns = 0;
1033
1034 /* Skip simple jumps at the end of the blocks. Complex jumps still
1035 need to be compared for equivalence, which we'll do below. */
1036
1037 i1 = BB_END (bb1);
1038 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1039 if (onlyjump_p (i1)
1040 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1041 {
1042 last1 = i1;
1043 i1 = PREV_INSN (i1);
1044 }
1045
1046 i2 = BB_END (bb2);
1047 if (onlyjump_p (i2)
1048 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1049 {
1050 last2 = i2;
1051 /* Count everything except for the unconditional jump as an insn. */
1052 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1053 ninsns++;
1054 i2 = PREV_INSN (i2);
1055 }
1056
1057 while (true)
1058 {
1059 /* Ignore notes. */
1060 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1061 i1 = PREV_INSN (i1);
1062
1063 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1064 i2 = PREV_INSN (i2);
1065
1066 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1067 break;
1068
1069 if (!old_insns_match_p (mode, i1, i2))
1070 break;
1071
1072 merge_memattrs (i1, i2);
1073
1074 /* Don't begin a cross-jump with a NOTE insn. */
1075 if (INSN_P (i1))
1076 {
1077 /* If the merged insns have different REG_EQUAL notes, then
1078 remove them. */
1079 rtx equiv1 = find_reg_equal_equiv_note (i1);
1080 rtx equiv2 = find_reg_equal_equiv_note (i2);
1081
1082 if (equiv1 && !equiv2)
1083 remove_note (i1, equiv1);
1084 else if (!equiv1 && equiv2)
1085 remove_note (i2, equiv2);
1086 else if (equiv1 && equiv2
1087 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1088 {
1089 remove_note (i1, equiv1);
1090 remove_note (i2, equiv2);
1091 }
1092
1093 afterlast1 = last1, afterlast2 = last2;
1094 last1 = i1, last2 = i2;
1095 ninsns++;
1096 }
1097
1098 i1 = PREV_INSN (i1);
1099 i2 = PREV_INSN (i2);
1100 }
1101
1102 #ifdef HAVE_cc0
1103 /* Don't allow the insn after a compare to be shared by
1104 cross-jumping unless the compare is also shared. */
1105 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1106 last1 = afterlast1, last2 = afterlast2, ninsns--;
1107 #endif
1108
1109 /* Include preceding notes and labels in the cross-jump.  One,
1110    this may bring us to the head of the blocks as requested above.
1111    Two, it keeps line number notes as closely matched as possible. */
1112 if (ninsns)
1113 {
1114 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1115 last1 = PREV_INSN (last1);
1116
1117 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1118 last1 = PREV_INSN (last1);
1119
1120 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1121 last2 = PREV_INSN (last2);
1122
1123 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1124 last2 = PREV_INSN (last2);
1125
1126 *f1 = last1;
1127 *f2 = last2;
1128 }
1129
1130 return ninsns;
1131 }
1132
1133 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1134    the branch instruction.  This means that if we commonize the control
1135    flow before the end of the basic block, the semantics remain unchanged.
1136
1137 We may assume that there exists one edge with a common destination. */
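/* Editorial example of the "crossed edges" case handled below: BB1
   ending in "if (x == 0) goto A;" with fallthru to B matches BB2
   ending in "if (x != 0) goto B;" with fallthru to A.  The branch and
   fallthru destinations are swapped, so BB2's comparison code is
   reversed before being compared against BB1's.  */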
1138
1139 static bool
1140 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1141 {
1142 int nehedges1 = 0, nehedges2 = 0;
1143 edge fallthru1 = 0, fallthru2 = 0;
1144 edge e1, e2;
1145 edge_iterator ei;
1146
1147 /* If BB1 has only one successor, we may be looking at either an
1148 unconditional jump, or a fake edge to exit. */
1149 if (single_succ_p (bb1)
1150 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1151 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1152 return (single_succ_p (bb2)
1153 && (single_succ_edge (bb2)->flags
1154 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1155 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1156
1157 /* Match conditional jumps - this may get tricky when fallthru and branch
1158 edges are crossed. */
1159 if (EDGE_COUNT (bb1->succs) == 2
1160 && any_condjump_p (BB_END (bb1))
1161 && onlyjump_p (BB_END (bb1)))
1162 {
1163 edge b1, f1, b2, f2;
1164 bool reverse, match;
1165 rtx set1, set2, cond1, cond2;
1166 enum rtx_code code1, code2;
1167
1168 if (EDGE_COUNT (bb2->succs) != 2
1169 || !any_condjump_p (BB_END (bb2))
1170 || !onlyjump_p (BB_END (bb2)))
1171 return false;
1172
1173 b1 = BRANCH_EDGE (bb1);
1174 b2 = BRANCH_EDGE (bb2);
1175 f1 = FALLTHRU_EDGE (bb1);
1176 f2 = FALLTHRU_EDGE (bb2);
1177
1178 /* Get around possible forwarders on fallthru edges. Other cases
1179 should be optimized out already. */
1180 if (FORWARDER_BLOCK_P (f1->dest))
1181 f1 = single_succ_edge (f1->dest);
1182
1183 if (FORWARDER_BLOCK_P (f2->dest))
1184 f2 = single_succ_edge (f2->dest);
1185
1186 /* To simplify use of this function, return false if there are
1187 unneeded forwarder blocks. These will get eliminated later
1188 during cleanup_cfg. */
1189 if (FORWARDER_BLOCK_P (f1->dest)
1190 || FORWARDER_BLOCK_P (f2->dest)
1191 || FORWARDER_BLOCK_P (b1->dest)
1192 || FORWARDER_BLOCK_P (b2->dest))
1193 return false;
1194
1195 if (f1->dest == f2->dest && b1->dest == b2->dest)
1196 reverse = false;
1197 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1198 reverse = true;
1199 else
1200 return false;
1201
1202 set1 = pc_set (BB_END (bb1));
1203 set2 = pc_set (BB_END (bb2));
1204 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1205 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1206 reverse = !reverse;
1207
1208 cond1 = XEXP (SET_SRC (set1), 0);
1209 cond2 = XEXP (SET_SRC (set2), 0);
1210 code1 = GET_CODE (cond1);
1211 if (reverse)
1212 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1213 else
1214 code2 = GET_CODE (cond2);
1215
1216 if (code2 == UNKNOWN)
1217 return false;
1218
1219 /* Verify codes and operands match. */
1220 match = ((code1 == code2
1221 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1222 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1223 || (code1 == swap_condition (code2)
1224 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1225 XEXP (cond2, 0))
1226 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1227 XEXP (cond2, 1))));
1228
1229 /* If we return true, we will join the blocks, which means that
1230    we will have only one branch prediction bit to work with.  Thus
1231    we require the existing branches to have probabilities that are
1232    roughly similar. */
1233 if (match
1234 && optimize_bb_for_speed_p (bb1)
1235 && optimize_bb_for_speed_p (bb2))
1236 {
1237 int prob2;
1238
1239 if (b1->dest == b2->dest)
1240 prob2 = b2->probability;
1241 else
1242 /* Do not use f2 probability as f2 may be forwarded. */
1243 prob2 = REG_BR_PROB_BASE - b2->probability;
1244
1245 /* Fail if the difference in probabilities is greater than 50%.
1246 This rules out two well-predicted branches with opposite
1247 outcomes. */
1248 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1249 {
1250 if (dump_file)
1251 fprintf (dump_file,
1252 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1253 bb1->index, bb2->index, b1->probability, prob2);
1254
1255 return false;
1256 }
1257 }
1258
1259 if (dump_file && match)
1260 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1261 bb1->index, bb2->index);
1262
1263 return match;
1264 }
1265
1266 /* Generic case - we are seeing a computed jump, table jump or trapping
1267 instruction. */
1268
1269 /* Check whether there are tablejumps at the end of BB1 and BB2.
1270    Return true if they are identical. */
1271 {
1272 rtx label1, label2;
1273 rtx table1, table2;
1274
1275 if (tablejump_p (BB_END (bb1), &label1, &table1)
1276 && tablejump_p (BB_END (bb2), &label2, &table2)
1277 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1278 {
1279 /* The labels should never be the same rtx.  If they really are the
1280    same, the jump tables are the same too.  So disable crossjumping of
1281    blocks BB1 and BB2 because when deleting the common insns at the end
1282    of BB1 by delete_basic_block () the jump table would be deleted too. */
1283 /* If LABEL2 is referenced in BB1->END, do not do anything
1284    because we would lose information when replacing
1285    LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1286 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1287 {
1288 /* Set IDENTICAL to true when the tables are identical. */
1289 bool identical = false;
1290 rtx p1, p2;
1291
1292 p1 = PATTERN (table1);
1293 p2 = PATTERN (table2);
1294 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1295 {
1296 identical = true;
1297 }
1298 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1299 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1300 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1301 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1302 {
1303 int i;
1304
1305 identical = true;
1306 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1307 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1308 identical = false;
1309 }
1310
1311 if (identical)
1312 {
1313 replace_label_data rr;
1314 bool match;
1315
1316 /* Temporarily replace references to LABEL1 with LABEL2
1317    in BB1->END so that we can compare the instructions. */
1318 rr.r1 = label1;
1319 rr.r2 = label2;
1320 rr.update_label_nuses = false;
1321 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1322
1323 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1324 if (dump_file && match)
1325 fprintf (dump_file,
1326 "Tablejumps in bb %i and %i match.\n",
1327 bb1->index, bb2->index);
1328
1329 /* Set the original label in BB1->END because when deleting
1330 a block whose end is a tablejump, the tablejump referenced
1331 from the instruction is deleted too. */
1332 rr.r1 = label2;
1333 rr.r2 = label1;
1334 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1335
1336 return match;
1337 }
1338 }
1339 return false;
1340 }
1341 }
1342
1343 /* First ensure that the instructions match. There may be many outgoing
1344 edges so this test is generally cheaper. */
1345 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1346 return false;
1347
1348 /* Search the outgoing edges, ensure that the counts match, and find
1349    possible fallthru and exception handling edges, since these need more
1350    validation. */
1351 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1352 return false;
1353
1354 FOR_EACH_EDGE (e1, ei, bb1->succs)
1355 {
1356 e2 = EDGE_SUCC (bb2, ei.index);
1357
1358 if (e1->flags & EDGE_EH)
1359 nehedges1++;
1360
1361 if (e2->flags & EDGE_EH)
1362 nehedges2++;
1363
1364 if (e1->flags & EDGE_FALLTHRU)
1365 fallthru1 = e1;
1366 if (e2->flags & EDGE_FALLTHRU)
1367 fallthru2 = e2;
1368 }
1369
1370 /* If number of edges of various types does not match, fail. */
1371 if (nehedges1 != nehedges2
1372 || (fallthru1 != 0) != (fallthru2 != 0))
1373 return false;
1374
1375 /* Fallthru edges must be forwarded to the same destination. */
1376 if (fallthru1)
1377 {
1378 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1379 ? single_succ (fallthru1->dest): fallthru1->dest);
1380 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1381 ? single_succ (fallthru2->dest): fallthru2->dest);
1382
1383 if (d1 != d2)
1384 return false;
1385 }
1386
1387 /* Ensure the same EH region. */
1388 {
1389 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1390 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1391
1392 if (!n1 && n2)
1393 return false;
1394
1395 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1396 return false;
1397 }
1398
1399 /* The same checks as in try_crossjump_to_edge.  They are required for
1400    the RTL version of sequence abstraction. */
1401 FOR_EACH_EDGE (e1, ei, bb2->succs)
1402 {
1403 edge e2;
1404 edge_iterator ei;
1405 basic_block d1 = e1->dest;
1406
1407 if (FORWARDER_BLOCK_P (d1))
1408 d1 = EDGE_SUCC (d1, 0)->dest;
1409
1410 FOR_EACH_EDGE (e2, ei, bb1->succs)
1411 {
1412 basic_block d2 = e2->dest;
1413 if (FORWARDER_BLOCK_P (d2))
1414 d2 = EDGE_SUCC (d2, 0)->dest;
1415 if (d1 == d2)
1416 break;
1417 }
1418
1419 if (!e2)
1420 return false;
1421 }
1422
1423 return true;
1424 }
1425
1426 /* Return true if the basic block BB has a preserved label. */
1427
1428 static bool
1429 block_has_preserve_label (basic_block bb)
1430 {
1431 return (bb
1432 && block_label (bb)
1433 && LABEL_PRESERVE_P (block_label (bb)));
1434 }
1435
1436 /* E1 and E2 are edges with the same destination block. Search their
1437 predecessors for common code. If found, redirect control flow from
1438 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
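/* Editorial sketch of the redirection performed below: with a common
   tail T found in both predecessors,

     SRC1: a; T                 SRC1: a; goto L
     SRC2: b; T          =>     SRC2: b; L: T

   SRC1 is split just before its copy of T, the split-off tail is
   deleted, and the new edge out of SRC1 is redirected to the
   (possibly freshly split) tail of SRC2.  */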
1439
1440 static bool
1441 try_crossjump_to_edge (int mode, edge e1, edge e2)
1442 {
1443 int nmatch;
1444 basic_block src1 = e1->src, src2 = e2->src;
1445 basic_block redirect_to, redirect_from, to_remove;
1446 rtx newpos1, newpos2;
1447 edge s;
1448 edge_iterator ei;
1449
1450 newpos1 = newpos2 = NULL_RTX;
1451
1452 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1453 to try this optimization.
1454
1455 Basic block partitioning may result in some jumps that appear to
1456 be optimizable (or blocks that appear to be mergeable), but which really
1457 must be left untouched (they are required to make it safely across
1458 partition boundaries). See the comments at the top of
1459 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1460
1461 if (flag_reorder_blocks_and_partition && reload_completed)
1462 return false;
1463
1464 /* Search backward through forwarder blocks. We don't need to worry
1465 about multiple entry or chained forwarders, as they will be optimized
1466 away. We do this to look past the unconditional jump following a
1467 conditional jump that is required due to the current CFG shape. */
1468 if (single_pred_p (src1)
1469 && FORWARDER_BLOCK_P (src1))
1470 e1 = single_pred_edge (src1), src1 = e1->src;
1471
1472 if (single_pred_p (src2)
1473 && FORWARDER_BLOCK_P (src2))
1474 e2 = single_pred_edge (src2), src2 = e2->src;
1475
1476 /* Nothing to do if we reach ENTRY, or a common source block. */
1477 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1478 return false;
1479 if (src1 == src2)
1480 return false;
1481
1482 /* Seeing more than one forwarder block would confuse us later... */
1483 if (FORWARDER_BLOCK_P (e1->dest)
1484 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1485 return false;
1486
1487 if (FORWARDER_BLOCK_P (e2->dest)
1488 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1489 return false;
1490
1491 /* Likewise with dead code (possibly newly created by the other optimizations
1492 of cfg_cleanup). */
1493 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1494 return false;
1495
1496 /* Look for the common insn sequence, part the first ... */
1497 if (!outgoing_edges_match (mode, src1, src2))
1498 return false;
1499
1500 /* ... and part the second. */
1501 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1502
1503 /* Don't proceed with the crossjump unless we found a sufficient number
1504 of matching instructions or the 'from' block was totally matched
1505 (such that its predecessors will hopefully be redirected and the
1506 block removed). */
1507 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1508 && (newpos1 != BB_HEAD (src1)))
1509 return false;
1510
1511 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1512 if (block_has_preserve_label (e1->dest)
1513 && (e1->flags & EDGE_ABNORMAL))
1514 return false;
1515
1516 /* Here we know that the insns at the end of SRC1 which are common with
1517    SRC2 will be deleted.
1518    If we have tablejumps at the end of SRC1 and SRC2,
1519    they have already been compared for equivalence in outgoing_edges_match (),
1520    so replace the references to TABLE1 with references to TABLE2. */
1521 {
1522 rtx label1, label2;
1523 rtx table1, table2;
1524
1525 if (tablejump_p (BB_END (src1), &label1, &table1)
1526 && tablejump_p (BB_END (src2), &label2, &table2)
1527 && label1 != label2)
1528 {
1529 replace_label_data rr;
1530 rtx insn;
1531
1532 /* Replace references to LABEL1 with LABEL2. */
1533 rr.r1 = label1;
1534 rr.r2 = label2;
1535 rr.update_label_nuses = true;
1536 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1537 {
1538 /* Do not replace the label in SRC1->END because when deleting
1539 a block whose end is a tablejump, the tablejump referenced
1540 from the instruction is deleted too. */
1541 if (insn != BB_END (src1))
1542 for_each_rtx (&insn, replace_label, &rr);
1543 }
1544 }
1545 }
1546
1547 /* Avoid splitting if possible. We must always split when SRC2 has
1548 EH predecessor edges, or we may end up with basic blocks with both
1549 normal and EH predecessor edges. */
1550 if (newpos2 == BB_HEAD (src2)
1551 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1552 redirect_to = src2;
1553 else
1554 {
1555 if (newpos2 == BB_HEAD (src2))
1556 {
1557 /* Skip possible basic block header. */
1558 if (LABEL_P (newpos2))
1559 newpos2 = NEXT_INSN (newpos2);
1560 if (NOTE_P (newpos2))
1561 newpos2 = NEXT_INSN (newpos2);
1562 }
1563
1564 if (dump_file)
1565 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1566 src2->index, nmatch);
1567 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1568 }
1569
1570 if (dump_file)
1571 fprintf (dump_file,
1572 "Cross jumping from bb %i to bb %i; %i common insns\n",
1573 src1->index, src2->index, nmatch);
1574
1575 /* We may have some registers visible through the block. */
1576 df_set_bb_dirty (redirect_to);
1577
1578 /* Recompute the frequencies and counts of outgoing edges. */
1579 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1580 {
1581 edge s2;
1582 edge_iterator ei;
1583 basic_block d = s->dest;
1584
1585 if (FORWARDER_BLOCK_P (d))
1586 d = single_succ (d);
1587
1588 FOR_EACH_EDGE (s2, ei, src1->succs)
1589 {
1590 basic_block d2 = s2->dest;
1591 if (FORWARDER_BLOCK_P (d2))
1592 d2 = single_succ (d2);
1593 if (d == d2)
1594 break;
1595 }
1596
1597 s->count += s2->count;
1598
1599 /* Take care to update possible forwarder blocks.  We verified
1600    that there is no more than one in the chain, so we can't run
1601    into an infinite loop. */
1602 if (FORWARDER_BLOCK_P (s->dest))
1603 {
1604 single_succ_edge (s->dest)->count += s2->count;
1605 s->dest->count += s2->count;
1606 s->dest->frequency += EDGE_FREQUENCY (s);
1607 }
1608
1609 if (FORWARDER_BLOCK_P (s2->dest))
1610 {
1611 single_succ_edge (s2->dest)->count -= s2->count;
1612 if (single_succ_edge (s2->dest)->count < 0)
1613 single_succ_edge (s2->dest)->count = 0;
1614 s2->dest->count -= s2->count;
1615 s2->dest->frequency -= EDGE_FREQUENCY (s);
1616 if (s2->dest->frequency < 0)
1617 s2->dest->frequency = 0;
1618 if (s2->dest->count < 0)
1619 s2->dest->count = 0;
1620 }
1621
1622 if (!redirect_to->frequency && !src1->frequency)
1623 s->probability = (s->probability + s2->probability) / 2;
1624 else
1625 s->probability
1626 = ((s->probability * redirect_to->frequency +
1627 s2->probability * src1->frequency)
1628 / (redirect_to->frequency + src1->frequency));
1629 }
1630
1631 /* Adjust count and frequency for the block. An earlier jump
1632 threading pass may have left the profile in an inconsistent
1633 state (see update_bb_profile_for_threading) so we must be
1634 prepared for overflows. */
1635 redirect_to->count += src1->count;
1636 redirect_to->frequency += src1->frequency;
1637 if (redirect_to->frequency > BB_FREQ_MAX)
1638 redirect_to->frequency = BB_FREQ_MAX;
1639 update_br_prob_note (redirect_to);
1640
1641 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1642
1643 /* Skip possible basic block header. */
1644 if (LABEL_P (newpos1))
1645 newpos1 = NEXT_INSN (newpos1);
1646 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
1647 newpos1 = NEXT_INSN (newpos1);
1648
1649 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1650 to_remove = single_succ (redirect_from);
1651
1652 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1653 delete_basic_block (to_remove);
1654
1655 update_forwarder_flag (redirect_from);
1656 if (redirect_to != src2)
1657 update_forwarder_flag (src2);
1658
1659 return true;
1660 }
1661
1662 /* Search the predecessors of BB for common insn sequences.  When found,
1663    share code between them by redirecting control flow.  Return true if
1664    any changes were made. */
1665
1666 static bool
1667 try_crossjump_bb (int mode, basic_block bb)
1668 {
1669 edge e, e2, fallthru;
1670 bool changed;
1671 unsigned max, ix, ix2;
1672 basic_block ev, ev2;
1673 edge_iterator ei;
1674
1675 /* Nothing to do if there are not at least two incoming edges. */
1676 if (EDGE_COUNT (bb->preds) < 2)
1677 return false;
1678
1679 /* Don't crossjump if this block ends in a computed jump,
1680 unless we are optimizing for size. */
1681 if (optimize_bb_for_size_p (bb)
1682 && bb != EXIT_BLOCK_PTR
1683 && computed_jump_p (BB_END (bb)))
1684 return false;
1685
1686 /* If we are partitioning hot/cold basic blocks, we don't want to
1687 mess up unconditional or indirect jumps that cross between hot
1688 and cold sections.
1689
1690 Basic block partitioning may result in some jumps that appear to
1691 be optimizable (or blocks that appear to be mergeable), but which really
1692 must be left untouched (they are required to make it safely across
1693 partition boundaries). See the comments at the top of
1694 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1695
1696 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1697 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1698 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1699 return false;
1700
1701 /* It is always cheapest to redirect a block that ends in a branch to
1702 a block that falls through into BB, as that adds no branches to the
1703 program. We'll try that combination first. */
1704 fallthru = NULL;
1705 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1706
1707 if (EDGE_COUNT (bb->preds) > max)
1708 return false;
1709
1710 FOR_EACH_EDGE (e, ei, bb->preds)
1711 {
1712 if (e->flags & EDGE_FALLTHRU)
1713 {
1714 fallthru = e;
1715 break;
1716 }
1717 }
1718
1719 changed = false;
1720 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1721 {
1722 e = EDGE_PRED (ev, ix);
1723 ix++;
1724
1725 /* As noted above, first try with the fallthru predecessor (or, a
1726 fallthru predecessor if we are in cfglayout mode). */
1727 if (fallthru)
1728 {
1729 /* Don't combine the fallthru edge into anything else.
1730 If there is a match, we'll do it the other way around. */
1731 if (e == fallthru)
1732 continue;
1733 /* If nothing changed since the last attempt, there is nothing
1734 we can do. */
1735 if (!first_pass
1736 && (!(df_get_bb_dirty (e->src))
1737 && !(df_get_bb_dirty (fallthru->src))))
1738 continue;
1739
1740 if (try_crossjump_to_edge (mode, e, fallthru))
1741 {
1742 changed = true;
1743 ix = 0;
1744 ev = bb;
1745 continue;
1746 }
1747 }
1748
1749 /* Non-obvious work limiting check: Recognize that we're going
1750    to call try_crossjump_bb on every basic block.  So if we have
1751    two blocks with lots of outgoing edges (a switch) and they
1752    share lots of common destinations, then we would do the
1753    cross-jump check once for each common destination.
1754
1755    Now, if the blocks actually are cross-jump candidates, then
1756    all of their destinations will be shared, which means that
1757    we need only check them for cross-jump candidacy once.  We
1758    can eliminate redundant checks of crossjump(A,B) by arbitrarily
1759    choosing to do the check only when the edge in question is the
1760    first successor of its source block. */
1761 if (EDGE_SUCC (e->src, 0) != e)
1762 continue;
1763
1764 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1765 {
1766 e2 = EDGE_PRED (ev2, ix2);
1767 ix2++;
1768
1769 if (e2 == e)
1770 continue;
1771
1772 /* We've already checked the fallthru edge above. */
1773 if (e2 == fallthru)
1774 continue;
1775
1776 /* The "first successor" check above only prevents multiple
1777 checks of crossjump(A,B). In order to prevent redundant
1778 checks of crossjump(B,A), require that A be the block
1779 with the lowest index. */
1780 if (e->src->index > e2->src->index)
1781 continue;
1782
1783 /* If nothing changed since the last attempt, there is nothing
1784 we can do. */
1785 if (!first_pass
1786 && (!(df_get_bb_dirty (e->src))
1787 && !(df_get_bb_dirty (e2->src))))
1788 continue;
1789
1790 if (try_crossjump_to_edge (mode, e, e2))
1791 {
1792 changed = true;
1793 ev2 = bb;
1794 ix = 0;
1795 break;
1796 }
1797 }
1798 }
1799
1800 if (changed)
1801 crossjumps_occured = true;
1802
1803 return changed;
1804 }
1805
1806 /* Do simple CFG optimizations - basic block merging, simplification of
1807    jump instructions, etc.  Return nonzero if changes were made. */
1808
1809 static bool
1810 try_optimize_cfg (int mode)
1811 {
1812 bool changed_overall = false;
1813 bool changed;
1814 int iterations = 0;
1815 basic_block bb, b, next;
1816
1817 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1818 clear_bb_flags ();
1819
1820 crossjumps_occured = false;
1821
1822 FOR_EACH_BB (bb)
1823 update_forwarder_flag (bb);
1824
1825 if (! targetm.cannot_modify_jumps_p ())
1826 {
1827 first_pass = true;
1828 /* Attempt to merge blocks as made possible by edge removal. If
1829 a block has only one successor, and the successor has only
1830 one predecessor, they may be combined. */
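      /* Editorial example: if block B ends in an unconditional jump to C,
         B is C's only predecessor and C is B's only successor, then

             B: ...; goto C;
             C: ...

         collapses into a single block.  The loop below repeats until no
         transformation fires anymore.  */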
1831 do
1832 {
1833 changed = false;
1834 iterations++;
1835
1836 if (dump_file)
1837 fprintf (dump_file,
1838 "\n\ntry_optimize_cfg iteration %i\n\n",
1839 iterations);
1840
1841 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1842 {
1843 basic_block c;
1844 edge s;
1845 bool changed_here = false;
1846
1847 /* Delete trivially dead basic blocks. This is either
1848 blocks with no predecessors, or empty blocks with no
1849 successors. However if the empty block with no
1850 successors is the successor of the ENTRY_BLOCK, it is
1851 kept. This ensures that the ENTRY_BLOCK will have a
1852 successor which is a precondition for many RTL
1853 passes. Empty blocks may result from expanding
1854 __builtin_unreachable (). */
1855 if (EDGE_COUNT (b->preds) == 0
1856 || (EDGE_COUNT (b->succs) == 0
1857 && BB_HEAD (b) == BB_END (b)
1858 && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
1859 {
1860 c = b->prev_bb;
1861 if (dump_file)
1862 fprintf (dump_file, "Deleting block %i.\n",
1863 b->index);
1864
1865 delete_basic_block (b);
1866 if (!(mode & CLEANUP_CFGLAYOUT))
1867 changed = true;
1868 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
1869 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
1870 continue;
1871 }
1872
1873 /* Remove code labels no longer used. */
1874 if (single_pred_p (b)
1875 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1876 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1877 && LABEL_P (BB_HEAD (b))
1878 /* If the previous block ends with a branch to this
1879 block, we can't delete the label. Normally this
1880 is a condjump that is yet to be simplified, but
1881 if CASE_DROPS_THRU, this can be a tablejump with
1882 some element going to the same place as the
1883 default (fallthru). */
1884 && (single_pred (b) == ENTRY_BLOCK_PTR
1885 || !JUMP_P (BB_END (single_pred (b)))
1886 || ! label_is_jump_target_p (BB_HEAD (b),
1887 BB_END (single_pred (b)))))
1888 {
1889 rtx label = BB_HEAD (b);
1890
1891 delete_insn_chain (label, label, false);
1892 /* If the case label is undeletable, move it after the
1893 BASIC_BLOCK note. */
1894 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1895 {
1896 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1897
1898 reorder_insns_nobb (label, label, bb_note);
1899 BB_HEAD (b) = bb_note;
1900 if (BB_END (b) == bb_note)
1901 BB_END (b) = label;
1902 }
1903 if (dump_file)
1904 fprintf (dump_file, "Deleted label in block %i.\n",
1905 b->index);
1906 }
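
              /* Illustration (a sketch, not from the original code): for
                 an undeletable label the insn order above changes from

                     (note NOTE_INSN_DELETED_LABEL) (note BASIC_BLOCK) ...
                 to
                     (note BASIC_BLOCK) (note NOTE_INSN_DELETED_LABEL) ...

                 so the preserved label no longer heads the block.  */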

              /* If we fall through an empty block, we can remove it.  */
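              /* E.g. (sketch): P falls through to empty forwarder block
                 B, which falls through to C; the P->B edge is redirected
                 straight to C and B is deleted.  */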
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (single_pred_edge (b),
                                            single_succ (b));
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                  continue;
                }

              if (single_succ_p (b)
                  && (s = single_succ_edge (b))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && single_pred_p (c)
                  && b != c)
                {
                  /* When not in cfg_layout mode use code aware of reordering
                     INSNs.  This code possibly creates new basic blocks, so
                     it does not fit the merge_blocks interface and is kept
                     here in the hope that it will become useless once more
                     of the compiler is transformed to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (single_succ_p (b)
                  && single_succ (b) != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
                                                     single_succ (b),
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn, as they are referenced by the
   preceding insns computing the destination, so we delay the deletion
   and garbage-collect them once life information is computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB (bb)
    {
      rtx insn, next;

      for (insn = NEXT_INSN (BB_END (bb));
           insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
           insn = next)
        {
          next = NEXT_INSN (insn);
          if (LABEL_P (insn)
              && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
              && JUMP_TABLE_DATA_P (next))
            {
              rtx label = insn, jump = next;

              if (dump_file)
                fprintf (dump_file, "Dead jumptable %i removed\n",
                         INSN_UID (insn));

              next = NEXT_INSN (next);
              delete_insn (jump);
              delete_insn (label);
            }
        }
    }
}
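
/* Illustration (a sketch, not from the original code): the pattern
   removed above sits between two basic blocks and looks roughly like

       (code_label 42 ...)               ;; LABEL_NUSES == LABEL_PRESERVE_P
       (jump_insn ... (addr_vec [...]))  ;; JUMP_TABLE_DATA_P

   i.e. the only remaining "uses" of the label are the ones that merely
   preserve it, so the label and its table are deleted as a unit.  */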

\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
        {
          /* Try to remove some trivially dead insns when doing an expensive
             cleanup.  But delete_trivially_dead_insns doesn't work after
             reload (it only handles pseudos) and run_fast_dce is too costly
             to run in every iteration.

             For effective cross jumping, we really want to run a fast DCE to
             clean up any dead conditions, or they get in the way of performing
             useful tail merges.

             Other transformations in cleanup_cfg are not so sensitive to dead
             code, so delete_trivially_dead_insns or even doing nothing at all
             is good enough.  */
          if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
              && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
          else if ((mode & CLEANUP_CROSSJUMP)
                   && crossjumps_occured)
            run_fast_dce ();
        }
      else
        break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
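
/* Illustrative only (not part of the original code): a caller ORs
   together the CLEANUP_* flags it wants, e.g.

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   CLEANUP_CFGLAYOUT need not be passed explicitly; it is added above
   whenever the IR is in cfglayout mode.  See rest_of_handle_jump2
   below for a real invocation.  */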
\f
static unsigned int
rest_of_handle_jump (void)
{
  if (crtl->tail_call_emit)
    fixup_tail_calls ();
  return 0;
}

struct rtl_opt_pass pass_jump =
{
 {
  RTL_PASS,
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_verify_flow,                     /* todo_flags_finish */
 }
};


static unsigned int
rest_of_handle_jump2 (void)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}


struct rtl_opt_pass pass_jump2 =
{
 {
  RTL_PASS,
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func | TODO_verify_rtl_sharing, /* todo_flags_finish */
 }
};
