/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "cfglayout.h"
#include "emit-rtl.h"

/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if BB is a forwarder block, to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 1,
  BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the
   try_optimize_cfg loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
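
/* For illustration, the shape handled here is

     cbranch_block:  if (cond) goto L1;
     jump_block:     goto L2;
     L1:             ...

   where jump_block is a forwarder block reached only through the
   fallthru edge.  Inverting the condition gives

     cbranch_block:  if (!cond) goto L2;
     L1:             ...

   after which the unconditional jump (and its block) is deleted.  */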

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (jump_block->partition != jump_dest_block->partition
          || cbranch_jump_edge->crossing_edge))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);

  return true;
}
\f
/* Attempt to prove that the operation is a NOOP using CSElib, or mark
   its effect on a register.  Used by jump threading.  */
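
/* For example, a SET whose source and destination are equal according
   to cselib (say, re-loading a value already held in the register)
   leaves NONEQUAL untouched, while `(set (reg 42) ...)' marks register
   42 (and any hard registers it overlaps) as not provably equal.  A
   CLOBBER clears the bits instead, since a clobbered value is dead and
   need not match.  */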

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know
         the value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that
   edge if it exists, NULL otherwise.  */
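
/* For illustration: if E->SRC ends in `if (x == 0)' and B, reached
   without X being modified, tests `if (x == 0)' again, then the
   outcome of B's branch is fully determined on entry via E; provided
   B has no side effects, E can be redirected straight to the
   appropriate successor of B.  */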

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  int i;
  regset nonequal;
  bool failed = false;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
    return NULL;
  if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operands are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example interval comparisons that
     dominate even when the operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short-circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
        return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC ();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear the dead registers from NONEQUAL.  So far we
     don't have liveness information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal in the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove
     equivalence only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
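
/* For illustration, a chain such as

     B -> F1 -> F2 -> T

   where F1 and F2 are forwarder blocks (containing at most a jump) is
   collapsed by redirecting B's edge straight to T; with
   CLEANUP_THREADING the walk may also step over blocks whose
   conditional branch outcome is provably known (see thread_jump).  The
   counter bounds the walk by n_basic_blocks so that infinite loops in
   the CFG cannot make the walk cycle forever.  */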

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge e, next, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we may succeed in forwarding the
         fallthru edge to the same place as the branch edge of a
         conditional branch, turning the conditional branch into an
         unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      target = first = e->dest;
      counter = 0;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;

          if (FORWARDER_BLOCK_P (target)
              && target->succ->dest != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              if (target == target->succ->dest)
                counter = n_basic_blocks;
              new_target = target->succ->dest;
            }

          /* Allow threading over at most one edge at a time to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = xmalloc (sizeof (*threaded_edges)
                                              * n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  if (nthreaded_edges >= n_basic_blocks)
                    abort ();
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          /* Avoid killing loop pre-headers, as they are where the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of the block and the CODE_LABEL of the loop; for
             non-forwarders it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize)
            {
              rtx insn = (target->succ->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

              if (!NOTE_P (insn))
                insn = NEXT_INSN (insn);

              for (; insn && !LABEL_P (insn) && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (NOTE_P (insn)
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (NOTE_P (insn))
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && NOTE_P (insn)
                  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          /* Don't force the redirection if the target is the exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

          do
            {
              edge t;

              first->count -= edge_count;
              if (first->count < 0)
                first->count = 0;
              first->frequency -= edge_frequency;
              if (first->frequency < 0)
                first->frequency = 0;
              if (first->succ->succ_next)
                {
                  edge e;
                  int prob;
                  if (n >= nthreaded_edges)
                    abort ();
                  t = threaded_edges[n++];
                  if (t->src != first)
                    abort ();
                  if (first->frequency)
                    prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
                  else
                    prob = 0;
                  if (prob > t->probability)
                    prob = t->probability;
                  t->probability -= prob;
                  prob = REG_BR_PROB_BASE - prob;
                  if (prob <= 0)
                    {
                      first->succ->probability = REG_BR_PROB_BASE;
                      first->succ->succ_next->probability = 0;
                    }
                  else
                    for (e = first->succ; e; e = e->succ_next)
                      e->probability = ((e->probability * REG_BR_PROB_BASE)
                                        / (double) prob);
                  update_br_prob_note (first);
                }
              else
                {
                  /* It is possible that, as a result of threading, we've
                     removed the edge because it was threaded to the
                     fallthru edge.  Avoid getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges[n]->src)
                    n++;
                  t = first->succ;
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
        }
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
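
/* For illustration: if A ends in `goto B' and nothing falls through
   into A, the insns of A can be lifted out of the chain and placed
   immediately before B, after which A's jump becomes a fallthru and
   the two blocks are merged.  */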

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (a->partition != b->partition
          || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  if (!BARRIER_P (barrier))
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
    abort ();

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
          || a->partition != b->partition))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the
     jump table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It might be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
          || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
          || b->partition != c->partition))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if
     expensive transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure not to munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
\f

/* Remove the memory attributes of a MEM expression
   if they are not equal.  */
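
/* For example, when crossjumping two MEMs that refer to different
   source-level objects, their MEM_EXPR and MEM_OFFSET cannot both be
   kept; the attributes are dropped (or widened, in the case of size
   and alignment) so the merged insn describes a superset of both
   accesses.  */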

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
          set_mem_size (y, MEM_SIZE (x));

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* The two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}


/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);
          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }
    }

  return false;
}
\f
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
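
/* For illustration (with hypothetical insn tails): if both blocks end in

     bb1: ... ; r = x + y ; mem[a] = r ; goto L
     bb2: ... ; r = x + y ; mem[a] = r ; goto L

   the matched length is 2 and *F1/*F2 point at the first matched insn
   of each block, from which the caller can redirect one block into the
   other's tail.  */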

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
                      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
        i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
        i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      if (!insns_match_p (mode, i1, i2))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as possible.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
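
/* For illustration: two conditional branches match not only when they
   test the same condition with the same branch and fallthru targets,
   but also when one tests the reversed condition with the targets
   swapped, e.g. `if (a < b) goto X' falling through to Y matches
   `if (a >= b) goto Y' falling through to X.  */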

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (bb1->succ && !bb1->succ->succ_next
      && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (bb2->succ && !bb2->succ->succ_next
            && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
          || !bb2->succ->succ_next
          || bb2->succ->succ_next->succ_next
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = f1->dest->succ;

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && !optimize_size
          && maybe_hot_bb_p (bb1)
          && maybe_hot_bb_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

#ifndef CASE_DROPS_THROUGH
  /* Check whether there are tablejumps at the end of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
        && tablejump_p (BB_END (bb2), &label2, &table2)
        && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
        /* The labels should never be the same rtx.  If they really are
           the same, the jump tables are the same too.  So disable
           crossjumping of blocks BB1 and BB2 because when deleting the
           common insns at the end of BB1 by delete_basic_block () the
           jump table would be deleted too.  */
        /* If LABEL2 is referenced in BB1->END, do not do anything
           because we would lose information when replacing
           LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
        if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
          {
            /* Set IDENTICAL to true when the tables are identical.  */
            bool identical = false;
            rtx p1, p2;

            p1 = PATTERN (table1);
            p2 = PATTERN (table2);
            if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
              {
                identical = true;
              }
            else if (GET_CODE (p1) == ADDR_DIFF_VEC
                     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
              {
                int i;

                identical = true;
                for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                    identical = false;
              }

            if (identical)
              {
                replace_label_data rr;
                bool match;

                /* Temporarily replace references to LABEL1 with LABEL2
                   in BB1->END so that we can compare the instructions.  */
                rr.r1 = label1;
                rr.r2 = label2;
                rr.update_label_nuses = false;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
                if (dump_file && match)
                  fprintf (dump_file,
                           "Tablejumps in bb %i and %i match.\n",
                           bb1->index, bb2->index);

                /* Set the original label in BB1->END because when deleting
                   a block whose end is a tablejump, the tablejump referenced
                   from the instruction is deleted too.  */
                rr.r1 = label2;
                rr.r2 = label1;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                return match;
              }
          }
        return false;
      }
  }
#endif

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2
      || nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? fallthru1->dest->succ->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? fallthru2->dest->succ->dest : fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of the edges as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
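
/* For illustration, with a common tail T in both predecessors:

     src1: ... ; T ; goto D          src1: ... ; goto <split point>
     src2: ... ; T ; goto D    =>    src2: ... ; T ; goto D

   SRC2 is split just before its copy of T (unless T already starts the
   block) and SRC1's edge is redirected to the split point, so T is
   emitted only once.  */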

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    e1 = src1->pred, src1 = e1->src;

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    e2 = src2->pred, src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

#ifndef CASE_DROPS_THROUGH
  /* Here we know that the insns at the end of SRC1 which are common with
     SRC2 will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2, they have already
     been compared for equivalence in outgoing_edges_match (), so replace
     the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
        && tablejump_p (BB_END (src2), &label2, &table2)
        && label1 != label2)
      {
        replace_label_data rr;
        rtx insn;

        /* Replace references to LABEL1 with LABEL2.  */
        rr.r1 = label1;
        rr.r2 = label2;
        rr.update_label_nuses = true;
        for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
          {
            /* Do not replace the label in SRC1->END because when deleting
               a block whose end is a tablejump, the tablejump referenced
               from the instruction is deleted too.  */
            if (insn != BB_END (src1))
              for_each_rtx (&insn, replace_label, &rr);
          }
      }
  }
#endif

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = d2->succ->dest;
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          s->dest->succ->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          s2->dest->succ->count -= s2->count;
          if (s2->dest->succ->count < 0)
            s2->dest->succ->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = redirect_from->succ->dest;

  redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;
  int n = 0, max;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (bb->pred->src->partition != bb->pred->pred_next->src->partition
          || bb->pred->crossing_edge))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
  for (e = bb->pred; e; e = e->pred_next, n++)
    {
      if (e->flags & EDGE_FALLTHRU)
        fallthru = e;
      if (n > max)
        return false;
    }

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;
          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(fallthru->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              nexte = bb->pred;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need to check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (e->src->succ != e)
        continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
        {
          nexte2 = e2->pred_next;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              nexte = bb->pred;
              break;
            }
        }
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */
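
/* The main loop below repeats the per-block transformations (dead block
   removal, label deletion, block merging, condjump simplification, edge
   forwarding and crossjumping) until a full pass over the function makes
   no further change.  The BB_DIRTY flags and FIRST_PASS keep repeated
   passes from re-examining block pairs that cannot have changed.  */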

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);

          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;

              /* Delete trivially dead basic blocks.  */
              while (b->pred == NULL)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }

              /* Remove code labels no longer used.  */
              if (b->pred->pred_next == NULL
                  && (b->pred->flags & EDGE_FALLTHRU)
                  && !(b->pred->flags & EDGE_COMPLEX)
                  && LABEL_P (BB_HEAD (b))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THROUGH, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (b->pred->src == ENTRY_BLOCK_PTR
                      || !JUMP_P (BB_END (b->pred->src))
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (b->pred->src))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* If the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }

              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && b->pred->pred_next == NULL
                  && (b->pred->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (b->succ->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (b->pred, b->succ->dest);
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }

              if ((s = b->succ) != NULL
                  && s->succ_next == NULL
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && c->pred->pred_next == NULL
                  && b != c)
                {
                  /* When not in cfg_layout mode use code aware of reordering
                     insns.  This code possibly creates new basic blocks, so
                     it does not fit the merge_blocks interface and is kept
                     here in the hope that it will become useless once more
                     of the compiler is transformed to use cfg_layout
                     mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (b->succ
                  && ! b->succ->succ_next
                  && b->succ->dest != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (bb->succ
          && !bb->succ->succ_next
          && can_merge_blocks_p (bb, bb->succ->dest))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, bb->succ->dest);
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}
\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */
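
/* The overall sequence here is: delete unreachable blocks, compact the
   block numbering, then alternate try_optimize_cfg with trivially-dead
   insn removal (or liveness updating, under CLEANUP_UPDATE_LIFE) until
   neither makes further progress.  */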

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead code
             removal that in turn may introduce more opportunities for
             cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & CLEANUP_NO_INSN_DEL)
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}