gcc/tree-cfgcleanup.c
/* CFG cleanup for trees.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "toplev.h"
#include "flags.h"
#include "function.h"
#include "ggc.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

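/* For illustration: the cleanups below record every block they touch, e.g.

     bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);

   and cleanup_tree_cfg_1 keeps draining this bitmap, re-cleaning each
   recorded block, until no altered blocks remain.  */
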
/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (VEC(edge,gc) *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            tree lhs = gimple_cond_lhs (stmt);
            tree rhs = gimple_cond_rhs (stmt);
            /* For conditions try harder and look up single-argument
               PHI nodes.  Only do so from the same basic block though,
               as other basic blocks may be dead already.  */
            if (TREE_CODE (lhs) == SSA_NAME
                && !name_registered_for_update_p (lhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (lhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  lhs = PHI_ARG_DEF (def_stmt, 0);
              }
            if (TREE_CODE (rhs) == SSA_NAME
                && !name_registered_for_update_p (rhs))
              {
                gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
                if (gimple_code (def_stmt) == GIMPLE_PHI
                    && gimple_phi_num_args (def_stmt) == 1
                    && gimple_bb (def_stmt) == gimple_bb (stmt)
                    && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
                        || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
                                                                       0))))
                  rhs = PHI_ARG_DEF (def_stmt, 0);
              }
            val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                   boolean_type_node, lhs, rhs);
            break;
          }

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

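/* For illustration: after propagation a block may end in

     if (1 != 0) goto <L1>; else goto <L2>;

   fold_binary_loc reduces the condition to constant true, find_taken_edge
   picks the edge to <L1>, all other outgoing edges (and blocks reachable
   only through them) are removed, and the GIMPLE_COND itself is deleted,
   leaving a plain fallthru edge to <L1>.  */
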
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

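/* For illustration: a computed goto "goto *x_1;" whose argument has been
   folded to a constant label address (an ADDR_EXPR of the LABEL_DECL for
   <L5>, i.e. the value of &&L5 in the source) keeps only the outgoing edge
   to the block holding <L5>; that edge has EDGE_ABNORMAL cleared and
   EDGE_FALLTHRU set, and the goto is deleted since the CFG edge now
   carries all the information.  */
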
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   ENTRY_BLOCK_PTR.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

#if ENABLE_CHECKING
  gcc_assert (bb != ENTRY_BLOCK_PTR);
#endif

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

        /* ??? For now, hope there's a corresponding debug
           assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

      if (dest->loop_father->header == dest)
        return false;
    }
  return true;
}

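/* For illustration: a forwarder block looks like

     <bb 4>:
     <L7>:
     # DEBUG x => x_1
     (single normal outgoing edge to <bb 6>)

   It contains at most labels and debug statements, so control merely
   passes through it to the single successor; with PHI_WANTED it must
   additionally carry (or not carry) PHI nodes.  */
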
/* Return true if BB has at least one abnormal incoming edge.  */

static inline bool
has_abnormal_incoming_edge_p (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_ABNORMAL)
      return true;

  return false;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

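/* For illustration: with edges E1 and E2 both entering a DEST containing

     # x_3 = PHI <a_1(E1), a_1(E2), b_2(E3)>
     # y_4 = PHI <c_5(E1), c_5(E2), c_5(E3)>

   the E1 and E2 alternatives agree in every PHI, so the function returns
   true; had any PHI carried different arguments on E1 and E2, it would
   return false.  */
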
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during removal of the PHI node at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge into DEST, the problems would occur
     anyway, since cleanup_dead_labels would then merge the labels for
     two different EH regions, and the rest of the exception handling
     code does not like that.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no PHI nodes in DEST.  */
  if (has_abnormal_incoming_edge_p (bb)
      && (has_abnormal_incoming_edge_p (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has just a single
     predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

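/* For illustration: removing forwarder BB rewires its predecessors
   directly to DEST,

     P1 ---\                       P1 ---\
            BB ---> DEST    ==>            DEST
     P2 ---/                       P2 ---/

   while labels that must survive (nonlocal, forced, user-defined or EH
   landing pad labels) are moved into DEST and DEST's immediate dominator
   is recomputed.  */
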
/* STMT is a call that has been discovered noreturn.  Fix up the CFG
   and remove the LHS.  Return true if something changed.  */

bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split the basic block if STMT is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    split_block (bb, stmt);

  changed |= remove_fallthru_edge (bb->succs);

  /* If there is an LHS, remove it.  */
  if (gimple_call_lhs (stmt))
    {
      tree op = gimple_call_lhs (stmt);
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to remove the SSA name to avoid checking errors.
         All uses are dominated by the noreturn call and thus will
         be removed afterwards.
         We proactively remove affected non-PHI statements to keep
         fixup_cfg from trying to update them and crashing.  */
      if (TREE_CODE (op) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          gimple use_stmt;
          bitmap_iterator bi;
          unsigned int bb_index;

          bitmap blocks = BITMAP_ALLOC (NULL);

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (gimple_code (use_stmt) != GIMPLE_PHI)
                bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, error_mark_node);
            }
          EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
            delete_basic_block (BASIC_BLOCK (bb_index));
          BITMAP_FREE (blocks);
          release_ssa_name (op);
        }
      update_stmt (stmt);
      changed = true;
    }
  return changed;
}

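/* For illustration: if foo () is discovered to be noreturn,

     x_1 = foo ();          becomes        foo ();
     y_2 = x_1 + 1;                        (trailing code unreachable)

   the block is split after the call if needed, the fallthru edge is
   removed, the now-meaningless LHS x_1 is dropped, and the blocks holding
   its non-PHI uses are deleted as unreachable.  */
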

/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
        bb = gimple_bb (stmt);
        /* BB might be deleted at this point, so first verify that
           BB is present in the CFG.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= n_basic_blocks
            || BASIC_BLOCK (bb->index) != bb
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed |= fixup_noreturn_call (stmt);
      }

  return changed;
}

/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it.  */

static bool
cleanup_omp_return (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  basic_block control_bb;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_OMP_RETURN
      || !single_pred_p (bb))
    return false;

  control_bb = single_pred (bb);
  stmt = last_stmt (control_bb);

  if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
    return false;

  /* The block with the control statement normally has two entry edges -- one
     from entry, one from continue.  If continue is removed, return is
     unreachable, so we remove it here as well.  */
  if (EDGE_COUNT (control_bb->preds) == 2)
    return false;

  gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
  remove_edge_and_dominated_blocks (single_pred_edge (bb));
  return true;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = false;

  if (cleanup_omp_return (bb))
    return true;

  retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the CFG cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of the edge-to-CASE_LABEL_EXPR mapping.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any
         noreturn calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs = BITMAP_ALLOC (NULL);
  fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the CFG that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable by back edges from the latch).  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  loops_state_clear (LOOPS_NEED_FIXUP);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Merge the PHI nodes at BB into those at BB's sole successor.  */

static void
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              for (i = 0; VEC_iterate (edge_var_map, head, i, vm); ++i)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || has_abnormal_incoming_edge_p (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  while (current != worklist)
    {
      bb = *--current;
      remove_forwarder_block_with_phi (bb);
    }

  free (worklist);
  return 0;
}

static bool
gate_merge_phi (void)
{
  return true;
}

struct gimple_opt_pass pass_merge_phi =
{
 {
  GIMPLE_PASS,
  "mergephi",                           /* name */
  gate_merge_phi,                       /* gate */
  merge_phi_nodes,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_MERGE_PHI,                    /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_ggc_collect     /* todo_flags_finish */
  | TODO_verify_ssa
 }
};