/* CFG cleanup for trees.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "function.h"
#include "ggc.h"
#include "langhooks.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */
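/* A complex (EH or abnormal) fallthru edge cannot simply be deleted;
   for such an edge we only clear the EDGE_FALLTHRU flag and keep the
   edge itself.  */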

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */
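/* For example (an illustrative sketch, not the output of any particular
   dump): if the condition of a GIMPLE_COND folds to a constant, as in

       if (1 != 0) goto <L1>; else goto <L2>;

   the condition statement is removed, the edge to <L2> (and any blocks
   dominated by it) is deleted, and the surviving edge to <L1> becomes
   a fallthru edge.  */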

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                 boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to
   ENTRY_BLOCK_PTR.  */
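/* A typical forwarder block (a rough sketch) is an otherwise empty
   block such as

       <bb 5>:
       goto <bb 7>;

   whose incoming edges can be redirected straight to its successor.  */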

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR);

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels;
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

      if (dest->loop_father->header == dest)
        return false;
    }
  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */
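/* For instance (an illustrative sketch), given

       # x_3 = PHI <a_1(E1), a_1(E2), b_2(E3)>

   the alternatives for E1 and E2 are equal, so the two edges can be
   treated interchangeably when redirecting them.  */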

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
/* Removes forwarder block BB.  Returns false if this failed.  */
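/* In outline (a sketch of the steps below): redirect every incoming
   edge of BB to its single successor DEST, move labels and movable
   debug statements from BB into DEST, update dominator information,
   and finally delete BB itself.  */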

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However, it may happen that an infinite loop is created
     afterwards due to the removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during removal of the phi node at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

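  /* Debug statements may only be moved into DEST if DEST has a single
     predecessor; on any other incoming path the recorded variable
     bindings could be wrong.  */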
  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered to be noreturn.  Fix up the
   CFG and remove its LHS.  Return true if something changed.  */
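/* For example (an illustrative sketch): once foo is discovered to be
   noreturn, a mid-block call

       x_1 = foo ();

   has its block split after the call, the fallthru edge removed, and
   the now-unusable LHS x_1 dropped from the statement.  */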

bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    split_block (bb, stmt);

  changed |= remove_fallthru_edge (bb->succs);

  /* If there is an LHS, remove it.  */
  if (gimple_call_lhs (stmt))
    {
      tree op = gimple_call_lhs (stmt);
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to remove the SSA name to avoid checking errors.
         All uses are dominated by the noreturn call and thus will
         be removed afterwards.
         We proactively remove affected non-PHI statements to keep
         fixup_cfg from trying to update them and crashing.  */
      if (TREE_CODE (op) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          gimple use_stmt;
          bitmap_iterator bi;
          unsigned int bb_index;

          bitmap blocks = BITMAP_ALLOC (NULL);

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (gimple_code (use_stmt) != GIMPLE_PHI)
                bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, error_mark_node);
            }
          EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
            delete_basic_block (BASIC_BLOCK (bb_index));
          BITMAP_FREE (blocks);
          release_ssa_name (op);
        }
      update_stmt (stmt);
      changed = true;
    }
  /* Similarly remove VDEF if there is any.  */
  else if (gimple_vdef (stmt))
    update_stmt (stmt);
  return changed;
}


/* Split basic blocks on mid-block calls that are now known not to
   return, and remove the unreachable code.  */
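/* Candidate statements come from MODIFIED_NORETURN_CALLS (cfun), a
   queue of calls whose noreturn status was discovered after the CFG
   was built; the queue is maintained elsewhere, outside this file.  */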

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (vec_safe_length (MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = MODIFIED_NORETURN_CALLS (cfun)->pop ();
        bb = gimple_bb (stmt);
        /* BB might have been deleted at this point, so first verify
           that BB is still present in the CFG.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= last_basic_block
            || BASIC_BLOCK (bb->index) != bb
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed |= fixup_noreturn_call (stmt);
      }

  return changed;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if
   anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the CFG cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge-to-CASE_LABEL_EXPR mappings.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered
         any noreturn calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the CFG that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable by back edges from the latch).
     Also a formerly irreducible loop can become reducible; in this
     case force a full rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Clean up the CFG and repair loop structures.  */
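/* Passes that have mutated the CFG usually reach this entry point via
   the TODO_cleanup_cfg flag handled by the pass manager rather than by
   calling it directly (a usage note, not a requirement).  */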

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However, it may happen that an infinite loop is created
     afterwards due to the removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return false;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect E to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector *head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              FOR_EACH_VEC_SAFE_ELT (head, i, vm)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

   goto <bb 9> (<L9>);

   <L8>:;
   tem_17 = foo ();

   # tem_6 = PHI <tem_17(8), tem_23(7)>;
   <L9>:;

   # tem_3 = PHI <tem_6(9), tem_2(5)>;
   <L10>:;

   Then we merge the first PHI node into the second one like so:

   goto <bb 9> (<L10>);

   <L8>:;
   tem_17 = foo ();

   # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
   <L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

static bool
gate_merge_phi (void)
{
  return true;
}

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_ssa, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  bool gate () { return gate_merge_phi (); }
  unsigned int execute () { return merge_phi_nodes (); }

}; // class pass_merge_phi

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: clean up the CFG just before expanding trees to RTL.  This is
   just a round of label cleanups and case node grouping because such
   cleanups may be necessary after the tree optimizers have run.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = 0;
  if (cleanup_tree_cfg ())
    todo |= TODO_update_ssa;
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
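  /* For -fcompare-debug, append a dump of the final decls to the dump
     file, with names and addresses canonicalized so that the outputs
     of the two compilations being compared stay comparable.  */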
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () {
    return execute_cleanup_cfg_post_optimizing ();
  }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}
