/* CFG cleanup for trees.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "function.h"
#include "ggc.h"
#include "langhooks.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */
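
/* An illustrative sketch (block numbers made up, not from a real dump):
   if BB ends in

     if (1 != 0) goto <bb 3>; else goto <bb 4>;

   the condition folds to a constant, so only the edge to <bb 3> can be
   taken.  The edge to <bb 4> is removed together with the blocks it
   dominates, the GIMPLE_COND itself is deleted, and the surviving edge
   becomes a fallthru edge.  */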

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                 boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block, and we can safely insert a label at the
   start of the successor block.

   As a precondition, we require that BB not be equal to
   ENTRY_BLOCK_PTR.  */
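
/* As an illustrative sketch (block numbers made up), a typical
   forwarder block looks like

     <bb 5>:
     <L3>:
       goto <bb 7>;

   i.e. nothing but an optional label: control merely passes through,
   so BB's predecessors can be redirected straight to its successor.  */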

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR);

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

        /* ??? For now, hope there's a corresponding debug
           assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

      if (dest->loop_father->header == dest)
        return false;
    }
  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */
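
/* A sketch with made-up names: given

     x_1 = PHI <a_2(3), a_2(4), b_5(6)>

   in DEST, the alternatives for the edges from <bb 3> and <bb 4> are
   equal, while the alternative for the edge from <bb 6> differs.  */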

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */
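
/* Illustrative sketch (block numbers made up): if <bb 4> is a
   forwarder to <bb 6>, every edge  pred -> <bb 4>  is redirected to
   pred -> <bb 6>, the labels and any movable debug stmts of <bb 4>
   migrate to <bb 6>, and <bb 4> is deleted.  */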

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that an infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during the out-of-SSA removal of the
     phi node due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Fix up the CFG
   and remove the LHS.  Return true if something changed.  */
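
/* For example (a sketch; foo and x_1 are made-up names), once  foo  is
   discovered noreturn in

     x_1 = foo ();
     use (x_1);

   the LHS  x_1  is dropped from the call, and since nothing after the
   call can execute, its fallthru edge and the blocks using  x_1  are
   removed.  */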

bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    split_block (bb, stmt);

  changed |= remove_fallthru_edge (bb->succs);

  /* If there is an LHS, remove it.  */
  if (gimple_call_lhs (stmt))
    {
      tree op = gimple_call_lhs (stmt);
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to remove the SSA name to avoid checking errors.
         All uses are dominated by the noreturn call and thus will
         be removed afterwards.
         We proactively remove affected non-PHI statements to keep
         fixup_cfg from trying to update them and crashing.  */
      if (TREE_CODE (op) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          gimple use_stmt;
          bitmap_iterator bi;
          unsigned int bb_index;

          bitmap blocks = BITMAP_ALLOC (NULL);

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (gimple_code (use_stmt) != GIMPLE_PHI)
                bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, error_mark_node);
            }
          EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
            delete_basic_block (BASIC_BLOCK (bb_index));
          BITMAP_FREE (blocks);
          release_ssa_name (op);
        }
      update_stmt (stmt);
      changed = true;
    }
  /* Similarly remove the VDEF if there is any.  */
  else if (gimple_vdef (stmt))
    update_stmt (stmt);
  return changed;
}


/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */
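
/* Sketch with made-up names: if <bb 2> contains

     a_1 = bar ();   <-- bar has meanwhile been discovered noreturn
     b_2 = a_1 + 1;

   the block is split after the call, and the half holding  b_2  becomes
   unreachable and is removed by fixup_noreturn_call.  */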

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (vec_safe_length (MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = MODIFIED_NORETURN_CALLS (cfun)->pop ();
        bb = gimple_bb (stmt);
        /* BB might be deleted at this point, so first verify that
           BB is still present in the cfg.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= last_basic_block
            || BASIC_BLOCK (bb->index) != bb
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed |= fixup_noreturn_call (stmt);
      }

  return changed;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if
   anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the cfg cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edges to CASE_LABEL_EXPRs.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any
         noreturn calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the cfg that
     originally were inside a loop end up outside of it due to edge
     removal (since they become unreachable via back edges from the
     latch).  Also a former irreducible loop can become reducible; in
     this case force a full rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that an infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return false;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector *head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              FOR_EACH_VEC_SAFE_ELT (head, i, vm)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

static bool
gate_merge_phi (void)
{
  return true;
}

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_ssa, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  bool gate () { return gate_merge_phi (); }
  unsigned int execute () { return merge_phi_nodes (); }

}; // class pass_merge_phi

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = 0;
  if (cleanup_tree_cfg ())
    todo |= TODO_update_ssa;
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute ()
  {
    return execute_cleanup_cfg_post_optimizing ();
  }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}