/* CFG cleanup for trees.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "langhooks.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */
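
/* For illustration (a hypothetical fragment): when BB ends in

     if (0 != 0) goto <L1>; else goto <L2>;

   the condition folds to a constant, find_taken_edge picks the edge
   to <L2>, the edge to <L1> (and any blocks it dominates) is removed,
   and the GIMPLE_COND itself is deleted, leaving a plain fallthru.  */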

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                 boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Clean up the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */
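
/* For example (a sketch): an indirect call conservatively marked as
   control altering can drop the flag once propagation shows that it
   is const or pure (and not infinitely looping) or a leaf call, per
   the test below.  */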

static void
cleanup_call_ctrl_altering_flag (gimple bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl-altering flag of a call that ends BB.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB is not equal to
   the entry block.  */
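
/* A typical forwarder block looks like this (a sketch; block and
   label numbers are hypothetical):

     <bb 4>:
     L3:
     goto <bb 7>;

   i.e. nothing but labels and debug stmts, with a single, normal,
   non-self successor edge.  */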

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If the goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels;
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb->loop_father->header == bb)
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If BB doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */
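
/* For instance (hypothetical SSA names), if DEST contains

     # x_1 = PHI <a_5 (E1), a_5 (E2), b_3 (E3)>

   then the alternatives for E1 and E2 are equal, so the two edges can
   be treated interchangeably when redirecting into DEST.  */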

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */
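
/* In outline (a sketch): when BB contains nothing but a jump to DEST,
   every edge entering BB is redirected straight to DEST, labels worth
   keeping and movable debug stmts migrate into DEST, the PHI nodes in
   DEST gain arguments for any newly created edges, dominators and the
   loop latch are fixed up, and BB is deleted.  */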

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during removal of the phi nodes at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge into DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different EH regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust the latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Fix up the CFG
   and remove LHS.  Return true if something changed.  */
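
/* Hypothetical example: once

     x_3 = f_1 ();
     y_4 = x_3 + 1;

   learns that f_1 points to a noreturn function, the block is split
   after the call, the fallthru edge (and with it the unreachable
   tail) is removed, and x_3 is turned into the default definition of
   an artificial variable so that SSA verification does not see a use
   without a reaching definition.  */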

bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split the basic block if STMT is not the last statement.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after STMT, as that can result in -fcompare-debug
             failures.  Remove the debug stmts instead;
             they should all be unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        split_block (bb, stmt);
    }

  /* If there is an LHS, remove it.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs), NULL);
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  return remove_fallthru_edge (bb->succs);
}


/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bb_on_noreturn_calls (basic_block bb)
{
  bool changed = false;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
        continue;

      if (gimple_call_noreturn_p (stmt))
        changed |= fixup_noreturn_call (stmt);
    }

  if (changed)
    bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  return changed;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the cfg cleanups while anything changes.  */
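
/* The driver below is a simple worklist algorithm: every block whose
   last statement, predecessor set or successor set changed is recorded
   in CFGCLEANUP_ALTERED_BBS and revisited until the bitmap drains,
   which yields a fixed point.  */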

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of the edge to CASE_LABEL_EXPR mapping.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB_FN,
     since the basic blocks may get removed.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        {
          retval |= cleanup_tree_cfg_bb (bb);
          retval |= split_bb_on_noreturn_calls (bb);
        }
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bb_on_noreturn_calls, in case we have altered any noreturn
         calls.  */
      retval |= split_bb_on_noreturn_calls (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the cfg that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable by back edges from the latch).
     Also a former irreducible loop can become reducible; in this case
     force a full rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */
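
/* Sketch (with hypothetical SSA names): if BB holds only

     # t_6 = PHI <a_2 (3), b_4 (5)>

   and DEST holds "# s_1 = PHI <t_6 (bb), c_7 (8)>", each edge into BB
   is redirected to DEST and s_1's PHI gains the argument that t_6
   would have carried on that edge (a_2 from bb 3, b_4 from bb 5).  */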

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect E to DEST as well.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust the latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

   Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: clean up the CFG just before expanding trees to RTL.  This is
   just a round of label cleanups and case node grouping because after
   the tree optimizers have run such cleanups may be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = 0;
  if (cleanup_tree_cfg ())
    todo |= TODO_update_ssa;
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}