/* CFG cleanup for trees.
   Copyright (C) 2001-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "function.h"
#include "langhooks.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */
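
/* For instance (an illustrative fragment, not a specific testcase),
   once the condition of

     if (0 != 0) goto <bb 3>; else goto <bb 4>;

   folds to a constant, only the edge to <bb 4> can ever be taken: the
   other outgoing edges (and any blocks they alone dominate) are removed
   and the GIMPLE_COND itself is deleted, leaving a fallthru edge.  */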

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      location_t loc;

      fold_defer_overflow_warnings ();
      loc = gimple_location (stmt);
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          val = fold_binary_loc (loc, gimple_cond_code (stmt),
                                 boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (stmt);
          break;

        default:
          val = NULL_TREE;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */
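
/* For instance (an illustrative scenario, cf. PR lto/60449): a call may
   have been marked control-altering before the callee's flags were fully
   known; once gimple_call_flags shows the call is const/pure (and not
   looping) or leaf, it can no longer transfer control abnormally, so the
   stale flag is dropped to keep the cfg consistent.  */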

static void
cleanup_call_ctrl_altering_flag (gimple bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl-altering flag for the call that ends the bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block, and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to
   the entry block.  */
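
/* Intuitively, a forwarder block looks like this (illustrative only):

     <bb 5>:                ; nothing but labels/debug stmts inside
       goto <bb 7>;         ; single, normal successor

   and can typically be bypassed by redirecting its incoming edges
   straight to <bb 7>.  */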

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block at -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb->loop_father->header == bb)
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */
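
/* E.g. (a made-up PHI), with E1 coming from <bb 4> and E2 from <bb 6>:

     x_1 = PHI <a_2(4), b_3(5), a_2(6)>

   The alternatives for E1 and E2 are both a_2, so as far as this PHI is
   concerned the two edges are interchangeable.  */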

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */
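
/* Schematically (an illustrative sketch):

     <bb 2> -> <bb 5> -> <bb 7>            <bb 2> -> <bb 7>
     <bb 3> -> <bb 5>               ==>    <bb 3> -> <bb 7>

   where <bb 5> is the forwarder: its incoming edges are redirected to
   <bb 7>, labels and (when possible) debug stmts are moved there, and
   <bb 5> is then deleted.  */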

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && (DECL_NONLOCAL (gimple_label_label (label))
          || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi nodes at
     out-of-ssa time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (label);
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Fixup the CFG
   and remove LHS.  Return true if something changed.  */
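
/* For example (hypothetical GIMPLE):

     x_1 = foo ();       ; foo has been discovered noreturn
     bar (x_1);

   The block is split after the call, the fallthru edge is removed, and
   the LHS x_1 is dropped; any remaining uses of x_1 are dominated by the
   call and thus unreachable.  */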

bool
fixup_noreturn_call (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    split_block (bb, stmt);

  changed |= remove_fallthru_edge (bb->succs);

  /* If there is an LHS, remove it.  */
  if (gimple_call_lhs (stmt))
    {
      tree op = gimple_call_lhs (stmt);
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to remove the SSA name to avoid checking errors.
         All uses are dominated by the noreturn call and thus will
         be removed afterwards.
         We proactively remove affected non-PHI statements to keep
         fixup_cfg from trying to update them and crashing.  */
      if (TREE_CODE (op) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          gimple use_stmt;
          bitmap_iterator bi;
          unsigned int bb_index;

          bitmap blocks = BITMAP_ALLOC (NULL);

          FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
            {
              if (gimple_code (use_stmt) != GIMPLE_PHI)
                bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, error_mark_node);
            }
          EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
            delete_basic_block (BASIC_BLOCK_FOR_FN (cfun, bb_index));
          BITMAP_FREE (blocks);
          release_ssa_name (op);
        }
      update_stmt (stmt);
      changed = true;
    }
  return changed;
}


/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */
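
/* E.g. (illustrative): with a call to abort in the middle of a block,

     a = 1;  abort ();  a = 2;

   the block is split after the call and the trailing, now unreachable
   code is removed via fixup_noreturn_call / remove_fallthru_edge.  */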

static bool
split_bb_on_noreturn_calls (basic_block bb)
{
  bool changed = false;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
        continue;

      if (gimple_call_noreturn_p (stmt))
        changed |= fixup_noreturn_call (stmt);
    }

  if (changed)
    bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  return changed;
}

/* Tries to clean up the cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
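  /* For instance (a made-up fragment): after merging, a single-valued
     PHI such as
       x_1 = PHI <0(3)>
     degenerates to a constant, so a later "if (x_1 != 0)" can fold and
     be cleaned by cleanup_control_expr_graph on a following iteration.  */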
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the cfg cleanups while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of the edge to CASE_LABEL_EXPR mapping.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB_FN,
     since the basic blocks may get removed.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        {
          retval |= cleanup_tree_cfg_bb (bb);
          retval |= split_bb_on_noreturn_calls (bb);
        }
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bb_on_noreturn_calls, in case we have altered any noreturn
         calls.  */
      retval |= split_bb_on_noreturn_calls (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}


/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the cfg that
     originally were inside a loop get out of it due to edge removal
     (since they become unreachable by back edges from the latch).
     Also a former irreducible loop can become reducible - in this case
     force a full rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */
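
/* A sketch of the tricky case (illustrative only): a predecessor of BB
   also reaches DEST directly.  If the PHI arguments at DEST agree for
   both edges, the edge into BB is simply redirected to DEST; otherwise
   the edge is split first, so the differing PHI arguments can be merged
   on the new forwarder block.  */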

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect E to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

   Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = 0;
  if (cleanup_tree_cfg ())
    todo |= TODO_update_ssa;
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}