gcc/cfgloopmanip.c
1 /* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "basic-block.h"
26 #include "cfgloop.h"
27 #include "tree-flow.h"
28 #include "dumpfile.h"
29
30 static void copy_loops_to (struct loop **, int,
31 struct loop *);
32 static void loop_redirect_edge (edge, basic_block);
33 static void remove_bbs (basic_block *, int);
34 static bool rpe_enum_p (const_basic_block, const void *);
35 static int find_path (edge, basic_block **);
36 static void fix_loop_placements (struct loop *, bool *);
37 static bool fix_bb_placement (basic_block);
38 static void fix_bb_placements (basic_block, bool *, bitmap);
39
40 /* Checks whether basic block BB is dominated by DATA. */
41 static bool
42 rpe_enum_p (const_basic_block bb, const void *data)
43 {
44 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
45 }
46
47 /* Remove basic blocks BBS. NBBS is the number of the basic blocks. */
48
49 static void
50 remove_bbs (basic_block *bbs, int nbbs)
51 {
52 int i;
53
54 for (i = 0; i < nbbs; i++)
55 delete_basic_block (bbs[i]);
56 }
57
58 /* Find the path -- i.e. the basic blocks dominated by edge E -- and put them
59 into array BBS, which will be allocated large enough to contain them.
60 E->dest must have exactly one predecessor for this to work (this is
61 easy to achieve and we do not enforce it here because we do not want to
62 alter anything in this function). The number of basic blocks in the
63 path is returned. */
64 static int
65 find_path (edge e, basic_block **bbs)
66 {
67 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
68
69 /* Find bbs in the path. */
70 *bbs = XNEWVEC (basic_block, n_basic_blocks);
71 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
72 n_basic_blocks, e->dest);
73 }
74
75 /* Fix placement of basic block BB inside loop hierarchy --
76 Let L be the loop to which BB belongs. Then every successor of BB must either
77 1) belong to some superloop of loop L, or
78 2) be a header of a loop K such that K->outer is a superloop of L.
79 Returns true if we had to move BB into another loop to enforce this condition,
80 false if the placement of BB was already correct (provided that placements
81 of its successors are correct). */
82 static bool
83 fix_bb_placement (basic_block bb)
84 {
85 edge e;
86 edge_iterator ei;
87 struct loop *loop = current_loops->tree_root, *act;
88
89 FOR_EACH_EDGE (e, ei, bb->succs)
90 {
91 if (e->dest == EXIT_BLOCK_PTR)
92 continue;
93
94 act = e->dest->loop_father;
95 if (act->header == e->dest)
96 act = loop_outer (act);
97
98 if (flow_loop_nested_p (loop, act))
99 loop = act;
100 }
101
102 if (loop == bb->loop_father)
103 return false;
104
105 remove_bb_from_loops (bb);
106 add_bb_to_loop (bb, loop);
107
108 return true;
109 }
110
111 /* Fix placement of LOOP inside the loop tree, i.e. find the innermost superloop
112 of LOOP into which at least one exit edge of LOOP leads, and set it
113 as the immediate superloop of LOOP. Return true if the immediate superloop
114 of LOOP changed.
115
116 IRRED_INVALIDATED is set to true if a change in the loop structures might
117 invalidate the information about irreducible regions. */
118
119 static bool
120 fix_loop_placement (struct loop *loop, bool *irred_invalidated)
121 {
122 unsigned i;
123 edge e;
124 vec<edge> exits = get_loop_exit_edges (loop);
125 struct loop *father = current_loops->tree_root, *act;
126 bool ret = false;
127
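/* Find the deepest loop that is a common ancestor of LOOP and the loop
   containing some exit destination; this is the innermost superloop of LOOP
   into which an exit edge of LOOP leads. */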
128 FOR_EACH_VEC_ELT (exits, i, e)
129 {
130 act = find_common_loop (loop, e->dest->loop_father);
131 if (flow_loop_nested_p (father, act))
132 father = act;
133 }
134
135 if (father != loop_outer (loop))
136 {
137 for (act = loop_outer (loop); act != father; act = loop_outer (act))
138 act->num_nodes -= loop->num_nodes;
139 flow_loop_tree_node_remove (loop);
140 flow_loop_tree_node_add (father, loop);
141
142 /* The exit edges of LOOP no longer exit its original immediate
143 superloops; remove them from the appropriate exit lists. */
144 FOR_EACH_VEC_ELT (exits, i, e)
145 {
146 /* We may need to recompute irreducible loops. */
147 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
148 *irred_invalidated = true;
149 rescan_loop_exit (e, false, false);
150 }
151
152 ret = true;
153 }
154
155 exits.release ();
156 return ret;
157 }
158
159 /* Fix placements of basic blocks inside the loop hierarchy stored in loops; i.e.
160 enforce the condition stated in the description of fix_bb_placement. We
161 start from basic block FROM that had some of its successors removed, so that
162 its placement no longer has to be correct, and iteratively fix the placement of
163 its predecessors, which may change if the placement of FROM changed. Also fix
164 the placement of subloops of FROM->loop_father, which might also be altered due
165 to this change; the condition for them is similar, except that instead of
166 successors we consider edges coming out of the loops.
167
168 If the changes may invalidate the information about irreducible regions,
169 IRRED_INVALIDATED is set to true.
170
171 If LOOP_CLOSED_SSA_INVALIDATED is non-NULL then all basic blocks with
172 changed loop_father are collected there. */
173
174 static void
175 fix_bb_placements (basic_block from,
176 bool *irred_invalidated,
177 bitmap loop_closed_ssa_invalidated)
178 {
179 sbitmap in_queue;
180 basic_block *queue, *qtop, *qbeg, *qend;
181 struct loop *base_loop, *target_loop;
182 edge e;
183
184 /* We pass through blocks back-reachable from FROM, testing whether some
185 of their successors moved to an outer loop. It may be necessary to
186 iterate several times, but the process is finite, as we stop unless we move
187 the basic block up the loop structure. The whole story is a bit
188 more complicated due to the presence of subloops; those are moved using
189 fix_loop_placement. */
190
191 base_loop = from->loop_father;
192 /* If we are already in the outermost loop, the basic blocks cannot be moved
193 outside of it. If FROM is the header of the base loop, it cannot be moved
194 outside of it, either. In both cases, we can end now. */
195 if (base_loop == current_loops->tree_root
196 || from == base_loop->header)
197 return;
198
199 in_queue = sbitmap_alloc (last_basic_block);
200 bitmap_clear (in_queue);
201 bitmap_set_bit (in_queue, from->index);
202 /* Prevent us from going out of the base_loop. */
203 bitmap_set_bit (in_queue, base_loop->header->index);
204
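/* Use a circular queue with room for every block of BASE_LOOP plus one;
   QBEG and QEND wrap around to QUEUE when they reach QTOP. */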
205 queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
206 qtop = queue + base_loop->num_nodes + 1;
207 qbeg = queue;
208 qend = queue + 1;
209 *qbeg = from;
210
211 while (qbeg != qend)
212 {
213 edge_iterator ei;
214 from = *qbeg;
215 qbeg++;
216 if (qbeg == qtop)
217 qbeg = queue;
218 bitmap_clear_bit (in_queue, from->index);
219
220 if (from->loop_father->header == from)
221 {
222 /* Subloop header, maybe move the loop upward. */
223 if (!fix_loop_placement (from->loop_father, irred_invalidated))
224 continue;
225 target_loop = loop_outer (from->loop_father);
226 }
227 else
228 {
229 /* Ordinary basic block. */
230 if (!fix_bb_placement (from))
231 continue;
232 if (loop_closed_ssa_invalidated)
233 bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
234 target_loop = from->loop_father;
235 }
236
237 FOR_EACH_EDGE (e, ei, from->succs)
238 {
239 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
240 *irred_invalidated = true;
241 }
242
243 /* Something has changed, insert predecessors into queue. */
244 FOR_EACH_EDGE (e, ei, from->preds)
245 {
246 basic_block pred = e->src;
247 struct loop *nca;
248
249 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
250 *irred_invalidated = true;
251
252 if (bitmap_bit_p (in_queue, pred->index))
253 continue;
254
255 /* If it is a subloop, then either it was not moved, or
256 the path up the loop tree from base_loop does not contain
257 it. */
258 nca = find_common_loop (pred->loop_father, base_loop);
259 if (pred->loop_father != base_loop
260 && (nca == base_loop
261 || nca != pred->loop_father))
262 pred = pred->loop_father->header;
263 else if (!flow_loop_nested_p (target_loop, pred->loop_father))
264 {
265 /* If PRED is already higher in the loop hierarchy than the
266 TARGET_LOOP to which we moved FROM, the change of the position
267 of FROM does not affect the position of PRED, so there is no
268 point in processing it. */
269 continue;
270 }
271
272 if (bitmap_bit_p (in_queue, pred->index))
273 continue;
274
275 /* Schedule the basic block. */
276 *qend = pred;
277 qend++;
278 if (qend == qtop)
279 qend = queue;
280 bitmap_set_bit (in_queue, pred->index);
281 }
282 }
283 free (in_queue);
284 free (queue);
285 }
286
287 /* Removes the path beginning at edge E, i.e. removes the basic blocks dominated
288 by E and updates loop structures and dominators. Return true if we were able
289 to remove the path, false otherwise (in which case nothing is affected). */
290 bool
291 remove_path (edge e)
292 {
293 edge ae;
294 basic_block *rem_bbs, *bord_bbs, from, bb;
295 vec<basic_block> dom_bbs;
296 int i, nrem, n_bord_bbs;
297 sbitmap seen;
298 bool irred_invalidated = false;
299 edge_iterator ei;
300 struct loop *l, *f;
301
302 if (!can_remove_branch_p (e))
303 return false;
304
305 /* Keep track of whether we need to update information about irreducible
306 regions. This is the case if the removed area is a part of the
307 irreducible region, or if the set of basic blocks that belong to a loop
308 that is inside an irreducible region is changed, or if such a loop is
309 removed. */
310 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
311 irred_invalidated = true;
312
313 /* We need to check whether basic blocks are dominated by the edge
314 e, but we only have basic block dominators. This is easy to
315 fix -- when e->dest has exactly one predecessor, this corresponds
316 to blocks dominated by e->dest; if not, split the edge. */
317 if (!single_pred_p (e->dest))
318 e = single_pred_edge (split_edge (e));
319
320 /* It may happen that by removing the path we remove one or more of the loops
321 that E->src belongs to. In this case, first unloop the loops, then proceed
322 normally. We may assume that e->dest is not a header of any loop,
323 as it now has exactly one predecessor. */
324 for (l = e->src->loop_father; loop_outer (l); l = f)
325 {
326 f = loop_outer (l);
327 if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
328 unloop (l, &irred_invalidated, NULL);
329 }
330
331 /* Identify the path. */
332 nrem = find_path (e, &rem_bbs);
333
334 n_bord_bbs = 0;
335 bord_bbs = XNEWVEC (basic_block, n_basic_blocks);
336 seen = sbitmap_alloc (last_basic_block);
337 bitmap_clear (seen);
338
339 /* Find "border" blocks -- i.e. those with a predecessor in the removed path. */
340 for (i = 0; i < nrem; i++)
341 bitmap_set_bit (seen, rem_bbs[i]->index);
342 if (!irred_invalidated)
343 FOR_EACH_EDGE (ae, ei, e->src->succs)
344 if (ae != e && ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index)
345 && ae->flags & EDGE_IRREDUCIBLE_LOOP)
346 irred_invalidated = true;
347 for (i = 0; i < nrem; i++)
348 {
349 bb = rem_bbs[i];
350 FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
351 if (ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index))
352 {
353 bitmap_set_bit (seen, ae->dest->index);
354 bord_bbs[n_bord_bbs++] = ae->dest;
355
356 if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
357 irred_invalidated = true;
358 }
359 }
360
361 /* Remove the path. */
362 from = e->src;
363 remove_branch (e);
364 dom_bbs.create (0);
365
366 /* Cancel loops contained in the path. */
367 for (i = 0; i < nrem; i++)
368 if (rem_bbs[i]->loop_father->header == rem_bbs[i])
369 cancel_loop_tree (rem_bbs[i]->loop_father);
370
371 remove_bbs (rem_bbs, nrem);
372 free (rem_bbs);
373
374 /* Find blocks whose dominators may be affected. */
375 bitmap_clear (seen);
376 for (i = 0; i < n_bord_bbs; i++)
377 {
378 basic_block ldom;
379
380 bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
381 if (bitmap_bit_p (seen, bb->index))
382 continue;
383 bitmap_set_bit (seen, bb->index);
384
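/* Collect the dominator sons of BB that are not dominated by FROM; their
   immediate dominators may have changed and will be recomputed. */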
385 for (ldom = first_dom_son (CDI_DOMINATORS, bb);
386 ldom;
387 ldom = next_dom_son (CDI_DOMINATORS, ldom))
388 if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
389 dom_bbs.safe_push (ldom);
390 }
391
392 free (seen);
393
394 /* Recount dominators. */
395 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
396 dom_bbs.release ();
397 free (bord_bbs);
398
399 /* Fix placements of basic blocks inside loops and the placement of
400 loops in the loop tree. */
401 fix_bb_placements (from, &irred_invalidated, NULL);
402 fix_loop_placements (from->loop_father, &irred_invalidated);
403
404 if (irred_invalidated
405 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
406 mark_irreducible_loops ();
407
408 return true;
409 }
410
411 /* Creates place for a new LOOP in loops structure of FN. */
412
413 void
414 place_new_loop (struct function *fn, struct loop *loop)
415 {
416 loop->num = number_of_loops (fn);
417 vec_safe_push (loops_for_fn (fn)->larray, loop);
418 }
419
420 /* Given LOOP structure with filled header and latch, find the body of the
421 corresponding loop and add it to the loop tree. Insert the LOOP as a son of
422 OUTER. */
423
424 void
425 add_loop (struct loop *loop, struct loop *outer)
426 {
427 basic_block *bbs;
428 int i, n;
429 struct loop *subloop;
430 edge e;
431 edge_iterator ei;
432
433 /* Add it to loop structure. */
434 place_new_loop (cfun, loop);
435 flow_loop_tree_node_add (outer, loop);
436
437 /* Find its nodes. */
438 bbs = XNEWVEC (basic_block, n_basic_blocks);
439 n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
440
441 for (i = 0; i < n; i++)
442 {
443 if (bbs[i]->loop_father == outer)
444 {
445 remove_bb_from_loops (bbs[i]);
446 add_bb_to_loop (bbs[i], loop);
447 continue;
448 }
449
450 loop->num_nodes++;
451
452 /* If we find a direct subloop of OUTER, move it to LOOP. */
453 subloop = bbs[i]->loop_father;
454 if (loop_outer (subloop) == outer
455 && subloop->header == bbs[i])
456 {
457 flow_loop_tree_node_remove (subloop);
458 flow_loop_tree_node_add (loop, subloop);
459 }
460 }
461
462 /* Update the information about loop exit edges. */
463 for (i = 0; i < n; i++)
464 {
465 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
466 {
467 rescan_loop_exit (e, false, false);
468 }
469 }
470
471 free (bbs);
472 }
473
474 /* Multiply all frequencies in LOOP by NUM/DEN. */
475
476 void
477 scale_loop_frequencies (struct loop *loop, int num, int den)
478 {
479 basic_block *bbs;
480
481 bbs = get_loop_body (loop);
482 scale_bbs_frequencies_int (bbs, loop->num_nodes, num, den);
483 free (bbs);
484 }
485
486 /* Multiply all frequencies in LOOP by SCALE/REG_BR_PROB_BASE.
487 If ITERATION_BOUND is non-zero, scale even further if loop is predicted
488 to iterate too many times. */
489
490 void
491 scale_loop_profile (struct loop *loop, int scale, gcov_type iteration_bound)
492 {
493 gcov_type iterations = expected_loop_iterations_unbounded (loop);
494 edge e;
495 edge_iterator ei;
496
497 if (dump_file && (dump_flags & TDF_DETAILS))
498 fprintf (dump_file, ";; Scaling loop %i with scale %f, "
499 "bounding iterations to %i from guessed %i\n",
500 loop->num, (double)scale / REG_BR_PROB_BASE,
501 (int)iteration_bound, (int)iterations);
502
503 /* See if loop is predicted to iterate too many times. */
504 if (iteration_bound && iterations > 0
505 && apply_probability (iterations, scale) > iteration_bound)
506 {
507 /* Fixing the loop profile for a different trip count is not trivial; the exit
508 probabilities have to be updated to match and the frequencies propagated down
509 to the loop body.
510
511 We fully update only the simple case of a loop with a single exit that leaves
512 either from the latch or from the BB just before the latch, out of a BB ending
513 in a simple conditional jump. This is OK for use in the vectorizer. */
514 e = single_exit (loop);
515 if (e)
516 {
517 edge other_e;
518 int freq_delta;
519 gcov_type count_delta;
520
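/* Find the other (non-exit) edge leaving the exit block, skipping
   abnormal and fake edges. */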
521 FOR_EACH_EDGE (other_e, ei, e->src->succs)
522 if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
523 && e != other_e)
524 break;
525
526 /* Probability of exit must be 1/iterations. */
527 freq_delta = EDGE_FREQUENCY (e);
528 e->probability = REG_BR_PROB_BASE / iteration_bound;
529 other_e->probability = inverse_probability (e->probability);
530 freq_delta -= EDGE_FREQUENCY (e);
531
532 /* Adjust counts accordingly. */
533 count_delta = e->count;
534 e->count = apply_probability (e->src->count, e->probability);
535 other_e->count = apply_probability (e->src->count, other_e->probability);
536 count_delta -= e->count;
537
538 /* If the latch exists, change its frequency and count, since we changed
539 the probability of the exit. Theoretically we should update everything from
540 the source of the exit edge to the latch, but for the vectorizer this is enough. */
541 if (loop->latch
542 && loop->latch != e->src)
543 {
544 loop->latch->frequency += freq_delta;
545 if (loop->latch->frequency < 0)
546 loop->latch->frequency = 0;
547 loop->latch->count += count_delta;
548 if (loop->latch->count < 0)
549 loop->latch->count = 0;
550 }
551 }
552
553 /* Roughly speaking, we want to reduce the loop body profile by the
554 difference of loop iterations. We can, however, do better if
555 we look at the actual profile, if it is available. */
556 scale = RDIV (iteration_bound * scale, iterations);
557 if (loop->header->count)
558 {
559 gcov_type count_in = 0;
560
561 FOR_EACH_EDGE (e, ei, loop->header->preds)
562 if (e->src != loop->latch)
563 count_in += e->count;
564
565 if (count_in != 0)
566 scale = GCOV_COMPUTE_SCALE (count_in * iteration_bound,
567 loop->header->count);
568 }
569 else if (loop->header->frequency)
570 {
571 int freq_in = 0;
572
573 FOR_EACH_EDGE (e, ei, loop->header->preds)
574 if (e->src != loop->latch)
575 freq_in += EDGE_FREQUENCY (e);
576
577 if (freq_in != 0)
578 scale = GCOV_COMPUTE_SCALE (freq_in * iteration_bound,
579 loop->header->frequency);
580 }
581 if (!scale)
582 scale = 1;
583 }
584
585 if (scale == REG_BR_PROB_BASE)
586 return;
587
588 /* Scale the actual probabilities. */
589 scale_loop_frequencies (loop, scale, REG_BR_PROB_BASE);
590 if (dump_file && (dump_flags & TDF_DETAILS))
591 fprintf (dump_file, ";; guessed iterations are now %i\n",
592 (int)expected_loop_iterations_unbounded (loop));
593 }
594
595 /* Recompute dominance information for basic blocks outside LOOP. */
596
597 static void
598 update_dominators_in_loop (struct loop *loop)
599 {
600 vec<basic_block> dom_bbs = vNULL;
601 sbitmap seen;
602 basic_block *body;
603 unsigned i;
604
605 seen = sbitmap_alloc (last_basic_block);
606 bitmap_clear (seen);
607 body = get_loop_body (loop);
608
609 for (i = 0; i < loop->num_nodes; i++)
610 bitmap_set_bit (seen, body[i]->index);
611
612 for (i = 0; i < loop->num_nodes; i++)
613 {
614 basic_block ldom;
615
616 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
617 ldom;
618 ldom = next_dom_son (CDI_DOMINATORS, ldom))
619 if (!bitmap_bit_p (seen, ldom->index))
620 {
621 bitmap_set_bit (seen, ldom->index);
622 dom_bbs.safe_push (ldom);
623 }
624 }
625
626 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
627 free (body);
628 free (seen);
629 dom_bbs.release ();
630 }
631
632 /* Creates an if region as shown below. CONDITION is used to create
633 the test for the if.
634
635 |
636 | ------------- -------------
637 | | pred_bb | | pred_bb |
638 | ------------- -------------
639 | | |
640 | | | ENTRY_EDGE
641 | | ENTRY_EDGE V
642 | | ====> -------------
643 | | | cond_bb |
644 | | | CONDITION |
645 | | -------------
646 | V / \
647 | ------------- e_false / \ e_true
648 | | succ_bb | V V
649 | ------------- ----------- -----------
650 | | false_bb | | true_bb |
651 | ----------- -----------
652 | \ /
653 | \ /
654 | V V
655 | -------------
656 | | join_bb |
657 | -------------
658 | | exit_edge (result)
659 | V
660 | -----------
661 | | succ_bb |
662 | -----------
663 |
664 */
665
666 edge
667 create_empty_if_region_on_edge (edge entry_edge, tree condition)
668 {
669
670 basic_block cond_bb, true_bb, false_bb, join_bb;
671 edge e_true, e_false, exit_edge;
672 gimple cond_stmt;
673 tree simple_cond;
674 gimple_stmt_iterator gsi;
675
676 cond_bb = split_edge (entry_edge);
677
678 /* Insert condition in cond_bb. */
679 gsi = gsi_last_bb (cond_bb);
680 simple_cond =
681 force_gimple_operand_gsi (&gsi, condition, true, NULL,
682 false, GSI_NEW_STMT);
683 cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
684 gsi = gsi_last_bb (cond_bb);
685 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
686
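/* Build the diamond: split the fall-through path twice to create JOIN_BB
   and TRUE_BB, then add a new edge from COND_BB to JOIN_BB and split it to
   create FALSE_BB. */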
687 join_bb = split_edge (single_succ_edge (cond_bb));
688
689 e_true = single_succ_edge (cond_bb);
690 true_bb = split_edge (e_true);
691
692 e_false = make_edge (cond_bb, join_bb, 0);
693 false_bb = split_edge (e_false);
694
695 e_true->flags &= ~EDGE_FALLTHRU;
696 e_true->flags |= EDGE_TRUE_VALUE;
697 e_false->flags &= ~EDGE_FALLTHRU;
698 e_false->flags |= EDGE_FALSE_VALUE;
699
700 set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
701 set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
702 set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
703 set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
704
705 exit_edge = single_succ_edge (join_bb);
706
707 if (single_pred_p (exit_edge->dest))
708 set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);
709
710 return exit_edge;
711 }
712
713 /* create_empty_loop_on_edge
714 |
715 | - pred_bb - ------ pred_bb ------
716 | | | | iv0 = initial_value |
717 | -----|----- ---------|-----------
718 | | ______ | entry_edge
719 | | entry_edge / | |
720 | | ====> | -V---V- loop_header -------------
721 | V | | iv_before = phi (iv0, iv_after) |
722 | - succ_bb - | ---|-----------------------------
723 | | | | |
724 | ----------- | ---V--- loop_body ---------------
725 | | | iv_after = iv_before + stride |
726 | | | if (iv_before < upper_bound) |
727 | | ---|--------------\--------------
728 | | | \ exit_e
729 | | V \
730 | | - loop_latch - V- succ_bb -
731 | | | | | |
732 | | /------------- -----------
733 | \ ___ /
734
735 Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
736 that is used before the increment of IV. IV_BEFORE should be used for
737 adding code to the body that uses the IV. OUTER is the outer loop in
738 which the new loop should be inserted.
739
740 Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
741 inserted on the loop entry edge. This implies that this function
742 should be used only when the UPPER_BOUND expression is a loop
743 invariant. */
744
745 struct loop *
746 create_empty_loop_on_edge (edge entry_edge,
747 tree initial_value,
748 tree stride, tree upper_bound,
749 tree iv,
750 tree *iv_before,
751 tree *iv_after,
752 struct loop *outer)
753 {
754 basic_block loop_header, loop_latch, succ_bb, pred_bb;
755 struct loop *loop;
756 gimple_stmt_iterator gsi;
757 gimple_seq stmts;
758 gimple cond_expr;
759 tree exit_test;
760 edge exit_e;
761 int prob;
762
763 gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);
764
765 /* Create header, latch and wire up the loop. */
766 pred_bb = entry_edge->src;
767 loop_header = split_edge (entry_edge);
768 loop_latch = split_edge (single_succ_edge (loop_header));
769 succ_bb = single_succ (loop_latch);
770 make_edge (loop_header, succ_bb, 0);
771 redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);
772
773 /* Set immediate dominator information. */
774 set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
775 set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
776 set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);
777
778 /* Initialize a loop structure and put it in a loop hierarchy. */
779 loop = alloc_loop ();
780 loop->header = loop_header;
781 loop->latch = loop_latch;
782 add_loop (loop, outer);
783
784 /* TODO: Fix frequencies and counts. */
785 prob = REG_BR_PROB_BASE / 2;
786
787 scale_loop_frequencies (loop, REG_BR_PROB_BASE - prob, REG_BR_PROB_BASE);
788
789 /* Update dominators. */
790 update_dominators_in_loop (loop);
791
792 /* Modify edge flags. */
793 exit_e = single_exit (loop);
794 exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
795 single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;
796
797 /* Construct IV code in loop. */
798 initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
799 if (stmts)
800 {
801 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
802 gsi_commit_edge_inserts ();
803 }
804
805 upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
806 if (stmts)
807 {
808 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
809 gsi_commit_edge_inserts ();
810 }
811
812 gsi = gsi_last_bb (loop_header);
813 create_iv (initial_value, stride, iv, loop, &gsi, false,
814 iv_before, iv_after);
815
816 /* Insert loop exit condition. */
817 cond_expr = gimple_build_cond
818 (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);
819
820 exit_test = gimple_cond_lhs (cond_expr);
821 exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
822 false, GSI_NEW_STMT);
823 gimple_cond_set_lhs (cond_expr, exit_test);
824 gsi = gsi_last_bb (exit_e->src);
825 gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);
826
827 split_block_after_labels (loop_header);
828
829 return loop;
830 }
831
832 /* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
833 latch to header and update loop tree and dominators
834 accordingly. Everything between them plus LATCH_EDGE destination must
835 be dominated by HEADER_EDGE destination, and back-reachable from
836 LATCH_EDGE source. HEADER_EDGE is redirected to basic block SWITCH_BB,
837 FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
838 TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
839 Returns the newly created loop. Frequencies and counts in the new loop
840 are scaled by FALSE_SCALE and in the old one by TRUE_SCALE. */
841
842 struct loop *
843 loopify (edge latch_edge, edge header_edge,
844 basic_block switch_bb, edge true_edge, edge false_edge,
845 bool redirect_all_edges, unsigned true_scale, unsigned false_scale)
846 {
847 basic_block succ_bb = latch_edge->dest;
848 basic_block pred_bb = header_edge->src;
849 struct loop *loop = alloc_loop ();
850 struct loop *outer = loop_outer (succ_bb->loop_father);
851 int freq;
852 gcov_type cnt;
853 edge e;
854 edge_iterator ei;
855
856 loop->header = header_edge->dest;
857 loop->latch = latch_edge->src;
858
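/* Remember the frequency and count entering through HEADER_EDGE; they
   become the frequency and count of SWITCH_BB if all edges are
   redirected. */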
859 freq = EDGE_FREQUENCY (header_edge);
860 cnt = header_edge->count;
861
862 /* Redirect edges. */
863 loop_redirect_edge (latch_edge, loop->header);
864 loop_redirect_edge (true_edge, succ_bb);
865
866 /* During loop versioning, one of the switch_bb edges is already properly
867 set. Do not redirect it again unless redirect_all_edges is true. */
868 if (redirect_all_edges)
869 {
870 loop_redirect_edge (header_edge, switch_bb);
871 loop_redirect_edge (false_edge, loop->header);
872
873 /* Update dominators. */
874 set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
875 set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
876 }
877
878 set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);
879
880 /* Compute new loop. */
881 add_loop (loop, outer);
882
883 /* Add switch_bb to appropriate loop. */
884 if (switch_bb->loop_father)
885 remove_bb_from_loops (switch_bb);
886 add_bb_to_loop (switch_bb, outer);
887
888 /* Fix frequencies. */
889 if (redirect_all_edges)
890 {
891 switch_bb->frequency = freq;
892 switch_bb->count = cnt;
893 FOR_EACH_EDGE (e, ei, switch_bb->succs)
894 {
895 e->count = apply_probability (switch_bb->count, e->probability);
896 }
897 }
898 scale_loop_frequencies (loop, false_scale, REG_BR_PROB_BASE);
899 scale_loop_frequencies (succ_bb->loop_father, true_scale, REG_BR_PROB_BASE);
900 update_dominators_in_loop (loop);
901
902 return loop;
903 }
904
905 /* Remove the latch edge of a LOOP and update loops to indicate that
906 the LOOP was removed. After this function, the original loop latch will
907 have no successor, which the caller is expected to fix somehow.
908
909 If this may cause the information about irreducible regions to become
910 invalid, IRRED_INVALIDATED is set to true.
911
912 LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
913 basic blocks that had a non-trivial update of their loop_father. */
914
915 void
916 unloop (struct loop *loop, bool *irred_invalidated,
917 bitmap loop_closed_ssa_invalidated)
918 {
919 basic_block *body;
920 struct loop *ploop;
921 unsigned i, n;
922 basic_block latch = loop->latch;
923 bool dummy = false;
924
925 if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
926 *irred_invalidated = true;
927
928 /* This is relatively straightforward. The dominators are unchanged, as
929 the loop header dominates the loop latch, so the only thing we have to care
930 about is the placement of loops and basic blocks inside the loop tree. We
931 move them all to the outer loop of LOOP, and then let fix_bb_placements do
932 its work. */
933
934 body = get_loop_body (loop);
935 n = loop->num_nodes;
936 for (i = 0; i < n; i++)
937 if (body[i]->loop_father == loop)
938 {
939 remove_bb_from_loops (body[i]);
940 add_bb_to_loop (body[i], loop_outer (loop));
941 }
942 free (body);
943
944 while (loop->inner)
945 {
946 ploop = loop->inner;
947 flow_loop_tree_node_remove (ploop);
948 flow_loop_tree_node_add (loop_outer (loop), ploop);
949 }
950
951 /* Remove the loop and free its data. */
952 delete_loop (loop);
953
954 remove_edge (single_succ_edge (latch));
955
956 /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
957 there is an irreducible region inside the cancelled loop, the flags will
958 still be correct. */
959 fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
960 }
961
962 /* Fix placement of superloops of LOOP inside the loop tree, i.e. ensure that
963 the condition stated in the description of fix_loop_placement holds for them.
964 It is used in the case when we have removed some edges coming out of LOOP, which
965 may cause the right placement of LOOP inside the loop tree to change.
966
967 IRRED_INVALIDATED is set to true if a change in the loop structures might
968 invalidate the information about irreducible regions. */
969
970 static void
971 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
972 {
973 struct loop *outer;
974
975 while (loop_outer (loop))
976 {
977 outer = loop_outer (loop);
978 if (!fix_loop_placement (loop, irred_invalidated))
979 break;
980
981 /* Changing the placement of a loop in the loop tree may alter the
982 validity of condition 2) of the description of fix_bb_placement
983 for its preheader, because the successor is the header and belongs
984 to the loop. So call fix_bb_placements to fix up the placement
985 of the preheader and (possibly) of its predecessors. */
986 fix_bb_placements (loop_preheader_edge (loop)->src,
987 irred_invalidated, NULL);
988 loop = outer;
989 }
990 }
991
992 /* Duplicate loop bounds and other information we store about
993 the loop into its duplicate. */
994
995 void
996 copy_loop_info (struct loop *loop, struct loop *target)
997 {
998 gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
999 target->any_upper_bound = loop->any_upper_bound;
1000 target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
1001 target->any_estimate = loop->any_estimate;
1002 target->nb_iterations_estimate = loop->nb_iterations_estimate;
1003 target->estimate_state = loop->estimate_state;
1004 }
1005
1006 /* Creates a copy of LOOP as a subloop of TARGET loop, placing the newly
1007 created loop into the loop structure. */
1008 struct loop *
1009 duplicate_loop (struct loop *loop, struct loop *target)
1010 {
1011 struct loop *cloop;
1012 cloop = alloc_loop ();
1013 place_new_loop (cfun, cloop);
1014
1015 copy_loop_info (loop, cloop);
1016
1017 /* Mark the new loop as copy of LOOP. */
1018 set_loop_copy (loop, cloop);
1019
1020 /* Add it to target. */
1021 flow_loop_tree_node_add (target, cloop);
1022
1023 return cloop;
1024 }
1025
1026 /* Copies structure of subloops of LOOP into TARGET loop, placing
1027 newly created loops into loop tree. */
1028 void
1029 duplicate_subloops (struct loop *loop, struct loop *target)
1030 {
1031 struct loop *aloop, *cloop;
1032
1033 for (aloop = loop->inner; aloop; aloop = aloop->next)
1034 {
1035 cloop = duplicate_loop (aloop, target);
1036 duplicate_subloops (aloop, cloop);
1037 }
1038 }
1039
1040 /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
1041 into TARGET loop, placing newly created loops into loop tree. */
1042 static void
1043 copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
1044 {
1045 struct loop *aloop;
1046 int i;
1047
1048 for (i = 0; i < n; i++)
1049 {
1050 aloop = duplicate_loop (copied_loops[i], target);
1051 duplicate_subloops (copied_loops[i], aloop);
1052 }
1053 }
1054
1055 /* Redirects edge E to basic block DEST. */
1056 static void
1057 loop_redirect_edge (edge e, basic_block dest)
1058 {
1059 if (e->dest == dest)
1060 return;
1061
1062 redirect_edge_and_branch_force (e, dest);
1063 }
1064
1065 /* Check whether LOOP's body can be duplicated. */
1066 bool
1067 can_duplicate_loop_p (const struct loop *loop)
1068 {
1069 int ret;
1070 basic_block *bbs = get_loop_body (loop);
1071
1072 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1073 free (bbs);
1074
1075 return ret;
1076 }
1077
1078 /* Sets probability and count of edge E to zero. The probability and count
1079 are redistributed evenly to the remaining edges coming from E->src. */
1080
1081 static void
1082 set_zero_probability (edge e)
1083 {
1084 basic_block bb = e->src;
1085 edge_iterator ei;
1086 edge ae, last = NULL;
1087 unsigned n = EDGE_COUNT (bb->succs);
1088 gcov_type cnt = e->count, cnt1;
1089 unsigned prob = e->probability, prob1;
1090
1091 gcc_assert (n > 1);
1092 cnt1 = cnt / (n - 1);
1093 prob1 = prob / (n - 1);
1094
1095 FOR_EACH_EDGE (ae, ei, bb->succs)
1096 {
1097 if (ae == e)
1098 continue;
1099
1100 ae->probability += prob1;
1101 ae->count += cnt1;
1102 last = ae;
1103 }
1104
1105 /* Move the rest to one of the edges. */
1106 last->probability += prob % (n - 1);
1107 last->count += cnt % (n - 1);
1108
1109 e->probability = 0;
1110 e->count = 0;
1111 }
1112
1113 /* Duplicates the body of LOOP to the given edge E NDUPL times. Takes care of
1114 updating the loop structure and dominators. E's destination must be the LOOP
1115 header for this to work, i.e. it must be the entry or the latch edge of this
1116 loop; these are unique, as the loop must have a preheader for this function to
1117 work correctly (in case E is the latch edge, the function unrolls the loop; if
1118 E is the entry edge, it peels the loop). The edges created by copying the ORIG
1119 edge in the copies whose bits are set in the WONT_EXIT bitmap (bit 0 corresponds
1120 to the original LOOP body, the other copies are numbered in the order given by
1121 the control flow through them) are stored into the TO_REMOVE array. Returns
1122 false if duplication is impossible. */
1123
1124 bool
1125 duplicate_loop_to_header_edge (struct loop *loop, edge e,
1126 unsigned int ndupl, sbitmap wont_exit,
1127 edge orig, vec<edge> *to_remove,
1128 int flags)
1129 {
1130 struct loop *target, *aloop;
1131 struct loop **orig_loops;
1132 unsigned n_orig_loops;
1133 basic_block header = loop->header, latch = loop->latch;
1134 basic_block *new_bbs, *bbs, *first_active;
1135 basic_block new_bb, bb, first_active_latch = NULL;
1136 edge ae, latch_edge;
1137 edge spec_edges[2], new_spec_edges[2];
1138 #define SE_LATCH 0
1139 #define SE_ORIG 1
1140 unsigned i, j, n;
1141 int is_latch = (latch == e->src);
1142 int scale_act = 0, *scale_step = NULL, scale_main = 0;
1143 int scale_after_exit = 0;
1144 int p, freq_in, freq_le, freq_out_orig;
1145 int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
1146 int add_irreducible_flag;
1147 basic_block place_after;
1148 bitmap bbs_to_scale = NULL;
1149 bitmap_iterator bi;
1150
1151 gcc_assert (e->dest == loop->header);
1152 gcc_assert (ndupl > 0);
1153
1154 if (orig)
1155 {
1156 /* ORIG must be an edge out of the loop. */
1157 gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
1158 gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
1159 }
1160
1161 n = loop->num_nodes;
1162 bbs = get_loop_body_in_dom_order (loop);
1163 gcc_assert (bbs[0] == loop->header);
1164 gcc_assert (bbs[n - 1] == loop->latch);
1165
1166 /* Check whether duplication is possible. */
1167 if (!can_copy_bbs_p (bbs, loop->num_nodes))
1168 {
1169 free (bbs);
1170 return false;
1171 }
1172 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
1173
1174 /* In case we are doing loop peeling and the loop is in the middle of
1175 an irreducible region, the peeled copies will be inside it too. */
1176 add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
1177 gcc_assert (!is_latch || !add_irreducible_flag);
1178
1179 /* Find edge from latch. */
1180 latch_edge = loop_latch_edge (loop);
1181
1182 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1183 {
1184 /* Calculate the coefficients by which we have to scale the frequencies
1185 of the duplicated loop bodies. */
1186 freq_in = header->frequency;
1187 freq_le = EDGE_FREQUENCY (latch_edge);
1188 if (freq_in == 0)
1189 freq_in = 1;
1190 if (freq_in < freq_le)
1191 freq_in = freq_le;
1192 freq_out_orig = orig ? EDGE_FREQUENCY (orig) : freq_in - freq_le;
1193 if (freq_out_orig > freq_in - freq_le)
1194 freq_out_orig = freq_in - freq_le;
1195 prob_pass_thru = RDIV (REG_BR_PROB_BASE * freq_le, freq_in);
1196 prob_pass_wont_exit =
1197 RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in);
1198
1199 if (orig
1200 && REG_BR_PROB_BASE - orig->probability != 0)
1201 {
1202 /* The blocks that are dominated by a removed exit edge ORIG have
1203 frequencies scaled by this. */
1204 scale_after_exit
1205 = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE,
1206 REG_BR_PROB_BASE - orig->probability);
1207 bbs_to_scale = BITMAP_ALLOC (NULL);
1208 for (i = 0; i < n; i++)
1209 {
1210 if (bbs[i] != orig->src
1211 && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
1212 bitmap_set_bit (bbs_to_scale, i);
1213 }
1214 }
1215
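/* For each copy, compute the probability with which execution continues
   to the following copy: copies whose WONT_EXIT bit is set also keep the
   frequency that would otherwise leave through ORIG. */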
1216 scale_step = XNEWVEC (int, ndupl);
1217
1218 for (i = 1; i <= ndupl; i++)
1219 scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
1220 ? prob_pass_wont_exit
1221 : prob_pass_thru;
1222
1223 /* Complete peeling is special as the probability of exit in last
1224 copy becomes 1. */
1225 if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
1226 {
1227 int wanted_freq = EDGE_FREQUENCY (e);
1228
1229 if (wanted_freq > freq_in)
1230 wanted_freq = freq_in;
1231
1232 gcc_assert (!is_latch);
1233 /* The first copy has the frequency of the incoming edge. Each subsequent
1234 frequency should be reduced by prob_pass_wont_exit. The caller
1235 should have managed the flags so that all copies except for the original
1236 loop have their WONT_EXIT bit set. */
1237 scale_act = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
1238 /* Now simulate the duplication adjustments and compute header
1239 frequency of the last copy. */
1240 for (i = 0; i < ndupl; i++)
1241 wanted_freq = combine_probabilities (wanted_freq, scale_step[i]);
1242 scale_main = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
1243 }
1244 else if (is_latch)
1245 {
1246 prob_pass_main = bitmap_bit_p (wont_exit, 0)
1247 ? prob_pass_wont_exit
1248 : prob_pass_thru;
1249 p = prob_pass_main;
1250 scale_main = REG_BR_PROB_BASE;
1251 for (i = 0; i < ndupl; i++)
1252 {
1253 scale_main += p;
1254 p = combine_probabilities (p, scale_step[i]);
1255 }
1256 scale_main = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE, scale_main);
1257 scale_act = combine_probabilities (scale_main, prob_pass_main);
1258 }
1259 else
1260 {
1261 scale_main = REG_BR_PROB_BASE;
1262 for (i = 0; i < ndupl; i++)
1263 scale_main = combine_probabilities (scale_main, scale_step[i]);
1264 scale_act = REG_BR_PROB_BASE - prob_pass_thru;
1265 }
1266 for (i = 0; i < ndupl; i++)
1267 gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
1268 gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
1269 && scale_act >= 0 && scale_act <= REG_BR_PROB_BASE);
1270 }
1271
1272 /* Loop the new bbs will belong to. */
1273 target = e->src->loop_father;
1274
1275 /* Original loops. */
1276 n_orig_loops = 0;
1277 for (aloop = loop->inner; aloop; aloop = aloop->next)
1278 n_orig_loops++;
1279 orig_loops = XNEWVEC (struct loop *, n_orig_loops);
1280 for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
1281 orig_loops[i] = aloop;
1282
1283 set_loop_copy (loop, target);
1284
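/* FIRST_ACTIVE[I] will hold the copy of BBS[I] that comes first in the
   control flow; when unrolling (E is the latch edge) these are the
   original blocks themselves. */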
1285 first_active = XNEWVEC (basic_block, n);
1286 if (is_latch)
1287 {
1288 memcpy (first_active, bbs, n * sizeof (basic_block));
1289 first_active_latch = latch;
1290 }
1291
1292 spec_edges[SE_ORIG] = orig;
1293 spec_edges[SE_LATCH] = latch_edge;
1294
1295 place_after = e->src;
1296 for (j = 0; j < ndupl; j++)
1297 {
1298 /* Copy loops. */
1299 copy_loops_to (orig_loops, n_orig_loops, target);
1300
1301 /* Copy bbs. */
1302 copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
1303 place_after, true);
1304 place_after = new_spec_edges[SE_LATCH]->src;
1305
1306 if (flags & DLTHE_RECORD_COPY_NUMBER)
1307 for (i = 0; i < n; i++)
1308 {
1309 gcc_assert (!new_bbs[i]->aux);
1310 new_bbs[i]->aux = (void *)(size_t)(j + 1);
1311 }
1312
1313 /* Note whether the blocks and edges belong to an irreducible loop. */
1314 if (add_irreducible_flag)
1315 {
1316 for (i = 0; i < n; i++)
1317 new_bbs[i]->flags |= BB_DUPLICATED;
1318 for (i = 0; i < n; i++)
1319 {
1320 edge_iterator ei;
1321 new_bb = new_bbs[i];
1322 if (new_bb->loop_father == target)
1323 new_bb->flags |= BB_IRREDUCIBLE_LOOP;
1324
1325 FOR_EACH_EDGE (ae, ei, new_bb->succs)
1326 if ((ae->dest->flags & BB_DUPLICATED)
1327 && (ae->src->loop_father == target
1328 || ae->dest->loop_father == target))
1329 ae->flags |= EDGE_IRREDUCIBLE_LOOP;
1330 }
1331 for (i = 0; i < n; i++)
1332 new_bbs[i]->flags &= ~BB_DUPLICATED;
1333 }
1334
1335 /* Redirect the special edges. */
1336 if (is_latch)
1337 {
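/* Unrolling: hook the new copy between the current latch and the header,
   and make the copy's latch edge the new latch edge of LOOP. */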
1338 redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
1339 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1340 loop->header);
1341 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
1342 latch = loop->latch = new_bbs[n - 1];
1343 e = latch_edge = new_spec_edges[SE_LATCH];
1344 }
1345 else
1346 {
1347 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1348 loop->header);
1349 redirect_edge_and_branch_force (e, new_bbs[0]);
1350 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
1351 e = new_spec_edges[SE_LATCH];
1352 }
1353
1354 /* Record exit edge in this copy. */
1355 if (orig && bitmap_bit_p (wont_exit, j + 1))
1356 {
1357 if (to_remove)
1358 to_remove->safe_push (new_spec_edges[SE_ORIG]);
1359 set_zero_probability (new_spec_edges[SE_ORIG]);
1360
1361 /* Scale the frequencies of the blocks dominated by the exit. */
1362 if (bbs_to_scale)
1363 {
1364 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1365 {
1366 scale_bbs_frequencies_int (new_bbs + i, 1, scale_after_exit,
1367 REG_BR_PROB_BASE);
1368 }
1369 }
1370 }
1371
1372 /* Record the first copy in the control flow order if it is not
1373 the original loop (i.e. in case of peeling). */
1374 if (!first_active_latch)
1375 {
1376 memcpy (first_active, new_bbs, n * sizeof (basic_block));
1377 first_active_latch = new_bbs[n - 1];
1378 }
1379
1380 /* Set counts and frequencies. */
1381 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1382 {
1383 scale_bbs_frequencies_int (new_bbs, n, scale_act, REG_BR_PROB_BASE);
1384 scale_act = combine_probabilities (scale_act, scale_step[j]);
1385 }
1386 }
1387 free (new_bbs);
1388 free (orig_loops);
1389
1390 /* Record the exit edge in the original loop body, and update the frequencies. */
1391 if (orig && bitmap_bit_p (wont_exit, 0))
1392 {
1393 if (to_remove)
1394 to_remove->safe_push (orig);
1395 set_zero_probability (orig);
1396
1397 /* Scale the frequencies of the blocks dominated by the exit. */
1398 if (bbs_to_scale)
1399 {
1400 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1401 {
1402 scale_bbs_frequencies_int (bbs + i, 1, scale_after_exit,
1403 REG_BR_PROB_BASE);
1404 }
1405 }
1406 }
1407
1408 /* Update the original loop. */
1409 if (!is_latch)
1410 set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
1411 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1412 {
1413 scale_bbs_frequencies_int (bbs, n, scale_main, REG_BR_PROB_BASE);
1414 free (scale_step);
1415 }
1416
1417 /* Update dominators of outer blocks if affected. */
1418 for (i = 0; i < n; i++)
1419 {
1420 basic_block dominated, dom_bb;
1421 vec<basic_block> dom_bbs;
1422 unsigned j;
1423
1424 bb = bbs[i];
1425 bb->aux = 0;
1426
1427 dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
1428 FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
1429 {
1430 if (flow_bb_inside_loop_p (loop, dominated))
1431 continue;
1432 dom_bb = nearest_common_dominator (
1433 CDI_DOMINATORS, first_active[i], first_active_latch);
1434 set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
1435 }
1436 dom_bbs.release ();
1437 }
1438 free (first_active);
1439
1440 free (bbs);
1441 BITMAP_FREE (bbs_to_scale);
1442
1443 return true;
1444 }
1445
1446 /* A callback for make_forwarder_block, to redirect all edges except for
1447 MFB_KJ_EDGE to the entry part. E is the edge for which we should decide
1448 whether to redirect it. */
1449
1450 edge mfb_kj_edge;
1451 bool
1452 mfb_keep_just (edge e)
1453 {
1454 return e != mfb_kj_edge;
1455 }
1456
1457 /* True when a candidate preheader BLOCK has predecessors from LOOP. */
1458
1459 static bool
1460 has_preds_from_loop (basic_block block, struct loop *loop)
1461 {
1462 edge e;
1463 edge_iterator ei;
1464
1465 FOR_EACH_EDGE (e, ei, block->preds)
1466 if (e->src->loop_father == loop)
1467 return true;
1468 return false;
1469 }
1470
1471 /* Creates a pre-header for a LOOP. Returns the newly created block. Unless
1472 CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have a single
1473 entry; otherwise we also force the preheader block to have only one successor.
1474 When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
1475 to be a fallthru predecessor to the loop header and to have only
1476 predecessors from outside of the loop.
1477 The function also updates dominators. */
1478
1479 basic_block
1480 create_preheader (struct loop *loop, int flags)
1481 {
1482 edge e, fallthru;
1483 basic_block dummy;
1484 int nentry = 0;
1485 bool irred = false;
1486 bool latch_edge_was_fallthru;
1487 edge one_succ_pred = NULL, single_entry = NULL;
1488 edge_iterator ei;
1489
1490 FOR_EACH_EDGE (e, ei, loop->header->preds)
1491 {
1492 if (e->src == loop->latch)
1493 continue;
1494 irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
1495 nentry++;
1496 single_entry = e;
1497 if (single_succ_p (e->src))
1498 one_succ_pred = e;
1499 }
1500 gcc_assert (nentry);
1501 if (nentry == 1)
1502 {
1503 bool need_forwarder_block = false;
1504
1505 /* We do not allow entry block to be the loop preheader, since we
1506 cannot emit code there. */
1507 if (single_entry->src == ENTRY_BLOCK_PTR)
1508 need_forwarder_block = true;
1509 else
1510 {
1511 /* If we want simple preheaders, also force the preheader to have
1512 just a single successor. */
1513 if ((flags & CP_SIMPLE_PREHEADERS)
1514 && !single_succ_p (single_entry->src))
1515 need_forwarder_block = true;
1516 /* If we want fallthru preheaders, also create forwarder block when
1517 preheader ends with a jump or has predecessors from loop. */
1518 else if ((flags & CP_FALLTHRU_PREHEADERS)
1519 && (JUMP_P (BB_END (single_entry->src))
1520 || has_preds_from_loop (single_entry->src, loop)))
1521 need_forwarder_block = true;
1522 }
1523 if (! need_forwarder_block)
1524 return NULL;
1525 }
1526
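/* Split the header: all incoming edges except the latch edge are
   redirected to a new forwarder block DUMMY, which becomes the preheader
   and falls through into the (new) loop header. */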
1527 mfb_kj_edge = loop_latch_edge (loop);
1528 latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
1529 fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
1530 dummy = fallthru->src;
1531 loop->header = fallthru->dest;
1532
1533 /* Try to be clever in placing the newly created preheader. The idea is to
1534 avoid breaking any "fallthruness" relationship between blocks.
1535
1536 The preheader was created just before the header and all incoming edges
1537 to the header were redirected to the preheader, except the latch edge.
1538 So the only problematic case is when this latch edge was a fallthru
1539 edge: it is not anymore after the preheader creation so we have broken
1540 the fallthruness. We're therefore going to look for a better place. */
1541 if (latch_edge_was_fallthru)
1542 {
1543 if (one_succ_pred)
1544 e = one_succ_pred;
1545 else
1546 e = EDGE_PRED (dummy, 0);
1547
1548 move_block_after (dummy, e->src);
1549 }
1550
1551 if (irred)
1552 {
1553 dummy->flags |= BB_IRREDUCIBLE_LOOP;
1554 single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
1555 }
1556
1557 if (dump_file)
1558 fprintf (dump_file, "Created preheader block for loop %i\n",
1559 loop->num);
1560
1561 if (flags & CP_FALLTHRU_PREHEADERS)
1562 gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
1563 && !JUMP_P (BB_END (dummy)));
1564
1565 return dummy;
1566 }
1567
1568 /* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */
1569
1570 void
1571 create_preheaders (int flags)
1572 {
1573 loop_iterator li;
1574 struct loop *loop;
1575
1576 if (!current_loops)
1577 return;
1578
1579 FOR_EACH_LOOP (li, loop, 0)
1580 create_preheader (loop, flags);
1581 loops_state_set (LOOPS_HAVE_PREHEADERS);
1582 }
1583
1584 /* Forces all loop latches to have only a single successor. */
1585
1586 void
1587 force_single_succ_latches (void)
1588 {
1589 loop_iterator li;
1590 struct loop *loop;
1591 edge e;
1592
1593 FOR_EACH_LOOP (li, loop, 0)
1594 {
1595 if (loop->latch != loop->header && single_succ_p (loop->latch))
1596 continue;
1597
1598 e = find_edge (loop->latch, loop->header);
1599 gcc_checking_assert (e != NULL);
1600
1601 split_edge (e);
1602 }
1603 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1604 }
1605
1606 /* This function is called from loop_version. It splits the entry edge
1607 of the loop we want to version, adds the versioning condition, and
1608 adjusts the edges to the two versions of the loop appropriately.
1609 E is an incoming edge. Returns the basic block containing the
1610 condition.
1611
1612 --- edge e ---- > [second_head]
1613
1614 Split it and insert new conditional expression and adjust edges.
1615
1616 --- edge e ---> [cond expr] ---> [first_head]
1617 |
1618 +---------> [second_head]
1619
1620 THEN_PROB is the probability of then branch of the condition. */
1621
1622 static basic_block
1623 lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
1624 edge e, void *cond_expr, unsigned then_prob)
1625 {
1626 basic_block new_head = NULL;
1627 edge e1;
1628
1629 gcc_assert (e->dest == second_head);
1630
1631 /* Split edge 'e'. This will create a new basic block, where we can
1632 insert conditional expr. */
1633 new_head = split_edge (e);
1634
1635 lv_add_condition_to_bb (first_head, second_head, new_head,
1636 cond_expr);
1637
1638 /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there. */
1639 e = single_succ_edge (new_head);
1640 e1 = make_edge (new_head, first_head,
1641 current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
1642 e1->probability = then_prob;
1643 e->probability = REG_BR_PROB_BASE - then_prob;
1644 e1->count = apply_probability (e->count, e1->probability);
1645 e->count = apply_probability (e->count, e->probability);
1646
1647 set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
1648 set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);
1649
1650 /* Adjust loop header phi nodes. */
1651 lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);
1652
1653 return new_head;
1654 }
1655
1656 /* Main entry point for Loop Versioning transformation.
1657
1658 This transformation, given a condition and a loop, creates
1659 -if (condition) { loop_copy1 } else { loop_copy2 },
1660 where loop_copy1 is the loop transformed in one way, and loop_copy2
1661 is the loop transformed in another way (or unchanged). 'condition'
1662 may be a run time test for things that were not resolved by static
1663 analysis (overlapping ranges (anti-aliasing), alignment, etc.).
1664
1665 THEN_PROB is the probability of the then edge of the if. THEN_SCALE
1666 is the ratio by which the frequencies in the original loop should
1667 be scaled. ELSE_SCALE is the ratio by which the frequencies in the
1668 new loop should be scaled.
1669
1670 If PLACE_AFTER is true, we place the new loop after LOOP in the
1671 instruction stream, otherwise it is placed before LOOP. */
1672
1673 struct loop *
1674 loop_version (struct loop *loop,
1675 void *cond_expr, basic_block *condition_bb,
1676 unsigned then_prob, unsigned then_scale, unsigned else_scale,
1677 bool place_after)
1678 {
1679 basic_block first_head, second_head;
1680 edge entry, latch_edge, true_edge, false_edge;
1681 int irred_flag;
1682 struct loop *nloop;
1683 basic_block cond_bb;
1684
1685 /* Record the entry and latch edges for the loop. */
1686 entry = loop_preheader_edge (loop);
1687 irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
1688 entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
1689
1690 /* Note down head of loop as first_head. */
1691 first_head = entry->dest;
1692
1693 /* Duplicate loop. */
1694 if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
1695 NULL, NULL, NULL, 0))
1696 {
1697 entry->flags |= irred_flag;
1698 return NULL;
1699 }
1700
1701 /* After duplication entry edge now points to new loop head block.
1702 Note down new head as second_head. */
1703 second_head = entry->dest;
1704
1705 /* Split loop entry edge and insert new block with cond expr. */
1706 cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
1707 entry, cond_expr, then_prob);
1708 if (condition_bb)
1709 *condition_bb = cond_bb;
1710
1711 if (!cond_bb)
1712 {
1713 entry->flags |= irred_flag;
1714 return NULL;
1715 }
1716
1717 latch_edge = single_succ_edge (get_bb_copy (loop->latch));
1718
1719 extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
1720 nloop = loopify (latch_edge,
1721 single_pred_edge (get_bb_copy (loop->header)),
1722 cond_bb, true_edge, false_edge,
1723 false /* Do not redirect all edges. */,
1724 then_scale, else_scale);
1725
1726 copy_loop_info (loop, nloop);
1727
1728 /* loopify redirected latch_edge. Update its PENDING_STMTS. */
1729 lv_flush_pending_stmts (latch_edge);
1730
1731 /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS. */
1732 extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
1733 lv_flush_pending_stmts (false_edge);
1734 /* Adjust irreducible flag. */
1735 if (irred_flag)
1736 {
1737 cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
1738 loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1739 loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1740 single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
1741 }
1742
1743 if (place_after)
1744 {
1745 basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
1746 unsigned i;
1747
1748 after = loop->latch;
1749
1750 for (i = 0; i < nloop->num_nodes; i++)
1751 {
1752 move_block_after (bbs[i], after);
1753 after = bbs[i];
1754 }
1755 free (bbs);
1756 }
1757
1758 /* At this point condition_bb is loop preheader with two successors,
1759 first_head and second_head. Make sure that loop preheader has only
1760 one successor. */
1761 split_edge (loop_preheader_edge (loop));
1762 split_edge (loop_preheader_edge (nloop));
1763
1764 return nloop;
1765 }