Fix double word typos.
[gcc.git] / gcc / cfgloopmanip.c
1 /* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "cfghooks.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "rtl.h"
28 #include "cfganal.h"
29 #include "cfgloop.h"
30 #include "fold-const.h"
31 #include "internal-fn.h"
32 #include "gimple-iterator.h"
33 #include "gimplify-me.h"
34 #include "tree-ssa-loop-manip.h"
35 #include "dumpfile.h"
36
37 static void copy_loops_to (struct loop **, int,
38 struct loop *);
39 static void loop_redirect_edge (edge, basic_block);
40 static void remove_bbs (basic_block *, int);
41 static bool rpe_enum_p (const_basic_block, const void *);
42 static int find_path (edge, basic_block **);
43 static void fix_loop_placements (struct loop *, bool *);
44 static bool fix_bb_placement (basic_block);
45 static void fix_bb_placements (basic_block, bool *, bitmap);
46
47 /* Checks whether basic block BB is dominated by DATA. */
48 static bool
49 rpe_enum_p (const_basic_block bb, const void *data)
50 {
51 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
52 }
53
54 /* Remove basic blocks BBS. NBBS is the number of the basic blocks. */
55
56 static void
57 remove_bbs (basic_block *bbs, int nbbs)
58 {
59 int i;
60
61 for (i = 0; i < nbbs; i++)
62 delete_basic_block (bbs[i]);
63 }
64
65 /* Find path -- i.e. the basic blocks dominated by edge E and put them
66 into array BBS, that will be allocated large enough to contain them.
67 E->dest must have exactly one predecessor for this to work (it is
68 easy to achieve and we do not put it here because we do not want to
69 alter anything by this function). The number of basic blocks in the
70 path is returned. */
71 static int
72 find_path (edge e, basic_block **bbs)
73 {
74 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
75
76 /* Find bbs in the path. */
77 *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
78 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
79 n_basic_blocks_for_fn (cfun), e->dest);
80 }
81
82 /* Fix placement of basic block BB inside loop hierarchy --
83 Let L be a loop to that BB belongs. Then every successor of BB must either
84 1) belong to some superloop of loop L, or
85 2) be a header of loop K such that K->outer is superloop of L
86 Returns true if we had to move BB into other loop to enforce this condition,
87 false if the placement of BB was already correct (provided that placements
88 of its successors are correct). */
89 static bool
90 fix_bb_placement (basic_block bb)
91 {
92 edge e;
93 edge_iterator ei;
94 struct loop *loop = current_loops->tree_root, *act;
95
96 FOR_EACH_EDGE (e, ei, bb->succs)
97 {
98 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
99 continue;
100
101 act = e->dest->loop_father;
102 if (act->header == e->dest)
103 act = loop_outer (act);
104
105 if (flow_loop_nested_p (loop, act))
106 loop = act;
107 }
108
109 if (loop == bb->loop_father)
110 return false;
111
112 remove_bb_from_loops (bb);
113 add_bb_to_loop (bb, loop);
114
115 return true;
116 }
117
118 /* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
119 of LOOP to that leads at least one exit edge of LOOP, and set it
120 as the immediate superloop of LOOP. Return true if the immediate superloop
121 of LOOP changed.
122
123 IRRED_INVALIDATED is set to true if a change in the loop structures might
124 invalidate the information about irreducible regions. */
125
126 static bool
127 fix_loop_placement (struct loop *loop, bool *irred_invalidated)
128 {
129 unsigned i;
130 edge e;
131 vec<edge> exits = get_loop_exit_edges (loop);
132 struct loop *father = current_loops->tree_root, *act;
133 bool ret = false;
134
135 FOR_EACH_VEC_ELT (exits, i, e)
136 {
137 act = find_common_loop (loop, e->dest->loop_father);
138 if (flow_loop_nested_p (father, act))
139 father = act;
140 }
141
142 if (father != loop_outer (loop))
143 {
144 for (act = loop_outer (loop); act != father; act = loop_outer (act))
145 act->num_nodes -= loop->num_nodes;
146 flow_loop_tree_node_remove (loop);
147 flow_loop_tree_node_add (father, loop);
148
149 /* The exit edges of LOOP no longer exits its original immediate
150 superloops; remove them from the appropriate exit lists. */
151 FOR_EACH_VEC_ELT (exits, i, e)
152 {
153 /* We may need to recompute irreducible loops. */
154 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
155 *irred_invalidated = true;
156 rescan_loop_exit (e, false, false);
157 }
158
159 ret = true;
160 }
161
162 exits.release ();
163 return ret;
164 }
165
/* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
   enforce condition stated in description of fix_bb_placement.  We
   start from basic block FROM that had some of its successors removed, so that
   its placement no longer has to be correct, and iteratively fix placement of
   its predecessors that may change if placement of FROM changed.  Also fix
   placement of subloops of FROM->loop_father, that might also be altered due
   to this change; the condition for them is similar, except that instead of
   successors we consider edges coming out of the loops.

   If the changes may invalidate the information about irreducible regions,
   IRRED_INVALIDATED is set to true.

   If LOOP_CLOSED_SSA_INVALIDATED is non-zero then all basic blocks with
   changed loop_father are collected there.  */

static void
fix_bb_placements (basic_block from,
		   bool *irred_invalidated,
		   bitmap loop_closed_ssa_invalidated)
{
  sbitmap in_queue;
  basic_block *queue, *qtop, *qbeg, *qend;
  struct loop *base_loop, *target_loop;
  edge e;

  /* We pass through blocks back-reachable from FROM, testing whether some
     of their successors moved to outer loop.  It may be necessary to
     iterate several times, but it is finite, as we stop unless we move
     the basic block up the loop structure.  The whole story is a bit
     more complicated due to presence of subloops, those are moved using
     fix_loop_placement.  */

  base_loop = from->loop_father;
  /* If we are already in the outermost loop, the basic blocks cannot be moved
     outside of it.  If FROM is the header of the base loop, it cannot be moved
     outside of it, either.  In both cases, we can end now.  */
  if (base_loop == current_loops->tree_root
      || from == base_loop->header)
    return;

  in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);
  bitmap_set_bit (in_queue, from->index);
  /* Prevent us from going out of the base_loop.  */
  bitmap_set_bit (in_queue, base_loop->header->index);

  /* The worklist is a circular buffer of base_loop->num_nodes + 1 slots;
     QBEG and QEND wrap around at QTOP.  */
  queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
  qtop = queue + base_loop->num_nodes + 1;
  qbeg = queue;
  qend = queue + 1;
  *qbeg = from;

  while (qbeg != qend)
    {
      edge_iterator ei;
      from = *qbeg;
      qbeg++;
      if (qbeg == qtop)
	qbeg = queue;
      bitmap_clear_bit (in_queue, from->index);

      if (from->loop_father->header == from)
	{
	  /* Subloop header, maybe move the loop upward.  */
	  if (!fix_loop_placement (from->loop_father, irred_invalidated))
	    continue;
	  target_loop = loop_outer (from->loop_father);
	  if (loop_closed_ssa_invalidated)
	    {
	      /* The whole moved loop's body changed loop_father; record
		 every one of its blocks.  */
	      basic_block *bbs = get_loop_body (from->loop_father);
	      for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
		bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
	      free (bbs);
	    }
	}
      else
	{
	  /* Ordinary basic block.  */
	  if (!fix_bb_placement (from))
	    continue;
	  target_loop = from->loop_father;
	  if (loop_closed_ssa_invalidated)
	    bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
	}

      FOR_EACH_EDGE (e, ei, from->succs)
	{
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	}

      /* Something has changed, insert predecessors into queue.  */
      FOR_EACH_EDGE (e, ei, from->preds)
	{
	  basic_block pred = e->src;
	  struct loop *nca;

	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* If it is subloop, then it either was not moved, or
	     the path up the loop tree from base_loop do not contain
	     it.  */
	  nca = find_common_loop (pred->loop_father, base_loop);
	  if (pred->loop_father != base_loop
	      && (nca == base_loop
		  || nca != pred->loop_father))
	    pred = pred->loop_father->header;
	  else if (!flow_loop_nested_p (target_loop, pred->loop_father))
	    {
	      /* If PRED is already higher in the loop hierarchy than the
		 TARGET_LOOP to that we moved FROM, the change of the position
		 of FROM does not affect the position of PRED, so there is no
		 point in processing it.  */
	      continue;
	    }

	  /* PRED may have been replaced by its loop's header above, so
	     recheck membership before scheduling.  */
	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* Schedule the basic block.  */
	  *qend = pred;
	  qend++;
	  if (qend == qtop)
	    qend = queue;
	  bitmap_set_bit (in_queue, pred->index);
	}
    }
  free (in_queue);
  free (queue);
}
300
/* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
   and update loop structures and dominators.  Return true if we were able
   to remove the path, false otherwise (and nothing is affected then).  */
bool
remove_path (edge e)
{
  edge ae;
  basic_block *rem_bbs, *bord_bbs, from, bb;
  vec<basic_block> dom_bbs;
  int i, nrem, n_bord_bbs;
  sbitmap seen;
  bool irred_invalidated = false;
  edge_iterator ei;
  struct loop *l, *f;

  if (!can_remove_branch_p (e))
    return false;

  /* Keep track of whether we need to update information about irreducible
     regions.  This is the case if the removed area is a part of the
     irreducible region, or if the set of basic blocks that belong to a loop
     that is inside an irreducible region is changed, or if such a loop is
     removed.  */
  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
    irred_invalidated = true;

  /* We need to check whether basic blocks are dominated by the edge
     e, but we only have basic block dominators.  This is easy to
     fix -- when e->dest has exactly one predecessor, this corresponds
     to blocks dominated by e->dest, if not, split the edge.  */
  if (!single_pred_p (e->dest))
    e = single_pred_edge (split_edge (e));

  /* It may happen that by removing path we remove one or more loops
     we belong to.  In this case first unloop the loops, then proceed
     normally.  We may assume that e->dest is not a header of any loop,
     as it now has exactly one predecessor.  */
  for (l = e->src->loop_father; loop_outer (l); l = f)
    {
      f = loop_outer (l);
      if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
	unloop (l, &irred_invalidated, NULL);
    }

  /* Identify the path.  */
  nrem = find_path (e, &rem_bbs);

  n_bord_bbs = 0;
  bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (seen);

  /* Find "border" bbs -- i.e. those with predecessor in removed path.  */
  for (i = 0; i < nrem; i++)
    bitmap_set_bit (seen, rem_bbs[i]->index);
  if (!irred_invalidated)
    FOR_EACH_EDGE (ae, ei, e->src->succs)
      if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && !bitmap_bit_p (seen, ae->dest->index)
	  && ae->flags & EDGE_IRREDUCIBLE_LOOP)
	{
	  irred_invalidated = true;
	  break;
	}

  for (i = 0; i < nrem; i++)
    {
      bb = rem_bbs[i];
      FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
	if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !bitmap_bit_p (seen, ae->dest->index))
	  {
	    bitmap_set_bit (seen, ae->dest->index);
	    bord_bbs[n_bord_bbs++] = ae->dest;

	    if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
	      irred_invalidated = true;
	  }
    }

  /* Remove the path.  */
  from = e->src;
  remove_branch (e);
  dom_bbs.create (0);

  /* Cancel loops contained in the path.  */
  for (i = 0; i < nrem; i++)
    if (rem_bbs[i]->loop_father->header == rem_bbs[i])
      cancel_loop_tree (rem_bbs[i]->loop_father);

  remove_bbs (rem_bbs, nrem);
  free (rem_bbs);

  /* Find blocks whose dominators may be affected.  */
  bitmap_clear (seen);
  for (i = 0; i < n_bord_bbs; i++)
    {
      basic_block ldom;

      bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
      if (bitmap_bit_p (seen, bb->index))
	continue;
      bitmap_set_bit (seen, bb->index);

      /* Dominance sons of BB that are not dominated by FROM may get a
	 new immediate dominator after the path removal.  */
      for (ldom = first_dom_son (CDI_DOMINATORS, bb);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
	  dom_bbs.safe_push (ldom);
    }

  free (seen);

  /* Recount dominators.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
  dom_bbs.release ();
  free (bord_bbs);

  /* Fix placements of basic blocks inside loops and the placement of
     loops in the loop tree.  */
  fix_bb_placements (from, &irred_invalidated, NULL);
  fix_loop_placements (from->loop_father, &irred_invalidated);

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return true;
}
430
431 /* Creates place for a new LOOP in loops structure of FN. */
432
433 void
434 place_new_loop (struct function *fn, struct loop *loop)
435 {
436 loop->num = number_of_loops (fn);
437 vec_safe_push (loops_for_fn (fn)->larray, loop);
438 }
439
440 /* Given LOOP structure with filled header and latch, find the body of the
441 corresponding loop and add it to loops tree. Insert the LOOP as a son of
442 outer. */
443
444 void
445 add_loop (struct loop *loop, struct loop *outer)
446 {
447 basic_block *bbs;
448 int i, n;
449 struct loop *subloop;
450 edge e;
451 edge_iterator ei;
452
453 /* Add it to loop structure. */
454 place_new_loop (cfun, loop);
455 flow_loop_tree_node_add (outer, loop);
456
457 /* Find its nodes. */
458 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
459 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
460
461 for (i = 0; i < n; i++)
462 {
463 if (bbs[i]->loop_father == outer)
464 {
465 remove_bb_from_loops (bbs[i]);
466 add_bb_to_loop (bbs[i], loop);
467 continue;
468 }
469
470 loop->num_nodes++;
471
472 /* If we find a direct subloop of OUTER, move it to LOOP. */
473 subloop = bbs[i]->loop_father;
474 if (loop_outer (subloop) == outer
475 && subloop->header == bbs[i])
476 {
477 flow_loop_tree_node_remove (subloop);
478 flow_loop_tree_node_add (loop, subloop);
479 }
480 }
481
482 /* Update the information about loop exit edges. */
483 for (i = 0; i < n; i++)
484 {
485 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
486 {
487 rescan_loop_exit (e, false, false);
488 }
489 }
490
491 free (bbs);
492 }
493
494 /* Multiply all frequencies in LOOP by NUM/DEN. */
495
496 void
497 scale_loop_frequencies (struct loop *loop, int num, int den)
498 {
499 basic_block *bbs;
500
501 bbs = get_loop_body (loop);
502 scale_bbs_frequencies_int (bbs, loop->num_nodes, num, den);
503 free (bbs);
504 }
505
/* Multiply all frequencies in LOOP by SCALE/REG_BR_PROB_BASE.
   If ITERATION_BOUND is non-zero, scale even further if loop is predicted
   to iterate too many times.  */

void
scale_loop_profile (struct loop *loop, int scale, gcov_type iteration_bound)
{
  gcov_type iterations = expected_loop_iterations_unbounded (loop);
  edge e;
  edge_iterator ei;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ";; Scaling loop %i with scale %f, "
	     "bounding iterations to %i from guessed %i\n",
	     loop->num, (double)scale / REG_BR_PROB_BASE,
	     (int)iteration_bound, (int)iterations);

  /* See if loop is predicted to iterate too many times.  */
  if (iteration_bound && iterations > 0
      && apply_probability (iterations, scale) > iteration_bound)
    {
      /* Fixing loop profile for different trip count is not trivial; the exit
	 probabilities have to be updated to match and frequencies propagated
	 down to the loop body.

	 We fully update only the simple case of loop with single exit that is
	 either from the latch or BB just before latch and leads from BB with
	 simple conditional jump.  This is OK for use in vectorizer.  */
      e = single_exit (loop);
      if (e)
	{
	  edge other_e;
	  int freq_delta;
	  gcov_type count_delta;

	  /* Find the non-exit edge leaving the exit test block, skipping
	     abnormal and fake edges.  */
	  FOR_EACH_EDGE (other_e, ei, e->src->succs)
	    if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
		&& e != other_e)
	      break;

	  /* Probability of exit must be 1/iterations.  */
	  freq_delta = EDGE_FREQUENCY (e);
	  e->probability = REG_BR_PROB_BASE / iteration_bound;
	  other_e->probability = inverse_probability (e->probability);
	  freq_delta -= EDGE_FREQUENCY (e);

	  /* Adjust counts accordingly.  */
	  count_delta = e->count;
	  e->count = apply_probability (e->src->count, e->probability);
	  other_e->count = apply_probability (e->src->count, other_e->probability);
	  count_delta -= e->count;

	  /* If latch exists, change its frequency and count, since we changed
	     probability of exit.  Theoretically we should update everything from
	     source of exit edge to latch, but for vectorizer this is enough.  */
	  if (loop->latch
	      && loop->latch != e->src)
	    {
	      /* Clamp at zero; the deltas may drive the profile negative.  */
	      loop->latch->frequency += freq_delta;
	      if (loop->latch->frequency < 0)
		loop->latch->frequency = 0;
	      loop->latch->count += count_delta;
	      if (loop->latch->count < 0)
		loop->latch->count = 0;
	    }
	}

      /* Roughly speaking we want to reduce the loop body profile by the
	 difference of loop iterations.  We however can do better if
	 we look at the actual profile, if it is available.  */
      scale = RDIV (iteration_bound * scale, iterations);
      if (loop->header->count)
	{
	  gcov_type count_in = 0;

	  /* Sum the counts on entry edges (all header preds but the latch).  */
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch)
	      count_in += e->count;

	  if (count_in != 0)
	    scale = GCOV_COMPUTE_SCALE (count_in * iteration_bound,
					loop->header->count);
	}
      else if (loop->header->frequency)
	{
	  int freq_in = 0;

	  /* No profile counts; fall back to estimated frequencies.  */
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch)
	      freq_in += EDGE_FREQUENCY (e);

	  if (freq_in != 0)
	    scale = GCOV_COMPUTE_SCALE (freq_in * iteration_bound,
					loop->header->frequency);
	}
      /* Never scale to zero.  */
      if (!scale)
	scale = 1;
    }

  if (scale == REG_BR_PROB_BASE)
    return;

  /* Scale the actual probabilities.  */
  scale_loop_frequencies (loop, scale, REG_BR_PROB_BASE);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ";; guessed iterations are now %i\n",
	     (int)expected_loop_iterations_unbounded (loop));
}
614
615 /* Recompute dominance information for basic blocks outside LOOP. */
616
617 static void
618 update_dominators_in_loop (struct loop *loop)
619 {
620 vec<basic_block> dom_bbs = vNULL;
621 sbitmap seen;
622 basic_block *body;
623 unsigned i;
624
625 seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
626 bitmap_clear (seen);
627 body = get_loop_body (loop);
628
629 for (i = 0; i < loop->num_nodes; i++)
630 bitmap_set_bit (seen, body[i]->index);
631
632 for (i = 0; i < loop->num_nodes; i++)
633 {
634 basic_block ldom;
635
636 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
637 ldom;
638 ldom = next_dom_son (CDI_DOMINATORS, ldom))
639 if (!bitmap_bit_p (seen, ldom->index))
640 {
641 bitmap_set_bit (seen, ldom->index);
642 dom_bbs.safe_push (ldom);
643 }
644 }
645
646 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
647 free (body);
648 free (seen);
649 dom_bbs.release ();
650 }
651
652 /* Creates an if region as shown above. CONDITION is used to create
653 the test for the if.
654
655 |
656 | ------------- -------------
657 | | pred_bb | | pred_bb |
658 | ------------- -------------
659 | | |
660 | | | ENTRY_EDGE
661 | | ENTRY_EDGE V
662 | | ====> -------------
663 | | | cond_bb |
664 | | | CONDITION |
665 | | -------------
666 | V / \
667 | ------------- e_false / \ e_true
668 | | succ_bb | V V
669 | ------------- ----------- -----------
670 | | false_bb | | true_bb |
671 | ----------- -----------
672 | \ /
673 | \ /
674 | V V
675 | -------------
676 | | join_bb |
677 | -------------
678 | | exit_edge (result)
679 | V
680 | -----------
681 | | succ_bb |
682 | -----------
683 |
684 */
685
edge
create_empty_if_region_on_edge (edge entry_edge, tree condition)
{

  basic_block cond_bb, true_bb, false_bb, join_bb;
  edge e_true, e_false, exit_edge;
  gcond *cond_stmt;
  tree simple_cond;
  gimple_stmt_iterator gsi;

  /* COND_BB is a fresh block created on ENTRY_EDGE to hold the test.  */
  cond_bb = split_edge (entry_edge);

  /* Insert condition in cond_bb.  */
  gsi = gsi_last_bb (cond_bb);
  simple_cond =
    force_gimple_operand_gsi (&gsi, condition, true, NULL,
			      false, GSI_NEW_STMT);
  cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* JOIN_BB, TRUE_BB and FALSE_BB are created empty by splitting edges.  */
  join_bb = split_edge (single_succ_edge (cond_bb));

  e_true = single_succ_edge (cond_bb);
  true_bb = split_edge (e_true);

  e_false = make_edge (cond_bb, join_bb, 0);
  false_bb = split_edge (e_false);

  /* Turn the two edges leaving COND_BB into the conditional edges of
     the new if-region.  */
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;
  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
  set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);

  exit_edge = single_succ_edge (join_bb);

  if (single_pred_p (exit_edge->dest))
    set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);

  return exit_edge;
}
732
733 /* create_empty_loop_on_edge
734 |
735 | - pred_bb - ------ pred_bb ------
736 | | | | iv0 = initial_value |
737 | -----|----- ---------|-----------
738 | | ______ | entry_edge
739 | | entry_edge / | |
740 | | ====> | -V---V- loop_header -------------
741 | V | | iv_before = phi (iv0, iv_after) |
742 | - succ_bb - | ---|-----------------------------
743 | | | | |
744 | ----------- | ---V--- loop_body ---------------
745 | | | iv_after = iv_before + stride |
746 | | | if (iv_before < upper_bound) |
747 | | ---|--------------\--------------
748 | | | \ exit_e
749 | | V \
750 | | - loop_latch - V- succ_bb -
751 | | | | | |
752 | | /------------- -----------
753 | \ ___ /
754
755 Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
756 that is used before the increment of IV. IV_BEFORE should be used for
757 adding code to the body that uses the IV. OUTER is the outer loop in
758 which the new loop should be inserted.
759
760 Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
761 inserted on the loop entry edge. This implies that this function
762 should be used only when the UPPER_BOUND expression is a loop
763 invariant. */
764
struct loop *
create_empty_loop_on_edge (edge entry_edge,
			   tree initial_value,
			   tree stride, tree upper_bound,
			   tree iv,
			   tree *iv_before,
			   tree *iv_after,
			   struct loop *outer)
{
  basic_block loop_header, loop_latch, succ_bb, pred_bb;
  struct loop *loop;
  gimple_stmt_iterator gsi;
  gimple_seq stmts;
  gcond *cond_expr;
  tree exit_test;
  edge exit_e;
  int prob;

  gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);

  /* Create header, latch and wire up the loop.  */
  pred_bb = entry_edge->src;
  loop_header = split_edge (entry_edge);
  loop_latch = split_edge (single_succ_edge (loop_header));
  succ_bb = single_succ (loop_latch);
  make_edge (loop_header, succ_bb, 0);
  redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);

  /* Set immediate dominator information.  */
  set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
  set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
  set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);

  /* Initialize a loop structure and put it in a loop hierarchy.  */
  loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = loop_latch;
  add_loop (loop, outer);

  /* TODO: Fix frequencies and counts.  */
  prob = REG_BR_PROB_BASE / 2;

  scale_loop_frequencies (loop, REG_BR_PROB_BASE - prob, REG_BR_PROB_BASE);

  /* Update dominators.  */
  update_dominators_in_loop (loop);

  /* Modify edge flags.  */
  exit_e = single_exit (loop);
  exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
  single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;

  /* Construct IV code in loop.  Both INITIAL_VALUE and UPPER_BOUND are
     gimplified onto the preheader edge, so they must be loop invariant.  */
  initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
  if (stmts)
    {
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
  if (stmts)
    {
      gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
      gsi_commit_edge_inserts ();
    }

  gsi = gsi_last_bb (loop_header);
  create_iv (initial_value, stride, iv, loop, &gsi, false,
	     iv_before, iv_after);

  /* Insert loop exit condition: continue while iv_before < upper_bound.  */
  cond_expr = gimple_build_cond
    (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);

  exit_test = gimple_cond_lhs (cond_expr);
  exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
					false, GSI_NEW_STMT);
  gimple_cond_set_lhs (cond_expr, exit_test);
  gsi = gsi_last_bb (exit_e->src);
  gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);

  split_block_after_labels (loop_header);

  return loop;
}
851
/* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
   latch to header and update loop tree and dominators
   accordingly.  Everything between them plus LATCH_EDGE destination must
   be dominated by HEADER_EDGE destination, and back-reachable from
   LATCH_EDGE source.  HEADER_EDGE is redirected to basic block SWITCH_BB,
   FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
   TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
   Returns the newly created loop.  Frequencies and counts in the new loop
   are scaled by FALSE_SCALE and in the old one by TRUE_SCALE.  */

struct loop *
loopify (edge latch_edge, edge header_edge,
	 basic_block switch_bb, edge true_edge, edge false_edge,
	 bool redirect_all_edges, unsigned true_scale, unsigned false_scale)
{
  basic_block succ_bb = latch_edge->dest;
  basic_block pred_bb = header_edge->src;
  struct loop *loop = alloc_loop ();
  struct loop *outer = loop_outer (succ_bb->loop_father);
  int freq;
  gcov_type cnt;
  edge e;
  edge_iterator ei;

  loop->header = header_edge->dest;
  loop->latch = latch_edge->src;

  /* Record the profile of HEADER_EDGE before any redirection.  */
  freq = EDGE_FREQUENCY (header_edge);
  cnt = header_edge->count;

  /* Redirect edges.  */
  loop_redirect_edge (latch_edge, loop->header);
  loop_redirect_edge (true_edge, succ_bb);

  /* During loop versioning, one of the switch_bb edge is already properly
     set.  Do not redirect it again unless redirect_all_edges is true.  */
  if (redirect_all_edges)
    {
      loop_redirect_edge (header_edge, switch_bb);
      loop_redirect_edge (false_edge, loop->header);

      /* Update dominators.  */
      set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
      set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
    }

  set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);

  /* Compute new loop.  */
  add_loop (loop, outer);

  /* Add switch_bb to appropriate loop.  */
  if (switch_bb->loop_father)
    remove_bb_from_loops (switch_bb);
  add_bb_to_loop (switch_bb, outer);

  /* Fix frequencies.  */
  if (redirect_all_edges)
    {
      /* SWITCH_BB inherits the saved profile of the original header edge;
	 its outgoing edge counts follow from their probabilities.  */
      switch_bb->frequency = freq;
      switch_bb->count = cnt;
      FOR_EACH_EDGE (e, ei, switch_bb->succs)
	{
	  e->count = apply_probability (switch_bb->count, e->probability);
	}
    }
  scale_loop_frequencies (loop, false_scale, REG_BR_PROB_BASE);
  scale_loop_frequencies (succ_bb->loop_father, true_scale, REG_BR_PROB_BASE);
  update_dominators_in_loop (loop);

  return loop;
}
924
/* Remove the latch edge of a LOOP and update loops to indicate that
   the LOOP was removed.  After this function, original loop latch will
   have no successor, which caller is expected to fix somehow.

   If this may cause the information about irreducible regions to become
   invalid, IRRED_INVALIDATED is set to true.

   LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
   basic blocks that had non-trivial update on their loop_father.  */

void
unloop (struct loop *loop, bool *irred_invalidated,
	bitmap loop_closed_ssa_invalidated)
{
  basic_block *body;
  struct loop *ploop;
  unsigned i, n;
  basic_block latch = loop->latch;
  bool dummy = false;

  if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* This is relatively straightforward.  The dominators are unchanged, as
     loop header dominates loop latch, so the only thing we have to care of
     is the placement of loops and basic blocks inside the loop tree.  We
     move them all to the loop->outer, and then let fix_bb_placements do
     its work.  */

  body = get_loop_body (loop);
  n = loop->num_nodes;
  for (i = 0; i < n; i++)
    if (body[i]->loop_father == loop)
      {
	remove_bb_from_loops (body[i]);
	add_bb_to_loop (body[i], loop_outer (loop));
      }
  free (body);

  /* Re-parent the immediate subloops of LOOP to its outer loop.  */
  while (loop->inner)
    {
      ploop = loop->inner;
      flow_loop_tree_node_remove (ploop);
      flow_loop_tree_node_add (loop_outer (loop), ploop);
    }

  /* Remove the loop and free its data.  */
  delete_loop (loop);

  remove_edge (single_succ_edge (latch));

  /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
     there is an irreducible region inside the cancelled loop, the flags will
     be still correct.  */
  fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
}
981
982 /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that
983 condition stated in description of fix_loop_placement holds for them.
984 It is used in case when we removed some edges coming out of LOOP, which
985 may cause the right placement of LOOP inside loop tree to change.
986
987 IRRED_INVALIDATED is set to true if a change in the loop structures might
988 invalidate the information about irreducible regions. */
989
990 static void
991 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
992 {
993 struct loop *outer;
994
995 while (loop_outer (loop))
996 {
997 outer = loop_outer (loop);
998 if (!fix_loop_placement (loop, irred_invalidated))
999 break;
1000
1001 /* Changing the placement of a loop in the loop tree may alter the
1002 validity of condition 2) of the description of fix_bb_placement
1003 for its preheader, because the successor is the header and belongs
1004 to the loop. So call fix_bb_placements to fix up the placement
1005 of the preheader and (possibly) of its predecessors. */
1006 fix_bb_placements (loop_preheader_edge (loop)->src,
1007 irred_invalidated, NULL);
1008 loop = outer;
1009 }
1010 }
1011
1012 /* Duplicate loop bounds and other information we store about
1013 the loop into its duplicate. */
1014
1015 void
1016 copy_loop_info (struct loop *loop, struct loop *target)
1017 {
1018 gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
1019 target->any_upper_bound = loop->any_upper_bound;
1020 target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
1021 target->any_estimate = loop->any_estimate;
1022 target->nb_iterations_estimate = loop->nb_iterations_estimate;
1023 target->estimate_state = loop->estimate_state;
1024 target->warned_aggressive_loop_optimizations
1025 |= loop->warned_aggressive_loop_optimizations;
1026 }
1027
1028 /* Copies copy of LOOP as subloop of TARGET loop, placing newly
1029 created loop into loops structure. */
1030 struct loop *
1031 duplicate_loop (struct loop *loop, struct loop *target)
1032 {
1033 struct loop *cloop;
1034 cloop = alloc_loop ();
1035 place_new_loop (cfun, cloop);
1036
1037 copy_loop_info (loop, cloop);
1038
1039 /* Mark the new loop as copy of LOOP. */
1040 set_loop_copy (loop, cloop);
1041
1042 /* Add it to target. */
1043 flow_loop_tree_node_add (target, cloop);
1044
1045 return cloop;
1046 }
1047
1048 /* Copies structure of subloops of LOOP into TARGET loop, placing
1049 newly created loops into loop tree. */
1050 void
1051 duplicate_subloops (struct loop *loop, struct loop *target)
1052 {
1053 struct loop *aloop, *cloop;
1054
1055 for (aloop = loop->inner; aloop; aloop = aloop->next)
1056 {
1057 cloop = duplicate_loop (aloop, target);
1058 duplicate_subloops (aloop, cloop);
1059 }
1060 }
1061
/* Copy the N loops stored in array COPIED_LOOPS, together with their
   subloop structure, into TARGET loop, placing the new loops into the
   loop tree.  */
static void
copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
{
  for (int ix = 0; ix < n; ix++)
    {
      struct loop *new_loop = duplicate_loop (copied_loops[ix], target);
      duplicate_subloops (copied_loops[ix], new_loop);
    }
}
1076
1077 /* Redirects edge E to basic block DEST. */
1078 static void
1079 loop_redirect_edge (edge e, basic_block dest)
1080 {
1081 if (e->dest == dest)
1082 return;
1083
1084 redirect_edge_and_branch_force (e, dest);
1085 }
1086
1087 /* Check whether LOOP's body can be duplicated. */
1088 bool
1089 can_duplicate_loop_p (const struct loop *loop)
1090 {
1091 int ret;
1092 basic_block *bbs = get_loop_body (loop);
1093
1094 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1095 free (bbs);
1096
1097 return ret;
1098 }
1099
1100 /* Sets probability and count of edge E to zero. The probability and count
1101 is redistributed evenly to the remaining edges coming from E->src. */
1102
1103 static void
1104 set_zero_probability (edge e)
1105 {
1106 basic_block bb = e->src;
1107 edge_iterator ei;
1108 edge ae, last = NULL;
1109 unsigned n = EDGE_COUNT (bb->succs);
1110 gcov_type cnt = e->count, cnt1;
1111 unsigned prob = e->probability, prob1;
1112
1113 gcc_assert (n > 1);
1114 cnt1 = cnt / (n - 1);
1115 prob1 = prob / (n - 1);
1116
1117 FOR_EACH_EDGE (ae, ei, bb->succs)
1118 {
1119 if (ae == e)
1120 continue;
1121
1122 ae->probability += prob1;
1123 ae->count += cnt1;
1124 last = ae;
1125 }
1126
1127 /* Move the rest to one of the edges. */
1128 last->probability += prob % (n - 1);
1129 last->count += cnt % (n - 1);
1130
1131 e->probability = 0;
1132 e->count = 0;
1133 }
1134
/* Duplicates body of LOOP to given edge E NDUPL times.  Takes care of updating
   loop structure and dominators.  E's destination must be LOOP header for
   this to work, i.e. it must be entry or latch edge of this loop; these are
   unique, as the loops must have preheaders for this function to work
   correctly (in case E is latch, the function unrolls the loop, if E is entry
   edge, it peels the loop).  Store edges created by copying ORIG edge from
   copies corresponding to set bits in WONT_EXIT bitmap (bit 0 corresponds to
   original LOOP body, the other copies are numbered in order given by control
   flow through them) into TO_REMOVE array.  Returns false if duplication is
   impossible.  */

bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
			       unsigned int ndupl, sbitmap wont_exit,
			       edge orig, vec<edge> *to_remove,
			       int flags)
{
  struct loop *target, *aloop;
  struct loop **orig_loops;
  unsigned n_orig_loops;
  basic_block header = loop->header, latch = loop->latch;
  basic_block *new_bbs, *bbs, *first_active;
  basic_block new_bb, bb, first_active_latch = NULL;
  edge ae, latch_edge;
  edge spec_edges[2], new_spec_edges[2];
#define SE_LATCH 0
#define SE_ORIG 1
  unsigned i, j, n;
  int is_latch = (latch == e->src);
  int scale_act = 0, *scale_step = NULL, scale_main = 0;
  int scale_after_exit = 0;
  int p, freq_in, freq_le, freq_out_orig;
  int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
  int add_irreducible_flag;
  basic_block place_after;
  bitmap bbs_to_scale = NULL;
  bitmap_iterator bi;

  gcc_assert (e->dest == loop->header);
  gcc_assert (ndupl > 0);

  if (orig)
    {
      /* Orig must be edge out of the loop.  */
      gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
      gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
    }

  /* BBS holds the loop body in dominance order: header first, latch last.  */
  n = loop->num_nodes;
  bbs = get_loop_body_in_dom_order (loop);
  gcc_assert (bbs[0] == loop->header);
  gcc_assert (bbs[n - 1] == loop->latch);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return false;
    }
  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  /* In case we are doing loop peeling and the loop is in the middle of
     irreducible region, the peeled copies will be inside it too.  */
  add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
  gcc_assert (!is_latch || !add_irreducible_flag);

  /* Find edge from latch.  */
  latch_edge = loop_latch_edge (loop);

  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      /* Calculate coefficients by that we have to scale frequencies
	 of duplicated loop bodies.  */
      freq_in = header->frequency;
      freq_le = EDGE_FREQUENCY (latch_edge);
      /* Guard against zero header frequency and clamp the latch frequency
	 so the pass-through probabilities stay in [0, REG_BR_PROB_BASE].  */
      if (freq_in == 0)
	freq_in = 1;
      if (freq_in < freq_le)
	freq_in = freq_le;
      freq_out_orig = orig ? EDGE_FREQUENCY (orig) : freq_in - freq_le;
      if (freq_out_orig > freq_in - freq_le)
	freq_out_orig = freq_in - freq_le;
      prob_pass_thru = RDIV (REG_BR_PROB_BASE * freq_le, freq_in);
      prob_pass_wont_exit =
	      RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in);

      if (orig
	  && REG_BR_PROB_BASE - orig->probability != 0)
	{
	  /* The blocks that are dominated by a removed exit edge ORIG have
	     frequencies scaled by this.  */
	  scale_after_exit
	      = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE,
				    REG_BR_PROB_BASE - orig->probability);
	  bbs_to_scale = BITMAP_ALLOC (NULL);
	  for (i = 0; i < n; i++)
	    {
	      if (bbs[i] != orig->src
		  && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
		bitmap_set_bit (bbs_to_scale, i);
	    }
	}

      scale_step = XNEWVEC (int, ndupl);

      for (i = 1; i <= ndupl; i++)
	scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
				? prob_pass_wont_exit
				: prob_pass_thru;

      /* Complete peeling is special as the probability of exit in last
	 copy becomes 1.  */
      if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
	{
	  int wanted_freq = EDGE_FREQUENCY (e);

	  if (wanted_freq > freq_in)
	    wanted_freq = freq_in;

	  gcc_assert (!is_latch);
	  /* First copy has frequency of incoming edge.  Each subsequent
	     frequency should be reduced by prob_pass_wont_exit.  Caller
	     should've managed the flags so all except for original loop
	     have wont_exit set.  */
	  scale_act = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
	  /* Now simulate the duplication adjustments and compute header
	     frequency of the last copy.  */
	  for (i = 0; i < ndupl; i++)
	    wanted_freq = combine_probabilities (wanted_freq, scale_step[i]);
	  scale_main = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
	}
      else if (is_latch)
	{
	  prob_pass_main = bitmap_bit_p (wont_exit, 0)
				? prob_pass_wont_exit
				: prob_pass_thru;
	  p = prob_pass_main;
	  scale_main = REG_BR_PROB_BASE;
	  for (i = 0; i < ndupl; i++)
	    {
	      scale_main += p;
	      p = combine_probabilities (p, scale_step[i]);
	    }
	  scale_main = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE, scale_main);
	  scale_act = combine_probabilities (scale_main, prob_pass_main);
	}
      else
	{
	  scale_main = REG_BR_PROB_BASE;
	  for (i = 0; i < ndupl; i++)
	    scale_main = combine_probabilities (scale_main, scale_step[i]);
	  scale_act = REG_BR_PROB_BASE - prob_pass_thru;
	}
      for (i = 0; i < ndupl; i++)
	gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
      gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
		  && scale_act >= 0 && scale_act <= REG_BR_PROB_BASE);
    }

  /* Loop the new bbs will belong to.  */
  target = e->src->loop_father;

  /* Original loops.  */
  n_orig_loops = 0;
  for (aloop = loop->inner; aloop; aloop = aloop->next)
    n_orig_loops++;
  orig_loops = XNEWVEC (struct loop *, n_orig_loops);
  for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
    orig_loops[i] = aloop;

  set_loop_copy (loop, target);

  first_active = XNEWVEC (basic_block, n);
  if (is_latch)
    {
      /* When unrolling, the original body is the first copy in control
	 flow order.  */
      memcpy (first_active, bbs, n * sizeof (basic_block));
      first_active_latch = latch;
    }

  spec_edges[SE_ORIG] = orig;
  spec_edges[SE_LATCH] = latch_edge;

  /* Duplicate the body NDUPL times, placing each copy after the previous
     one in the insn/stmt stream.  */
  place_after = e->src;
  for (j = 0; j < ndupl; j++)
    {
      /* Copy loops.  */
      copy_loops_to (orig_loops, n_orig_loops, target);

      /* Copy bbs.  */
      copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
		place_after, true);
      place_after = new_spec_edges[SE_LATCH]->src;

      if (flags & DLTHE_RECORD_COPY_NUMBER)
	for (i = 0; i < n; i++)
	  {
	    gcc_assert (!new_bbs[i]->aux);
	    /* Copy number is 1-based; 0 is the original body.  */
	    new_bbs[i]->aux = (void *)(size_t)(j + 1);
	  }

      /* Note whether the blocks and edges belong to an irreducible loop.  */
      if (add_irreducible_flag)
	{
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags |= BB_DUPLICATED;
	  for (i = 0; i < n; i++)
	    {
	      edge_iterator ei;
	      new_bb = new_bbs[i];
	      if (new_bb->loop_father == target)
		new_bb->flags |= BB_IRREDUCIBLE_LOOP;

	      FOR_EACH_EDGE (ae, ei, new_bb->succs)
		if ((ae->dest->flags & BB_DUPLICATED)
		    && (ae->src->loop_father == target
			|| ae->dest->loop_father == target))
		  ae->flags |= EDGE_IRREDUCIBLE_LOOP;
	    }
	  /* BB_DUPLICATED was only a temporary marker for this pass.  */
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags &= ~BB_DUPLICATED;
	}

      /* Redirect the special edges.  */
      if (is_latch)
	{
	  /* Unrolling: splice the copy between the old latch and the
	     header; the copy's latch becomes the loop latch.  */
	  redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
	  latch = loop->latch = new_bbs[n - 1];
	  e = latch_edge = new_spec_edges[SE_LATCH];
	}
      else
	{
	  /* Peeling: splice the copy between E's source and the header.  */
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  redirect_edge_and_branch_force (e, new_bbs[0]);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
	  e = new_spec_edges[SE_LATCH];
	}

      /* Record exit edge in this copy.  */
      if (orig && bitmap_bit_p (wont_exit, j + 1))
	{
	  if (to_remove)
	    to_remove->safe_push (new_spec_edges[SE_ORIG]);
	  set_zero_probability (new_spec_edges[SE_ORIG]);

	  /* Scale the frequencies of the blocks dominated by the exit.  */
	  if (bbs_to_scale)
	    {
	      EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
		{
		  scale_bbs_frequencies_int (new_bbs + i, 1, scale_after_exit,
					     REG_BR_PROB_BASE);
		}
	    }
	}

      /* Record the first copy in the control flow order if it is not
	 the original loop (i.e. in case of peeling).  */
      if (!first_active_latch)
	{
	  memcpy (first_active, new_bbs, n * sizeof (basic_block));
	  first_active_latch = new_bbs[n - 1];
	}

      /* Set counts and frequencies.  */
      if (flags & DLTHE_FLAG_UPDATE_FREQ)
	{
	  scale_bbs_frequencies_int (new_bbs, n, scale_act, REG_BR_PROB_BASE);
	  scale_act = combine_probabilities (scale_act, scale_step[j]);
	}
    }
  free (new_bbs);
  free (orig_loops);

  /* Record the exit edge in the original loop body, and update the frequencies.  */
  if (orig && bitmap_bit_p (wont_exit, 0))
    {
      if (to_remove)
	to_remove->safe_push (orig);
      set_zero_probability (orig);

      /* Scale the frequencies of the blocks dominated by the exit.  */
      if (bbs_to_scale)
	{
	  EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
	    {
	      scale_bbs_frequencies_int (bbs + i, 1, scale_after_exit,
					 REG_BR_PROB_BASE);
	    }
	}
    }

  /* Update the original loop.  */
  if (!is_latch)
    set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      scale_bbs_frequencies_int (bbs, n, scale_main, REG_BR_PROB_BASE);
      free (scale_step);
    }

  /* Update dominators of outer blocks if affected.  */
  for (i = 0; i < n; i++)
    {
      basic_block dominated, dom_bb;
      vec<basic_block> dom_bbs;
      unsigned j;

      bb = bbs[i];
      /* Clear any copy number left in AUX by DLTHE_RECORD_COPY_NUMBER.  */
      bb->aux = 0;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
      FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
	{
	  if (flow_bb_inside_loop_p (loop, dominated))
	    continue;
	  dom_bb = nearest_common_dominator (
			CDI_DOMINATORS, first_active[i], first_active_latch);
	  set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
	}
      dom_bbs.release ();
    }
  free (first_active);

  free (bbs);
  BITMAP_FREE (bbs_to_scale);

  return true;
}
1467
1468 /* A callback for make_forwarder block, to redirect all edges except for
1469 MFB_KJ_EDGE to the entry part. E is the edge for that we should decide
1470 whether to redirect it. */
1471
1472 edge mfb_kj_edge;
1473 bool
1474 mfb_keep_just (edge e)
1475 {
1476 return e != mfb_kj_edge;
1477 }
1478
1479 /* True when a candidate preheader BLOCK has predecessors from LOOP. */
1480
1481 static bool
1482 has_preds_from_loop (basic_block block, struct loop *loop)
1483 {
1484 edge e;
1485 edge_iterator ei;
1486
1487 FOR_EACH_EDGE (e, ei, block->preds)
1488 if (e->src->loop_father == loop)
1489 return true;
1490 return false;
1491 }
1492
/* Creates a pre-header for a LOOP.  Returns newly created block.  Unless
   CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
   entry; otherwise we also force preheader block to have only one successor.
   When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
   to be a fallthru predecessor to the loop header and to have only
   predecessors from outside of the loop.
   The function also updates dominators.  */

basic_block
create_preheader (struct loop *loop, int flags)
{
  edge e, fallthru;
  basic_block dummy;
  int nentry = 0;
  bool irred = false;
  bool latch_edge_was_fallthru;
  edge one_succ_pred = NULL, single_entry = NULL;
  edge_iterator ei;

  /* Count the entry edges (every header predecessor except the latch),
     remembering the last one seen and any whose source has a single
     successor (a candidate to keep fallthru-ness below).  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e->src == loop->latch)
	continue;
      irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
      nentry++;
      single_entry = e;
      if (single_succ_p (e->src))
	one_succ_pred = e;
    }
  gcc_assert (nentry);
  if (nentry == 1)
    {
      /* Single entry: a preheader may already exist; only build a forwarder
	 block when the entry block does not satisfy FLAGS.  */
      bool need_forwarder_block = false;

      /* We do not allow entry block to be the loop preheader, since we
	 cannot emit code there.  */
      if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	need_forwarder_block = true;
      else
	{
	  /* If we want simple preheaders, also force the preheader to have
	     just a single successor.  */
	  if ((flags & CP_SIMPLE_PREHEADERS)
	      && !single_succ_p (single_entry->src))
	    need_forwarder_block = true;
	  /* If we want fallthru preheaders, also create forwarder block when
	     preheader ends with a jump or has predecessors from loop.  */
	  else if ((flags & CP_FALLTHRU_PREHEADERS)
		   && (JUMP_P (BB_END (single_entry->src))
		       || has_preds_from_loop (single_entry->src, loop)))
	    need_forwarder_block = true;
	}
      if (! need_forwarder_block)
	return NULL;
    }

  /* Split the header: all entry edges go to the new forwarder block DUMMY,
     only the latch edge stays on the (new) header.  */
  mfb_kj_edge = loop_latch_edge (loop);
  latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
  fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
  dummy = fallthru->src;
  loop->header = fallthru->dest;

  /* Try to be clever in placing the newly created preheader.  The idea is to
     avoid breaking any "fallthruness" relationship between blocks.

     The preheader was created just before the header and all incoming edges
     to the header were redirected to the preheader, except the latch edge.
     So the only problematic case is when this latch edge was a fallthru
     edge: it is not anymore after the preheader creation so we have broken
     the fallthruness.  We're therefore going to look for a better place.  */
  if (latch_edge_was_fallthru)
    {
      if (one_succ_pred)
	e = one_succ_pred;
      else
	e = EDGE_PRED (dummy, 0);

      move_block_after (dummy, e->src);
    }

  if (irred)
    {
      /* The new preheader inherits membership in the irreducible region.  */
      dummy->flags |= BB_IRREDUCIBLE_LOOP;
      single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (dump_file)
    fprintf (dump_file, "Created preheader block for loop %i\n",
	     loop->num);

  if (flags & CP_FALLTHRU_PREHEADERS)
    gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
		&& !JUMP_P (BB_END (dummy)));

  return dummy;
}
1589
1590 /* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */
1591
1592 void
1593 create_preheaders (int flags)
1594 {
1595 struct loop *loop;
1596
1597 if (!current_loops)
1598 return;
1599
1600 FOR_EACH_LOOP (loop, 0)
1601 create_preheader (loop, flags);
1602 loops_state_set (LOOPS_HAVE_PREHEADERS);
1603 }
1604
1605 /* Forces all loop latches to have only single successor. */
1606
1607 void
1608 force_single_succ_latches (void)
1609 {
1610 struct loop *loop;
1611 edge e;
1612
1613 FOR_EACH_LOOP (loop, 0)
1614 {
1615 if (loop->latch != loop->header && single_succ_p (loop->latch))
1616 continue;
1617
1618 e = find_edge (loop->latch, loop->header);
1619 gcc_checking_assert (e != NULL);
1620
1621 split_edge (e);
1622 }
1623 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1624 }
1625
/* This function is called from loop_version.  It splits the entry edge
   of the loop we want to version, adds the versioning condition, and
   adjust the edges to the two versions of the loop appropriately.
   e is an incoming edge.  Returns the basic block containing the
   condition.

   --- edge e ---- > [second_head]

   Split it and insert new conditional expression and adjust edges.

    --- edge e ---> [cond expr] ---> [first_head]
			|
			+---------> [second_head]

   THEN_PROB is the probability of then branch of the condition.  */

static basic_block
lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
			   edge e, void *cond_expr, unsigned then_prob)
{
  basic_block new_head = NULL;
  edge e1;

  gcc_assert (e->dest == second_head);

  /* Split edge 'e'.  This will create a new basic block, where we can
     insert conditional expr.  */
  new_head = split_edge (e);

  /* Emit COND_EXPR at the end of NEW_HEAD (IR-specific hook).  */
  lv_add_condition_to_bb (first_head, second_head, new_head,
			  cond_expr);

  /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there.  */
  /* E is now the (fallthru/false) edge new_head->second_head; E1 is the
     new true edge to FIRST_HEAD.  Distribute THEN_PROB between them.  */
  e = single_succ_edge (new_head);
  e1 = make_edge (new_head, first_head,
		  current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
  e1->probability = then_prob;
  e->probability = REG_BR_PROB_BASE - then_prob;
  e1->count = apply_probability (e->count, e1->probability);
  e->count = apply_probability (e->count, e->probability);

  /* NEW_HEAD now dominates both loop heads.  */
  set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
  set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);

  /* Adjust loop header phi nodes.  */
  lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);

  return new_head;
}
1675
/* Main entry point for Loop Versioning transformation.

   This transformation given a condition and a loop, creates
   -if (condition) { loop_copy1 } else { loop_copy2 },
   where loop_copy1 is the loop transformed in one way, and loop_copy2
   is the loop transformed in another way (or unchanged).  'condition'
   may be a run time test for things that were not resolved by static
   analysis (overlapping ranges (anti-aliasing), alignment, etc.).

   THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
   is the ratio by that the frequencies in the original loop should
   be scaled.  ELSE_SCALE is the ratio by that the frequencies in the
   new loop should be scaled.

   If PLACE_AFTER is true, we place the new loop after LOOP in the
   instruction stream, otherwise it is placed before LOOP.  */

struct loop *
loop_version (struct loop *loop,
	      void *cond_expr, basic_block *condition_bb,
	      unsigned then_prob, unsigned then_scale, unsigned else_scale,
	      bool place_after)
{
  basic_block first_head, second_head;
  edge entry, latch_edge, true_edge, false_edge;
  int irred_flag;
  struct loop *nloop;
  basic_block cond_bb;

  /* Record entry and latch edges for the loop */
  entry = loop_preheader_edge (loop);
  /* Temporarily clear the irreducible flag on the entry edge; it is
     restored (and propagated to the new edges) at the end.  */
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;

  /* Note down head of loop as first_head.  */
  first_head = entry->dest;

  /* Duplicate loop.  */
  if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
					       NULL, NULL, NULL, 0))
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  /* After duplication entry edge now points to new loop head block.
     Note down new head as second_head.  */
  second_head = entry->dest;

  /* Split loop entry edge and insert new block with cond expr.  */
  cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
				       entry, cond_expr, then_prob);
  if (condition_bb)
    *condition_bb = cond_bb;

  if (!cond_bb)
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  /* Turn the duplicated body into a proper loop of its own, scaling the
     frequencies of the two versions by THEN_SCALE/ELSE_SCALE.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)),
		   cond_bb, true_edge, false_edge,
		   false /* Do not redirect all edges.  */,
		   then_scale, else_scale);

  copy_loop_info (loop, nloop);

  /* loopify redirected latch_edge.  Update its PENDING_STMTS.  */
  lv_flush_pending_stmts (latch_edge);

  /* loopify redirected condition_bb's succ edge.  Update its PENDING_STMTS.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  lv_flush_pending_stmts (false_edge);
  /* Adjust irreducible flag.  */
  if (irred_flag)
    {
      cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
      loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (place_after)
    {
      /* Move the new loop's blocks right after the original loop's latch
	 in the instruction stream, preserving their relative order.  */
      basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
      unsigned i;

      after = loop->latch;

      for (i = 0; i < nloop->num_nodes; i++)
	{
	  move_block_after (bbs[i], after);
	  after = bbs[i];
	}
      free (bbs);
    }

  /* At this point condition_bb is loop preheader with two successors,
     first_head and second_head.   Make sure that loop preheader has only
     one successor.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}