[gcc.git] / gcc / cfgloopmanip.c
1 /* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "predict.h"
26 #include "vec.h"
27 #include "hashtab.h"
28 #include "hash-set.h"
29 #include "symtab.h"
30 #include "inchash.h"
31 #include "hard-reg-set.h"
32 #include "input.h"
33 #include "function.h"
34 #include "dominance.h"
35 #include "cfg.h"
36 #include "cfganal.h"
37 #include "basic-block.h"
38 #include "cfgloop.h"
39 #include "tree.h"
40 #include "fold-const.h"
41 #include "tree-ssa-alias.h"
42 #include "internal-fn.h"
43 #include "gimple-expr.h"
44 #include "is-a.h"
45 #include "gimple.h"
46 #include "gimple-iterator.h"
47 #include "gimplify-me.h"
48 #include "tree-ssa-loop-manip.h"
49 #include "dumpfile.h"
50
51 static void copy_loops_to (struct loop **, int,
52 struct loop *);
53 static void loop_redirect_edge (edge, basic_block);
54 static void remove_bbs (basic_block *, int);
55 static bool rpe_enum_p (const_basic_block, const void *);
56 static int find_path (edge, basic_block **);
57 static void fix_loop_placements (struct loop *, bool *);
58 static bool fix_bb_placement (basic_block);
59 static void fix_bb_placements (basic_block, bool *, bitmap);
60
61 /* Checks whether basic block BB is dominated by DATA. */
62 static bool
63 rpe_enum_p (const_basic_block bb, const void *data)
64 {
65 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
66 }
67
68 /* Remove the basic blocks BBS.  NBBS is the number of basic blocks.  */
69
70 static void
71 remove_bbs (basic_block *bbs, int nbbs)
72 {
73 int i;
74
75 for (i = 0; i < nbbs; i++)
76 delete_basic_block (bbs[i]);
77 }
78
79 /* Find the path -- i.e. the basic blocks dominated by edge E -- and put them
80 into array BBS, which is allocated large enough to contain them.
81 E->dest must have exactly one predecessor for this to work (this is
82 easy to achieve, and we do not do it here because we do not want this
83 function to alter anything).  The number of basic blocks in the
84 path is returned.  */
85 static int
86 find_path (edge e, basic_block **bbs)
87 {
88 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
89
90 /* Find bbs in the path. */
91 *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
92 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
93 n_basic_blocks_for_fn (cfun), e->dest);
94 }
95
96 /* Fix the placement of basic block BB inside the loop hierarchy --
97 Let L be the loop to which BB belongs.  Then every successor of BB must either
98 1) belong to some superloop of loop L, or
99 2) be the header of a loop K such that K->outer is a superloop of L.
100 Returns true if we had to move BB into another loop to enforce this condition,
101 false if the placement of BB was already correct (provided that the placements
102 of its successors are correct).  */
103 static bool
104 fix_bb_placement (basic_block bb)
105 {
106 edge e;
107 edge_iterator ei;
108 struct loop *loop = current_loops->tree_root, *act;
109
110 FOR_EACH_EDGE (e, ei, bb->succs)
111 {
112 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
113 continue;
114
115 act = e->dest->loop_father;
116 if (act->header == e->dest)
117 act = loop_outer (act);
118
119 if (flow_loop_nested_p (loop, act))
120 loop = act;
121 }
122
123 if (loop == bb->loop_father)
124 return false;
125
126 remove_bb_from_loops (bb);
127 add_bb_to_loop (bb, loop);
128
129 return true;
130 }
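
/* An illustrative sketch, not part of the original file: a checker for the
   invariant that fix_bb_placement enforces, using only routines already
   referenced above.  The helper name example_bb_placement_ok_p is
   hypothetical.  */

static bool
example_bb_placement_ok_p (basic_block bb)
{
  edge e;
  edge_iterator ei;
  struct loop *l = bb->loop_father;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      struct loop *act;

      if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	continue;

      /* For a successor that is a loop header, condition 2) applies to the
	 outer loop of that header.  */
      act = e->dest->loop_father;
      if (act->header == e->dest)
	act = loop_outer (act);

      /* ACT must be L itself or one of its superloops.  */
      if (act != l && !flow_loop_nested_p (act, l))
	return false;
    }

  return true;
}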
131
132 /* Fix the placement of LOOP inside the loop tree, i.e. find the innermost
133 superloop of LOOP to which at least one exit edge of LOOP leads, and set it
134 as the immediate superloop of LOOP. Return true if the immediate superloop
135 of LOOP changed.
136
137 IRRED_INVALIDATED is set to true if a change in the loop structures might
138 invalidate the information about irreducible regions. */
139
140 static bool
141 fix_loop_placement (struct loop *loop, bool *irred_invalidated)
142 {
143 unsigned i;
144 edge e;
145 vec<edge> exits = get_loop_exit_edges (loop);
146 struct loop *father = current_loops->tree_root, *act;
147 bool ret = false;
148
149 FOR_EACH_VEC_ELT (exits, i, e)
150 {
151 act = find_common_loop (loop, e->dest->loop_father);
152 if (flow_loop_nested_p (father, act))
153 father = act;
154 }
155
156 if (father != loop_outer (loop))
157 {
158 for (act = loop_outer (loop); act != father; act = loop_outer (act))
159 act->num_nodes -= loop->num_nodes;
160 flow_loop_tree_node_remove (loop);
161 flow_loop_tree_node_add (father, loop);
162
163 /* The exit edges of LOOP no longer exit its original immediate
164 superloops; remove them from the appropriate exit lists.  */
165 FOR_EACH_VEC_ELT (exits, i, e)
166 {
167 /* We may need to recompute irreducible loops. */
168 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
169 *irred_invalidated = true;
170 rescan_loop_exit (e, false, false);
171 }
172
173 ret = true;
174 }
175
176 exits.release ();
177 return ret;
178 }
179
180 /* Fix placements of basic blocks inside the loop hierarchy stored in loops;
181 i.e. enforce the condition stated in the description of fix_bb_placement.  We
182 start from basic block FROM that had some of its successors removed, so that
183 its placement no longer has to be correct, and iteratively fix the placement
184 of its predecessors, which may change if the placement of FROM changed.  Also
185 fix the placement of subloops of FROM->loop_father, which might also be
186 altered due to this change; the condition for them is similar, except that
187 instead of successors we consider edges coming out of the loops.
188
189 If the changes may invalidate the information about irreducible regions,
190 IRRED_INVALIDATED is set to true.
191
192 If LOOP_CLOSED_SSA_INVALIDATED is non-NULL then all basic blocks with a
193 changed loop_father are collected there.  */
194
195 static void
196 fix_bb_placements (basic_block from,
197 bool *irred_invalidated,
198 bitmap loop_closed_ssa_invalidated)
199 {
200 sbitmap in_queue;
201 basic_block *queue, *qtop, *qbeg, *qend;
202 struct loop *base_loop, *target_loop;
203 edge e;
204
205 /* We pass through blocks back-reachable from FROM, testing whether some
206 of their successors moved to an outer loop.  It may be necessary to
207 iterate several times, but the process is finite, as we stop unless we
208 move the basic block up the loop structure.  The whole story is a bit
209 more complicated due to the presence of subloops, which are moved using
210 fix_loop_placement.  */
211
212 base_loop = from->loop_father;
213 /* If we are already in the outermost loop, the basic blocks cannot be moved
214 outside of it. If FROM is the header of the base loop, it cannot be moved
215 outside of it, either. In both cases, we can end now. */
216 if (base_loop == current_loops->tree_root
217 || from == base_loop->header)
218 return;
219
220 in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
221 bitmap_clear (in_queue);
222 bitmap_set_bit (in_queue, from->index);
223 /* Prevent us from going out of the base_loop. */
224 bitmap_set_bit (in_queue, base_loop->header->index);
225
226 queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
227 qtop = queue + base_loop->num_nodes + 1;
228 qbeg = queue;
229 qend = queue + 1;
230 *qbeg = from;
231
232 while (qbeg != qend)
233 {
234 edge_iterator ei;
235 from = *qbeg;
236 qbeg++;
237 if (qbeg == qtop)
238 qbeg = queue;
239 bitmap_clear_bit (in_queue, from->index);
240
241 if (from->loop_father->header == from)
242 {
243 /* Subloop header, maybe move the loop upward. */
244 if (!fix_loop_placement (from->loop_father, irred_invalidated))
245 continue;
246 target_loop = loop_outer (from->loop_father);
247 if (loop_closed_ssa_invalidated)
248 {
249 basic_block *bbs = get_loop_body (from->loop_father);
250 for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
251 bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
252 free (bbs);
253 }
254 }
255 else
256 {
257 /* Ordinary basic block. */
258 if (!fix_bb_placement (from))
259 continue;
260 target_loop = from->loop_father;
261 if (loop_closed_ssa_invalidated)
262 bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
263 }
264
265 FOR_EACH_EDGE (e, ei, from->succs)
266 {
267 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
268 *irred_invalidated = true;
269 }
270
271 /* Something has changed, insert predecessors into queue. */
272 FOR_EACH_EDGE (e, ei, from->preds)
273 {
274 basic_block pred = e->src;
275 struct loop *nca;
276
277 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
278 *irred_invalidated = true;
279
280 if (bitmap_bit_p (in_queue, pred->index))
281 continue;
282
283 /* If it is a subloop, then it either was not moved, or
284 the path up the loop tree from base_loop does not contain
285 it.  */
286 nca = find_common_loop (pred->loop_father, base_loop);
287 if (pred->loop_father != base_loop
288 && (nca == base_loop
289 || nca != pred->loop_father))
290 pred = pred->loop_father->header;
291 else if (!flow_loop_nested_p (target_loop, pred->loop_father))
292 {
293 /* If PRED is already higher in the loop hierarchy than the
294 TARGET_LOOP to that we moved FROM, the change of the position
295 of FROM does not affect the position of PRED, so there is no
296 point in processing it. */
297 continue;
298 }
299
300 if (bitmap_bit_p (in_queue, pred->index))
301 continue;
302
303 /* Schedule the basic block. */
304 *qend = pred;
305 qend++;
306 if (qend == qtop)
307 qend = queue;
308 bitmap_set_bit (in_queue, pred->index);
309 }
310 }
311 free (in_queue);
312 free (queue);
313 }
314
315 /* Remove the path beginning at edge E, i.e. remove the basic blocks dominated
316 by E, and update the loop structures and dominators.  Return true if we were
317 able to remove the path, false otherwise (and nothing is affected then).  */
318 bool
319 remove_path (edge e)
320 {
321 edge ae;
322 basic_block *rem_bbs, *bord_bbs, from, bb;
323 vec<basic_block> dom_bbs;
324 int i, nrem, n_bord_bbs;
325 sbitmap seen;
326 bool irred_invalidated = false;
327 edge_iterator ei;
328 struct loop *l, *f;
329
330 if (!can_remove_branch_p (e))
331 return false;
332
333 /* Keep track of whether we need to update information about irreducible
334 regions. This is the case if the removed area is a part of the
335 irreducible region, or if the set of basic blocks that belong to a loop
336 that is inside an irreducible region is changed, or if such a loop is
337 removed. */
338 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
339 irred_invalidated = true;
340
341 /* We need to check whether basic blocks are dominated by the edge
342 e, but we only have basic block dominators. This is easy to
343 fix -- when e->dest has exactly one predecessor, this corresponds
344 to blocks dominated by e->dest, if not, split the edge. */
345 if (!single_pred_p (e->dest))
346 e = single_pred_edge (split_edge (e));
347
348 /* It may happen that by removing the path we remove one or more loops
349 that we belong to.  In this case, first unloop the loops, then proceed
350 normally.  We may assume that e->dest is not a header of any loop,
351 as it now has exactly one predecessor.  */
352 for (l = e->src->loop_father; loop_outer (l); l = f)
353 {
354 f = loop_outer (l);
355 if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
356 unloop (l, &irred_invalidated, NULL);
357 }
358
359 /* Identify the path. */
360 nrem = find_path (e, &rem_bbs);
361
362 n_bord_bbs = 0;
363 bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
364 seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
365 bitmap_clear (seen);
366
367 /* Find "border" blocks -- i.e. those with a predecessor in the removed path.  */
368 for (i = 0; i < nrem; i++)
369 bitmap_set_bit (seen, rem_bbs[i]->index);
370 if (!irred_invalidated)
371 FOR_EACH_EDGE (ae, ei, e->src->succs)
372 if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
373 && !bitmap_bit_p (seen, ae->dest->index)
374 && ae->flags & EDGE_IRREDUCIBLE_LOOP)
375 {
376 irred_invalidated = true;
377 break;
378 }
379
380 for (i = 0; i < nrem; i++)
381 {
382 bb = rem_bbs[i];
383 FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
384 if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
385 && !bitmap_bit_p (seen, ae->dest->index))
386 {
387 bitmap_set_bit (seen, ae->dest->index);
388 bord_bbs[n_bord_bbs++] = ae->dest;
389
390 if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
391 irred_invalidated = true;
392 }
393 }
394
395 /* Remove the path. */
396 from = e->src;
397 remove_branch (e);
398 dom_bbs.create (0);
399
400 /* Cancel loops contained in the path. */
401 for (i = 0; i < nrem; i++)
402 if (rem_bbs[i]->loop_father->header == rem_bbs[i])
403 cancel_loop_tree (rem_bbs[i]->loop_father);
404
405 remove_bbs (rem_bbs, nrem);
406 free (rem_bbs);
407
408 /* Find blocks whose dominators may be affected. */
409 bitmap_clear (seen);
410 for (i = 0; i < n_bord_bbs; i++)
411 {
412 basic_block ldom;
413
414 bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
415 if (bitmap_bit_p (seen, bb->index))
416 continue;
417 bitmap_set_bit (seen, bb->index);
418
419 for (ldom = first_dom_son (CDI_DOMINATORS, bb);
420 ldom;
421 ldom = next_dom_son (CDI_DOMINATORS, ldom))
422 if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
423 dom_bbs.safe_push (ldom);
424 }
425
426 free (seen);
427
428 /* Recount dominators. */
429 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
430 dom_bbs.release ();
431 free (bord_bbs);
432
433 /* Fix placements of basic blocks inside loops and the placement of
434 loops in the loop tree. */
435 fix_bb_placements (from, &irred_invalidated, NULL);
436 fix_loop_placements (from->loop_father, &irred_invalidated);
437
438 if (irred_invalidated
439 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
440 mark_irreducible_loops ();
441
442 return true;
443 }
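
/* An illustrative usage sketch, not part of the original file: drop the
   region hanging off edge E, e.g. the arm of a conditional branch that has
   been proven dead.  remove_path itself re-checks can_remove_branch_p, so the
   early test here only documents the failure mode.  The helper name is
   hypothetical.  */

static bool
example_remove_dead_arm (edge e)
{
  /* Some branches (e.g. abnormal edges) cannot be removed; in that case
     remove_path leaves the CFG untouched and returns false.  */
  if (!can_remove_branch_p (e))
    return false;

  /* This deletes the blocks dominated by E, cancels loops fully contained
     in the removed region, and recomputes dominators.  */
  return remove_path (e);
}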
444
445 /* Creates a place for the new LOOP in the loop structure of FN.  */
446
447 void
448 place_new_loop (struct function *fn, struct loop *loop)
449 {
450 loop->num = number_of_loops (fn);
451 vec_safe_push (loops_for_fn (fn)->larray, loop);
452 }
453
454 /* Given a LOOP structure with the header and latch filled in, find the body
455 of the corresponding loop and add it to the loop tree.  Insert LOOP as a
456 child of OUTER.  */
457
458 void
459 add_loop (struct loop *loop, struct loop *outer)
460 {
461 basic_block *bbs;
462 int i, n;
463 struct loop *subloop;
464 edge e;
465 edge_iterator ei;
466
467 /* Add it to loop structure. */
468 place_new_loop (cfun, loop);
469 flow_loop_tree_node_add (outer, loop);
470
471 /* Find its nodes. */
472 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
473 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
474
475 for (i = 0; i < n; i++)
476 {
477 if (bbs[i]->loop_father == outer)
478 {
479 remove_bb_from_loops (bbs[i]);
480 add_bb_to_loop (bbs[i], loop);
481 continue;
482 }
483
484 loop->num_nodes++;
485
486 /* If we find a direct subloop of OUTER, move it to LOOP. */
487 subloop = bbs[i]->loop_father;
488 if (loop_outer (subloop) == outer
489 && subloop->header == bbs[i])
490 {
491 flow_loop_tree_node_remove (subloop);
492 flow_loop_tree_node_add (loop, subloop);
493 }
494 }
495
496 /* Update the information about loop exit edges. */
497 for (i = 0; i < n; i++)
498 {
499 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
500 {
501 rescan_loop_exit (e, false, false);
502 }
503 }
504
505 free (bbs);
506 }
507
508 /* Multiply all frequencies in LOOP by NUM/DEN. */
509
510 void
511 scale_loop_frequencies (struct loop *loop, int num, int den)
512 {
513 basic_block *bbs;
514
515 bbs = get_loop_body (loop);
516 scale_bbs_frequencies_int (bbs, loop->num_nodes, num, den);
517 free (bbs);
518 }
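
/* A minimal usage sketch, not part of the original file: halve every
   frequency in LOOP, e.g. after the loop has been placed under a condition
   expected to be true half of the time.  The NUM/DEN pair is the scaling
   fraction, so REG_BR_PROB_BASE/2 over REG_BR_PROB_BASE means 1/2.  The
   helper name is hypothetical.  */

static void
example_halve_loop_frequencies (struct loop *loop)
{
  scale_loop_frequencies (loop, REG_BR_PROB_BASE / 2, REG_BR_PROB_BASE);
}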
519
520 /* Multiply all frequencies in LOOP by SCALE/REG_BR_PROB_BASE.
521 If ITERATION_BOUND is non-zero, scale even further if the loop is predicted
522 to iterate too many times.  */
523
524 void
525 scale_loop_profile (struct loop *loop, int scale, gcov_type iteration_bound)
526 {
527 gcov_type iterations = expected_loop_iterations_unbounded (loop);
528 edge e;
529 edge_iterator ei;
530
531 if (dump_file && (dump_flags & TDF_DETAILS))
532 fprintf (dump_file, ";; Scaling loop %i with scale %f, "
533 "bounding iterations to %i from guessed %i\n",
534 loop->num, (double)scale / REG_BR_PROB_BASE,
535 (int)iteration_bound, (int)iterations);
536
537 /* See if loop is predicted to iterate too many times. */
538 if (iteration_bound && iterations > 0
539 && apply_probability (iterations, scale) > iteration_bound)
540 {
541 /* Fixing the loop profile for a different trip count is not trivial; the
542 exit probabilities have to be updated to match and the frequencies
543 propagated down to the loop body.
544
545 We fully update only the simple case of a loop with a single exit that is
546 either from the latch or from the BB just before the latch, and leads from
547 a BB with a simple conditional jump.  This is OK for use in the vectorizer.  */
548 e = single_exit (loop);
549 if (e)
550 {
551 edge other_e;
552 int freq_delta;
553 gcov_type count_delta;
554
555 FOR_EACH_EDGE (other_e, ei, e->src->succs)
556 if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
557 && e != other_e)
558 break;
559
560 /* Probability of exit must be 1/iterations. */
561 freq_delta = EDGE_FREQUENCY (e);
562 e->probability = REG_BR_PROB_BASE / iteration_bound;
563 other_e->probability = inverse_probability (e->probability);
564 freq_delta -= EDGE_FREQUENCY (e);
565
566 /* Adjust counts accordingly. */
567 count_delta = e->count;
568 e->count = apply_probability (e->src->count, e->probability);
569 other_e->count = apply_probability (e->src->count, other_e->probability);
570 count_delta -= e->count;
571
572 /* If latch exists, change its frequency and count, since we changed
573 probability of exit. Theoretically we should update everything from
574 source of exit edge to latch, but for vectorizer this is enough. */
575 if (loop->latch
576 && loop->latch != e->src)
577 {
578 loop->latch->frequency += freq_delta;
579 if (loop->latch->frequency < 0)
580 loop->latch->frequency = 0;
581 loop->latch->count += count_delta;
582 if (loop->latch->count < 0)
583 loop->latch->count = 0;
584 }
585 }
586
587 /* Roughly speaking, we want to reduce the loop body profile by the
588 difference of loop iterations.  We can however do better if
589 we look at the actual profile, if it is available.  */
590 scale = RDIV (iteration_bound * scale, iterations);
591 if (loop->header->count)
592 {
593 gcov_type count_in = 0;
594
595 FOR_EACH_EDGE (e, ei, loop->header->preds)
596 if (e->src != loop->latch)
597 count_in += e->count;
598
599 if (count_in != 0)
600 scale = GCOV_COMPUTE_SCALE (count_in * iteration_bound,
601 loop->header->count);
602 }
603 else if (loop->header->frequency)
604 {
605 int freq_in = 0;
606
607 FOR_EACH_EDGE (e, ei, loop->header->preds)
608 if (e->src != loop->latch)
609 freq_in += EDGE_FREQUENCY (e);
610
611 if (freq_in != 0)
612 scale = GCOV_COMPUTE_SCALE (freq_in * iteration_bound,
613 loop->header->frequency);
614 }
615 if (!scale)
616 scale = 1;
617 }
618
619 if (scale == REG_BR_PROB_BASE)
620 return;
621
622 /* Scale the actual probabilities. */
623 scale_loop_frequencies (loop, scale, REG_BR_PROB_BASE);
624 if (dump_file && (dump_flags & TDF_DETAILS))
625 fprintf (dump_file, ";; guessed iterations are now %i\n",
626 (int)expected_loop_iterations_unbounded (loop));
627 }
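
/* An illustrative sketch, not part of the original file, of a typical use of
   scale_loop_profile: after transforming LOOP so that it runs VF times fewer
   iterations (e.g. blocking or vectorizing by factor VF), divide the body
   profile by VF and cap the expected iteration count accordingly.  The helper
   name and the way the bound is derived are assumptions made for this
   example only.  */

static void
example_rescale_after_blocking (struct loop *loop, int vf)
{
  gcov_type bound = expected_loop_iterations_unbounded (loop) / vf + 1;

  scale_loop_profile (loop, REG_BR_PROB_BASE / vf, bound);
}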
628
629 /* Recompute dominance information for basic blocks outside LOOP. */
630
631 static void
632 update_dominators_in_loop (struct loop *loop)
633 {
634 vec<basic_block> dom_bbs = vNULL;
635 sbitmap seen;
636 basic_block *body;
637 unsigned i;
638
639 seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
640 bitmap_clear (seen);
641 body = get_loop_body (loop);
642
643 for (i = 0; i < loop->num_nodes; i++)
644 bitmap_set_bit (seen, body[i]->index);
645
646 for (i = 0; i < loop->num_nodes; i++)
647 {
648 basic_block ldom;
649
650 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
651 ldom;
652 ldom = next_dom_son (CDI_DOMINATORS, ldom))
653 if (!bitmap_bit_p (seen, ldom->index))
654 {
655 bitmap_set_bit (seen, ldom->index);
656 dom_bbs.safe_push (ldom);
657 }
658 }
659
660 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
661 free (body);
662 free (seen);
663 dom_bbs.release ();
664 }
665
666 /* Creates an if region as shown below.  CONDITION is used to create
667 the test for the if.
668
669 |
670 | ------------- -------------
671 | | pred_bb | | pred_bb |
672 | ------------- -------------
673 | | |
674 | | | ENTRY_EDGE
675 | | ENTRY_EDGE V
676 | | ====> -------------
677 | | | cond_bb |
678 | | | CONDITION |
679 | | -------------
680 | V / \
681 | ------------- e_false / \ e_true
682 | | succ_bb | V V
683 | ------------- ----------- -----------
684 | | false_bb | | true_bb |
685 | ----------- -----------
686 | \ /
687 | \ /
688 | V V
689 | -------------
690 | | join_bb |
691 | -------------
692 | | exit_edge (result)
693 | V
694 | -----------
695 | | succ_bb |
696 | -----------
697 |
698 */
699
700 edge
701 create_empty_if_region_on_edge (edge entry_edge, tree condition)
702 {
703
704 basic_block cond_bb, true_bb, false_bb, join_bb;
705 edge e_true, e_false, exit_edge;
706 gcond *cond_stmt;
707 tree simple_cond;
708 gimple_stmt_iterator gsi;
709
710 cond_bb = split_edge (entry_edge);
711
712 /* Insert condition in cond_bb. */
713 gsi = gsi_last_bb (cond_bb);
714 simple_cond =
715 force_gimple_operand_gsi (&gsi, condition, true, NULL,
716 false, GSI_NEW_STMT);
717 cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
718 gsi = gsi_last_bb (cond_bb);
719 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
720
721 join_bb = split_edge (single_succ_edge (cond_bb));
722
723 e_true = single_succ_edge (cond_bb);
724 true_bb = split_edge (e_true);
725
726 e_false = make_edge (cond_bb, join_bb, 0);
727 false_bb = split_edge (e_false);
728
729 e_true->flags &= ~EDGE_FALLTHRU;
730 e_true->flags |= EDGE_TRUE_VALUE;
731 e_false->flags &= ~EDGE_FALLTHRU;
732 e_false->flags |= EDGE_FALSE_VALUE;
733
734 set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
735 set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
736 set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
737 set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
738
739 exit_edge = single_succ_edge (join_bb);
740
741 if (single_pred_p (exit_edge->dest))
742 set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);
743
744 return exit_edge;
745 }
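
/* A minimal usage sketch, not part of the original file: guard the code
   reached through ENTRY_EDGE by the test A < B.  build2 and
   boolean_type_node come from the tree machinery already included above;
   the helper name is hypothetical.  */

static edge
example_guard_region (edge entry_edge, tree a, tree b)
{
  tree cond = build2 (LT_EXPR, boolean_type_node, a, b);

  /* The returned edge leaves join_bb; true_bb and false_bb are left empty
     for the caller to fill in.  */
  return create_empty_if_region_on_edge (entry_edge, cond);
}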
746
747 /* create_empty_loop_on_edge
748 |
749 | - pred_bb - ------ pred_bb ------
750 | | | | iv0 = initial_value |
751 | -----|----- ---------|-----------
752 | | ______ | entry_edge
753 | | entry_edge / | |
754 | | ====> | -V---V- loop_header -------------
755 | V | | iv_before = phi (iv0, iv_after) |
756 | - succ_bb - | ---|-----------------------------
757 | | | | |
758 | ----------- | ---V--- loop_body ---------------
759 | | | iv_after = iv_before + stride |
760 | | | if (iv_before < upper_bound) |
761 | | ---|--------------\--------------
762 | | | \ exit_e
763 | | V \
764 | | - loop_latch - V- succ_bb -
765 | | | | | |
766 | | /------------- -----------
767 | \ ___ /
768
769 Creates an empty loop as shown above.  IV_BEFORE is the SSA_NAME
770 that is used before the increment of IV.  IV_BEFORE should be used for
771 adding code to the body that uses the IV. OUTER is the outer loop in
772 which the new loop should be inserted.
773
774 Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
775 inserted on the loop entry edge. This implies that this function
776 should be used only when the UPPER_BOUND expression is a loop
777 invariant. */
778
779 struct loop *
780 create_empty_loop_on_edge (edge entry_edge,
781 tree initial_value,
782 tree stride, tree upper_bound,
783 tree iv,
784 tree *iv_before,
785 tree *iv_after,
786 struct loop *outer)
787 {
788 basic_block loop_header, loop_latch, succ_bb, pred_bb;
789 struct loop *loop;
790 gimple_stmt_iterator gsi;
791 gimple_seq stmts;
792 gcond *cond_expr;
793 tree exit_test;
794 edge exit_e;
795 int prob;
796
797 gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);
798
799 /* Create header, latch and wire up the loop. */
800 pred_bb = entry_edge->src;
801 loop_header = split_edge (entry_edge);
802 loop_latch = split_edge (single_succ_edge (loop_header));
803 succ_bb = single_succ (loop_latch);
804 make_edge (loop_header, succ_bb, 0);
805 redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);
806
807 /* Set immediate dominator information. */
808 set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
809 set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
810 set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);
811
812 /* Initialize a loop structure and put it in a loop hierarchy. */
813 loop = alloc_loop ();
814 loop->header = loop_header;
815 loop->latch = loop_latch;
816 add_loop (loop, outer);
817
818 /* TODO: Fix frequencies and counts. */
819 prob = REG_BR_PROB_BASE / 2;
820
821 scale_loop_frequencies (loop, REG_BR_PROB_BASE - prob, REG_BR_PROB_BASE);
822
823 /* Update dominators. */
824 update_dominators_in_loop (loop);
825
826 /* Modify edge flags. */
827 exit_e = single_exit (loop);
828 exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
829 single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;
830
831 /* Construct IV code in loop. */
832 initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
833 if (stmts)
834 {
835 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
836 gsi_commit_edge_inserts ();
837 }
838
839 upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
840 if (stmts)
841 {
842 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
843 gsi_commit_edge_inserts ();
844 }
845
846 gsi = gsi_last_bb (loop_header);
847 create_iv (initial_value, stride, iv, loop, &gsi, false,
848 iv_before, iv_after);
849
850 /* Insert loop exit condition. */
851 cond_expr = gimple_build_cond
852 (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);
853
854 exit_test = gimple_cond_lhs (cond_expr);
855 exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
856 false, GSI_NEW_STMT);
857 gimple_cond_set_lhs (cond_expr, exit_test);
858 gsi = gsi_last_bb (exit_e->src);
859 gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);
860
861 split_block_after_labels (loop_header);
862
863 return loop;
864 }
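
/* An illustrative sketch, not part of the original file: materialize a
   counting loop "for (iv = 0; iv < n; iv += 1)" on ENTRY_EDGE, nested
   directly in OUTER.  create_tmp_var and build_int_cst are standard tree
   helpers; the function and variable names are hypothetical.  */

static struct loop *
example_build_counting_loop (edge entry_edge, tree n, struct loop *outer)
{
  tree type = TREE_TYPE (n);
  tree iv = create_tmp_var (type, "ex_iv");
  tree iv_before, iv_after;

  /* N must be loop invariant, as it is gimplified on the entry edge.  */
  return create_empty_loop_on_edge (entry_edge,
				    build_int_cst (type, 0),  /* initial  */
				    build_int_cst (type, 1),  /* stride   */
				    n, iv, &iv_before, &iv_after, outer);
}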
865
866 /* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
867 latch to header and update loop tree and dominators
868 accordingly. Everything between them plus LATCH_EDGE destination must
869 be dominated by HEADER_EDGE destination, and back-reachable from
870 LATCH_EDGE source. HEADER_EDGE is redirected to basic block SWITCH_BB,
871 FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
872 TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
873 Returns the newly created loop. Frequencies and counts in the new loop
874 are scaled by FALSE_SCALE and in the old one by TRUE_SCALE. */
875
876 struct loop *
877 loopify (edge latch_edge, edge header_edge,
878 basic_block switch_bb, edge true_edge, edge false_edge,
879 bool redirect_all_edges, unsigned true_scale, unsigned false_scale)
880 {
881 basic_block succ_bb = latch_edge->dest;
882 basic_block pred_bb = header_edge->src;
883 struct loop *loop = alloc_loop ();
884 struct loop *outer = loop_outer (succ_bb->loop_father);
885 int freq;
886 gcov_type cnt;
887 edge e;
888 edge_iterator ei;
889
890 loop->header = header_edge->dest;
891 loop->latch = latch_edge->src;
892
893 freq = EDGE_FREQUENCY (header_edge);
894 cnt = header_edge->count;
895
896 /* Redirect edges. */
897 loop_redirect_edge (latch_edge, loop->header);
898 loop_redirect_edge (true_edge, succ_bb);
899
900 /* During loop versioning, one of the switch_bb edges is already properly
901 set.  Do not redirect it again unless redirect_all_edges is true.  */
902 if (redirect_all_edges)
903 {
904 loop_redirect_edge (header_edge, switch_bb);
905 loop_redirect_edge (false_edge, loop->header);
906
907 /* Update dominators. */
908 set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
909 set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
910 }
911
912 set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);
913
914 /* Compute new loop. */
915 add_loop (loop, outer);
916
917 /* Add switch_bb to appropriate loop. */
918 if (switch_bb->loop_father)
919 remove_bb_from_loops (switch_bb);
920 add_bb_to_loop (switch_bb, outer);
921
922 /* Fix frequencies. */
923 if (redirect_all_edges)
924 {
925 switch_bb->frequency = freq;
926 switch_bb->count = cnt;
927 FOR_EACH_EDGE (e, ei, switch_bb->succs)
928 {
929 e->count = apply_probability (switch_bb->count, e->probability);
930 }
931 }
932 scale_loop_frequencies (loop, false_scale, REG_BR_PROB_BASE);
933 scale_loop_frequencies (succ_bb->loop_father, true_scale, REG_BR_PROB_BASE);
934 update_dominators_in_loop (loop);
935
936 return loop;
937 }
938
939 /* Remove the latch edge of a LOOP and update loops to indicate that
940 the LOOP was removed.  After this function, the original loop latch will
941 have no successor, which the caller is expected to fix somehow.
942
943 If this may cause the information about irreducible regions to become
944 invalid, IRRED_INVALIDATED is set to true.
945
946 LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap in which we store
947 the basic blocks whose loop_father had a non-trivial update.  */
948
949 void
950 unloop (struct loop *loop, bool *irred_invalidated,
951 bitmap loop_closed_ssa_invalidated)
952 {
953 basic_block *body;
954 struct loop *ploop;
955 unsigned i, n;
956 basic_block latch = loop->latch;
957 bool dummy = false;
958
959 if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
960 *irred_invalidated = true;
961
962 /* This is relatively straightforward.  The dominators are unchanged, as
963 the loop header dominates the loop latch, so the only thing we have to
964 care about is the placement of loops and basic blocks inside the loop
965 tree.  We move them all to the outer loop of LOOP, and then let
966 fix_bb_placements do its work.  */
967
968 body = get_loop_body (loop);
969 n = loop->num_nodes;
970 for (i = 0; i < n; i++)
971 if (body[i]->loop_father == loop)
972 {
973 remove_bb_from_loops (body[i]);
974 add_bb_to_loop (body[i], loop_outer (loop));
975 }
976 free (body);
977
978 while (loop->inner)
979 {
980 ploop = loop->inner;
981 flow_loop_tree_node_remove (ploop);
982 flow_loop_tree_node_add (loop_outer (loop), ploop);
983 }
984
985 /* Remove the loop and free its data. */
986 delete_loop (loop);
987
988 remove_edge (single_succ_edge (latch));
989
990 /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
991 there is an irreducible region inside the cancelled loop, the flags will
992 still be correct.  */
993 fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
994 }
995
996 /* Fix the placement of superloops of LOOP inside the loop tree, i.e. ensure
997 that the condition stated in the description of fix_loop_placement holds for
998 them.  It is used when we have removed some edges coming out of LOOP, which
999 may cause the correct placement of LOOP inside the loop tree to change.
1000
1001 IRRED_INVALIDATED is set to true if a change in the loop structures might
1002 invalidate the information about irreducible regions. */
1003
1004 static void
1005 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
1006 {
1007 struct loop *outer;
1008
1009 while (loop_outer (loop))
1010 {
1011 outer = loop_outer (loop);
1012 if (!fix_loop_placement (loop, irred_invalidated))
1013 break;
1014
1015 /* Changing the placement of a loop in the loop tree may alter the
1016 validity of condition 2) of the description of fix_bb_placement
1017 for its preheader, because the successor is the header and belongs
1018 to the loop. So call fix_bb_placements to fix up the placement
1019 of the preheader and (possibly) of its predecessors. */
1020 fix_bb_placements (loop_preheader_edge (loop)->src,
1021 irred_invalidated, NULL);
1022 loop = outer;
1023 }
1024 }
1025
1026 /* Duplicate loop bounds and other information we store about
1027 the loop into its duplicate. */
1028
1029 void
1030 copy_loop_info (struct loop *loop, struct loop *target)
1031 {
1032 gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
1033 target->any_upper_bound = loop->any_upper_bound;
1034 target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
1035 target->any_estimate = loop->any_estimate;
1036 target->nb_iterations_estimate = loop->nb_iterations_estimate;
1037 target->estimate_state = loop->estimate_state;
1038 target->warned_aggressive_loop_optimizations
1039 |= loop->warned_aggressive_loop_optimizations;
1040 }
1041
1042 /* Creates a copy of LOOP as a subloop of the TARGET loop, placing the newly
1043 created loop into the loop structure.  */
1044 struct loop *
1045 duplicate_loop (struct loop *loop, struct loop *target)
1046 {
1047 struct loop *cloop;
1048 cloop = alloc_loop ();
1049 place_new_loop (cfun, cloop);
1050
1051 copy_loop_info (loop, cloop);
1052
1053 /* Mark the new loop as copy of LOOP. */
1054 set_loop_copy (loop, cloop);
1055
1056 /* Add it to target. */
1057 flow_loop_tree_node_add (target, cloop);
1058
1059 return cloop;
1060 }
1061
1062 /* Copies the structure of the subloops of LOOP into the TARGET loop, placing
1063 the newly created loops into the loop tree.  */
1064 void
1065 duplicate_subloops (struct loop *loop, struct loop *target)
1066 {
1067 struct loop *aloop, *cloop;
1068
1069 for (aloop = loop->inner; aloop; aloop = aloop->next)
1070 {
1071 cloop = duplicate_loop (aloop, target);
1072 duplicate_subloops (aloop, cloop);
1073 }
1074 }
1075
1076 /* Copies the structure of the subloops of the N loops stored in array
1077 COPIED_LOOPS into the TARGET loop, placing newly created loops into the loop tree.  */
1078 static void
1079 copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
1080 {
1081 struct loop *aloop;
1082 int i;
1083
1084 for (i = 0; i < n; i++)
1085 {
1086 aloop = duplicate_loop (copied_loops[i], target);
1087 duplicate_subloops (copied_loops[i], aloop);
1088 }
1089 }
1090
1091 /* Redirects edge E to basic block DEST. */
1092 static void
1093 loop_redirect_edge (edge e, basic_block dest)
1094 {
1095 if (e->dest == dest)
1096 return;
1097
1098 redirect_edge_and_branch_force (e, dest);
1099 }
1100
1101 /* Check whether LOOP's body can be duplicated. */
1102 bool
1103 can_duplicate_loop_p (const struct loop *loop)
1104 {
1105 int ret;
1106 basic_block *bbs = get_loop_body (loop);
1107
1108 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1109 free (bbs);
1110
1111 return ret;
1112 }
1113
1114 /* Sets the probability and count of edge E to zero.  The probability and count
1115 are redistributed evenly among the remaining edges coming from E->src.  */
1116
1117 static void
1118 set_zero_probability (edge e)
1119 {
1120 basic_block bb = e->src;
1121 edge_iterator ei;
1122 edge ae, last = NULL;
1123 unsigned n = EDGE_COUNT (bb->succs);
1124 gcov_type cnt = e->count, cnt1;
1125 unsigned prob = e->probability, prob1;
1126
1127 gcc_assert (n > 1);
1128 cnt1 = cnt / (n - 1);
1129 prob1 = prob / (n - 1);
1130
1131 FOR_EACH_EDGE (ae, ei, bb->succs)
1132 {
1133 if (ae == e)
1134 continue;
1135
1136 ae->probability += prob1;
1137 ae->count += cnt1;
1138 last = ae;
1139 }
1140
1141 /* Move the rest to one of the edges. */
1142 last->probability += prob % (n - 1);
1143 last->count += cnt % (n - 1);
1144
1145 e->probability = 0;
1146 e->count = 0;
1147 }
1148
1149 /* Duplicates the body of LOOP onto the given edge E NDUPL times.  Takes care
1150 of updating the loop structure and dominators.  E's destination must be the
1151 LOOP header for this to work, i.e. it must be the entry or latch edge of this
1152 loop; these are unique, as loops must have preheaders for this function to
1153 work correctly (in case E is the latch, the function unrolls the loop; if E
1154 is the entry edge, it peels the loop).  Store the edges created by copying the
1155 ORIG edge from the copies corresponding to set bits in the WONT_EXIT bitmap
1156 (bit 0 corresponds to the original LOOP body, the other copies are numbered in
1157 the order given by the control flow through them) into the TO_REMOVE array.
1158 Returns false if duplication is impossible.  */
1159
1160 bool
1161 duplicate_loop_to_header_edge (struct loop *loop, edge e,
1162 unsigned int ndupl, sbitmap wont_exit,
1163 edge orig, vec<edge> *to_remove,
1164 int flags)
1165 {
1166 struct loop *target, *aloop;
1167 struct loop **orig_loops;
1168 unsigned n_orig_loops;
1169 basic_block header = loop->header, latch = loop->latch;
1170 basic_block *new_bbs, *bbs, *first_active;
1171 basic_block new_bb, bb, first_active_latch = NULL;
1172 edge ae, latch_edge;
1173 edge spec_edges[2], new_spec_edges[2];
1174 #define SE_LATCH 0
1175 #define SE_ORIG 1
1176 unsigned i, j, n;
1177 int is_latch = (latch == e->src);
1178 int scale_act = 0, *scale_step = NULL, scale_main = 0;
1179 int scale_after_exit = 0;
1180 int p, freq_in, freq_le, freq_out_orig;
1181 int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
1182 int add_irreducible_flag;
1183 basic_block place_after;
1184 bitmap bbs_to_scale = NULL;
1185 bitmap_iterator bi;
1186
1187 gcc_assert (e->dest == loop->header);
1188 gcc_assert (ndupl > 0);
1189
1190 if (orig)
1191 {
1192 /* Orig must be edge out of the loop. */
1193 gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
1194 gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
1195 }
1196
1197 n = loop->num_nodes;
1198 bbs = get_loop_body_in_dom_order (loop);
1199 gcc_assert (bbs[0] == loop->header);
1200 gcc_assert (bbs[n - 1] == loop->latch);
1201
1202 /* Check whether duplication is possible. */
1203 if (!can_copy_bbs_p (bbs, loop->num_nodes))
1204 {
1205 free (bbs);
1206 return false;
1207 }
1208 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
1209
1210 /* In case we are doing loop peeling and the loop is in the middle of an
1211 irreducible region, the peeled copies will be inside it too.  */
1212 add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
1213 gcc_assert (!is_latch || !add_irreducible_flag);
1214
1215 /* Find edge from latch. */
1216 latch_edge = loop_latch_edge (loop);
1217
1218 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1219 {
1220 /* Calculate the coefficients by which we have to scale the frequencies
1221 of the duplicated loop bodies.  */
1222 freq_in = header->frequency;
1223 freq_le = EDGE_FREQUENCY (latch_edge);
1224 if (freq_in == 0)
1225 freq_in = 1;
1226 if (freq_in < freq_le)
1227 freq_in = freq_le;
1228 freq_out_orig = orig ? EDGE_FREQUENCY (orig) : freq_in - freq_le;
1229 if (freq_out_orig > freq_in - freq_le)
1230 freq_out_orig = freq_in - freq_le;
1231 prob_pass_thru = RDIV (REG_BR_PROB_BASE * freq_le, freq_in);
1232 prob_pass_wont_exit =
1233 RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in);
1234
1235 if (orig
1236 && REG_BR_PROB_BASE - orig->probability != 0)
1237 {
1238 /* The blocks that are dominated by a removed exit edge ORIG have
1239 frequencies scaled by this. */
1240 scale_after_exit
1241 = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE,
1242 REG_BR_PROB_BASE - orig->probability);
1243 bbs_to_scale = BITMAP_ALLOC (NULL);
1244 for (i = 0; i < n; i++)
1245 {
1246 if (bbs[i] != orig->src
1247 && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
1248 bitmap_set_bit (bbs_to_scale, i);
1249 }
1250 }
1251
1252 scale_step = XNEWVEC (int, ndupl);
1253
1254 for (i = 1; i <= ndupl; i++)
1255 scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
1256 ? prob_pass_wont_exit
1257 : prob_pass_thru;
1258
1259 /* Complete peeling is special as the probability of exit in the last
1260 copy becomes 1.  */
1261 if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
1262 {
1263 int wanted_freq = EDGE_FREQUENCY (e);
1264
1265 if (wanted_freq > freq_in)
1266 wanted_freq = freq_in;
1267
1268 gcc_assert (!is_latch);
1269 /* The first copy has the frequency of the incoming edge.  Each
1270 subsequent frequency should be reduced by prob_pass_wont_exit.  The
1271 caller should have managed the flags so that all copies except for the
1272 original loop have wont_exit set.  */
1273 scale_act = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
1274 /* Now simulate the duplication adjustments and compute header
1275 frequency of the last copy. */
1276 for (i = 0; i < ndupl; i++)
1277 wanted_freq = combine_probabilities (wanted_freq, scale_step[i]);
1278 scale_main = GCOV_COMPUTE_SCALE (wanted_freq, freq_in);
1279 }
1280 else if (is_latch)
1281 {
1282 prob_pass_main = bitmap_bit_p (wont_exit, 0)
1283 ? prob_pass_wont_exit
1284 : prob_pass_thru;
1285 p = prob_pass_main;
1286 scale_main = REG_BR_PROB_BASE;
1287 for (i = 0; i < ndupl; i++)
1288 {
1289 scale_main += p;
1290 p = combine_probabilities (p, scale_step[i]);
1291 }
1292 scale_main = GCOV_COMPUTE_SCALE (REG_BR_PROB_BASE, scale_main);
1293 scale_act = combine_probabilities (scale_main, prob_pass_main);
1294 }
1295 else
1296 {
1297 scale_main = REG_BR_PROB_BASE;
1298 for (i = 0; i < ndupl; i++)
1299 scale_main = combine_probabilities (scale_main, scale_step[i]);
1300 scale_act = REG_BR_PROB_BASE - prob_pass_thru;
1301 }
1302 for (i = 0; i < ndupl; i++)
1303 gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
1304 gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
1305 && scale_act >= 0 && scale_act <= REG_BR_PROB_BASE);
1306 }
1307
1308 /* Loop the new bbs will belong to. */
1309 target = e->src->loop_father;
1310
1311 /* Original loops. */
1312 n_orig_loops = 0;
1313 for (aloop = loop->inner; aloop; aloop = aloop->next)
1314 n_orig_loops++;
1315 orig_loops = XNEWVEC (struct loop *, n_orig_loops);
1316 for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
1317 orig_loops[i] = aloop;
1318
1319 set_loop_copy (loop, target);
1320
1321 first_active = XNEWVEC (basic_block, n);
1322 if (is_latch)
1323 {
1324 memcpy (first_active, bbs, n * sizeof (basic_block));
1325 first_active_latch = latch;
1326 }
1327
1328 spec_edges[SE_ORIG] = orig;
1329 spec_edges[SE_LATCH] = latch_edge;
1330
1331 place_after = e->src;
1332 for (j = 0; j < ndupl; j++)
1333 {
1334 /* Copy loops. */
1335 copy_loops_to (orig_loops, n_orig_loops, target);
1336
1337 /* Copy bbs. */
1338 copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
1339 place_after, true);
1340 place_after = new_spec_edges[SE_LATCH]->src;
1341
1342 if (flags & DLTHE_RECORD_COPY_NUMBER)
1343 for (i = 0; i < n; i++)
1344 {
1345 gcc_assert (!new_bbs[i]->aux);
1346 new_bbs[i]->aux = (void *)(size_t)(j + 1);
1347 }
1348
1349 /* Note whether the blocks and edges belong to an irreducible loop. */
1350 if (add_irreducible_flag)
1351 {
1352 for (i = 0; i < n; i++)
1353 new_bbs[i]->flags |= BB_DUPLICATED;
1354 for (i = 0; i < n; i++)
1355 {
1356 edge_iterator ei;
1357 new_bb = new_bbs[i];
1358 if (new_bb->loop_father == target)
1359 new_bb->flags |= BB_IRREDUCIBLE_LOOP;
1360
1361 FOR_EACH_EDGE (ae, ei, new_bb->succs)
1362 if ((ae->dest->flags & BB_DUPLICATED)
1363 && (ae->src->loop_father == target
1364 || ae->dest->loop_father == target))
1365 ae->flags |= EDGE_IRREDUCIBLE_LOOP;
1366 }
1367 for (i = 0; i < n; i++)
1368 new_bbs[i]->flags &= ~BB_DUPLICATED;
1369 }
1370
1371 /* Redirect the special edges. */
1372 if (is_latch)
1373 {
1374 redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
1375 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1376 loop->header);
1377 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
1378 latch = loop->latch = new_bbs[n - 1];
1379 e = latch_edge = new_spec_edges[SE_LATCH];
1380 }
1381 else
1382 {
1383 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1384 loop->header);
1385 redirect_edge_and_branch_force (e, new_bbs[0]);
1386 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
1387 e = new_spec_edges[SE_LATCH];
1388 }
1389
1390 /* Record exit edge in this copy. */
1391 if (orig && bitmap_bit_p (wont_exit, j + 1))
1392 {
1393 if (to_remove)
1394 to_remove->safe_push (new_spec_edges[SE_ORIG]);
1395 set_zero_probability (new_spec_edges[SE_ORIG]);
1396
1397 /* Scale the frequencies of the blocks dominated by the exit. */
1398 if (bbs_to_scale)
1399 {
1400 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1401 {
1402 scale_bbs_frequencies_int (new_bbs + i, 1, scale_after_exit,
1403 REG_BR_PROB_BASE);
1404 }
1405 }
1406 }
1407
1408 /* Record the first copy in the control flow order if it is not
1409 the original loop (i.e. in case of peeling). */
1410 if (!first_active_latch)
1411 {
1412 memcpy (first_active, new_bbs, n * sizeof (basic_block));
1413 first_active_latch = new_bbs[n - 1];
1414 }
1415
1416 /* Set counts and frequencies. */
1417 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1418 {
1419 scale_bbs_frequencies_int (new_bbs, n, scale_act, REG_BR_PROB_BASE);
1420 scale_act = combine_probabilities (scale_act, scale_step[j]);
1421 }
1422 }
1423 free (new_bbs);
1424 free (orig_loops);
1425
1426 /* Record the exit edge in the original loop body, and update the frequencies. */
1427 if (orig && bitmap_bit_p (wont_exit, 0))
1428 {
1429 if (to_remove)
1430 to_remove->safe_push (orig);
1431 set_zero_probability (orig);
1432
1433 /* Scale the frequencies of the blocks dominated by the exit. */
1434 if (bbs_to_scale)
1435 {
1436 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1437 {
1438 scale_bbs_frequencies_int (bbs + i, 1, scale_after_exit,
1439 REG_BR_PROB_BASE);
1440 }
1441 }
1442 }
1443
1444 /* Update the original loop. */
1445 if (!is_latch)
1446 set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
1447 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1448 {
1449 scale_bbs_frequencies_int (bbs, n, scale_main, REG_BR_PROB_BASE);
1450 free (scale_step);
1451 }
1452
1453 /* Update dominators of outer blocks if affected. */
1454 for (i = 0; i < n; i++)
1455 {
1456 basic_block dominated, dom_bb;
1457 vec<basic_block> dom_bbs;
1458 unsigned j;
1459
1460 bb = bbs[i];
1461 bb->aux = 0;
1462
1463 dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
1464 FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
1465 {
1466 if (flow_bb_inside_loop_p (loop, dominated))
1467 continue;
1468 dom_bb = nearest_common_dominator (
1469 CDI_DOMINATORS, first_active[i], first_active_latch);
1470 set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
1471 }
1472 dom_bbs.release ();
1473 }
1474 free (first_active);
1475
1476 free (bbs);
1477 BITMAP_FREE (bbs_to_scale);
1478
1479 return true;
1480 }
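
/* An illustrative sketch, not part of the original file: unroll LOOP by a
   factor of two by duplicating its body once onto the latch edge.  Bit 0 of
   WONT_EXIT describes the original body and bit 1 the new copy; both are
   left clear here, i.e. every copy is still allowed to exit.  The helper
   name is hypothetical.  */

static bool
example_unroll_loop_once (struct loop *loop)
{
  sbitmap wont_exit = sbitmap_alloc (2);
  bool ok;

  bitmap_clear (wont_exit);
  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), 1,
				      wont_exit, NULL, NULL,
				      DLTHE_FLAG_UPDATE_FREQ);
  sbitmap_free (wont_exit);
  return ok;
}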
1481
1482 /* A callback for make_forwarder_block, to redirect all edges except for
1483 MFB_KJ_EDGE to the entry part.  E is the edge for which we should decide
1484 whether to redirect it.  */
1485
1486 edge mfb_kj_edge;
1487 bool
1488 mfb_keep_just (edge e)
1489 {
1490 return e != mfb_kj_edge;
1491 }
1492
1493 /* True when a candidate preheader BLOCK has predecessors from LOOP. */
1494
1495 static bool
1496 has_preds_from_loop (basic_block block, struct loop *loop)
1497 {
1498 edge e;
1499 edge_iterator ei;
1500
1501 FOR_EACH_EDGE (e, ei, block->preds)
1502 if (e->src->loop_father == loop)
1503 return true;
1504 return false;
1505 }
1506
1507 /* Creates a pre-header for LOOP.  Returns the newly created block.  Unless
1508 CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have a single
1509 entry; otherwise we also force the preheader block to have only one successor.
1510 When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
1511 to be a fallthru predecessor of the loop header and to have only
1512 predecessors from outside of the loop.
1513 The function also updates dominators. */
1514
1515 basic_block
1516 create_preheader (struct loop *loop, int flags)
1517 {
1518 edge e, fallthru;
1519 basic_block dummy;
1520 int nentry = 0;
1521 bool irred = false;
1522 bool latch_edge_was_fallthru;
1523 edge one_succ_pred = NULL, single_entry = NULL;
1524 edge_iterator ei;
1525
1526 FOR_EACH_EDGE (e, ei, loop->header->preds)
1527 {
1528 if (e->src == loop->latch)
1529 continue;
1530 irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
1531 nentry++;
1532 single_entry = e;
1533 if (single_succ_p (e->src))
1534 one_succ_pred = e;
1535 }
1536 gcc_assert (nentry);
1537 if (nentry == 1)
1538 {
1539 bool need_forwarder_block = false;
1540
1541 /* We do not allow entry block to be the loop preheader, since we
1542 cannot emit code there. */
1543 if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1544 need_forwarder_block = true;
1545 else
1546 {
1547 /* If we want simple preheaders, also force the preheader to have
1548 just a single successor. */
1549 if ((flags & CP_SIMPLE_PREHEADERS)
1550 && !single_succ_p (single_entry->src))
1551 need_forwarder_block = true;
1552 /* If we want fallthru preheaders, also create forwarder block when
1553 preheader ends with a jump or has predecessors from loop. */
1554 else if ((flags & CP_FALLTHRU_PREHEADERS)
1555 && (JUMP_P (BB_END (single_entry->src))
1556 || has_preds_from_loop (single_entry->src, loop)))
1557 need_forwarder_block = true;
1558 }
1559 if (! need_forwarder_block)
1560 return NULL;
1561 }
1562
1563 mfb_kj_edge = loop_latch_edge (loop);
1564 latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
1565 fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
1566 dummy = fallthru->src;
1567 loop->header = fallthru->dest;
1568
1569 /* Try to be clever in placing the newly created preheader. The idea is to
1570 avoid breaking any "fallthruness" relationship between blocks.
1571
1572 The preheader was created just before the header and all incoming edges
1573 to the header were redirected to the preheader, except the latch edge.
1574 So the only problematic case is when this latch edge was a fallthru
1575 edge: it is not anymore after the preheader creation so we have broken
1576 the fallthruness. We're therefore going to look for a better place. */
1577 if (latch_edge_was_fallthru)
1578 {
1579 if (one_succ_pred)
1580 e = one_succ_pred;
1581 else
1582 e = EDGE_PRED (dummy, 0);
1583
1584 move_block_after (dummy, e->src);
1585 }
1586
1587 if (irred)
1588 {
1589 dummy->flags |= BB_IRREDUCIBLE_LOOP;
1590 single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
1591 }
1592
1593 if (dump_file)
1594 fprintf (dump_file, "Created preheader block for loop %i\n",
1595 loop->num);
1596
1597 if (flags & CP_FALLTHRU_PREHEADERS)
1598 gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
1599 && !JUMP_P (BB_END (dummy)));
1600
1601 return dummy;
1602 }
1603
1604 /* Create preheaders for each loop; for the meaning of FLAGS see create_preheader.  */
1605
1606 void
1607 create_preheaders (int flags)
1608 {
1609 struct loop *loop;
1610
1611 if (!current_loops)
1612 return;
1613
1614 FOR_EACH_LOOP (loop, 0)
1615 create_preheader (loop, flags);
1616 loops_state_set (LOOPS_HAVE_PREHEADERS);
1617 }
1618
1619 /* Forces all loop latches to have only a single successor.  */
1620
1621 void
1622 force_single_succ_latches (void)
1623 {
1624 struct loop *loop;
1625 edge e;
1626
1627 FOR_EACH_LOOP (loop, 0)
1628 {
1629 if (loop->latch != loop->header && single_succ_p (loop->latch))
1630 continue;
1631
1632 e = find_edge (loop->latch, loop->header);
1633 gcc_checking_assert (e != NULL);
1634
1635 split_edge (e);
1636 }
1637 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1638 }
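
/* A minimal sketch, not part of the original file, of the usual loop-shape
   canonicalization performed with the two routines above, assuming loop
   discovery has already populated current_loops.  The helper name is
   hypothetical.  */

static void
example_canonicalize_loop_form (void)
{
  create_preheaders (CP_SIMPLE_PREHEADERS);
  force_single_succ_latches ();
}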
1639
1640 /* This function is called from loop_version.  It splits the entry edge
1641 of the loop we want to version, adds the versioning condition, and
1642 adjusts the edges to the two versions of the loop appropriately.
1643 E is the incoming edge.  Returns the basic block containing the
1644 condition.
1645
1646 --- edge e ---- > [second_head]
1647
1648 Split it and insert new conditional expression and adjust edges.
1649
1650 --- edge e ---> [cond expr] ---> [first_head]
1651 |
1652 +---------> [second_head]
1653
1654 THEN_PROB is the probability of then branch of the condition. */
1655
1656 static basic_block
1657 lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
1658 edge e, void *cond_expr, unsigned then_prob)
1659 {
1660 basic_block new_head = NULL;
1661 edge e1;
1662
1663 gcc_assert (e->dest == second_head);
1664
1665 /* Split edge 'e'. This will create a new basic block, where we can
1666 insert conditional expr. */
1667 new_head = split_edge (e);
1668
1669 lv_add_condition_to_bb (first_head, second_head, new_head,
1670 cond_expr);
1671
1672 /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there. */
1673 e = single_succ_edge (new_head);
1674 e1 = make_edge (new_head, first_head,
1675 current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
1676 e1->probability = then_prob;
1677 e->probability = REG_BR_PROB_BASE - then_prob;
1678 e1->count = apply_probability (e->count, e1->probability);
1679 e->count = apply_probability (e->count, e->probability);
1680
1681 set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
1682 set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);
1683
1684 /* Adjust loop header phi nodes. */
1685 lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);
1686
1687 return new_head;
1688 }
1689
1690 /* Main entry point for Loop Versioning transformation.
1691
1692 This transformation, given a condition and a loop, creates
1693 if (condition) { loop_copy1 } else { loop_copy2 },
1694 where loop_copy1 is the loop transformed in one way, and loop_copy2
1695 is the loop transformed in another way (or unchanged).  'condition'
1696 may be a run time test for things that were not resolved by static
1697 analysis (overlapping ranges (anti-aliasing), alignment, etc.).
1698
1699 THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
1700 is the ratio by which the frequencies in the original loop should
1701 be scaled.  ELSE_SCALE is the ratio by which the frequencies in the
1702 new loop should be scaled.
1703
1704 If PLACE_AFTER is true, we place the new loop after LOOP in the
1705 instruction stream, otherwise it is placed before LOOP. */
1706
1707 struct loop *
1708 loop_version (struct loop *loop,
1709 void *cond_expr, basic_block *condition_bb,
1710 unsigned then_prob, unsigned then_scale, unsigned else_scale,
1711 bool place_after)
1712 {
1713 basic_block first_head, second_head;
1714 edge entry, latch_edge, true_edge, false_edge;
1715 int irred_flag;
1716 struct loop *nloop;
1717 basic_block cond_bb;
1718
1719 /* Record the entry and latch edges for the loop.  */
1720 entry = loop_preheader_edge (loop);
1721 irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
1722 entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
1723
1724 /* Note down head of loop as first_head. */
1725 first_head = entry->dest;
1726
1727 /* Duplicate loop. */
1728 if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
1729 NULL, NULL, NULL, 0))
1730 {
1731 entry->flags |= irred_flag;
1732 return NULL;
1733 }
1734
1735 /* After duplication entry edge now points to new loop head block.
1736 Note down new head as second_head. */
1737 second_head = entry->dest;
1738
1739 /* Split loop entry edge and insert new block with cond expr. */
1740 cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
1741 entry, cond_expr, then_prob);
1742 if (condition_bb)
1743 *condition_bb = cond_bb;
1744
1745 if (!cond_bb)
1746 {
1747 entry->flags |= irred_flag;
1748 return NULL;
1749 }
1750
1751 latch_edge = single_succ_edge (get_bb_copy (loop->latch));
1752
1753 extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
1754 nloop = loopify (latch_edge,
1755 single_pred_edge (get_bb_copy (loop->header)),
1756 cond_bb, true_edge, false_edge,
1757 false /* Do not redirect all edges. */,
1758 then_scale, else_scale);
1759
1760 copy_loop_info (loop, nloop);
1761
1762 /* loopify redirected latch_edge. Update its PENDING_STMTS. */
1763 lv_flush_pending_stmts (latch_edge);
1764
1765 /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS. */
1766 extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
1767 lv_flush_pending_stmts (false_edge);
1768 /* Adjust irreducible flag. */
1769 if (irred_flag)
1770 {
1771 cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
1772 loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1773 loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1774 single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
1775 }
1776
1777 if (place_after)
1778 {
1779 basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
1780 unsigned i;
1781
1782 after = loop->latch;
1783
1784 for (i = 0; i < nloop->num_nodes; i++)
1785 {
1786 move_block_after (bbs[i], after);
1787 after = bbs[i];
1788 }
1789 free (bbs);
1790 }
1791
1792 /* At this point condition_bb is loop preheader with two successors,
1793 first_head and second_head. Make sure that loop preheader has only
1794 one successor. */
1795 split_edge (loop_preheader_edge (loop));
1796 split_edge (loop_preheader_edge (nloop));
1797
1798 return nloop;
1799 }
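
/* An illustrative sketch, not part of the original file: version LOOP on a
   runtime test A != B, giving the "then" (original) copy a 50% probability
   and scaling both copies to half of the original frequencies.  In GIMPLE
   mode the condition is passed as a tree; the helper name is hypothetical.  */

static struct loop *
example_version_on_runtime_test (struct loop *loop, tree a, tree b)
{
  basic_block cond_bb;
  tree cond = build2 (NE_EXPR, boolean_type_node, a, b);

  return loop_version (loop, cond, &cond_bb,
		       REG_BR_PROB_BASE / 2,  /* then_prob   */
		       REG_BR_PROB_BASE / 2,  /* then_scale  */
		       REG_BR_PROB_BASE / 2,  /* else_scale  */
		       false /* place_after */);
}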