/* Loop unrolling and peeling.
   Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "params.h"
#include "output.h"
#include "expr.h"
#include "hashtab.h"
#include "recog.h"

/* This pass performs loop unrolling and peeling.  We only perform these
   optimizations on innermost loops (with a single exception) because
   the impact on performance is greatest here, and we want to avoid
   unnecessary code size growth.  The gain comes from the greater
   sequentiality of the code, better code for further passes to optimize,
   and in some cases from fewer tests of the exit conditions.  The main
   problem is code growth, which impacts performance negatively due to
   cache effects.

   What we do:

   -- complete peeling of once-rolling loops; this is the above-mentioned
      exception, as it causes the loop to be cancelled completely and
      does not cause code growth
   -- complete peeling of loops that roll a (small) constant number of times
   -- simple peeling of the first iterations of loops that do not roll much
      (according to profile feedback)
   -- unrolling of loops that roll a constant number of times; this is
      almost always a win, as we get rid of the exit condition tests
   -- unrolling of loops that roll a number of times that we can compute
      at runtime; we also get rid of the exit condition tests here, but
      there is the extra expense of calculating the number of iterations
   -- simple unrolling of the remaining loops; this is performed only if
      we are asked to, as the gain is questionable in this case and often
      it may even slow down the code
   For a more detailed description of each of those, see the comments at
   the appropriate function below.

   There are a lot of parameters (defined and described in params.def)
   that control how much we unroll/peel.

   ??? A great problem is that we don't have a good way to determine how
   many times we should unroll the loop; the experiments I have made
   showed that this choice may affect performance on the order of
   several %.  */
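
/* A rough map from FLAGS to the user-visible options (a sketch for
   orientation only; the actual mapping lives in the pass setup code,
   not in this file): the UAP_PEEL, UAP_UNROLL and UAP_UNROLL_ALL bits
   roughly correspond to -fpeel-loops, -funroll-loops and
   -funroll-all-loops, so e.g.

     gcc -O2 -funroll-loops foo.c

   enables the constant- and runtime-iteration unrolling below, while
   simple peeling and stupid unrolling additionally need -fpeel-loops
   resp. -funroll-all-loops.  */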

/* Information about induction variables to split.  */

struct iv_to_split
{
  rtx insn;            /* The insn in which the induction variable occurs.  */
  rtx base_var;        /* The variable on which the values in the further
                          iterations are based.  */
  rtx step;            /* Step of the induction variable.  */
  unsigned n_loc;
  unsigned loc[3];     /* Location where the definition of the induction
                          variable occurs in the insn.  For example if
                          N_LOC is 2, the expression is located at
                          XEXP (XEXP (single_set, loc[0]), loc[1]).  */
};
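
/* For illustration (made-up register number): for the increment insn

     (set (reg:SI 60) (plus:SI (reg:SI 60) (const_int 1)))

   analyze_iv_to_split_insn below records N_LOC == 1 and loc[0] == 1,
   so the recorded location is XEXP (single_set (insn), 1), i.e. the
   PLUS that forms the SET_SRC.  */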

/* Information about accumulators to expand.  */

struct var_to_expand
{
  rtx insn;                        /* The insn in which the variable expansion occurs.  */
  rtx reg;                         /* The accumulator which is expanded.  */
  VEC(rtx,heap) *var_expansions;   /* The copies of the accumulator which is expanded.  */
  enum rtx_code op;                /* The type of the accumulation - addition, subtraction
                                      or multiplication.  */
  int expansion_count;             /* Count the number of expansions generated so far.  */
  int reuse_expansion;             /* The expansion we intend to reuse to expand
                                      the accumulator.  If REUSE_EXPANSION is 0 reuse
                                      the original accumulator.  Else use
                                      var_expansions[REUSE_EXPANSION - 1].  */
  unsigned accum_pos;              /* The position in which the accumulator is placed in
                                      the insn src.  For example in x = x + something
                                      accum_pos is 0 while in x = something + x accum_pos
                                      is 1.  */
};

/* Information about optimizations applied in
   the unrolled loop.  */

struct opt_info
{
  htab_t insns_to_split;           /* A hashtable of insns to split.  */
  htab_t insns_with_var_to_expand; /* A hashtable of insns with accumulators
                                      to expand.  */
  unsigned first_new_block;        /* The first basic block that was
                                      duplicated.  */
  basic_block loop_exit;           /* The loop exit basic block.  */
  basic_block loop_preheader;      /* The loop preheader basic block.  */
};

static void decide_unrolling_and_peeling (int);
static void peel_loops_completely (int);
static void decide_peel_simple (struct loop *, int);
static void decide_peel_once_rolling (struct loop *, int);
static void decide_peel_completely (struct loop *, int);
static void decide_unroll_stupid (struct loop *, int);
static void decide_unroll_constant_iterations (struct loop *, int);
static void decide_unroll_runtime_iterations (struct loop *, int);
static void peel_loop_simple (struct loop *);
static void peel_loop_completely (struct loop *);
static void unroll_loop_stupid (struct loop *);
static void unroll_loop_constant_iterations (struct loop *);
static void unroll_loop_runtime_iterations (struct loop *);
static struct opt_info *analyze_insns_in_loop (struct loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx);
static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx);
static struct iv_to_split *analyze_iv_to_split_insn (rtx);
static void expand_var_during_unrolling (struct var_to_expand *, rtx);
static int insert_var_expansion_initialization (void **, void *);
static int combine_var_copies_in_loop_exit (void **, void *);
static int release_var_copies (void **, void *);
static rtx get_expansion (struct var_to_expand *);

/* Unroll and/or peel (depending on FLAGS) LOOPS.  */
void
unroll_and_peel_loops (int flags)
{
  struct loop *loop;
  bool check;
  loop_iterator li;

  /* First perform complete loop peeling (it is almost surely a win,
     and affects the parameters for further decisions a lot).  */
  peel_loops_completely (flags);

  /* Now decide the rest of the unrolling and peeling.  */
  decide_unrolling_and_peeling (flags);

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      check = true;
      /* And perform the appropriate transformations.  */
      switch (loop->lpt_decision.decision)
        {
        case LPT_PEEL_COMPLETELY:
          /* Already done.  */
          gcc_unreachable ();
        case LPT_PEEL_SIMPLE:
          peel_loop_simple (loop);
          break;
        case LPT_UNROLL_CONSTANT:
          unroll_loop_constant_iterations (loop);
          break;
        case LPT_UNROLL_RUNTIME:
          unroll_loop_runtime_iterations (loop);
          break;
        case LPT_UNROLL_STUPID:
          unroll_loop_stupid (loop);
          break;
        case LPT_NONE:
          check = false;
          break;
        default:
          gcc_unreachable ();
        }
      if (check)
        {
#ifdef ENABLE_CHECKING
          verify_dominators (CDI_DOMINATORS);
          verify_loop_structure ();
#endif
        }
    }

  iv_analysis_done ();
}

/* Check whether the exit of LOOP is at the end of the loop body.  */

static bool
loop_exit_at_end_p (struct loop *loop)
{
  struct niter_desc *desc = get_simple_loop_desc (loop);
  rtx insn;

  if (desc->in_edge->dest != loop->latch)
    return false;

  /* Check that the latch is empty.  */
  FOR_BB_INSNS (loop->latch, insn)
    {
      if (INSN_P (insn))
        return false;
    }

  return true;
}
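
/* In source terms (an illustrative sketch): for a do { body; } while (cond);
   loop the only exit test sits at the end of the body, the in-edge of the
   test targets an empty latch and the predicate above is true; for a
   while (cond) { body; } loop the test precedes the body and it is false.  */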

/* Depending on FLAGS, check whether to peel loops completely and do so.  */
static void
peel_loops_completely (int flags)
{
  struct loop *loop;
  loop_iterator li;

  /* Scan the loops, the inner ones first.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      loop->lpt_decision.decision = LPT_NONE;

      if (dump_file)
        fprintf (dump_file,
                 "\n;; *** Considering loop %d for complete peeling ***\n",
                 loop->num);

      loop->ninsns = num_loop_insns (loop);

      decide_peel_once_rolling (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_peel_completely (loop, flags);

      if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
        {
          peel_loop_completely (loop);
#ifdef ENABLE_CHECKING
          verify_dominators (CDI_DOMINATORS);
          verify_loop_structure ();
#endif
        }
    }
}

/* Decide whether to unroll or peel loops (depending on FLAGS) and how much.  */
static void
decide_unrolling_and_peeling (int flags)
{
  struct loop *loop;
  loop_iterator li;

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      loop->lpt_decision.decision = LPT_NONE;

      if (dump_file)
        fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num);

      /* Do not peel cold areas.  */
      if (optimize_loop_for_size_p (loop))
        {
          if (dump_file)
            fprintf (dump_file, ";; Not considering loop, cold area\n");
          continue;
        }

      /* Can the loop be manipulated?  */
      if (!can_duplicate_loop_p (loop))
        {
          if (dump_file)
            fprintf (dump_file,
                     ";; Not considering loop, cannot duplicate\n");
          continue;
        }

      /* Skip non-innermost loops.  */
      if (loop->inner)
        {
          if (dump_file)
            fprintf (dump_file, ";; Not considering loop, is not innermost\n");
          continue;
        }

      loop->ninsns = num_loop_insns (loop);
      loop->av_ninsns = average_num_loop_insns (loop);

      /* Try transformations one by one in decreasing order of
         priority.  */

      decide_unroll_constant_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_unroll_runtime_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_unroll_stupid (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_peel_simple (loop, flags);
    }
}

/* Decide whether the LOOP is once rolling and suitable for complete
   peeling.  */
static void
decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED)
{
  struct niter_desc *desc;

  if (dump_file)
    fprintf (dump_file, "\n;; Considering peeling once rolling loop\n");

  /* Is the loop small enough?  */
  if ((unsigned) PARAM_VALUE (PARAM_MAX_ONCE_PEELED_INSNS) < loop->ninsns)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p
      || desc->assumptions
      || desc->infinite
      || !desc->const_iter
      || desc->niter != 0)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the loop rolls exactly once\n");
      return;
    }

  /* Success.  */
  if (dump_file)
    fprintf (dump_file, ";; Decided to peel exactly once rolling loop\n");
  loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
}

/* Decide whether the LOOP is suitable for complete peeling.  */
static void
decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED)
{
  unsigned npeel;
  struct niter_desc *desc;

  if (dump_file)
    fprintf (dump_file, "\n;; Considering peeling completely\n");

  /* Skip non-innermost loops.  */
  if (loop->inner)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is not innermost\n");
      return;
    }

  /* Do not peel cold areas.  */
  if (optimize_loop_for_size_p (loop))
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, cold area\n");
      return;
    }

  /* Can the loop be manipulated?  */
  if (!can_duplicate_loop_p (loop))
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Not considering loop, cannot duplicate\n");
      return;
    }

  /* npeel = number of iterations to peel.  */
  npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS) / loop->ninsns;
  if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
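
  /* For instance (illustrative parameter values): with
     PARAM_MAX_COMPLETELY_PEELED_INSNS == 400, a 50-insn loop allows
     npeel == 8 unless PARAM_MAX_COMPLETELY_PEEL_TIMES caps it lower,
     while a 500-insn loop yields npeel == 0 and is rejected below.  */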

  /* Is the loop small enough?  */
  if (!npeel)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p
      || desc->assumptions
      || !desc->const_iter
      || desc->infinite)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the loop iterates constant times\n");
      return;
    }

  if (desc->niter > npeel - 1)
    {
      if (dump_file)
        {
          fprintf (dump_file,
                   ";; Not peeling loop completely, rolls too much (");
          fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
          fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel);
        }
      return;
    }

  /* Success.  */
  if (dump_file)
    fprintf (dump_file, ";; Decided to peel loop completely\n");
  loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
}

/* Peel all iterations of LOOP, remove the exit edges and cancel the loop
   completely.  The transformation does this:

   for (i = 0; i < 4; i++)
     body;

   ==>

   i = 0;
   body; i++;
   body; i++;
   body; i++;
   body; i++;
   */
static void
peel_loop_completely (struct loop *loop)
{
  sbitmap wont_exit;
  unsigned HOST_WIDE_INT npeel;
  unsigned i;
  VEC (edge, heap) *remove_edges;
  edge ein;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;

  npeel = desc->niter;

  if (npeel)
    {
      bool ok;

      wont_exit = sbitmap_alloc (npeel + 1);
      sbitmap_ones (wont_exit);
      RESET_BIT (wont_exit, 0);
      if (desc->noloop_assumptions)
        RESET_BIT (wont_exit, 1);

      remove_edges = NULL;

      if (flag_split_ivs_in_unroller)
        opt_info = analyze_insns_in_loop (loop);

      opt_info_start_duplication (opt_info);
      ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                          npeel,
                                          wont_exit, desc->out_edge,
                                          &remove_edges,
                                          DLTHE_FLAG_UPDATE_FREQ
                                          | DLTHE_FLAG_COMPLETTE_PEEL
                                          | (opt_info
                                             ? DLTHE_RECORD_COPY_NUMBER : 0));
      gcc_assert (ok);

      free (wont_exit);

      if (opt_info)
        {
          apply_opt_in_copies (opt_info, npeel, false, true);
          free_opt_info (opt_info);
        }

      /* Remove the exit edges.  */
      for (i = 0; VEC_iterate (edge, remove_edges, i, ein); i++)
        remove_path (ein);
      VEC_free (edge, heap, remove_edges);
    }

  ein = desc->in_edge;
  free_simple_loop_desc (loop);

  /* Now remove the unreachable part of the last iteration and cancel
     the loop.  */
  remove_path (ein);

  if (dump_file)
    fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel);
}

/* Decide whether to unroll LOOP iterating a constant number of times
   and how much.  */

static void
decide_unroll_constant_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
  struct niter_desc *desc;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
             "\n;; Considering unrolling loop with constant "
             "number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     the unrolled loop (i.e. if it is 2, we have to duplicate the loop
     body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p || !desc->const_iter || desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls enough to consider.  */
  if (desc->niter < 2 * nunroll)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now compute the number of iterations to unroll.  We alter
     nunroll so that as few copies of the loop body as possible are
     necessary, while still not decreasing the number of unrollings
     too much (at most by 1).  */
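
  /* A small worked example (illustrative numbers): with niter == 10,
     nunroll == 4 and the exit not at the end of the body, the search
     below tries unroll factors u == i + 1; u == 5 divides niter evenly
     (exit_mod == 0, n_copies == 5), whereas u == 4 needs 2 extra peeled
     copies (n_copies == 6), so we would choose best_unroll == 4, i.e.
     an unroll factor of 5.  */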
  best_copies = 2 * nunroll + 10;

  i = 2 * nunroll + 2;
  if (i - 1 >= desc->niter)
    i = desc->niter - 2;

  for (; i >= nunroll - 1; i--)
    {
      unsigned exit_mod = desc->niter % (i + 1);

      if (!loop_exit_at_end_p (loop))
        n_copies = exit_mod + i + 1;
      else if (exit_mod != (unsigned) i
               || desc->noloop_assumptions != NULL_RTX)
        n_copies = exit_mod + i + 2;
      else
        n_copies = i + 1;

      if (n_copies < best_copies)
        {
          best_copies = n_copies;
          best_unroll = i;
        }
    }

  if (dump_file)
    fprintf (dump_file, ";; max_unroll %d (%d copies, initial %d).\n",
             best_unroll + 1, best_copies, nunroll);

  loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
  loop->lpt_decision.times = best_unroll;

  if (dump_file)
    fprintf (dump_file,
             ";; Decided to unroll the constant times rolling loop, %d times.\n",
             loop->lpt_decision.times);
}

/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES + 1
   times.  The transformation does this:

   for (i = 0; i < 102; i++)
     body;

   ==>

   i = 0;
   body; i++;
   body; i++;
   while (i < 102)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
   */
static void
unroll_loop_constant_iterations (struct loop *loop)
{
  unsigned HOST_WIDE_INT niter;
  unsigned exit_mod;
  sbitmap wont_exit;
  unsigned i;
  VEC (edge, heap) *remove_edges;
  edge e;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  niter = desc->niter;

  /* Should not get here (such a loop should be peeled instead).  */
  gcc_assert (niter > max_unroll + 1);

  exit_mod = niter % (max_unroll + 1);

  wont_exit = sbitmap_alloc (max_unroll + 1);
  sbitmap_ones (wont_exit);

  remove_edges = NULL;
  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  if (!exit_at_end)
    {
      /* The exit is not at the end of the loop; leave the exit test
         in the first copy, so that loops that start with a test of the
         exit condition have a continuous body after unrolling.  */

      if (dump_file)
        fprintf (dump_file, ";; Condition on beginning of loop.\n");

      /* Peel exit_mod iterations.  */
      RESET_BIT (wont_exit, 0);
      if (desc->noloop_assumptions)
        RESET_BIT (wont_exit, 1);

      if (exit_mod)
        {
          opt_info_start_duplication (opt_info);
          ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                              exit_mod,
                                              wont_exit, desc->out_edge,
                                              &remove_edges,
                                              DLTHE_FLAG_UPDATE_FREQ
                                              | (opt_info && exit_mod > 1
                                                 ? DLTHE_RECORD_COPY_NUMBER
                                                 : 0));
          gcc_assert (ok);

          if (opt_info && exit_mod > 1)
            apply_opt_in_copies (opt_info, exit_mod, false, false);

          desc->noloop_assumptions = NULL_RTX;
          desc->niter -= exit_mod;
          desc->niter_max -= exit_mod;
        }

      SET_BIT (wont_exit, 1);
    }
  else
    {
      /* Leave the exit test in the last copy, for the same reason as
         above, if the loop tests the condition at the end of the loop
         body.  */

      if (dump_file)
        fprintf (dump_file, ";; Condition on end of loop.\n");

      /* We know that niter >= max_unroll + 2; so we do not need to care
         about the case when we would exit before reaching the loop.  So
         just peel exit_mod + 1 iterations.  */
      if (exit_mod != max_unroll
          || desc->noloop_assumptions)
        {
          RESET_BIT (wont_exit, 0);
          if (desc->noloop_assumptions)
            RESET_BIT (wont_exit, 1);

          opt_info_start_duplication (opt_info);
          ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                              exit_mod + 1,
                                              wont_exit, desc->out_edge,
                                              &remove_edges,
                                              DLTHE_FLAG_UPDATE_FREQ
                                              | (opt_info && exit_mod > 0
                                                 ? DLTHE_RECORD_COPY_NUMBER
                                                 : 0));
          gcc_assert (ok);

          if (opt_info && exit_mod > 0)
            apply_opt_in_copies (opt_info, exit_mod + 1, false, false);

          desc->niter -= exit_mod + 1;
          desc->niter_max -= exit_mod + 1;
          desc->noloop_assumptions = NULL_RTX;

          SET_BIT (wont_exit, 0);
          SET_BIT (wont_exit, 1);
        }

      RESET_BIT (wont_exit, max_unroll);
    }

  /* Now unroll the loop.  */

  opt_info_start_duplication (opt_info);
  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
                                      max_unroll,
                                      wont_exit, desc->out_edge,
                                      &remove_edges,
                                      DLTHE_FLAG_UPDATE_FREQ
                                      | (opt_info
                                         ? DLTHE_RECORD_COPY_NUMBER
                                         : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find the new in and out edges; they are in the last copy we
         have made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
        {
          desc->out_edge = EDGE_SUCC (exit_block, 0);
          desc->in_edge = EDGE_SUCC (exit_block, 1);
        }
      else
        {
          desc->out_edge = EDGE_SUCC (exit_block, 1);
          desc->in_edge = EDGE_SUCC (exit_block, 0);
        }
    }

  desc->niter /= max_unroll + 1;
  desc->niter_max /= max_unroll + 1;
  desc->niter_expr = GEN_INT (desc->niter);

  /* Remove the edges.  */
  for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++)
    remove_path (e);
  VEC_free (edge, heap, remove_edges);

  if (dump_file)
    fprintf (dump_file,
             ";; Unrolled loop %d times, constant # of iterations %i insns\n",
             max_unroll, num_loop_insns (loop));
}

/* Decide whether to unroll LOOP iterating a runtime-computable number
   of times and how much.  */
static void
decide_unroll_runtime_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
             "\n;; Considering unrolling loop with runtime "
             "computable number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     the unrolled loop (i.e. if it is 2, we have to duplicate the loop
     body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simplicity.  */
  if (!desc->simple_p || desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the number of iterations "
                 "can be counted in runtime\n");
      return;
    }

  if (desc->const_iter)
    {
      if (dump_file)
        fprintf (dump_file, ";; Loop iterates constant times\n");
      return;
    }

  /* If we have profile feedback, check whether the loop rolls.  */
  if (loop->header->count && expected_loop_iterations (loop) < 2 * nunroll)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now force nunroll to be a power of 2, as we are unable to
     cope with overflows in the computation of the number of iterations.  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;
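
  /* E.g. (illustrative): nunroll == 7 leaves i == 4 here, so we unroll
     4x and record times == 3.  */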

  loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
  loop->lpt_decision.times = i - 1;

  if (dump_file)
    fprintf (dump_file,
             ";; Decided to unroll the runtime computable "
             "times rolling loop, %d times.\n",
             loop->lpt_decision.times);
}

/* Splits edge E and inserts the sequence of instructions INSNS on it, and
   returns the newly created block.  If INSNS is NULL_RTX, nothing is changed
   and NULL is returned instead.  */

basic_block
split_edge_and_insert (edge e, rtx insns)
{
  basic_block bb;

  if (!insns)
    return NULL;
  bb = split_edge (e);
  emit_insn_after (insns, BB_END (bb));

  /* ??? We used to assume that INSNS can contain control flow insns, and
     that we had to try to find sub basic blocks in BB to maintain a valid
     CFG.  For this purpose we used to set the BB_SUPERBLOCK flag on BB
     and call break_superblocks when going out of cfglayout mode.  But it
     turns out that this never happens; and that if it does ever happen,
     the verify_flow_info call in loop_optimizer_finalize would fail.

     There are two reasons why we expected we could have control flow insns
     in INSNS.  The first is when a comparison has to be done in parts, and
     the second is when the number of iterations is computed for loops with
     the number of iterations known at runtime.  In both cases, test cases
     to get control flow in INSNS appear to be impossible to construct:

     * If do_compare_rtx_and_jump needs several branches to do comparison
       in a mode that needs comparison by parts, we cannot analyze the
       number of iterations of the loop, and we never get to unrolling it.

     * The code in expand_divmod that was suspected to cause creation of
       branching code seems to be only accessed for signed division.  The
       divisions used by the # of iterations analysis are always unsigned.
       Problems might arise on architectures that emit branching code
       for some operations that may appear in the unroller (especially
       for division), but we have no such architectures.

     Considering all this, it was decided that we should for now assume
     that INSNS can in theory contain control flow insns, but in practice
     it never does.  So we don't handle the theoretical case, and should
     a real failure ever show up, we have a pretty good clue for how to
     fix it.  */

  return bb;
}

/* Unroll LOOP, for which we are able to count the number of iterations
   at runtime, LOOP->LPT_DECISION.TIMES + 1 times.  The transformation
   does this (with some extra care for the case n < 0):

   for (i = 0; i < n; i++)
     body;

   ==>

   i = 0;
   mod = n % 4;

   switch (mod)
     {
       case 3:
         body; i++;
       case 2:
         body; i++;
       case 1:
         body; i++;
       case 0: ;
     }

   while (i < n)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
   */
static void
unroll_loop_runtime_iterations (struct loop *loop)
{
  rtx old_niter, niter, init_code, branch_code, tmp;
  unsigned i, j, p;
  basic_block preheader, *body, swtch, ezc_swtch;
  VEC (basic_block, heap) *dom_bbs;
  sbitmap wont_exit;
  int may_exit_copy;
  unsigned n_peel;
  VEC (edge, heap) *remove_edges;
  edge e;
  bool extra_zero_check, last_may_exit;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  /* Remember blocks whose dominators will have to be updated.  */
  dom_bbs = NULL;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      VEC (basic_block, heap) *ldom;
      basic_block bb;

      ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
      for (j = 0; VEC_iterate (basic_block, ldom, j, bb); j++)
        if (!flow_bb_inside_loop_p (loop, bb))
          VEC_safe_push (basic_block, heap, dom_bbs, bb);

      VEC_free (basic_block, heap, ldom);
    }
  free (body);

  if (!exit_at_end)
    {
      /* Leave the exit in the first copy (for an explanation why, see
         the comment in unroll_loop_constant_iterations).  */
      may_exit_copy = 0;
      n_peel = max_unroll - 1;
      extra_zero_check = true;
      last_may_exit = false;
    }
  else
    {
      /* Leave the exit in the last copy (for an explanation why, see
         the comment in unroll_loop_constant_iterations).  */
      may_exit_copy = max_unroll;
      n_peel = max_unroll;
      extra_zero_check = false;
      last_may_exit = true;
    }

  /* Get an expression for the number of iterations.  */
  start_sequence ();
  old_niter = niter = gen_reg_rtx (desc->mode);
  tmp = force_operand (copy_rtx (desc->niter_expr), niter);
  if (tmp != niter)
    emit_move_insn (niter, tmp);

  /* Count the modulo by ANDing it with max_unroll; we use the fact that
     the number of unrollings is a power of two, and thus this is correct
     even if there is overflow in the computation.  */
  niter = expand_simple_binop (desc->mode, AND,
                               niter,
                               GEN_INT (max_unroll),
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);
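
  /* For instance (illustrative): with an unroll factor of 4 we have
     max_unroll == 3, and niter & 3 equals niter % 4 for every unsigned
     value, even when the expression computing niter wrapped around.  */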

  init_code = get_insns ();
  end_sequence ();
  unshare_all_rtl_in_chain (init_code);

  /* Precondition the loop.  */
  split_edge_and_insert (loop_preheader_edge (loop), init_code);

  remove_edges = NULL;

  wont_exit = sbitmap_alloc (max_unroll + 2);

  /* Peel the first copy of the loop body (almost always we must leave
     the exit test here; the only exception is when we have an extra
     zero check and the number of iterations is reliable).  Also record
     the place of the (possible) extra zero check.  */
  sbitmap_zero (wont_exit);
  if (extra_zero_check
      && !desc->noloop_assumptions)
    SET_BIT (wont_exit, 1);
  ezc_swtch = loop_preheader_edge (loop)->src;
  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                      1, wont_exit, desc->out_edge,
                                      &remove_edges,
                                      DLTHE_FLAG_UPDATE_FREQ);
  gcc_assert (ok);

  /* Record the place where the switch will be built for preconditioning.  */
  swtch = split_edge (loop_preheader_edge (loop));

  for (i = 0; i < n_peel; i++)
    {
      /* Peel the copy.  */
      sbitmap_zero (wont_exit);
      if (i != n_peel - 1 || !last_may_exit)
        SET_BIT (wont_exit, 1);
      ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                          1, wont_exit, desc->out_edge,
                                          &remove_edges,
                                          DLTHE_FLAG_UPDATE_FREQ);
      gcc_assert (ok);

      /* Create an item for the switch.  */
      j = n_peel - i - (extra_zero_check ? 0 : 1);
      p = REG_BR_PROB_BASE / (i + 2);

      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
                                          block_label (preheader), p,
                                          NULL_RTX);

      /* We rely on the fact that the compare and jump cannot be optimized out,
         and hence the cfg we create is correct.  */
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
                     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->probability = p;
    }

  if (extra_zero_check)
    {
      /* Add a branch for zero iterations.  */
      p = REG_BR_PROB_BASE / (max_unroll + 1);
      swtch = ezc_swtch;
      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
                                          block_label (preheader), p,
                                          NULL_RTX);
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
                     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->probability = p;
    }

  /* Recount dominators for outer blocks.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);

  /* And unroll loop.  */

  sbitmap_ones (wont_exit);
  RESET_BIT (wont_exit, may_exit_copy);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
                                      max_unroll,
                                      wont_exit, desc->out_edge,
                                      &remove_edges,
                                      DLTHE_FLAG_UPDATE_FREQ
                                      | (opt_info
                                         ? DLTHE_RECORD_COPY_NUMBER
                                         : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find the new in and out edges; they are in the last copy we
         have made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
        {
          desc->out_edge = EDGE_SUCC (exit_block, 0);
          desc->in_edge = EDGE_SUCC (exit_block, 1);
        }
      else
        {
          desc->out_edge = EDGE_SUCC (exit_block, 1);
          desc->in_edge = EDGE_SUCC (exit_block, 0);
        }
    }

  /* Remove the edges.  */
  for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++)
    remove_path (e);
  VEC_free (edge, heap, remove_edges);

  /* We must be careful when updating the number of iterations due to
     preconditioning and the fact that the value must be valid at entry
     of the loop.  After passing through the above code, we see that
     the correct new number of iterations is this:  */
  gcc_assert (!desc->const_iter);
  desc->niter_expr =
    simplify_gen_binary (UDIV, desc->mode, old_niter,
                         GEN_INT (max_unroll + 1));
  desc->niter_max /= max_unroll + 1;
  if (exit_at_end)
    {
      desc->niter_expr =
        simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
      desc->noloop_assumptions = NULL_RTX;
      desc->niter_max--;
    }

  if (dump_file)
    fprintf (dump_file,
             ";; Unrolled loop %d times, counting # of iterations "
             "in runtime, %i insns\n",
             max_unroll, num_loop_insns (loop));

  VEC_free (basic_block, heap, dom_bbs);
}

/* Decide whether to simply peel LOOP and how much.  */
static void
decide_peel_simple (struct loop *loop, int flags)
{
  unsigned npeel;
  struct niter_desc *desc;

  if (!(flags & UAP_PEEL))
    {
      /* We were not asked to, just return silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file, "\n;; Considering simply peeling loop\n");

  /* npeel = number of iterations to peel.  */
  npeel = PARAM_VALUE (PARAM_MAX_PEELED_INSNS) / loop->ninsns;
  if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_PEEL_TIMES))
    npeel = PARAM_VALUE (PARAM_MAX_PEEL_TIMES);

  /* Skip big loops.  */
  if (!npeel)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (desc->simple_p && !desc->assumptions && desc->const_iter)
    {
      if (dump_file)
        fprintf (dump_file, ";; Loop iterates constant times\n");
      return;
    }

  /* Do not simply peel loops with branches inside -- it increases the
     number of mispredicts.  */
  if (num_loop_branches (loop) > 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not peeling, contains branches\n");
      return;
    }

  if (loop->header->count)
    {
      unsigned niter = expected_loop_iterations (loop);
      if (niter + 1 > npeel)
        {
          if (dump_file)
            {
              fprintf (dump_file, ";; Not peeling loop, rolls too much (");
              fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
                       (HOST_WIDEST_INT) (niter + 1));
              fprintf (dump_file, " iterations > %d [maximum peelings])\n",
                       npeel);
            }
          return;
        }
      npeel = niter + 1;
    }
  else
    {
      /* For now we have no good heuristics to decide whether loop peeling
         will be effective, so disable it.  */
      if (dump_file)
        fprintf (dump_file,
                 ";; Not peeling loop, no evidence it will be profitable\n");
      return;
    }

  /* Success.  */
  loop->lpt_decision.decision = LPT_PEEL_SIMPLE;
  loop->lpt_decision.times = npeel;

  if (dump_file)
    fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n",
             loop->lpt_decision.times);
}

/* Peel a LOOP LOOP->LPT_DECISION.TIMES times.  The transformation:
   while (cond)
     body;

   ==>

   if (!cond) goto end;
   body;
   if (!cond) goto end;
   body;
   while (cond)
     body;
   end: ;
   */
static void
peel_loop_simple (struct loop *loop)
{
  sbitmap wont_exit;
  unsigned npeel = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller && npeel > 1)
    opt_info = analyze_insns_in_loop (loop);

  wont_exit = sbitmap_alloc (npeel + 1);
  sbitmap_zero (wont_exit);

  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                      npeel, wont_exit, NULL,
                                      NULL, DLTHE_FLAG_UPDATE_FREQ
                                      | (opt_info
                                         ? DLTHE_RECORD_COPY_NUMBER
                                         : 0));
  gcc_assert (ok);

  free (wont_exit);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, npeel, false, false);
      free_opt_info (opt_info);
    }

  if (desc->simple_p)
    {
      if (desc->const_iter)
        {
          desc->niter -= npeel;
          desc->niter_expr = GEN_INT (desc->niter);
          desc->noloop_assumptions = NULL_RTX;
        }
      else
        {
          /* We cannot just update niter_expr, as its value might be
             clobbered inside the loop.  We could handle this by counting
             the number into a temporary just like we do in runtime
             unrolling, but it does not seem worthwhile.  */
          free_simple_loop_desc (loop);
        }
    }
  if (dump_file)
    fprintf (dump_file, ";; Peeling loop %d times\n", npeel);
}

/* Decide whether to unroll LOOP stupidly and how much.  */
static void
decide_unroll_stupid (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;

  if (!(flags & UAP_UNROLL_ALL))
    {
      /* We were not asked to, just return silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");

  /* nunroll = total number of copies of the original loop body in
     the unrolled loop (i.e. if it is 2, we have to duplicate the loop
     body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simplicity.  */
  if (desc->simple_p && !desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file, ";; The loop is simple\n");
      return;
    }

  /* Do not unroll loops with branches inside -- it increases the
     number of mispredicts.  */
  if (num_loop_branches (loop) > 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling, contains branches\n");
      return;
    }

  /* If we have profile feedback, check whether the loop rolls.  */
  if (loop->header->count
      && expected_loop_iterations (loop) < 2 * nunroll)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success.  Now force nunroll to be a power of 2, as it seems that this
     improves results (partially because of better alignments, partially
     because of some dark magic).  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_STUPID;
  loop->lpt_decision.times = i - 1;

  if (dump_file)
    fprintf (dump_file,
             ";; Decided to unroll the loop stupidly, %d times.\n",
             loop->lpt_decision.times);
}

/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times.  The transformation:
   while (cond)
     body;

   ==>

   while (cond)
     {
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
     }
   */
static void
unroll_loop_stupid (struct loop *loop)
{
  sbitmap wont_exit;
  unsigned nunroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  wont_exit = sbitmap_alloc (nunroll + 1);
  sbitmap_zero (wont_exit);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
                                      nunroll, wont_exit,
                                      NULL, NULL,
                                      DLTHE_FLAG_UPDATE_FREQ
                                      | (opt_info
                                         ? DLTHE_RECORD_COPY_NUMBER
                                         : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, nunroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (desc->simple_p)
    {
      /* We indeed may get here, provided that there are nontrivial
         assumptions for the loop to be really simple.  We could update
         the counts, but the problem is that we are unable to decide which
         exit will be taken (not really true in case the number of
         iterations is constant, but no one will do anything with this
         information, so we do not worry about it).  */
      desc->simple_p = false;
    }

  if (dump_file)
    fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
             nunroll, num_loop_insns (loop));
}

/* A hash function for information about insns to split.  */

static hashval_t
si_info_hash (const void *ivts)
{
  return (hashval_t) INSN_UID (((const struct iv_to_split *) ivts)->insn);
}

/* An equality function for information about insns to split.  */

static int
si_info_eq (const void *ivts1, const void *ivts2)
{
  const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1;
  const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2;

  return i1->insn == i2->insn;
}

/* Return a hash for VES, which is really a "var_to_expand *".  */

static hashval_t
ve_info_hash (const void *ves)
{
  return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn);
}

/* Return true if IVTS1 and IVTS2 (which are really both of type
   "var_to_expand *") refer to the same instruction.  */

static int
ve_info_eq (const void *ivts1, const void *ivts2)
{
  const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1;
  const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2;

  return i1->insn == i2->insn;
}

/* Returns true if REG is referenced in exactly one insn in LOOP.  */

bool
referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg)
{
  basic_block *body, bb;
  unsigned i;
  int count_ref = 0;
  rtx insn;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
        {
          if (rtx_referenced_p (reg, insn))
            count_ref++;
        }
    }
  free (body);
  return (count_ref == 1);
}

/* Determine whether INSN contains an accumulator
   which can be expanded into separate copies,
   one for each copy of the LOOP body.

   for (i = 0 ; i < n; i++)
     sum += a[i];

   ==>

   sum += a[i]
   ....
   i = i+1;
   sum1 += a[i]
   ....
   i = i+1
   sum2 += a[i];
   ....

   Return NULL if INSN contains no opportunity for expansion of the
   accumulator.  Otherwise, allocate a VAR_TO_EXPAND structure, fill it
   with the relevant information and return a pointer to it.  */

static struct var_to_expand *
analyze_insn_to_expand_var (struct loop *loop, rtx insn)
{
  rtx set, dest, src, op1, op2, something;
  struct var_to_expand *ves;
  enum machine_mode mode1, mode2;
  unsigned accum_pos;

  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  if (GET_CODE (src) != PLUS
      && GET_CODE (src) != MINUS
      && GET_CODE (src) != MULT)
    return NULL;

  /* Hmm, this is a bit paradoxical.  We know that INSN is a valid insn
     in the MD.  But if there is no optab to generate the insn, we cannot
     perform the variable expansion.  This can happen if an MD provides
     an insn but not a named pattern to generate it, for example to avoid
     producing code that needs additional mode switches like for x87/mmx.

     So we check have_insn_for, which looks for an optab for the operation
     in SRC.  If it doesn't exist, we can't perform the expansion even
     though INSN is valid.  */
  if (!have_insn_for (GET_CODE (src), GET_MODE (src)))
    return NULL;

  op1 = XEXP (src, 0);
  op2 = XEXP (src, 1);

  if (!REG_P (dest)
      && !(GET_CODE (dest) == SUBREG
           && REG_P (SUBREG_REG (dest))))
    return NULL;

  if (rtx_equal_p (dest, op1))
    accum_pos = 0;
  else if (rtx_equal_p (dest, op2))
    accum_pos = 1;
  else
    return NULL;

  /* The method of expansion that we are using, which includes initializing
     the expansions with zero and summing the expansions at the end of the
     computation, will yield wrong results for (x = something - x); thus
     avoid using it in that case.  */
  if (accum_pos == 1
      && GET_CODE (src) == MINUS)
    return NULL;

  something = (accum_pos == 0) ? op2 : op1;

  if (!referenced_in_one_insn_in_loop_p (loop, dest))
    return NULL;

  if (rtx_referenced_p (dest, something))
    return NULL;

  mode1 = GET_MODE (dest);
  mode2 = GET_MODE (something);
  if ((FLOAT_MODE_P (mode1)
       || FLOAT_MODE_P (mode2))
      && !flag_associative_math)
    return NULL;

  if (dump_file)
    {
      fprintf (dump_file,
               "\n;; Expanding Accumulator ");
      print_rtl (dump_file, dest);
      fprintf (dump_file, "\n");
    }

  /* Record the accumulator to expand.  */
  ves = XNEW (struct var_to_expand);
  ves->insn = insn;
  ves->var_expansions = VEC_alloc (rtx, heap, 1);
  ves->reg = copy_rtx (dest);
  ves->op = GET_CODE (src);
  ves->expansion_count = 0;
  ves->reuse_expansion = 0;
  ves->accum_pos = accum_pos;
  return ves;
}

/* Determine whether there is an induction variable in INSN that
   we would like to split during unrolling.

   I.e. replace

   i = i + 1;
   ...
   i = i + 1;
   ...
   i = i + 1;
   ...

   type chains by

   i0 = i + 1
   ...
   i = i0 + 1
   ...
   i = i0 + 2
   ...

   Return NULL if INSN contains no interesting IVs.  Otherwise, allocate
   an IV_TO_SPLIT structure, fill it with the relevant information and
   return a pointer to it.  */

static struct iv_to_split *
analyze_iv_to_split_insn (rtx insn)
{
  rtx set, dest;
  struct rtx_iv iv;
  struct iv_to_split *ivts;
  bool ok;

  /* For now we just split the basic induction variables.  Later this may be
     extended for example by selecting also addresses of memory references.  */
  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  if (!REG_P (dest))
    return NULL;

  if (!biv_p (insn, dest))
    return NULL;

  ok = iv_analyze_result (insn, dest, &iv);

  /* This used to be an assert under the assumption that if biv_p returns
     true, then iv_analyze_result must also return true.  However, that
     assumption is not strictly correct as evidenced by pr25569.

     Returning NULL when iv_analyze_result returns false is safe and
     avoids the problems in pr25569 until the iv_analyze_* routines
     can be fixed, which is apparently hard and time consuming
     according to their author.  */
  if (! ok)
    return NULL;

  if (iv.step == const0_rtx
      || iv.mode != iv.extend_mode)
    return NULL;

  /* Record the insn to split.  */
  ivts = XNEW (struct iv_to_split);
  ivts->insn = insn;
  ivts->base_var = NULL_RTX;
  ivts->step = iv.step;
  ivts->n_loc = 1;
  ivts->loc[0] = 1;

  return ivts;
}

/* Determine which of the insns in LOOP can be optimized.
   Return an OPT_INFO struct with the relevant hash tables filled
   with all insns to be optimized.  The FIRST_NEW_BLOCK field
   is undefined for the return value.  */

static struct opt_info *
analyze_insns_in_loop (struct loop *loop)
{
  basic_block *body, bb;
  unsigned i;
  struct opt_info *opt_info = XCNEW (struct opt_info);
  rtx insn;
  struct iv_to_split *ivts = NULL;
  struct var_to_expand *ves = NULL;
  PTR *slot1;
  PTR *slot2;
  VEC (edge, heap) *edges = get_loop_exit_edges (loop);
  edge exit;
  bool can_apply = false;

  iv_analysis_loop_init (loop);

  body = get_loop_body (loop);

  if (flag_split_ivs_in_unroller)
    opt_info->insns_to_split = htab_create (5 * loop->num_nodes,
                                            si_info_hash, si_info_eq, free);

  /* Record the loop exit bb and loop preheader before the unrolling.  */
  opt_info->loop_preheader = loop_preheader_edge (loop)->src;

  if (VEC_length (edge, edges) == 1)
    {
      exit = VEC_index (edge, edges, 0);
      if (!(exit->flags & EDGE_COMPLEX))
        {
          opt_info->loop_exit = split_edge (exit);
          can_apply = true;
        }
    }

  if (flag_variable_expansion_in_unroller
      && can_apply)
    opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes,
                                                      ve_info_hash, ve_info_eq, free);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
        continue;

      FOR_BB_INSNS (bb, insn)
        {
          if (!INSN_P (insn))
            continue;

          if (opt_info->insns_to_split)
            ivts = analyze_iv_to_split_insn (insn);

          if (ivts)
            {
              slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT);
              *slot1 = ivts;
              continue;
            }

          if (opt_info->insns_with_var_to_expand)
            ves = analyze_insn_to_expand_var (loop, insn);

          if (ves)
            {
              slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT);
              *slot2 = ves;
            }
        }
    }

  VEC_free (edge, heap, edges);
  free (body);
  return opt_info;
}

/* Called just before loop duplication.  Records the start of the
   duplicated area in OPT_INFO.  */

static void
opt_info_start_duplication (struct opt_info *opt_info)
{
  if (opt_info)
    opt_info->first_new_block = last_basic_block;
}

/* Determine the number of iterations between initialization of the base
   variable and the current copy (N_COPY).  N_COPIES is the total number
   of newly created copies.  UNROLLING is true if we are unrolling
   (not peeling) the loop.  */

static unsigned
determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
{
  if (unrolling)
    {
      /* If we are unrolling, initialization is done in the original loop
         body (number 0).  */
      return n_copy;
    }
  else
    {
      /* If we are peeling, the copy in which the initialization occurs has
         number 1.  The original loop (number 0) is the last.  */
      if (n_copy)
        return n_copy - 1;
      else
        return n_copies;
    }
}
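
/* Example (illustrative): when peeling 3 copies, the copies are numbered
   1, 2, 3 and are followed by the original body (number 0); the base
   variable is initialized in copy 1, so the deltas are 0, 1, 2 for the
   peeled copies and 3 for the original body.  */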

/* Locate in EXPR the expression corresponding to the location recorded
   in IVTS, and return a pointer to the RTX for this location.  */

static rtx *
get_ivts_expr (rtx expr, struct iv_to_split *ivts)
{
  unsigned i;
  rtx *ret = &expr;

  for (i = 0; i < ivts->n_loc; i++)
    ret = &XEXP (*ret, ivts->loc[i]);

  return ret;
}

/* Allocate a basic variable for the induction variable chain.  Callback
   for htab_traverse.  */

static int
allocate_basic_variable (void **slot, void *data ATTRIBUTE_UNUSED)
{
  struct iv_to_split *ivts = (struct iv_to_split *) *slot;
  rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);

  ivts->base_var = gen_reg_rtx (GET_MODE (expr));

  return 1;
}

/* Insert the initialization of the basic variable of IVTS before INSN,
   taking the initial value from INSN.  */

static void
insert_base_initialization (struct iv_to_split *ivts, rtx insn)
{
  rtx expr = copy_rtx (*get_ivts_expr (single_set (insn), ivts));
  rtx seq;

  start_sequence ();
  expr = force_operand (expr, ivts->base_var);
  if (expr != ivts->base_var)
    emit_move_insn (ivts->base_var, expr);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
}

/* Replace the use of the induction variable described in IVTS in INSN
   by base variable + DELTA * step.  */
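
/* For instance (illustrative register names): with base variable i0,
   step 1 and DELTA == 2, the insn (set (reg i) (plus (reg i) (const_int 1)))
   in the third body copy becomes (set (reg i) (plus (reg i0) (const_int 2))),
   matching the "i = i0 + 2" chain shown above analyze_iv_to_split_insn.  */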
1900
1901 static void
1902 split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
1903 {
1904 rtx expr, *loc, seq, incr, var;
1905 enum machine_mode mode = GET_MODE (ivts->base_var);
1906 rtx src, dest, set;
1907
1908 /* Construct base + DELTA * step. */
1909 if (!delta)
1910 expr = ivts->base_var;
1911 else
1912 {
1913 incr = simplify_gen_binary (MULT, mode,
1914 ivts->step, gen_int_mode (delta, mode));
1915 expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
1916 ivts->base_var, incr);
1917 }
1918
1919 /* Figure out where to do the replacement. */
1920 loc = get_ivts_expr (single_set (insn), ivts);
1921
1922 /* If we can make the replacement right away, we're done. */
1923 if (validate_change (insn, loc, expr, 0))
1924 return;
1925
1926 /* Otherwise, force EXPR into a register and try again. */
1927 start_sequence ();
1928 var = gen_reg_rtx (mode);
1929 expr = force_operand (expr, var);
1930 if (expr != var)
1931 emit_move_insn (var, expr);
1932 seq = get_insns ();
1933 end_sequence ();
1934 emit_insn_before (seq, insn);
1935
1936 if (validate_change (insn, loc, var, 0))
1937 return;
1938
1939 /* The last chance. Try recreating the assignment in insn
1940 completely from scratch. */
1941 set = single_set (insn);
1942 gcc_assert (set);
1943
1944 start_sequence ();
1945 *loc = var;
1946 src = copy_rtx (SET_SRC (set));
1947 dest = copy_rtx (SET_DEST (set));
1948 src = force_operand (src, dest);
1949 if (src != dest)
1950 emit_move_insn (dest, src);
1951 seq = get_insns ();
1952 end_sequence ();
1953
1954 emit_insn_before (seq, insn);
1955 delete_insn (insn);
1956 }
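/* Added illustration (hypothetical values): with STEP == 4 and DELTA == 2,
   the recorded IV occurrence in INSN is rewritten to  base_var + 8
   (i.e. base_var + DELTA * STEP).  If the replacement is not a valid operand
   for INSN, it is first forced into a fresh register; as a last resort the
   whole assignment is re-emitted from scratch and INSN is deleted.  */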
1957
1958
1959 /* Return one expansion of the accumulator recorded in struct VE. */
1960
1961 static rtx
1962 get_expansion (struct var_to_expand *ve)
1963 {
1964 rtx reg;
1965
1966 if (ve->reuse_expansion == 0)
1967 reg = ve->reg;
1968 else
1969 reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1);
1970
1971 if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion)
1972 ve->reuse_expansion = 0;
1973 else
1974 ve->reuse_expansion++;
1975
1976 return reg;
1977 }
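/* Added illustration: with two recorded expansions e0 and e1 and
   REUSE_EXPANSION starting at 0, successive calls return VE->reg, e0, e1,
   VE->reg, ... -- i.e. once the expansion limit is reached, the original
   accumulator and its expansions are handed out round-robin.  */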
1978
1979
1980 /* Given INSN, replace the uses of the accumulator recorded in VE
1981 with a new register. */
1982
1983 static void
1984 expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
1985 {
1986 rtx new_reg, set;
1987 bool really_new_expansion = false;
1988
1989 set = single_set (insn);
1990 gcc_assert (set);
1991
1992 /* Generate a new register only if the expansion limit has not been
1993 reached. Otherwise reuse an already existing expansion. */
1994 if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
1995 {
1996 really_new_expansion = true;
1997 new_reg = gen_reg_rtx (GET_MODE (ve->reg));
1998 }
1999 else
2000 new_reg = get_expansion (ve);
2001
2002 validate_change (insn, &SET_DEST (set), new_reg, 1);
2003 validate_change (insn, &XEXP (SET_SRC (set), ve->accum_pos), new_reg, 1);
2004
2005 if (apply_change_group ())
2006 if (really_new_expansion)
2007 {
2008 VEC_safe_push (rtx, heap, ve->var_expansions, new_reg);
2009 ve->expansion_count++;
2010 }
2011 }
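/* Added illustration (hypothetical source-level view): an accumulation such
   as  sum = sum + x  in one unrolled copy becomes  sum1 = sum1 + x  with a
   fresh register sum1 -- both the SET_DEST and the accumulator operand
   inside SET_SRC are replaced -- so the copies no longer form a serial
   dependence chain through SUM.  */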
2012
2013 /* Initialize the variable expansions in the loop preheader.
2014 Callback for htab_traverse. PLACE_P is the loop-preheader
2015 basic block where the initialization of the expansions
2016 should take place. The expansions are initialized with (-0)
2017 when the operation is plus or minus, to honor signed zero.
2018 This way we prevent cases where the sign of the final result is
2019 affected by the sign of the expansion.
2020 Here is an example to demonstrate this:
2021
2022 for (i = 0 ; i < n; i++)
2023 sum += something;
2024
2025 ==>
2026
2027 sum += something
2028 ....
2029 i = i+1;
2030 sum1 += something
2031 ....
2032 i = i+1
2033 sum2 += something;
2034 ....
2035
2036 When SUM is initialized with -zero and SOMETHING is also -zero, the
2037 final result of SUM should be -zero; thus the expansions sum1 and sum2
2038 should be initialized with -zero as well (otherwise we would get +zero
2039 as the final result). */
2040
2041 static int
2042 insert_var_expansion_initialization (void **slot, void *place_p)
2043 {
2044 struct var_to_expand *ve = (struct var_to_expand *) *slot;
2045 basic_block place = (basic_block)place_p;
2046 rtx seq, var, zero_init, insn;
2047 unsigned i;
2048 enum machine_mode mode = GET_MODE (ve->reg);
2049 bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);
2050
2051 if (VEC_length (rtx, ve->var_expansions) == 0)
2052 return 1;
2053
2054 start_sequence ();
2055 if (ve->op == PLUS || ve->op == MINUS)
2056 for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
2057 {
2058 if (honor_signed_zero_p)
2059 zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
2060 else
2061 zero_init = CONST0_RTX (mode);
2062
2063 emit_move_insn (var, zero_init);
2064 }
2065 else if (ve->op == MULT)
2066 for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
2067 {
2068 zero_init = CONST1_RTX (GET_MODE (var));
2069 emit_move_insn (var, zero_init);
2070 }
2071
2072 seq = get_insns ();
2073 end_sequence ();
2074
2075 insn = BB_HEAD (place);
2076 while (!NOTE_INSN_BASIC_BLOCK_P (insn))
2077 insn = NEXT_INSN (insn);
2078
2079 emit_insn_after (seq, insn);
2080 /* Continue traversing the hash table. */
2081 return 1;
2082 }
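/* Added note on the -0.0 initialization above: under IEEE semantics,
   (-0.0) + (-0.0) yields -0.0, while (+0.0) + (-0.0) yields +0.0 in the
   default rounding mode, so initializing an expansion with +0.0 could flip
   the sign of an otherwise all-negative-zero sum.  */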
2083
2084 /* Combine the variable expansions at the loop exit.
2085 Callback for htab_traverse. PLACE_P is the loop-exit
2086 basic block where the summation of the expansions should
2087 take place. */
2088
2089 static int
2090 combine_var_copies_in_loop_exit (void **slot, void *place_p)
2091 {
2092 struct var_to_expand *ve = (struct var_to_expand *) *slot;
2093 basic_block place = (basic_block)place_p;
2094 rtx sum = ve->reg;
2095 rtx expr, seq, var, insn;
2096 unsigned i;
2097
2098 if (VEC_length (rtx, ve->var_expansions) == 0)
2099 return 1;
2100
2101 start_sequence ();
2102 if (ve->op == PLUS || ve->op == MINUS)
2103 for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
2104 {
2105 sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg),
2106 var, sum);
2107 }
2108 else if (ve->op == MULT)
2109 for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
2110 {
2111 sum = simplify_gen_binary (MULT, GET_MODE (ve->reg),
2112 var, sum);
2113 }
2114
2115 expr = force_operand (sum, ve->reg);
2116 if (expr != ve->reg)
2117 emit_move_insn (ve->reg, expr);
2118 seq = get_insns ();
2119 end_sequence ();
2120
2121 insn = BB_HEAD (place);
2122 while (!NOTE_INSN_BASIC_BLOCK_P (insn))
2123 insn = NEXT_INSN (insn);
2124
2125 emit_insn_after (seq, insn);
2126
2127 /* Continue traversing the hash table. */
2128 return 1;
2129 }
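/* Added illustration: with VE->op == PLUS and two expansions sum1 and sum2,
   the sequence emitted at the exit computes  sum = sum2 + (sum1 + sum);
   for MULT the expansions (initialized to 1 above) are multiplied into the
   result instead.  */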
2130
2131 /* Apply loop optimizations in loop copies using the
2132 data gathered during the unrolling. Structure
2133 OPT_INFO records that data.
2134
2135 UNROLLING is true if we unrolled (not peeled) the loop.
2136 REWRITE_ORIGINAL_LOOP is true if we should also rewrite the original body of
2137 the loop (as should happen in complete unrolling, but not in ordinary
2138 peeling of the loop). */
2139
2140 static void
2141 apply_opt_in_copies (struct opt_info *opt_info,
2142 unsigned n_copies, bool unrolling,
2143 bool rewrite_original_loop)
2144 {
2145 unsigned i, delta;
2146 basic_block bb, orig_bb;
2147 rtx insn, orig_insn, next;
2148 struct iv_to_split ivts_templ, *ivts;
2149 struct var_to_expand ve_templ, *ves;
2150
2151 /* Sanity check -- we need to put initialization in the original loop
2152 body. */
2153 gcc_assert (!unrolling || rewrite_original_loop);
2154
2155 /* Allocate the basic variables (i0). */
2156 if (opt_info->insns_to_split)
2157 htab_traverse (opt_info->insns_to_split, allocate_basic_variable, NULL);
2158
2159 for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
2160 {
2161 bb = BASIC_BLOCK (i);
2162 orig_bb = get_bb_original (bb);
2163
2164 /* bb->aux holds the position in the copy sequence, as initialized by
2165 duplicate_loop_to_header_edge. */
2166 delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
2167 unrolling);
2168 bb->aux = 0;
2169 orig_insn = BB_HEAD (orig_bb);
2170 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
2171 {
2172 next = NEXT_INSN (insn);
2173 if (!INSN_P (insn))
2174 continue;
2175
2176 while (!INSN_P (orig_insn))
2177 orig_insn = NEXT_INSN (orig_insn);
2178
2179 ivts_templ.insn = orig_insn;
2180 ve_templ.insn = orig_insn;
2181
2182 /* Apply splitting iv optimization. */
2183 if (opt_info->insns_to_split)
2184 {
2185 ivts = (struct iv_to_split *)
2186 htab_find (opt_info->insns_to_split, &ivts_templ);
2187
2188 if (ivts)
2189 {
2190 gcc_assert (GET_CODE (PATTERN (insn))
2191 == GET_CODE (PATTERN (orig_insn)));
2192
2193 if (!delta)
2194 insert_base_initialization (ivts, insn);
2195 split_iv (ivts, insn, delta);
2196 }
2197 }
2198 /* Apply variable expansion optimization. */
2199 if (unrolling && opt_info->insns_with_var_to_expand)
2200 {
2201 ves = (struct var_to_expand *)
2202 htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
2203 if (ves)
2204 {
2205 gcc_assert (GET_CODE (PATTERN (insn))
2206 == GET_CODE (PATTERN (orig_insn)));
2207 expand_var_during_unrolling (ves, insn);
2208 }
2209 }
2210 orig_insn = NEXT_INSN (orig_insn);
2211 }
2212 }
2213
2214 if (!rewrite_original_loop)
2215 return;
2216
2217 /* Initialize the variable expansions in the loop preheader
2218 and take care of combining them at the loop exit. */
2219 if (opt_info->insns_with_var_to_expand)
2220 {
2221 htab_traverse (opt_info->insns_with_var_to_expand,
2222 insert_var_expansion_initialization,
2223 opt_info->loop_preheader);
2224 htab_traverse (opt_info->insns_with_var_to_expand,
2225 combine_var_copies_in_loop_exit,
2226 opt_info->loop_exit);
2227 }
2228
2229 /* Also rewrite the original loop body. Find its blocks as the originals
2230 of the blocks in the last copied iteration, i.e. those that have
2231 get_bb_copy (get_bb_original (bb)) == bb. */
2232 for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
2233 {
2234 bb = BASIC_BLOCK (i);
2235 orig_bb = get_bb_original (bb);
2236 if (get_bb_copy (orig_bb) != bb)
2237 continue;
2238
2239 delta = determine_split_iv_delta (0, n_copies, unrolling);
2240 for (orig_insn = BB_HEAD (orig_bb);
2241 orig_insn != NEXT_INSN (BB_END (bb));
2242 orig_insn = next)
2243 {
2244 next = NEXT_INSN (orig_insn);
2245
2246 if (!INSN_P (orig_insn))
2247 continue;
2248
2249 ivts_templ.insn = orig_insn;
2250 if (opt_info->insns_to_split)
2251 {
2252 ivts = (struct iv_to_split *)
2253 htab_find (opt_info->insns_to_split, &ivts_templ);
2254 if (ivts)
2255 {
2256 if (!delta)
2257 insert_base_initialization (ivts, orig_insn);
2258 split_iv (ivts, orig_insn, delta);
2259 continue;
2260 }
2261 }
2262
2263 }
2264 }
2265 }
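/* Added summary sketch (hypothetical source-level view): for a loop such as
   for (i = 0; i < n; i++) sum += a[i];  unrolled by 4, IV splitting lets
   copy K address  a[base + K * step]  instead of depending on the serially
   updated i, and accumulator expansion gives each copy its own partial sum,
   initialized in the preheader and combined again at the loop exit.  */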
2266
2267 /* Release the data structures used for the variable expansion
2268 optimization. Callback for htab_traverse. */
2269
2270 static int
2271 release_var_copies (void **slot, void *data ATTRIBUTE_UNUSED)
2272 {
2273 struct var_to_expand *ve = (struct var_to_expand *) *slot;
2274
2275 VEC_free (rtx, heap, ve->var_expansions);
2276
2277 /* Continue traversing the hash table. */
2278 return 1;
2279 }
2280
2281 /* Release OPT_INFO. */
2282
2283 static void
2284 free_opt_info (struct opt_info *opt_info)
2285 {
2286 if (opt_info->insns_to_split)
2287 htab_delete (opt_info->insns_to_split);
2288 if (opt_info->insns_with_var_to_expand)
2289 {
2290 htab_traverse (opt_info->insns_with_var_to_expand,
2291 release_var_copies, NULL);
2292 htab_delete (opt_info->insns_with_var_to_expand);
2293 }
2294 free (opt_info);
2295 }