1 /* Loop unrolling and peeling.
2 Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hard-reg-set.h"
27 #include "obstack.h"
28 #include "basic-block.h"
29 #include "cfgloop.h"
30 #include "cfglayout.h"
31 #include "params.h"
32 #include "output.h"
33 #include "expr.h"
34 #include "hashtab.h"
35 #include "recog.h"
36
37 /* This pass performs loop unrolling and peeling.  We only perform these
38 optimizations on innermost loops (with a single exception) because
39 the impact on performance is greatest there, and we want to avoid
40 unnecessary code size growth.  The gain comes from greater sequentiality
41 of code, better opportunities for further optimization passes, and in
42 some cases from fewer tests of exit conditions.  The main problem is
43 code growth, which impacts performance negatively due to cache effects.
44
45 What we do:
46
47 -- complete peeling of once-rolling loops; this is the above-mentioned
48 exception, as it causes the loop to be cancelled completely and
49 does not cause code growth
50 -- complete peeling of loops that roll a (small) constant number of times.
51 -- simple peeling of the first iterations of loops that do not roll much
52 (according to profile feedback)
53 -- unrolling of loops that roll a constant number of times; this is almost
54 always a win, as we get rid of exit condition tests.
55 -- unrolling of loops that roll a number of times that we can compute
56 at runtime; we also get rid of exit condition tests here, but there
57 is the extra expense of calculating the number of iterations
58 -- simple unrolling of the remaining loops; this is performed only if we
59 are asked to, as the gain is questionable in this case and often
60 it may even slow down the code
61 For more detailed descriptions of each of these, see the comments at
62 the appropriate functions below.
63
64 There are a lot of parameters (defined and described in params.def) that
65 control how much we unroll/peel.
66
67 ??? A great problem is that we don't have a good way to determine
68 how many times we should unroll the loop; the experiments I have
69 made showed that this choice may affect performance on the order
70 of several percent.  */
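
/* For illustration, a caller would invoke the entry point below with
   some combination of the UAP_* flags tested throughout this file,
   e.g. (hypothetical driver code):

     unroll_and_peel_loops (UAP_UNROLL | UAP_PEEL);

   which enables unrolling of loops whose iteration count is provable
   (constant or runtime-computable) and simple peeling, but not the
   "stupid" unrolling gated by UAP_UNROLL_ALL.  */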
71
72 /* Information about induction variables to split. */
73
74 struct iv_to_split
75 {
76 rtx insn; /* The insn in which the induction variable occurs. */
77 rtx base_var; /* The variable on which the values in the further
78 iterations are based. */
79 rtx step; /* Step of the induction variable. */
80 unsigned n_loc;
81 unsigned loc[3]; /* Location where the definition of the induction
82 variable occurs in the insn. For example if
83 N_LOC is 2, the expression is located at
84 XEXP (XEXP (single_set, loc[0]), loc[1]). */
85 };
86
87 /* Information about accumulators to expand. */
88
89 struct var_to_expand
90 {
91 rtx insn; /* The insn in which the variable expansion occurs. */
92 rtx reg; /* The accumulator which is expanded. */
93 VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */
94 enum rtx_code op; /* The type of the accumulation - addition, subtraction
95 or multiplication. */
96 int expansion_count; /* Count the number of expansions generated so far. */
97 int reuse_expansion; /* The expansion we intend to reuse to expand
98 the accumulator. If REUSE_EXPANSION is 0 reuse
99 the original accumulator. Else use
100 var_expansions[REUSE_EXPANSION - 1]. */
101 unsigned accum_pos; /* The position in which the accumulator is placed in
102 the insn src. For example in x = x + something
103 accum_pos is 0 while in x = something + x accum_pos
104 is 1. */
105 };
106
107 /* Information about optimizations applied in
108 the unrolled loop. */
109
110 struct opt_info
111 {
112 htab_t insns_to_split; /* A hashtable of insns to split. */
113 htab_t insns_with_var_to_expand; /* A hashtable of insns with accumulators
114 to expand. */
115 unsigned first_new_block; /* The first basic block that was
116 duplicated. */
117 basic_block loop_exit; /* The loop exit basic block. */
118 basic_block loop_preheader; /* The loop preheader basic block. */
119 };
120
121 static void decide_unrolling_and_peeling (int);
122 static void peel_loops_completely (int);
123 static void decide_peel_simple (struct loop *, int);
124 static void decide_peel_once_rolling (struct loop *, int);
125 static void decide_peel_completely (struct loop *, int);
126 static void decide_unroll_stupid (struct loop *, int);
127 static void decide_unroll_constant_iterations (struct loop *, int);
128 static void decide_unroll_runtime_iterations (struct loop *, int);
129 static void peel_loop_simple (struct loop *);
130 static void peel_loop_completely (struct loop *);
131 static void unroll_loop_stupid (struct loop *);
132 static void unroll_loop_constant_iterations (struct loop *);
133 static void unroll_loop_runtime_iterations (struct loop *);
134 static struct opt_info *analyze_insns_in_loop (struct loop *);
135 static void opt_info_start_duplication (struct opt_info *);
136 static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
137 static void free_opt_info (struct opt_info *);
138 static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx);
139 static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx);
140 static struct iv_to_split *analyze_iv_to_split_insn (rtx);
141 static void expand_var_during_unrolling (struct var_to_expand *, rtx);
142 static int insert_var_expansion_initialization (void **, void *);
143 static int combine_var_copies_in_loop_exit (void **, void *);
144 static int release_var_copies (void **, void *);
145 static rtx get_expansion (struct var_to_expand *);
146
147 /* Unroll and/or peel (depending on FLAGS) LOOPS. */
148 void
149 unroll_and_peel_loops (int flags)
150 {
151 struct loop *loop;
152 bool check;
153 loop_iterator li;
154
155 /* First perform complete loop peeling (it is almost surely a win,
156 and it strongly affects the parameters for further decisions). */
157 peel_loops_completely (flags);
158
159 /* Now decide the rest of the unrolling and peeling. */
160 decide_unrolling_and_peeling (flags);
161
162 /* Scan the loops, inner ones first. */
163 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
164 {
165 check = true;
166 /* And perform the appropriate transformations. */
167 switch (loop->lpt_decision.decision)
168 {
169 case LPT_PEEL_COMPLETELY:
170 /* Already done. */
171 gcc_unreachable ();
172 case LPT_PEEL_SIMPLE:
173 peel_loop_simple (loop);
174 break;
175 case LPT_UNROLL_CONSTANT:
176 unroll_loop_constant_iterations (loop);
177 break;
178 case LPT_UNROLL_RUNTIME:
179 unroll_loop_runtime_iterations (loop);
180 break;
181 case LPT_UNROLL_STUPID:
182 unroll_loop_stupid (loop);
183 break;
184 case LPT_NONE:
185 check = false;
186 break;
187 default:
188 gcc_unreachable ();
189 }
190 if (check)
191 {
192 #ifdef ENABLE_CHECKING
193 verify_dominators (CDI_DOMINATORS);
194 verify_loop_structure ();
195 #endif
196 }
197 }
198
199 iv_analysis_done ();
200 }
201
202 /* Check whether the exit of LOOP is at the end of the loop body. */
203
204 static bool
205 loop_exit_at_end_p (struct loop *loop)
206 {
207 struct niter_desc *desc = get_simple_loop_desc (loop);
208 rtx insn;
209
210 if (desc->in_edge->dest != loop->latch)
211 return false;
212
213 /* Check that the latch is empty. */
214 FOR_BB_INSNS (loop->latch, insn)
215 {
216 if (INSN_P (insn))
217 return false;
218 }
219
220 return true;
221 }
222
223 /* Depending on FLAGS, check whether to peel loops completely and do so. */
224 static void
225 peel_loops_completely (int flags)
226 {
227 struct loop *loop;
228 loop_iterator li;
229
230 /* Scan the loops, the inner ones first. */
231 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
232 {
233 loop->lpt_decision.decision = LPT_NONE;
234
235 if (dump_file)
236 fprintf (dump_file,
237 "\n;; *** Considering loop %d for complete peeling ***\n",
238 loop->num);
239
240 loop->ninsns = num_loop_insns (loop);
241
242 decide_peel_once_rolling (loop, flags);
243 if (loop->lpt_decision.decision == LPT_NONE)
244 decide_peel_completely (loop, flags);
245
246 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
247 {
248 peel_loop_completely (loop);
249 #ifdef ENABLE_CHECKING
250 verify_dominators (CDI_DOMINATORS);
251 verify_loop_structure ();
252 #endif
253 }
254 }
255 }
256
257 /* Decide whether to unroll or peel loops (depending on FLAGS) and how much. */
258 static void
259 decide_unrolling_and_peeling (int flags)
260 {
261 struct loop *loop;
262 loop_iterator li;
263
264 /* Scan the loops, inner ones first. */
265 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
266 {
267 loop->lpt_decision.decision = LPT_NONE;
268
269 if (dump_file)
270 fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num);
271
272 /* Do not peel cold areas. */
273 if (!maybe_hot_bb_p (loop->header))
274 {
275 if (dump_file)
276 fprintf (dump_file, ";; Not considering loop, cold area\n");
277 continue;
278 }
279
280 /* Can the loop be manipulated? */
281 if (!can_duplicate_loop_p (loop))
282 {
283 if (dump_file)
284 fprintf (dump_file,
285 ";; Not considering loop, cannot duplicate\n");
286 continue;
287 }
288
289 /* Skip non-innermost loops. */
290 if (loop->inner)
291 {
292 if (dump_file)
293 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
294 continue;
295 }
296
297 loop->ninsns = num_loop_insns (loop);
298 loop->av_ninsns = average_num_loop_insns (loop);
299
300 /* Try transformations one by one in decreasing order of
301 priority. */
302
303 decide_unroll_constant_iterations (loop, flags);
304 if (loop->lpt_decision.decision == LPT_NONE)
305 decide_unroll_runtime_iterations (loop, flags);
306 if (loop->lpt_decision.decision == LPT_NONE)
307 decide_unroll_stupid (loop, flags);
308 if (loop->lpt_decision.decision == LPT_NONE)
309 decide_peel_simple (loop, flags);
310 }
311 }
312
313 /* Decide whether the LOOP is once rolling and suitable for complete
314 peeling. */
315 static void
316 decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED)
317 {
318 struct niter_desc *desc;
319
320 if (dump_file)
321 fprintf (dump_file, "\n;; Considering peeling once rolling loop\n");
322
323 /* Is the loop small enough? */
324 if ((unsigned) PARAM_VALUE (PARAM_MAX_ONCE_PEELED_INSNS) < loop->ninsns)
325 {
326 if (dump_file)
327 fprintf (dump_file, ";; Not considering loop, is too big\n");
328 return;
329 }
330
331 /* Check for simple loops. */
332 desc = get_simple_loop_desc (loop);
333
334 /* Check number of iterations. */
335 if (!desc->simple_p
336 || desc->assumptions
337 || desc->infinite
338 || !desc->const_iter
339 || desc->niter != 0)
340 {
341 if (dump_file)
342 fprintf (dump_file,
343 ";; Unable to prove that the loop rolls exactly once\n");
344 return;
345 }
346
347 /* Success. */
348 if (dump_file)
349 fprintf (dump_file, ";; Decided to peel exactly once rolling loop\n");
350 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
351 }
352
353 /* Decide whether the LOOP is suitable for complete peeling. */
354 static void
355 decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED)
356 {
357 unsigned npeel;
358 struct niter_desc *desc;
359
360 if (dump_file)
361 fprintf (dump_file, "\n;; Considering peeling completely\n");
362
363 /* Skip non-innermost loops. */
364 if (loop->inner)
365 {
366 if (dump_file)
367 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
368 return;
369 }
370
371 /* Do not peel cold areas. */
372 if (!maybe_hot_bb_p (loop->header))
373 {
374 if (dump_file)
375 fprintf (dump_file, ";; Not considering loop, cold area\n");
376 return;
377 }
378
379 /* Can the loop be manipulated? */
380 if (!can_duplicate_loop_p (loop))
381 {
382 if (dump_file)
383 fprintf (dump_file,
384 ";; Not considering loop, cannot duplicate\n");
385 return;
386 }
387
388 /* npeel = number of iterations to peel. */
389 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS) / loop->ninsns;
390 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
391 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
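
/* A worked example with hypothetical parameter values: if
   PARAM_MAX_COMPLETELY_PEELED_INSNS is 400 and the loop body has 25
   insns, npeel starts at 400 / 25 == 16 and is then clamped to
   PARAM_MAX_COMPLETELY_PEEL_TIMES; the checks below accept the loop
   only if its iteration count fits under this bound.  */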
392
393 /* Is the loop small enough? */
394 if (!npeel)
395 {
396 if (dump_file)
397 fprintf (dump_file, ";; Not considering loop, is too big\n");
398 return;
399 }
400
401 /* Check for simple loops. */
402 desc = get_simple_loop_desc (loop);
403
404 /* Check number of iterations. */
405 if (!desc->simple_p
406 || desc->assumptions
407 || !desc->const_iter
408 || desc->infinite)
409 {
410 if (dump_file)
411 fprintf (dump_file,
412 ";; Unable to prove that the loop iterates constant times\n");
413 return;
414 }
415
416 if (desc->niter > npeel - 1)
417 {
418 if (dump_file)
419 {
420 fprintf (dump_file,
421 ";; Not peeling loop completely, rolls too much (");
422 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
423 fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel);
424 }
425 return;
426 }
427
428 /* Success. */
429 if (dump_file)
430 fprintf (dump_file, ";; Decided to peel loop completely\n");
431 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
432 }
433
434 /* Peel all iterations of LOOP, remove the exit edges and cancel the loop
435 completely. The transformation performed:
436
437 for (i = 0; i < 4; i++)
438 body;
439
440 ==>
441
442 i = 0;
443 body; i++;
444 body; i++;
445 body; i++;
446 body; i++;
447 */
448 static void
449 peel_loop_completely (struct loop *loop)
450 {
451 sbitmap wont_exit;
452 unsigned HOST_WIDE_INT npeel;
453 unsigned i;
454 VEC (edge, heap) *remove_edges;
455 edge ein;
456 struct niter_desc *desc = get_simple_loop_desc (loop);
457 struct opt_info *opt_info = NULL;
458
459 npeel = desc->niter;
460
461 if (npeel)
462 {
463 bool ok;
464
465 wont_exit = sbitmap_alloc (npeel + 1);
466 sbitmap_ones (wont_exit);
467 RESET_BIT (wont_exit, 0);
468 if (desc->noloop_assumptions)
469 RESET_BIT (wont_exit, 1);
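
/* An informal sketch of the wont_exit convention used here and in the
   other transformations below: bit I set tells
   duplicate_loop_to_header_edge that the I-th copy of the loop body
   can never leave the loop through the exit test, so the
   corresponding exit edge may be removed.  The bits cleared above
   cover the copies that may still exit (the second one only when the
   loop may roll zero times).  */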
470
471 remove_edges = NULL;
472
473 if (flag_split_ivs_in_unroller)
474 opt_info = analyze_insns_in_loop (loop);
475
476 opt_info_start_duplication (opt_info);
477 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
478 npeel,
479 wont_exit, desc->out_edge,
480 &remove_edges,
481 DLTHE_FLAG_UPDATE_FREQ
482 | DLTHE_FLAG_COMPLETTE_PEEL
483 | (opt_info
484 ? DLTHE_RECORD_COPY_NUMBER : 0));
485 gcc_assert (ok);
486
487 free (wont_exit);
488
489 if (opt_info)
490 {
491 apply_opt_in_copies (opt_info, npeel, false, true);
492 free_opt_info (opt_info);
493 }
494
495 /* Remove the exit edges. */
496 for (i = 0; VEC_iterate (edge, remove_edges, i, ein); i++)
497 remove_path (ein);
498 VEC_free (edge, heap, remove_edges);
499 }
500
501 ein = desc->in_edge;
502 free_simple_loop_desc (loop);
503
504 /* Now remove the unreachable part of the last iteration and cancel
505 the loop. */
506 remove_path (ein);
507
508 if (dump_file)
509 fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel);
510 }
511
512 /* Decide whether to unroll LOOP iterating a constant number of times
513 and how much. */
514
515 static void
516 decide_unroll_constant_iterations (struct loop *loop, int flags)
517 {
518 unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
519 struct niter_desc *desc;
520
521 if (!(flags & UAP_UNROLL))
522 {
523 /* We were not asked to, so just return silently. */
524 return;
525 }
526
527 if (dump_file)
528 fprintf (dump_file,
529 "\n;; Considering unrolling loop with constant "
530 "number of iterations\n");
531
532 /* nunroll = total number of copies of the original loop body in the
533 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
534 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
535 nunroll_by_av
536 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
537 if (nunroll > nunroll_by_av)
538 nunroll = nunroll_by_av;
539 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
540 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
541
542 /* Skip big loops. */
543 if (nunroll <= 1)
544 {
545 if (dump_file)
546 fprintf (dump_file, ";; Not considering loop, is too big\n");
547 return;
548 }
549
550 /* Check for simple loops. */
551 desc = get_simple_loop_desc (loop);
552
553 /* Check number of iterations. */
554 if (!desc->simple_p || !desc->const_iter || desc->assumptions)
555 {
556 if (dump_file)
557 fprintf (dump_file,
558 ";; Unable to prove that the loop iterates constant times\n");
559 return;
560 }
561
562 /* Check whether the loop rolls enough to consider. */
563 if (desc->niter < 2 * nunroll)
564 {
565 if (dump_file)
566 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
567 return;
568 }
569
570 /* Success; now compute the number of iterations to unroll. We alter
571 nunroll so that as few copies of the loop body as possible are
572 necessary, while still not decreasing the number of unrollings
573 too much (at most by 1). */
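
/* A worked example with hypothetical numbers: suppose niter == 100,
   nunroll == 8, and the exit test is not at the end of the loop body.
   The loop below scans candidates i (unroll factor i + 1) from 18
   down to 7; for i == 9 we get exit_mod == 100 % 10 == 0, so only
   exit_mod + i + 1 == 10 copies of the body are needed, the minimum,
   and best_unroll becomes 9 (an unroll factor of 10).  */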
574 best_copies = 2 * nunroll + 10;
575
576 i = 2 * nunroll + 2;
577 if (i - 1 >= desc->niter)
578 i = desc->niter - 2;
579
580 for (; i >= nunroll - 1; i--)
581 {
582 unsigned exit_mod = desc->niter % (i + 1);
583
584 if (!loop_exit_at_end_p (loop))
585 n_copies = exit_mod + i + 1;
586 else if (exit_mod != (unsigned) i
587 || desc->noloop_assumptions != NULL_RTX)
588 n_copies = exit_mod + i + 2;
589 else
590 n_copies = i + 1;
591
592 if (n_copies < best_copies)
593 {
594 best_copies = n_copies;
595 best_unroll = i;
596 }
597 }
598
599 if (dump_file)
600 fprintf (dump_file, ";; max_unroll %d (%d copies, initial %d).\n",
601 best_unroll + 1, best_copies, nunroll);
602
603 loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
604 loop->lpt_decision.times = best_unroll;
605
606 if (dump_file)
607 fprintf (dump_file,
608 ";; Decided to unroll the constant times rolling loop, %d times.\n",
609 loop->lpt_decision.times);
610 }
611
612 /* Unroll LOOP, which iterates a constant number of times, by the factor
613 LOOP->LPT_DECISION.TIMES + 1. The transformation does this:
614
615 for (i = 0; i < 102; i++)
616 body;
617
618 ==>
619
620 i = 0;
621 body; i++;
622 body; i++;
623 while (i < 102)
624 {
625 body; i++;
626 body; i++;
627 body; i++;
628 body; i++;
629 }
630 */
631 static void
632 unroll_loop_constant_iterations (struct loop *loop)
633 {
634 unsigned HOST_WIDE_INT niter;
635 unsigned exit_mod;
636 sbitmap wont_exit;
637 unsigned i;
638 VEC (edge, heap) *remove_edges;
639 edge e;
640 unsigned max_unroll = loop->lpt_decision.times;
641 struct niter_desc *desc = get_simple_loop_desc (loop);
642 bool exit_at_end = loop_exit_at_end_p (loop);
643 struct opt_info *opt_info = NULL;
644 bool ok;
645
646 niter = desc->niter;
647
648 /* Should not get here (such a loop should be peeled instead). */
649 gcc_assert (niter > max_unroll + 1);
650
651 exit_mod = niter % (max_unroll + 1);
652
653 wont_exit = sbitmap_alloc (max_unroll + 1);
654 sbitmap_ones (wont_exit);
655
656 remove_edges = NULL;
657 if (flag_split_ivs_in_unroller
658 || flag_variable_expansion_in_unroller)
659 opt_info = analyze_insns_in_loop (loop);
660
661 if (!exit_at_end)
662 {
663 /* The exit is not at the end of the loop; leave the exit test
664 in the first copy, so that loops that start with a test of the
665 exit condition have a continuous body after unrolling. */
666
667 if (dump_file)
668 fprintf (dump_file, ";; Condition on beginning of loop.\n");
669
670 /* Peel exit_mod iterations. */
671 RESET_BIT (wont_exit, 0);
672 if (desc->noloop_assumptions)
673 RESET_BIT (wont_exit, 1);
674
675 if (exit_mod)
676 {
677 opt_info_start_duplication (opt_info);
678 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
679 exit_mod,
680 wont_exit, desc->out_edge,
681 &remove_edges,
682 DLTHE_FLAG_UPDATE_FREQ
683 | (opt_info && exit_mod > 1
684 ? DLTHE_RECORD_COPY_NUMBER
685 : 0));
686 gcc_assert (ok);
687
688 if (opt_info && exit_mod > 1)
689 apply_opt_in_copies (opt_info, exit_mod, false, false);
690
691 desc->noloop_assumptions = NULL_RTX;
692 desc->niter -= exit_mod;
693 desc->niter_max -= exit_mod;
694 }
695
696 SET_BIT (wont_exit, 1);
697 }
698 else
699 {
700 /* Leave the exit test in the last copy, for the same reason as above, if
701 the loop tests the condition at the end of the loop body. */
702
703 if (dump_file)
704 fprintf (dump_file, ";; Condition on end of loop.\n");
705
706 /* We know that niter >= max_unroll + 2, so we do not need to care about
707 the case where we would exit before reaching the loop. So just peel
708 exit_mod + 1 iterations. */
709 if (exit_mod != max_unroll
710 || desc->noloop_assumptions)
711 {
712 RESET_BIT (wont_exit, 0);
713 if (desc->noloop_assumptions)
714 RESET_BIT (wont_exit, 1);
715
716 opt_info_start_duplication (opt_info);
717 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
718 exit_mod + 1,
719 wont_exit, desc->out_edge,
720 &remove_edges,
721 DLTHE_FLAG_UPDATE_FREQ
722 | (opt_info && exit_mod > 0
723 ? DLTHE_RECORD_COPY_NUMBER
724 : 0));
725 gcc_assert (ok);
726
727 if (opt_info && exit_mod > 0)
728 apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
729
730 desc->niter -= exit_mod + 1;
731 desc->niter_max -= exit_mod + 1;
732 desc->noloop_assumptions = NULL_RTX;
733
734 SET_BIT (wont_exit, 0);
735 SET_BIT (wont_exit, 1);
736 }
737
738 RESET_BIT (wont_exit, max_unroll);
739 }
740
741 /* Now unroll the loop. */
742
743 opt_info_start_duplication (opt_info);
744 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
745 max_unroll,
746 wont_exit, desc->out_edge,
747 &remove_edges,
748 DLTHE_FLAG_UPDATE_FREQ
749 | (opt_info
750 ? DLTHE_RECORD_COPY_NUMBER
751 : 0));
752 gcc_assert (ok);
753
754 if (opt_info)
755 {
756 apply_opt_in_copies (opt_info, max_unroll, true, true);
757 free_opt_info (opt_info);
758 }
759
760 free (wont_exit);
761
762 if (exit_at_end)
763 {
764 basic_block exit_block = get_bb_copy (desc->in_edge->src);
765 /* Find the new in and out edges; they are in the last copy we have made. */
766
767 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
768 {
769 desc->out_edge = EDGE_SUCC (exit_block, 0);
770 desc->in_edge = EDGE_SUCC (exit_block, 1);
771 }
772 else
773 {
774 desc->out_edge = EDGE_SUCC (exit_block, 1);
775 desc->in_edge = EDGE_SUCC (exit_block, 0);
776 }
777 }
778
779 desc->niter /= max_unroll + 1;
780 desc->niter_max /= max_unroll + 1;
781 desc->niter_expr = GEN_INT (desc->niter);
782
783 /* Remove the edges. */
784 for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++)
785 remove_path (e);
786 VEC_free (edge, heap, remove_edges);
787
788 if (dump_file)
789 fprintf (dump_file,
790 ";; Unrolled loop %d times, constant # of iterations %i insns\n",
791 max_unroll, num_loop_insns (loop));
792 }
793
794 /* Decide whether to unroll LOOP iterating a runtime-computable number of times
795 and how much. */
796 static void
797 decide_unroll_runtime_iterations (struct loop *loop, int flags)
798 {
799 unsigned nunroll, nunroll_by_av, i;
800 struct niter_desc *desc;
801
802 if (!(flags & UAP_UNROLL))
803 {
804 /* We were not asked to, so just return silently. */
805 return;
806 }
807
808 if (dump_file)
809 fprintf (dump_file,
810 "\n;; Considering unrolling loop with runtime "
811 "computable number of iterations\n");
812
813 /* nunroll = total number of copies of the original loop body in the
814 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
815 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
816 nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
817 if (nunroll > nunroll_by_av)
818 nunroll = nunroll_by_av;
819 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
820 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
821
822 /* Skip big loops. */
823 if (nunroll <= 1)
824 {
825 if (dump_file)
826 fprintf (dump_file, ";; Not considering loop, is too big\n");
827 return;
828 }
829
830 /* Check for simple loops. */
831 desc = get_simple_loop_desc (loop);
832
833 /* Check simpleness. */
834 if (!desc->simple_p || desc->assumptions)
835 {
836 if (dump_file)
837 fprintf (dump_file,
838 ";; Unable to prove that the number of iterations "
839 "can be counted in runtime\n");
840 return;
841 }
842
843 if (desc->const_iter)
844 {
845 if (dump_file)
846 fprintf (dump_file, ";; Loop iterates constant times\n");
847 return;
848 }
849
850 /* If we have profile feedback, check whether the loop rolls. */
851 if (loop->header->count && expected_loop_iterations (loop) < 2 * nunroll)
852 {
853 if (dump_file)
854 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
855 return;
856 }
857
858 /* Success; now force nunroll to be a power of 2, as we are unable to
859 cope with overflows in the computation of the number of iterations. */
860 for (i = 1; 2 * i <= nunroll; i *= 2)
861 continue;
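
/* E.g. with nunroll == 7 the loop above leaves i == 4, the largest
   power of 2 not exceeding nunroll, so the decided unroll factor
   (lpt_decision.times + 1) is 4.  */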
862
863 loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
864 loop->lpt_decision.times = i - 1;
865
866 if (dump_file)
867 fprintf (dump_file,
868 ";; Decided to unroll the runtime computable "
869 "times rolling loop, %d times.\n",
870 loop->lpt_decision.times);
871 }
872
873 /* Splits edge E and inserts the sequence of instructions INSNS on it, and
874 returns the newly created block. If INSNS is NULL_RTX, nothing is changed
875 and NULL is returned instead. */
876
877 basic_block
878 split_edge_and_insert (edge e, rtx insns)
879 {
880 basic_block bb;
881
882 if (!insns)
883 return NULL;
884 bb = split_edge (e);
885 emit_insn_after (insns, BB_END (bb));
886
887 /* ??? We used to assume that INSNS can contain control flow insns, and
888 that we had to try to find sub basic blocks in BB to maintain a valid
889 CFG. For this purpose we used to set the BB_SUPERBLOCK flag on BB
890 and call break_superblocks when going out of cfglayout mode. But it
891 turns out that this never happens; and that if it does ever happen,
892 the verify_flow_info call in loop_optimizer_finalize would fail.
893
894 There are two reasons why we expected we could have control flow insns
895 in INSNS. The first is when a comparison has to be done in parts, and
896 the second is when the number of iterations is computed for loops with
897 the number of iterations known at runtime. In both cases, test cases
898 to get control flow in INSNS appear to be impossible to construct:
899
900 * If do_compare_rtx_and_jump needs several branches to do comparison
901 in a mode that needs comparison by parts, we cannot analyze the
902 number of iterations of the loop, and we never get to unrolling it.
903
904 * The code in expand_divmod that was suspected to cause creation of
905 branching code seems to be only accessed for signed division. The
906 divisions used by # of iterations analysis are always unsigned.
907 Problems might arise on architectures that emit branching code
908 for some operations that may appear in the unroller (especially
909 for division), but we have no such architectures.
910
911 Considering all this, it was decided that we should for now assume
912 that INSNS can in theory contain control flow insns, but in practice
913 it never does. So we don't handle the theoretical case, and should
914 a real failure ever show up, we have a pretty good clue for how to
915 fix it. */
916
917 return bb;
918 }
919
920 /* Unroll LOOP, for which we are able to count the number of iterations
921 at runtime, LOOP->LPT_DECISION.TIMES + 1 times. The transformation does
922 this (with some extra care for the case n < 0):
923
924 for (i = 0; i < n; i++)
925 body;
926
927 ==>
928
929 i = 0;
930 mod = n % 4;
931
932 switch (mod)
933 {
934 case 3:
935 body; i++;
936 case 2:
937 body; i++;
938 case 1:
939 body; i++;
940 case 0: ;
941 }
942
943 while (i < n)
944 {
945 body; i++;
946 body; i++;
947 body; i++;
948 body; i++;
949 }
950 */
951 static void
952 unroll_loop_runtime_iterations (struct loop *loop)
953 {
954 rtx old_niter, niter, init_code, branch_code, tmp;
955 unsigned i, j, p;
956 basic_block preheader, *body, swtch, ezc_swtch;
957 VEC (basic_block, heap) *dom_bbs;
958 sbitmap wont_exit;
959 int may_exit_copy;
960 unsigned n_peel;
961 VEC (edge, heap) *remove_edges;
962 edge e;
963 bool extra_zero_check, last_may_exit;
964 unsigned max_unroll = loop->lpt_decision.times;
965 struct niter_desc *desc = get_simple_loop_desc (loop);
966 bool exit_at_end = loop_exit_at_end_p (loop);
967 struct opt_info *opt_info = NULL;
968 bool ok;
969
970 if (flag_split_ivs_in_unroller
971 || flag_variable_expansion_in_unroller)
972 opt_info = analyze_insns_in_loop (loop);
973
974 /* Remember blocks whose dominators will have to be updated. */
975 dom_bbs = NULL;
976
977 body = get_loop_body (loop);
978 for (i = 0; i < loop->num_nodes; i++)
979 {
980 VEC (basic_block, heap) *ldom;
981 basic_block bb;
982
983 ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
984 for (j = 0; VEC_iterate (basic_block, ldom, j, bb); j++)
985 if (!flow_bb_inside_loop_p (loop, bb))
986 VEC_safe_push (basic_block, heap, dom_bbs, bb);
987
988 VEC_free (basic_block, heap, ldom);
989 }
990 free (body);
991
992 if (!exit_at_end)
993 {
994 /* Leave exit in first copy (for explanation why see comment in
995 unroll_loop_constant_iterations). */
996 may_exit_copy = 0;
997 n_peel = max_unroll - 1;
998 extra_zero_check = true;
999 last_may_exit = false;
1000 }
1001 else
1002 {
1003 /* Leave exit in last copy (for explanation why see comment in
1004 unroll_loop_constant_iterations). */
1005 may_exit_copy = max_unroll;
1006 n_peel = max_unroll;
1007 extra_zero_check = false;
1008 last_may_exit = true;
1009 }
1010
1011 /* Get expression for number of iterations. */
1012 start_sequence ();
1013 old_niter = niter = gen_reg_rtx (desc->mode);
1014 tmp = force_operand (copy_rtx (desc->niter_expr), niter);
1015 if (tmp != niter)
1016 emit_move_insn (niter, tmp);
1017
1018 /* Compute the modulo by ANDing niter with max_unroll; we use the fact
1019 that the number of unrollings is a power of two, and thus this is
1020 correct even if there is overflow in the computation. */
1021 niter = expand_simple_binop (desc->mode, AND,
1022 niter,
1023 GEN_INT (max_unroll),
1024 NULL_RTX, 0, OPTAB_LIB_WIDEN);
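
/* For example, with an unroll factor of 4 (max_unroll == 3),
   niter & 3 equals niter % 4 for any unsigned value, even if the
   expression computing niter wrapped around.  */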
1025
1026 init_code = get_insns ();
1027 end_sequence ();
1028
1029 /* Precondition the loop. */
1030 split_edge_and_insert (loop_preheader_edge (loop), init_code);
1031
1032 remove_edges = NULL;
1033
1034 wont_exit = sbitmap_alloc (max_unroll + 2);
1035
1036 /* Peel the first copy of the loop body (almost always we must leave the
1037 exit test here; the only exception is when we have an extra zero check
1038 and the number of iterations is reliable). Also record the place of
1039 the (possible) extra zero check. */
1040 sbitmap_zero (wont_exit);
1041 if (extra_zero_check
1042 && !desc->noloop_assumptions)
1043 SET_BIT (wont_exit, 1);
1044 ezc_swtch = loop_preheader_edge (loop)->src;
1045 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1046 1, wont_exit, desc->out_edge,
1047 &remove_edges,
1048 DLTHE_FLAG_UPDATE_FREQ);
1049 gcc_assert (ok);
1050
1051 /* Record the place where the switch will be built for preconditioning. */
1052 swtch = split_edge (loop_preheader_edge (loop));
1053
1054 for (i = 0; i < n_peel; i++)
1055 {
1056 /* Peel the copy. */
1057 sbitmap_zero (wont_exit);
1058 if (i != n_peel - 1 || !last_may_exit)
1059 SET_BIT (wont_exit, 1);
1060 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1061 1, wont_exit, desc->out_edge,
1062 &remove_edges,
1063 DLTHE_FLAG_UPDATE_FREQ);
1064 gcc_assert (ok);
1065
1066 /* Create item for switch. */
1067 j = n_peel - i - (extra_zero_check ? 0 : 1);
1068 p = REG_BR_PROB_BASE / (i + 2);
1069
1070 preheader = split_edge (loop_preheader_edge (loop));
1071 branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
1072 block_label (preheader), p,
1073 NULL_RTX);
1074
1075 /* We rely on the fact that the compare and jump cannot be optimized out,
1076 and hence the cfg we create is correct. */
1077 gcc_assert (branch_code != NULL_RTX);
1078
1079 swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
1080 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
1081 single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
1082 e = make_edge (swtch, preheader,
1083 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
1084 e->probability = p;
1085 }
1086
1087 if (extra_zero_check)
1088 {
1089 /* Add branch for zero iterations. */
1090 p = REG_BR_PROB_BASE / (max_unroll + 1);
1091 swtch = ezc_swtch;
1092 preheader = split_edge (loop_preheader_edge (loop));
1093 branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
1094 block_label (preheader), p,
1095 NULL_RTX);
1096 gcc_assert (branch_code != NULL_RTX);
1097
1098 swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
1099 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
1100 single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
1101 e = make_edge (swtch, preheader,
1102 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
1103 e->probability = p;
1104 }
1105
1106 /* Recount dominators for outer blocks. */
1107 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
1108
1109 /* And unroll loop. */
1110
1111 sbitmap_ones (wont_exit);
1112 RESET_BIT (wont_exit, may_exit_copy);
1113 opt_info_start_duplication (opt_info);
1114
1115 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
1116 max_unroll,
1117 wont_exit, desc->out_edge,
1118 &remove_edges,
1119 DLTHE_FLAG_UPDATE_FREQ
1120 | (opt_info
1121 ? DLTHE_RECORD_COPY_NUMBER
1122 : 0));
1123 gcc_assert (ok);
1124
1125 if (opt_info)
1126 {
1127 apply_opt_in_copies (opt_info, max_unroll, true, true);
1128 free_opt_info (opt_info);
1129 }
1130
1131 free (wont_exit);
1132
1133 if (exit_at_end)
1134 {
1135 basic_block exit_block = get_bb_copy (desc->in_edge->src);
1136 /* Find the new in and out edges; they are in the last copy we have
1137 made. */
1138
1139 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
1140 {
1141 desc->out_edge = EDGE_SUCC (exit_block, 0);
1142 desc->in_edge = EDGE_SUCC (exit_block, 1);
1143 }
1144 else
1145 {
1146 desc->out_edge = EDGE_SUCC (exit_block, 1);
1147 desc->in_edge = EDGE_SUCC (exit_block, 0);
1148 }
1149 }
1150
1151 /* Remove the edges. */
1152 for (i = 0; VEC_iterate (edge, remove_edges, i, e); i++)
1153 remove_path (e);
1154 VEC_free (edge, heap, remove_edges);
1155
1156 /* We must be careful when updating the number of iterations due to
1157 preconditioning and the fact that the value must be valid at the
1158 entry of the loop. After passing through the above code, we see
1159 that the correct new number of iterations is this: */
1160 gcc_assert (!desc->const_iter);
1161 desc->niter_expr =
1162 simplify_gen_binary (UDIV, desc->mode, old_niter,
1163 GEN_INT (max_unroll + 1));
1164 desc->niter_max /= max_unroll + 1;
1165 if (exit_at_end)
1166 {
1167 desc->niter_expr =
1168 simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
1169 desc->noloop_assumptions = NULL_RTX;
1170 desc->niter_max--;
1171 }
1172
1173 if (dump_file)
1174 fprintf (dump_file,
1175 ";; Unrolled loop %d times, counting # of iterations "
1176 "in runtime, %i insns\n",
1177 max_unroll, num_loop_insns (loop));
1178
1179 VEC_free (basic_block, heap, dom_bbs);
1180 }
1181
1182 /* Decide whether to simply peel LOOP and how much. */
1183 static void
1184 decide_peel_simple (struct loop *loop, int flags)
1185 {
1186 unsigned npeel;
1187 struct niter_desc *desc;
1188
1189 if (!(flags & UAP_PEEL))
1190 {
1191 /* We were not asked to, so just return silently. */
1192 return;
1193 }
1194
1195 if (dump_file)
1196 fprintf (dump_file, "\n;; Considering simply peeling loop\n");
1197
1198 /* npeel = number of iterations to peel. */
1199 npeel = PARAM_VALUE (PARAM_MAX_PEELED_INSNS) / loop->ninsns;
1200 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_PEEL_TIMES))
1201 npeel = PARAM_VALUE (PARAM_MAX_PEEL_TIMES);
1202
1203 /* Skip big loops. */
1204 if (!npeel)
1205 {
1206 if (dump_file)
1207 fprintf (dump_file, ";; Not considering loop, is too big\n");
1208 return;
1209 }
1210
1211 /* Check for simple loops. */
1212 desc = get_simple_loop_desc (loop);
1213
1214 /* Check number of iterations. */
1215 if (desc->simple_p && !desc->assumptions && desc->const_iter)
1216 {
1217 if (dump_file)
1218 fprintf (dump_file, ";; Loop iterates constant times\n");
1219 return;
1220 }
1221
1222 /* Do not simply peel loops with branches inside -- it increases the number
1223 of mispredicts. */
1224 if (num_loop_branches (loop) > 1)
1225 {
1226 if (dump_file)
1227 fprintf (dump_file, ";; Not peeling, contains branches\n");
1228 return;
1229 }
1230
1231 if (loop->header->count)
1232 {
1233 unsigned niter = expected_loop_iterations (loop);
1234 if (niter + 1 > npeel)
1235 {
1236 if (dump_file)
1237 {
1238 fprintf (dump_file, ";; Not peeling loop, rolls too much (");
1239 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1240 (HOST_WIDEST_INT) (niter + 1));
1241 fprintf (dump_file, " iterations > %d [maximum peelings])\n",
1242 npeel);
1243 }
1244 return;
1245 }
1246 npeel = niter + 1;
1247 }
1248 else
1249 {
1250 /* For now we have no good heuristics to decide whether loop peeling
1251 will be effective, so disable it. */
1252 if (dump_file)
1253 fprintf (dump_file,
1254 ";; Not peeling loop, no evidence it will be profitable\n");
1255 return;
1256 }
1257
1258 /* Success. */
1259 loop->lpt_decision.decision = LPT_PEEL_SIMPLE;
1260 loop->lpt_decision.times = npeel;
1261
1262 if (dump_file)
1263 fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n",
1264 loop->lpt_decision.times);
1265 }
1266
1267 /* Peel LOOP LOOP->LPT_DECISION.TIMES times. The transformation:
1268 while (cond)
1269 body;
1270
1271 ==>
1272
1273 if (!cond) goto end;
1274 body;
1275 if (!cond) goto end;
1276 body;
1277 while (cond)
1278 body;
1279 end: ;
1280 */
1281 static void
1282 peel_loop_simple (struct loop *loop)
1283 {
1284 sbitmap wont_exit;
1285 unsigned npeel = loop->lpt_decision.times;
1286 struct niter_desc *desc = get_simple_loop_desc (loop);
1287 struct opt_info *opt_info = NULL;
1288 bool ok;
1289
1290 if (flag_split_ivs_in_unroller && npeel > 1)
1291 opt_info = analyze_insns_in_loop (loop);
1292
1293 wont_exit = sbitmap_alloc (npeel + 1);
1294 sbitmap_zero (wont_exit);
1295
1296 opt_info_start_duplication (opt_info);
1297
1298 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1299 npeel, wont_exit, NULL,
1300 NULL, DLTHE_FLAG_UPDATE_FREQ
1301 | (opt_info
1302 ? DLTHE_RECORD_COPY_NUMBER
1303 : 0));
1304 gcc_assert (ok);
1305
1306 free (wont_exit);
1307
1308 if (opt_info)
1309 {
1310 apply_opt_in_copies (opt_info, npeel, false, false);
1311 free_opt_info (opt_info);
1312 }
1313
1314 if (desc->simple_p)
1315 {
1316 if (desc->const_iter)
1317 {
1318 desc->niter -= npeel;
1319 desc->niter_expr = GEN_INT (desc->niter);
1320 desc->noloop_assumptions = NULL_RTX;
1321 }
1322 else
1323 {
1324 /* We cannot just update niter_expr, as its value might be clobbered
1325 inside the loop. We could handle this by counting the number into a
1326 temporary just like we do in runtime unrolling, but it does not
1327 seem worthwhile. */
1328 free_simple_loop_desc (loop);
1329 }
1330 }
1331 if (dump_file)
1332 fprintf (dump_file, ";; Peeling loop %d times\n", npeel);
1333 }
1334
1335 /* Decide whether to unroll LOOP stupidly and how much. */
1336 static void
1337 decide_unroll_stupid (struct loop *loop, int flags)
1338 {
1339 unsigned nunroll, nunroll_by_av, i;
1340 struct niter_desc *desc;
1341
1342 if (!(flags & UAP_UNROLL_ALL))
1343 {
1344 /* We were not asked to, so just return silently. */
1345 return;
1346 }
1347
1348 if (dump_file)
1349 fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");
1350
1351 /* nunroll = total number of copies of the original loop body in the
1352 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
1353 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
1354 nunroll_by_av
1355 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
1356 if (nunroll > nunroll_by_av)
1357 nunroll = nunroll_by_av;
1358 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
1359 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
1360
1361 /* Skip big loops. */
1362 if (nunroll <= 1)
1363 {
1364 if (dump_file)
1365 fprintf (dump_file, ";; Not considering loop, is too big\n");
1366 return;
1367 }
1368
1369 /* Check for simple loops. */
1370 desc = get_simple_loop_desc (loop);
1371
1372 /* Check simpleness. */
1373 if (desc->simple_p && !desc->assumptions)
1374 {
1375 if (dump_file)
1376 fprintf (dump_file, ";; The loop is simple\n");
1377 return;
1378 }
1379
1380 /* Do not unroll loops with branches inside -- it increases the number
1381 of mispredicts. */
1382 if (num_loop_branches (loop) > 1)
1383 {
1384 if (dump_file)
1385 fprintf (dump_file, ";; Not unrolling, contains branches\n");
1386 return;
1387 }
1388
1389 /* If we have profile feedback, check whether the loop rolls. */
1390 if (loop->header->count
1391 && expected_loop_iterations (loop) < 2 * nunroll)
1392 {
1393 if (dump_file)
1394 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
1395 return;
1396 }
1397
1398 /* Success. Now force nunroll to be a power of 2, as it seems that this
1399 improves results (partially because of better alignments, partially
1400 because of some dark magic). */
1401 for (i = 1; 2 * i <= nunroll; i *= 2)
1402 continue;
1403
1404 loop->lpt_decision.decision = LPT_UNROLL_STUPID;
1405 loop->lpt_decision.times = i - 1;
1406
1407 if (dump_file)
1408 fprintf (dump_file,
1409 ";; Decided to unroll the loop stupidly, %d times.\n",
1410 loop->lpt_decision.times);
1411 }
1412
1413 /* Unroll LOOP LOOP->LPT_DECISION.TIMES times. The transformation:
1414 while (cond)
1415 body;
1416
1417 ==>
1418
1419 while (cond)
1420 {
1421 body;
1422 if (!cond) break;
1423 body;
1424 if (!cond) break;
1425 body;
1426 if (!cond) break;
1427 body;
1428 }
1429 */
1430 static void
1431 unroll_loop_stupid (struct loop *loop)
1432 {
1433 sbitmap wont_exit;
1434 unsigned nunroll = loop->lpt_decision.times;
1435 struct niter_desc *desc = get_simple_loop_desc (loop);
1436 struct opt_info *opt_info = NULL;
1437 bool ok;
1438
1439 if (flag_split_ivs_in_unroller
1440 || flag_variable_expansion_in_unroller)
1441 opt_info = analyze_insns_in_loop (loop);
1442
1443
1444 wont_exit = sbitmap_alloc (nunroll + 1);
1445 sbitmap_zero (wont_exit);
1446 opt_info_start_duplication (opt_info);
1447
1448 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
1449 nunroll, wont_exit,
1450 NULL, NULL,
1451 DLTHE_FLAG_UPDATE_FREQ
1452 | (opt_info
1453 ? DLTHE_RECORD_COPY_NUMBER
1454 : 0));
1455 gcc_assert (ok);
1456
1457 if (opt_info)
1458 {
1459 apply_opt_in_copies (opt_info, nunroll, true, true);
1460 free_opt_info (opt_info);
1461 }
1462
1463 free (wont_exit);
1464
1465 if (desc->simple_p)
1466 {
1467 /* We indeed may get here provided that there are nontrivial assumptions
1468 for a loop to be really simple. We could update the counts, but the
1469 problem is that we are unable to decide which exit will be taken
1470 (not really true when the number of iterations is constant,
1471 but no one will do anything with this information, so we do not
1472 worry about it). */
1473 desc->simple_p = false;
1474 }
1475
1476 if (dump_file)
1477 fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
1478 nunroll, num_loop_insns (loop));
1479 }
1480
1481 /* A hash function for information about insns to split. */
1482
1483 static hashval_t
1484 si_info_hash (const void *ivts)
1485 {
1486 return (hashval_t) INSN_UID (((struct iv_to_split *) ivts)->insn);
1487 }
1488
1489 /* An equality function for information about insns to split. */
1490
1491 static int
1492 si_info_eq (const void *ivts1, const void *ivts2)
1493 {
1494 const struct iv_to_split *i1 = ivts1;
1495 const struct iv_to_split *i2 = ivts2;
1496
1497 return i1->insn == i2->insn;
1498 }
1499
1500 /* Return a hash for VES, which is really a "var_to_expand *". */
1501
1502 static hashval_t
1503 ve_info_hash (const void *ves)
1504 {
1505 return (hashval_t) INSN_UID (((struct var_to_expand *) ves)->insn);
1506 }
1507
1508 /* Return true if IVTS1 and IVTS2 (which are really both of type
1509 "var_to_expand *") refer to the same instruction. */
1510
1511 static int
1512 ve_info_eq (const void *ivts1, const void *ivts2)
1513 {
1514 const struct var_to_expand *i1 = ivts1;
1515 const struct var_to_expand *i2 = ivts2;
1516
1517 return i1->insn == i2->insn;
1518 }
1519
1520 /* Returns true if REG is referenced in exactly one insn in LOOP. */
1521
1522 bool
1523 referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg)
1524 {
1525 basic_block *body, bb;
1526 unsigned i;
1527 int count_ref = 0;
1528 rtx insn;
1529
1530 body = get_loop_body (loop);
1531 for (i = 0; i < loop->num_nodes; i++)
1532 {
1533 bb = body[i];
1534
1535 FOR_BB_INSNS (bb, insn)
1536 {
1537 if (rtx_referenced_p (reg, insn))
1538 count_ref++;
1539 }
1540 }
1541 return (count_ref == 1);
1542 }
1543
1544 /* Determine whether INSN contains an accumulator
1545 which can be expanded into separate copies,
1546 one for each copy of the LOOP body.
1547
1548 for (i = 0 ; i < n; i++)
1549 sum += a[i];
1550
1551 ==>
1552
1553 sum += a[i]
1554 ....
1555 i = i+1;
1556 sum1 += a[i]
1557 ....
1558 i = i+1
1559 sum2 += a[i];
1560 ....
1561
1562 Return NULL if INSN contains no opportunity for expansion of an accumulator.
1563 Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
1564 information and return a pointer to it.
1565 */
1566
1567 static struct var_to_expand *
1568 analyze_insn_to_expand_var (struct loop *loop, rtx insn)
1569 {
1570 rtx set, dest, src, op1, op2, something;
1571 struct var_to_expand *ves;
1572 enum machine_mode mode1, mode2;
1573 unsigned accum_pos;
1574
1575 set = single_set (insn);
1576 if (!set)
1577 return NULL;
1578
1579 dest = SET_DEST (set);
1580 src = SET_SRC (set);
1581
1582 if (GET_CODE (src) != PLUS
1583 && GET_CODE (src) != MINUS
1584 && GET_CODE (src) != MULT)
1585 return NULL;
1586
1587 /* Hmm, this is a bit paradoxical. We know that INSN is a valid insn
1588 in MD. But if there is no optab to generate the insn, we cannot
1589 perform the variable expansion. This can happen if an MD provides
1590 an insn but not a named pattern to generate it, for example to avoid
1591 producing code that needs additional mode switches like for x87/mmx.
1592
1593 So we check have_insn_for which looks for an optab for the operation
1594 in SRC. If it doesn't exist, we can't perform the expansion even
1595 though INSN is valid. */
1596 if (!have_insn_for (GET_CODE (src), GET_MODE (src)))
1597 return NULL;
1598
1599 op1 = XEXP (src, 0);
1600 op2 = XEXP (src, 1);
1601
1602 if (!REG_P (dest)
1603 && !(GET_CODE (dest) == SUBREG
1604 && REG_P (SUBREG_REG (dest))))
1605 return NULL;
1606
1607 if (rtx_equal_p (dest, op1))
1608 accum_pos = 0;
1609 else if (rtx_equal_p (dest, op2))
1610 accum_pos = 1;
1611 else
1612 return NULL;
1613
1614 /* The method of expansion that we are using, which initializes the
1615 expansions with zero and sums them up at the end of the computation,
1616 yields wrong results for (x = something - x); thus avoid using it
1617 in that case. */
1618 if (accum_pos == 1
1619 && GET_CODE (src) == MINUS)
1620 return NULL;
1621
1622 something = (accum_pos == 0)? op2 : op1;
1623
1624 if (!referenced_in_one_insn_in_loop_p (loop, dest))
1625 return NULL;
1626
1627 if (rtx_referenced_p (dest, something))
1628 return NULL;
1629
1630 mode1 = GET_MODE (dest);
1631 mode2 = GET_MODE (something);
1632 if ((FLOAT_MODE_P (mode1)
1633 || FLOAT_MODE_P (mode2))
1634 && !flag_unsafe_math_optimizations)
1635 return NULL;
1636
1637 if (dump_file)
1638 {
1639 fprintf (dump_file,
1640 "\n;; Expanding Accumulator ");
1641 print_rtl (dump_file, dest);
1642 fprintf (dump_file, "\n");
1643 }
1644
1645 /* Record the accumulator to expand. */
1646 ves = XNEW (struct var_to_expand);
1647 ves->insn = insn;
1648 ves->var_expansions = VEC_alloc (rtx, heap, 1);
1649 ves->reg = copy_rtx (dest);
1650 ves->op = GET_CODE (src);
1651 ves->expansion_count = 0;
1652 ves->reuse_expansion = 0;
1653 ves->accum_pos = accum_pos;
1654 return ves;
1655 }
1656
1657 /* Determine whether there is an induction variable in INSN that
1658 we would like to split during unrolling.
1659
1660 I.e. replace
1661
1662 i = i + 1;
1663 ...
1664 i = i + 1;
1665 ...
1666 i = i + 1;
1667 ...
1668
1669 type chains by
1670
1671 i0 = i + 1
1672 ...
1673 i = i0 + 1
1674 ...
1675 i = i0 + 2
1676 ...
1677
1678 Return NULL if INSN contains no interesting IVs. Otherwise, allocate
1679 an IV_TO_SPLIT structure, fill it with the relevant information and return a
1680 pointer to it. */
1681
1682 static struct iv_to_split *
1683 analyze_iv_to_split_insn (rtx insn)
1684 {
1685 rtx set, dest;
1686 struct rtx_iv iv;
1687 struct iv_to_split *ivts;
1688 bool ok;
1689
1690 /* For now we just split the basic induction variables. Later this may be
1691 extended, for example, by also selecting addresses of memory references. */
1692 set = single_set (insn);
1693 if (!set)
1694 return NULL;
1695
1696 dest = SET_DEST (set);
1697 if (!REG_P (dest))
1698 return NULL;
1699
1700 if (!biv_p (insn, dest))
1701 return NULL;
1702
1703 ok = iv_analyze_result (insn, dest, &iv);
1704
1705 /* This used to be an assert under the assumption that if biv_p returns
1706 true then iv_analyze_result must also return true. However, that
1707 assumption is not strictly correct as evidenced by pr25569.
1708
1709 Returning NULL when iv_analyze_result returns false is safe and
1710 avoids the problems in pr25569 until the iv_analyze_* routines
1711 can be fixed, which is apparently hard and time consuming
1712 according to their author. */
1713 if (! ok)
1714 return NULL;
1715
1716 if (iv.step == const0_rtx
1717 || iv.mode != iv.extend_mode)
1718 return NULL;
1719
1720 /* Record the insn to split. */
1721 ivts = XNEW (struct iv_to_split);
1722 ivts->insn = insn;
1723 ivts->base_var = NULL_RTX;
1724 ivts->step = iv.step;
1725 ivts->n_loc = 1;
1726 ivts->loc[0] = 1;
1727
1728 return ivts;
1729 }
1730
1731 /* Determine which insns in LOOP can be optimized.
1732 Return an OPT_INFO struct with the relevant hash tables filled
1733 with all insns to be optimized. The FIRST_NEW_BLOCK field
1734 is undefined for the return value. */
1735
1736 static struct opt_info *
1737 analyze_insns_in_loop (struct loop *loop)
1738 {
1739 basic_block *body, bb;
1740 unsigned i;
1741 struct opt_info *opt_info = XCNEW (struct opt_info);
1742 rtx insn;
1743 struct iv_to_split *ivts = NULL;
1744 struct var_to_expand *ves = NULL;
1745 PTR *slot1;
1746 PTR *slot2;
1747 VEC (edge, heap) *edges = get_loop_exit_edges (loop);
1748 edge exit;
1749 bool can_apply = false;
1750
1751 iv_analysis_loop_init (loop);
1752
1753 body = get_loop_body (loop);
1754
1755 if (flag_split_ivs_in_unroller)
1756 opt_info->insns_to_split = htab_create (5 * loop->num_nodes,
1757 si_info_hash, si_info_eq, free);
1758
1759 /* Record the loop exit bb and loop preheader before the unrolling. */
1760 opt_info->loop_preheader = loop_preheader_edge (loop)->src;
1761
1762 if (VEC_length (edge, edges) == 1)
1763 {
1764 exit = VEC_index (edge, edges, 0);
1765 if (!(exit->flags & EDGE_COMPLEX))
1766 {
1767 opt_info->loop_exit = split_edge (exit);
1768 can_apply = true;
1769 }
1770 }
1771
1772 if (flag_variable_expansion_in_unroller
1773 && can_apply)
1774 opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes,
1775 ve_info_hash, ve_info_eq, free);
1776
1777 for (i = 0; i < loop->num_nodes; i++)
1778 {
1779 bb = body[i];
1780 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1781 continue;
1782
1783 FOR_BB_INSNS (bb, insn)
1784 {
1785 if (!INSN_P (insn))
1786 continue;
1787
1788 if (opt_info->insns_to_split)
1789 ivts = analyze_iv_to_split_insn (insn);
1790
1791 if (ivts)
1792 {
1793 slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT);
1794 *slot1 = ivts;
1795 continue;
1796 }
1797
1798 if (opt_info->insns_with_var_to_expand)
1799 ves = analyze_insn_to_expand_var (loop, insn);
1800
1801 if (ves)
1802 {
1803 slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT);
1804 *slot2 = ves;
1805 }
1806 }
1807 }
1808
1809 VEC_free (edge, heap, edges);
1810 free (body);
1811 return opt_info;
1812 }
1813
1814 /* Called just before loop duplication. Records the start of the duplicated
1815 area in OPT_INFO. */
1816
1817 static void
1818 opt_info_start_duplication (struct opt_info *opt_info)
1819 {
1820 if (opt_info)
1821 opt_info->first_new_block = last_basic_block;
1822 }
1823
1824 /* Determine the number of iterations between initialization of the base
1825 variable and the current copy (N_COPY). N_COPIES is the total number
1826 of newly created copies. UNROLLING is true if we are unrolling
1827 (not peeling) the loop. */
1828
1829 static unsigned
1830 determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
1831 {
1832 if (unrolling)
1833 {
1834 /* If we are unrolling, initialization is done in the original loop
1835 body (number 0). */
1836 return n_copy;
1837 }
1838 else
1839 {
1840 /* If we are peeling, the copy in which the initialization occurs has
1841 number 1. The original loop (number 0) is the last. */
1842 if (n_copy)
1843 return n_copy - 1;
1844 else
1845 return n_copies;
1846 }
1847 }
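
/* For example, when peeling with n_copies == 2, the peeled copies are
   numbered 1 and 2 and execute first, while the original body (number
   0) runs last; so the delta is 0 for copy 1, 1 for copy 2, and
   n_copies == 2 for the original body.  */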
1848
1849 /* Locate in EXPR the expression corresponding to the location recorded
1850 in IVTS, and return a pointer to the RTX for this location. */
1851
1852 static rtx *
1853 get_ivts_expr (rtx expr, struct iv_to_split *ivts)
1854 {
1855 unsigned i;
1856 rtx *ret = &expr;
1857
1858 for (i = 0; i < ivts->n_loc; i++)
1859 ret = &XEXP (*ret, ivts->loc[i]);
1860
1861 return ret;
1862 }
1863
1864 /* Allocate a basic variable for the induction variable chain. Callback
1865 for htab_traverse. */
1866
1867 static int
1868 allocate_basic_variable (void **slot, void *data ATTRIBUTE_UNUSED)
1869 {
1870 struct iv_to_split *ivts = *slot;
1871 rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);
1872
1873 ivts->base_var = gen_reg_rtx (GET_MODE (expr));
1874
1875 return 1;
1876 }
1877
1878 /* Insert the initialization of the basic variable of IVTS before INSN,
1879 taking the initial value from INSN. */
1880
1881 static void
1882 insert_base_initialization (struct iv_to_split *ivts, rtx insn)
1883 {
1884 rtx expr = copy_rtx (*get_ivts_expr (single_set (insn), ivts));
1885 rtx seq;
1886
1887 start_sequence ();
1888 expr = force_operand (expr, ivts->base_var);
1889 if (expr != ivts->base_var)
1890 emit_move_insn (ivts->base_var, expr);
1891 seq = get_insns ();
1892 end_sequence ();
1893
1894 emit_insn_before (seq, insn);
1895 }
1896
1897 /* Replace the use of the induction variable described in IVTS in INSN
1898 by the base variable plus DELTA * step. */
1899
1900 static void
1901 split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
1902 {
1903 rtx expr, *loc, seq, incr, var;
1904 enum machine_mode mode = GET_MODE (ivts->base_var);
1905 rtx src, dest, set;
1906
1907 /* Construct base + DELTA * step. */
1908 if (!delta)
1909 expr = ivts->base_var;
1910 else
1911 {
1912 incr = simplify_gen_binary (MULT, mode,
1913 ivts->step, gen_int_mode (delta, mode));
1914 expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
1915 ivts->base_var, incr);
1916 }
1917
1918 /* Figure out where to do the replacement. */
1919 loc = get_ivts_expr (single_set (insn), ivts);
1920
1921 /* If we can make the replacement right away, we're done. */
1922 if (validate_change (insn, loc, expr, 0))
1923 return;
1924
1925 /* Otherwise, force EXPR into a register and try again. */
1926 start_sequence ();
1927 var = gen_reg_rtx (mode);
1928 expr = force_operand (expr, var);
1929 if (expr != var)
1930 emit_move_insn (var, expr);
1931 seq = get_insns ();
1932 end_sequence ();
1933 emit_insn_before (seq, insn);
1934
1935 if (validate_change (insn, loc, var, 0))
1936 return;
1937
1938 /* The last chance. Try recreating the assignment in insn
1939 completely from scratch. */
1940 set = single_set (insn);
1941 gcc_assert (set);
1942
1943 start_sequence ();
1944 *loc = var;
1945 src = copy_rtx (SET_SRC (set));
1946 dest = copy_rtx (SET_DEST (set));
1947 src = force_operand (src, dest);
1948 if (src != dest)
1949 emit_move_insn (dest, src);
1950 seq = get_insns ();
1951 end_sequence ();
1952
1953 emit_insn_before (seq, insn);
1954 delete_insn (insn);
1955 }
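
/* For illustration (not part of the pass): for an unrolled chain of
   increments "i = i + 1" with step 1, the rewrite produces roughly

     i0 = i;
     i = i0 + 1;
     ...
     i = (i0 + 1) + 1;
     ...
     i = (i0 + 2) + 1;

   so each copy depends only on the base variable i0, not on the i
   computed by the previous copy.  */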


/* Return one expansion of the accumulator recorded in struct VE.  */

static rtx
get_expansion (struct var_to_expand *ve)
{
  rtx reg;

  if (ve->reuse_expansion == 0)
    reg = ve->reg;
  else
    reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1);

  if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion)
    ve->reuse_expansion = 0;
  else
    ve->reuse_expansion++;

  return reg;
}
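
/* For illustration (not part of the pass): with two expansions E1 and E2
   recorded in VE->var_expansions, successive calls hand out the original
   accumulator and the expansions round-robin:

     ve->reg, E1, E2, ve->reg, E1, E2, ...  */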


/* Given INSN, replace the uses of the accumulator recorded in VE
   with a new register.  */

static void
expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
{
  rtx new_reg, set;
  bool really_new_expansion = false;

  set = single_set (insn);
  gcc_assert (set);

  /* Generate a new register only if the expansion limit has not been
     reached.  Otherwise reuse an already existing expansion.  */
  if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
    {
      really_new_expansion = true;
      new_reg = gen_reg_rtx (GET_MODE (ve->reg));
    }
  else
    new_reg = get_expansion (ve);

  validate_change (insn, &SET_DEST (set), new_reg, 1);
  validate_change (insn, &XEXP (SET_SRC (set), ve->accum_pos), new_reg, 1);

  if (apply_change_group ())
    if (really_new_expansion)
      {
	VEC_safe_push (rtx, heap, ve->var_expansions, new_reg);
	ve->expansion_count++;
      }
}
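
/* For illustration (not part of the pass): if INSN in one of the copies is

     sum = sum + x;

   and NEW_REG is a fresh accumulator sum1, both the destination and the
   accumulator operand at ACCUM_POS are replaced, yielding

     sum1 = sum1 + x;

   The partial accumulators are combined again at the loop exit.  */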

/* Initialize the variable expansions in the loop preheader.
   Callback for htab_traverse.  PLACE_P is the loop-preheader
   basic block where the initialization of the expansions
   should take place.  The expansions are initialized with (-0)
   when the operation is plus or minus, in order to honor signed zero.
   This way we prevent cases where the sign of the final result is
   affected by the sign of the expansion.
   Here is an example to demonstrate this:

     for (i = 0 ; i < n; i++)
       sum += something;

     ==>

     sum += something
     ....
     i = i+1;
     sum1 += something
     ....
     i = i+1
     sum2 += something;
     ....

   When SUM is initialized with -zero and SOMETHING is also -zero, the
   final result of SUM should be -zero, thus the expansions SUM1 and SUM2
   should be initialized with -zero as well (otherwise we would get +zero
   as the final result).  */

static int
insert_var_expansion_initialization (void **slot, void *place_p)
{
  struct var_to_expand *ve = *slot;
  basic_block place = (basic_block) place_p;
  rtx seq, var, zero_init, insn;
  unsigned i;
  enum machine_mode mode = GET_MODE (ve->reg);
  bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);

  if (VEC_length (rtx, ve->var_expansions) == 0)
    return 1;

  start_sequence ();
  if (ve->op == PLUS || ve->op == MINUS)
    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
      {
	if (honor_signed_zero_p)
	  zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
	else
	  zero_init = CONST0_RTX (mode);

	emit_move_insn (var, zero_init);
      }
  else if (ve->op == MULT)
    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
      {
	zero_init = CONST1_RTX (GET_MODE (var));
	emit_move_insn (var, zero_init);
      }

  seq = get_insns ();
  end_sequence ();

  insn = BB_HEAD (place);
  while (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
  /* Continue traversing the hash table.  */
  return 1;
}
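
/* For illustration (not part of the pass): the signed-zero issue is
   visible in plain C under IEEE semantics, where (-0.0) + (-0.0)
   evaluates to -0.0 but (+0.0) + (-0.0) evaluates to +0.0.  Starting
   an expansion at +0.0 therefore loses the sign:

     double sum1 = -0.0, sum2 = +0.0;
     sum1 += -0.0;   // sum1 == -0.0: sign preserved
     sum2 += -0.0;   // sum2 == +0.0: sign lost

   Hence, when HONOR_SIGNED_ZEROS holds for the mode, the expansions of
   an additive accumulator are initialized with -0, not +0.  */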

/* Combine the variable expansions at the loop exit.
   Callback for htab_traverse.  PLACE_P is the loop exit
   basic block where the summation of the expansions should
   take place.  */

static int
combine_var_copies_in_loop_exit (void **slot, void *place_p)
{
  struct var_to_expand *ve = *slot;
  basic_block place = (basic_block) place_p;
  rtx sum = ve->reg;
  rtx expr, seq, var, insn;
  unsigned i;

  if (VEC_length (rtx, ve->var_expansions) == 0)
    return 1;

  start_sequence ();
  if (ve->op == PLUS || ve->op == MINUS)
    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
      {
	sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg),
				   var, sum);
      }
  else if (ve->op == MULT)
    for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
      {
	sum = simplify_gen_binary (MULT, GET_MODE (ve->reg),
				   var, sum);
      }

  expr = force_operand (sum, ve->reg);
  if (expr != ve->reg)
    emit_move_insn (ve->reg, expr);
  seq = get_insns ();
  end_sequence ();

  insn = BB_HEAD (place);
  while (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);

  /* Continue traversing the hash table.  */
  return 1;
}
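
/* For illustration (not part of the pass): for an additive accumulator
   SUM with expansions sum1 and sum2, the sequence emitted at the loop
   exit computes

     sum = sum2 + (sum1 + sum);

   and for a multiplicative accumulator the MULTs are combined the same
   way, folding all partial results back into the original register.  */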

/* Apply loop optimizations to the loop copies, using the
   data gathered during the unrolling.  The structure
   OPT_INFO records that data.

   UNROLLING is true if we unrolled (not peeled) the loop.
   REWRITE_ORIGINAL_LOOP is true if we should also rewrite the original body of
   the loop (as should happen in complete unrolling, but not in ordinary
   peeling of the loop).  */

static void
apply_opt_in_copies (struct opt_info *opt_info,
		     unsigned n_copies, bool unrolling,
		     bool rewrite_original_loop)
{
  unsigned i, delta;
  basic_block bb, orig_bb;
  rtx insn, orig_insn, next;
  struct iv_to_split ivts_templ, *ivts;
  struct var_to_expand ve_templ, *ves;

  /* Sanity check -- we need to put initialization in the original loop
     body.  */
  gcc_assert (!unrolling || rewrite_original_loop);

  /* Allocate the basic variables (i0).  */
  if (opt_info->insns_to_split)
    htab_traverse (opt_info->insns_to_split, allocate_basic_variable, NULL);

  for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
    {
      bb = BASIC_BLOCK (i);
      orig_bb = get_bb_original (bb);

      /* bb->aux holds the position in the copy sequence, initialized by
	 duplicate_loop_to_header_edge.  */
      delta = determine_split_iv_delta ((size_t) bb->aux, n_copies,
					unrolling);
      bb->aux = 0;
      orig_insn = BB_HEAD (orig_bb);
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (!INSN_P (insn))
	    continue;

	  while (!INSN_P (orig_insn))
	    orig_insn = NEXT_INSN (orig_insn);

	  ivts_templ.insn = orig_insn;
	  ve_templ.insn = orig_insn;

	  /* Apply the iv splitting optimization.  */
	  if (opt_info->insns_to_split)
	    {
	      ivts = htab_find (opt_info->insns_to_split, &ivts_templ);

	      if (ivts)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));

		  if (!delta)
		    insert_base_initialization (ivts, insn);
		  split_iv (ivts, insn, delta);
		}
	    }
	  /* Apply the variable expansion optimization.  */
	  if (unrolling && opt_info->insns_with_var_to_expand)
	    {
	      ves = htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
	      if (ves)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));
		  expand_var_during_unrolling (ves, insn);
		}
	    }
	  orig_insn = NEXT_INSN (orig_insn);
	}
    }

  if (!rewrite_original_loop)
    return;

  /* Initialize the variable expansions in the loop preheader
     and take care of combining them at the loop exit.  */
  if (opt_info->insns_with_var_to_expand)
    {
      htab_traverse (opt_info->insns_with_var_to_expand,
		     insert_var_expansion_initialization,
		     opt_info->loop_preheader);
      htab_traverse (opt_info->insns_with_var_to_expand,
		     combine_var_copies_in_loop_exit,
		     opt_info->loop_exit);
    }

  /* Also rewrite the original loop body.  Find its blocks as the
     originals of the blocks in the last copied iteration, i.e. those
     for which get_bb_copy (get_bb_original (bb)) == bb.  */
  for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
    {
      bb = BASIC_BLOCK (i);
      orig_bb = get_bb_original (bb);
      if (get_bb_copy (orig_bb) != bb)
	continue;

      delta = determine_split_iv_delta (0, n_copies, unrolling);
      for (orig_insn = BB_HEAD (orig_bb);
	   orig_insn != NEXT_INSN (BB_END (bb));
	   orig_insn = next)
	{
	  next = NEXT_INSN (orig_insn);

	  if (!INSN_P (orig_insn))
	    continue;

	  ivts_templ.insn = orig_insn;
	  if (opt_info->insns_to_split)
	    {
	      ivts = htab_find (opt_info->insns_to_split, &ivts_templ);
	      if (ivts)
		{
		  if (!delta)
		    insert_base_initialization (ivts, orig_insn);
		  split_iv (ivts, orig_insn, delta);
		  continue;
		}
	    }

	}
    }
}

/* Release the data structures used for the variable expansion
   optimization.  Callback for htab_traverse.  */

static int
release_var_copies (void **slot, void *data ATTRIBUTE_UNUSED)
{
  struct var_to_expand *ve = *slot;

  VEC_free (rtx, heap, ve->var_expansions);

  /* Continue traversing the hash table.  */
  return 1;
}

/* Release OPT_INFO.  */

static void
free_opt_info (struct opt_info *opt_info)
{
  if (opt_info->insns_to_split)
    htab_delete (opt_info->insns_to_split);
  if (opt_info->insns_with_var_to_expand)
    {
      htab_traverse (opt_info->insns_with_var_to_expand,
		     release_var_copies, NULL);
      htab_delete (opt_info->insns_with_var_to_expand);
    }
  free (opt_info);
}