1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "symtab.h"
26 #include "hard-reg-set.h"
27 #include "function.h"
28 #include "predict.h"
29 #include "dominance.h"
30 #include "cfg.h"
31 #include "cfganal.h"
32 #include "basic-block.h"
33 #include "cfgloop.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "tree-ssa-alias.h"
39 #include "internal-fn.h"
40 #include "gimple-expr.h"
41 #include "gimple.h"
42 #include "gimple-iterator.h"
43 #include "gimple-ssa.h"
44 #include "dumpfile.h"
45
46 static void flow_loops_cfg_dump (FILE *);
47 \f
48 /* Dump loop related CFG information. */
49
50 static void
51 flow_loops_cfg_dump (FILE *file)
52 {
53 basic_block bb;
54
55 if (!file)
56 return;
57
58 FOR_EACH_BB_FN (bb, cfun)
59 {
60 edge succ;
61 edge_iterator ei;
62
63 fprintf (file, ";; %d succs { ", bb->index);
64 FOR_EACH_EDGE (succ, ei, bb->succs)
65 fprintf (file, "%d ", succ->dest->index);
66 fprintf (file, "}\n");
67 }
68 }
69
70 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
71
72 bool
73 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
74 {
75 unsigned odepth = loop_depth (outer);
76
77 return (loop_depth (loop) > odepth
78 && (*loop->superloops)[odepth] == outer);
79 }
80
81 /* Returns the superloop of LOOP at depth DEPTH (indexed from zero, where
82 depth zero is the root of the loop tree). */
83
84 struct loop *
85 superloop_at_depth (struct loop *loop, unsigned depth)
86 {
87 unsigned ldepth = loop_depth (loop);
88
89 gcc_assert (depth <= ldepth);
90
91 if (depth == ldepth)
92 return loop;
93
94 return (*loop->superloops)[depth];
95 }
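
/* Illustrative note (not part of the original file): for a loop L with
   loop_depth (L) == 3 the superloop vector holds the ancestors of L ordered
   by depth,

     (*L->superloops)[0]  the tree root (depth 0)
     (*L->superloops)[1]  the outermost enclosing real loop (depth 1)
     (*L->superloops)[2]  the immediate parent, i.e. loop_outer (L)

   so superloop_at_depth (L, 1) simply indexes this vector, and
   flow_loop_nested_p (A, L) reduces to the single check
   (*L->superloops)[loop_depth (A)] == A.  */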
96
97 /* Returns the list of the latch edges of LOOP. */
98
99 static vec<edge>
100 get_loop_latch_edges (const struct loop *loop)
101 {
102 edge_iterator ei;
103 edge e;
104 vec<edge> ret = vNULL;
105
106 FOR_EACH_EDGE (e, ei, loop->header->preds)
107 {
108 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
109 ret.safe_push (e);
110 }
111
112 return ret;
113 }
114
115 /* Dump the loop information specified by LOOP to the stream FILE
116 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
117
118 void
119 flow_loop_dump (const struct loop *loop, FILE *file,
120 void (*loop_dump_aux) (const struct loop *, FILE *, int),
121 int verbose)
122 {
123 basic_block *bbs;
124 unsigned i;
125 vec<edge> latches;
126 edge e;
127
128 if (! loop || ! loop->header)
129 return;
130
131 fprintf (file, ";;\n;; Loop %d\n", loop->num);
132
133 fprintf (file, ";; header %d, ", loop->header->index);
134 if (loop->latch)
135 fprintf (file, "latch %d\n", loop->latch->index);
136 else
137 {
138 fprintf (file, "multiple latches:");
139 latches = get_loop_latch_edges (loop);
140 FOR_EACH_VEC_ELT (latches, i, e)
141 fprintf (file, " %d", e->src->index);
142 latches.release ();
143 fprintf (file, "\n");
144 }
145
146 fprintf (file, ";; depth %d, outer %ld\n",
147 loop_depth (loop), (long) (loop_outer (loop)
148 ? loop_outer (loop)->num : -1));
149
150 fprintf (file, ";; nodes:");
151 bbs = get_loop_body (loop);
152 for (i = 0; i < loop->num_nodes; i++)
153 fprintf (file, " %d", bbs[i]->index);
154 free (bbs);
155 fprintf (file, "\n");
156
157 if (loop_dump_aux)
158 loop_dump_aux (loop, file, verbose);
159 }
160
161 /* Dump information about all loops to the stream FILE,
162 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
163
164 void
165 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
166 {
167 struct loop *loop;
168
169 if (!current_loops || ! file)
170 return;
171
172 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
173
174 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
175 {
176 flow_loop_dump (loop, file, loop_dump_aux, verbose);
177 }
178
179 if (verbose)
180 flow_loops_cfg_dump (file);
181 }
182
183 /* Free data allocated for LOOP. */
184
185 void
186 flow_loop_free (struct loop *loop)
187 {
188 struct loop_exit *exit, *next;
189
190 vec_free (loop->superloops);
191
192 /* Break the list of the loop exit records. They will be freed when the
193 corresponding edge is rescanned or removed, and this avoids
194 accessing the (already released) head of the list stored in the
195 loop structure. */
196 for (exit = loop->exits->next; exit != loop->exits; exit = next)
197 {
198 next = exit->next;
199 exit->next = exit;
200 exit->prev = exit;
201 }
202
203 ggc_free (loop->exits);
204 ggc_free (loop);
205 }
206
207 /* Free all the memory allocated for LOOPS. */
208
209 void
210 flow_loops_free (struct loops *loops)
211 {
212 if (loops->larray)
213 {
214 unsigned i;
215 loop_p loop;
216
217 /* Free the loop descriptors. */
218 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
219 {
220 if (!loop)
221 continue;
222
223 flow_loop_free (loop);
224 }
225
226 vec_free (loops->larray);
227 }
228 }
229
230 /* Find the nodes contained within the LOOP with header HEADER.
231 Return the number of nodes within the loop. */
232
233 int
234 flow_loop_nodes_find (basic_block header, struct loop *loop)
235 {
236 vec<basic_block> stack = vNULL;
237 int num_nodes = 1;
238 edge latch;
239 edge_iterator latch_ei;
240
241 header->loop_father = loop;
242
243 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
244 {
245 if (latch->src->loop_father == loop
246 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
247 continue;
248
249 num_nodes++;
250 stack.safe_push (latch->src);
251 latch->src->loop_father = loop;
252
253 while (!stack.is_empty ())
254 {
255 basic_block node;
256 edge e;
257 edge_iterator ei;
258
259 node = stack.pop ();
260
261 FOR_EACH_EDGE (e, ei, node->preds)
262 {
263 basic_block ancestor = e->src;
264
265 if (ancestor->loop_father != loop)
266 {
267 ancestor->loop_father = loop;
268 num_nodes++;
269 stack.safe_push (ancestor);
270 }
271 }
272 }
273 }
274 stack.release ();
275
276 return num_nodes;
277 }
278
279 /* Records the vector of superloops of the loop LOOP, whose immediate
280 superloop is FATHER. */
281
282 static void
283 establish_preds (struct loop *loop, struct loop *father)
284 {
285 loop_p ploop;
286 unsigned depth = loop_depth (father) + 1;
287 unsigned i;
288
289 loop->superloops = 0;
290 vec_alloc (loop->superloops, depth);
291 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
292 loop->superloops->quick_push (ploop);
293 loop->superloops->quick_push (father);
294
295 for (ploop = loop->inner; ploop; ploop = ploop->next)
296 establish_preds (ploop, loop);
297 }
298
299 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
300 added loop. If LOOP has some children, take care that their
301 pred fields are initialized correctly. */
302
303 void
304 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
305 {
306 loop->next = father->inner;
307 father->inner = loop;
308
309 establish_preds (loop, father);
310 }
311
312 /* Remove LOOP from the loop hierarchy tree. */
313
314 void
315 flow_loop_tree_node_remove (struct loop *loop)
316 {
317 struct loop *prev, *father;
318
319 father = loop_outer (loop);
320
321 /* Remove loop from the list of sons. */
322 if (father->inner == loop)
323 father->inner = loop->next;
324 else
325 {
326 for (prev = father->inner; prev->next != loop; prev = prev->next)
327 continue;
328 prev->next = loop->next;
329 }
330
331 loop->superloops = NULL;
332 }
333
334 /* Allocates and returns new loop structure. */
335
336 struct loop *
337 alloc_loop (void)
338 {
339 struct loop *loop = ggc_cleared_alloc<struct loop> ();
340
341 loop->exits = ggc_cleared_alloc<loop_exit> ();
342 loop->exits->next = loop->exits->prev = loop->exits;
343 loop->can_be_parallel = false;
344 loop->nb_iterations_upper_bound = 0;
345 loop->nb_iterations_estimate = 0;
346 return loop;
347 }
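
/* Illustrative note (not part of the original file): loop->exits is a dummy
   head of a circular doubly-linked list, so a freshly allocated loop has

     loop->exits->next == loop->exits
     loop->exits->prev == loop->exits
     loop->exits->e    == NULL

   and walkers such as get_loop_exit_edges can iterate with
   "for (exit = loop->exits->next; exit->e; exit = exit->next)", stopping at
   the NULL edge of the head without a separate end-of-list test.  */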
348
349 /* Initializes loops structure LOOPS, reserving place for NUM_LOOPS loops
350 (including the root of the loop tree). */
351
352 void
353 init_loops_structure (struct function *fn,
354 struct loops *loops, unsigned num_loops)
355 {
356 struct loop *root;
357
358 memset (loops, 0, sizeof *loops);
359 vec_alloc (loops->larray, num_loops);
360
361 /* Dummy loop containing whole function. */
362 root = alloc_loop ();
363 root->num_nodes = n_basic_blocks_for_fn (fn);
364 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
365 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
366 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
367 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
368
369 loops->larray->quick_push (root);
370 loops->tree_root = root;
371 }
372
373 /* Returns whether HEADER is a loop header. */
374
375 bool
376 bb_loop_header_p (basic_block header)
377 {
378 edge_iterator ei;
379 edge e;
380
381 /* If we have an abnormal predecessor, do not consider the
382 loop (not worth the problems). */
383 if (bb_has_abnormal_pred (header))
384 return false;
385
386 /* Look for back edges where a predecessor is dominated
387 by this block. A natural loop has a single entry
388 node (header) that dominates all the nodes in the
389 loop. It also has single back edge to the header
390 from a latch node. */
391 FOR_EACH_EDGE (e, ei, header->preds)
392 {
393 basic_block latch = e->src;
394 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
395 && dominated_by_p (CDI_DOMINATORS, latch, header))
396 return true;
397 }
398
399 return false;
400 }
401
402 /* Find all the natural loops in the function and save in LOOPS structure and
403 recalculate loop_father information in basic block structures.
404 If LOOPS is non-NULL then the loop structures for already recorded loops
405 will be re-used and their number will not change. We assume that no
406 stale loops exist in LOOPS.
407 When LOOPS is NULL it is allocated and re-built from scratch.
408 Return the built LOOPS structure. */
409
410 struct loops *
411 flow_loops_find (struct loops *loops)
412 {
413 bool from_scratch = (loops == NULL);
414 int *rc_order;
415 int b;
416 unsigned i;
417
418 /* Ensure that the dominators are computed. */
419 calculate_dominance_info (CDI_DOMINATORS);
420
421 if (!loops)
422 {
423 loops = ggc_cleared_alloc<struct loops> ();
424 init_loops_structure (cfun, loops, 1);
425 }
426
427 /* Ensure that loop exits were released. */
428 gcc_assert (loops->exits == NULL);
429
430 /* Taking care of this degenerate case makes the rest of
431 this code simpler. */
432 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
433 return loops;
434
435 /* The root loop node contains all basic-blocks. */
436 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
437
438 /* Compute depth first search order of the CFG so that outer
439 natural loops will be found before inner natural loops. */
440 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
441 pre_and_rev_post_order_compute (NULL, rc_order, false);
442
443 /* Gather all loop headers in reverse completion order and allocate
444 loop structures for loops that are not already present. */
445 auto_vec<loop_p> larray (loops->larray->length ());
446 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
447 {
448 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
449 if (bb_loop_header_p (header))
450 {
451 struct loop *loop;
452
453 /* The current active loop tree has valid loop-fathers for
454 header blocks. */
455 if (!from_scratch
456 && header->loop_father->header == header)
457 {
458 loop = header->loop_father;
459 /* If we found an existing loop remove it from the
460 loop tree. It is going to be inserted again
461 below. */
462 flow_loop_tree_node_remove (loop);
463 }
464 else
465 {
466 /* Otherwise allocate a new loop structure for the loop. */
467 loop = alloc_loop ();
468 /* ??? We could re-use unused loop slots here. */
469 loop->num = loops->larray->length ();
470 vec_safe_push (loops->larray, loop);
471 loop->header = header;
472
473 if (!from_scratch
474 && dump_file && (dump_flags & TDF_DETAILS))
475 fprintf (dump_file, "flow_loops_find: discovered new "
476 "loop %d with header %d\n",
477 loop->num, header->index);
478 }
479 /* Reset latch, we recompute it below. */
480 loop->latch = NULL;
481 larray.safe_push (loop);
482 }
483
484 /* Make blocks part of the loop root node at start. */
485 header->loop_father = loops->tree_root;
486 }
487
488 free (rc_order);
489
490 /* Now iterate over the loops found, insert them into the loop tree
491 and assign basic-block ownership. */
492 for (i = 0; i < larray.length (); ++i)
493 {
494 struct loop *loop = larray[i];
495 basic_block header = loop->header;
496 edge_iterator ei;
497 edge e;
498
499 flow_loop_tree_node_add (header->loop_father, loop);
500 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
501
502 /* Look for the latch for this header block, if it has just a
503 single one. */
504 FOR_EACH_EDGE (e, ei, header->preds)
505 {
506 basic_block latch = e->src;
507
508 if (flow_bb_inside_loop_p (loop, latch))
509 {
510 if (loop->latch != NULL)
511 {
512 /* More than one latch edge. */
513 loop->latch = NULL;
514 break;
515 }
516 loop->latch = latch;
517 }
518 }
519 }
520
521 return loops;
522 }
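
/* A minimal usage sketch (illustrative only, not part of this file; passes
   normally reach this code via loop_optimizer_init rather than by calling
   flow_loops_find directly):

     loop_optimizer_init (LOOPS_NORMAL);
     struct loop *loop;
     FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
       if (dump_file)
         fprintf (dump_file, "loop %d: header bb %d, %u blocks\n",
                  loop->num, loop->header->index, loop->num_nodes);
     loop_optimizer_finalize ();  */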
523
524 /* Ratio of frequencies of edges such that one of the latch edges is
525 considered to belong to an inner loop with the same header. */
526 #define HEAVY_EDGE_RATIO 8
527
528 /* Minimum number of samples for which we apply the
529 find_subloop_latch_edge_by_profile heuristics. */
530 #define HEAVY_EDGE_MIN_SAMPLES 10
531
532 /* If the profile info is available, finds an edge in LATCHES that is much more
533 frequent than the remaining edges. Returns such an edge, or NULL if we do
534 not find one.
535
536 We do not use guessed profile here, only the measured one. The guessed
537 profile is usually too flat and unreliable for this (and it is mostly based
538 on the loop structure of the program, so it does not make much sense to
539 derive the loop structure from it). */
540
541 static edge
542 find_subloop_latch_edge_by_profile (vec<edge> latches)
543 {
544 unsigned i;
545 edge e, me = NULL;
546 gcov_type mcount = 0, tcount = 0;
547
548 FOR_EACH_VEC_ELT (latches, i, e)
549 {
550 if (e->count > mcount)
551 {
552 me = e;
553 mcount = e->count;
554 }
555 tcount += e->count;
556 }
557
558 if (tcount < HEAVY_EDGE_MIN_SAMPLES
559 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
560 return NULL;
561
562 if (dump_file)
563 fprintf (dump_file,
564 "Found latch edge %d -> %d using profile information.\n",
565 me->src->index, me->dest->index);
566 return me;
567 }
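
/* Worked example of the test above (illustrative only): with
   HEAVY_EDGE_RATIO == 8 the heaviest latch edge must carry at least 7/8 of
   the total count.  For counts {90, 6, 4}: tcount = 100, mcount = 90 and
   (100 - 90) * 8 = 80 <= 100, so the 90-count edge is returned.  For counts
   {60, 40}: (100 - 60) * 8 = 320 > 100, so NULL is returned.  */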
568
569 /* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
570 on the structure of induction variables. Returns this edge, or NULL if we
571 do not find any.
572
573 We are quite conservative, and look just for an obvious simple innermost
574 loop (which is the case where we would lose the most performance by not
575 disambiguating the loop). More precisely, we look for the following
576 situation: The source of the chosen latch edge dominates sources of all
577 the other latch edges. Additionally, the header does not contain a phi node
578 such that the argument from the chosen edge is equal to the argument from
579 another edge. */
580
581 static edge
582 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
583 {
584 edge e, latch = latches[0];
585 unsigned i;
586 gphi *phi;
587 gphi_iterator psi;
588 tree lop;
589 basic_block bb;
590
591 /* Find the candidate for the latch edge. */
592 for (i = 1; latches.iterate (i, &e); i++)
593 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
594 latch = e;
595
596 /* Verify that it dominates all the latch edges. */
597 FOR_EACH_VEC_ELT (latches, i, e)
598 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
599 return NULL;
600
601 /* Check for a phi node that would deny that this is a latch edge of
602 a subloop. */
603 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
604 {
605 phi = psi.phi ();
606 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
607
608 /* Ignore the values that are not changed inside the subloop. */
609 if (TREE_CODE (lop) != SSA_NAME
610 || SSA_NAME_DEF_STMT (lop) == phi)
611 continue;
612 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
613 if (!bb || !flow_bb_inside_loop_p (loop, bb))
614 continue;
615
616 FOR_EACH_VEC_ELT (latches, i, e)
617 if (e != latch
618 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
619 return NULL;
620 }
621
622 if (dump_file)
623 fprintf (dump_file,
624 "Found latch edge %d -> %d using iv structure.\n",
625 latch->src->index, latch->dest->index);
626 return latch;
627 }
628
629 /* If we can determine that one of the several latch edges of LOOP behaves
630 as a latch edge of a separate subloop, returns this edge. Otherwise
631 returns NULL. */
632
633 static edge
634 find_subloop_latch_edge (struct loop *loop)
635 {
636 vec<edge> latches = get_loop_latch_edges (loop);
637 edge latch = NULL;
638
639 if (latches.length () > 1)
640 {
641 latch = find_subloop_latch_edge_by_profile (latches);
642
643 if (!latch
644 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
645 should use cfghook for this, but it is hard to imagine it would
646 be useful elsewhere. */
647 && current_ir_type () == IR_GIMPLE)
648 latch = find_subloop_latch_edge_by_ivs (loop, latches);
649 }
650
651 latches.release ();
652 return latch;
653 }
654
655 /* Callback for make_forwarder_block. Returns true if the edge E is marked
656 in the set MFB_REIS_SET. */
657
658 static hash_set<edge> *mfb_reis_set;
659 static bool
660 mfb_redirect_edges_in_set (edge e)
661 {
662 return mfb_reis_set->contains (e);
663 }
664
665 /* Creates a subloop of LOOP with latch edge LATCH. */
666
667 static void
668 form_subloop (struct loop *loop, edge latch)
669 {
670 edge_iterator ei;
671 edge e, new_entry;
672 struct loop *new_loop;
673
674 mfb_reis_set = new hash_set<edge>;
675 FOR_EACH_EDGE (e, ei, loop->header->preds)
676 {
677 if (e != latch)
678 mfb_reis_set->add (e);
679 }
680 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
681 NULL);
682 delete mfb_reis_set;
683
684 loop->header = new_entry->src;
685
686 /* Find the blocks and subloops that belong to the new loop, and add it to
687 the appropriate place in the loop tree. */
688 new_loop = alloc_loop ();
689 new_loop->header = new_entry->dest;
690 new_loop->latch = latch->src;
691 add_loop (new_loop, loop);
692 }
693
694 /* Make all the latch edges of LOOP go to a single forwarder block --
695 a new latch of LOOP. */
696
697 static void
698 merge_latch_edges (struct loop *loop)
699 {
700 vec<edge> latches = get_loop_latch_edges (loop);
701 edge latch, e;
702 unsigned i;
703
704 gcc_assert (latches.length () > 0);
705
706 if (latches.length () == 1)
707 loop->latch = latches[0]->src;
708 else
709 {
710 if (dump_file)
711 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
712
713 mfb_reis_set = new hash_set<edge>;
714 FOR_EACH_VEC_ELT (latches, i, e)
715 mfb_reis_set->add (e);
716 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
717 NULL);
718 delete mfb_reis_set;
719
720 loop->header = latch->dest;
721 loop->latch = latch->src;
722 }
723
724 latches.release ();
725 }
726
727 /* LOOP may have several latch edges. Transform it into (possibly several)
728 loops with single latch edge. */
729
730 static void
731 disambiguate_multiple_latches (struct loop *loop)
732 {
733 edge e;
734
735 /* We eliminate the multiple latches by splitting the header into the forwarder
736 block F and the rest R, and redirecting the edges. There are two cases:
737
738 1) If there is a latch edge E that corresponds to a subloop (we guess
739 that based on profile -- if it is taken much more often than the
740 remaining edges; and on trees, using the information about induction
741 variables of the loops), we redirect E to R, all the remaining edges to
742 F, then rescan the loops and try again for the outer loop.
743 2) If there is no such edge, we redirect all latch edges to F, and the
744 entry edges to R, thus making F the single latch of the loop. */
745
746 if (dump_file)
747 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
748 loop->num);
749
750 /* During latch merging, we may need to redirect the entry edges to a new
751 block. This would cause problems if the entry edge was the one from the
752 entry block. To avoid having to handle this case specially, split
753 such entry edge. */
754 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
755 if (e)
756 split_edge (e);
757
758 while (1)
759 {
760 e = find_subloop_latch_edge (loop);
761 if (!e)
762 break;
763
764 form_subloop (loop, e);
765 }
766
767 merge_latch_edges (loop);
768 }
769
770 /* Split loops with multiple latch edges. */
771
772 void
773 disambiguate_loops_with_multiple_latches (void)
774 {
775 struct loop *loop;
776
777 FOR_EACH_LOOP (loop, 0)
778 {
779 if (!loop->latch)
780 disambiguate_multiple_latches (loop);
781 }
782 }
783
784 /* Return nonzero if basic block BB belongs to LOOP. */
785 bool
786 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
787 {
788 struct loop *source_loop;
789
790 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
791 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
792 return 0;
793
794 source_loop = bb->loop_father;
795 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
796 }
797
798 /* Enumeration predicate for get_loop_body_with_size. */
799 static bool
800 glb_enum_p (const_basic_block bb, const void *glb_loop)
801 {
802 const struct loop *const loop = (const struct loop *) glb_loop;
803 return (bb != loop->header
804 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
805 }
806
807 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
808 order against direction of edges from latch. In particular, if
809 header != latch, latch is the 1-st block. LOOP cannot be the fake
810 loop tree root, and its size must be at most MAX_SIZE. The blocks
811 in the LOOP body are stored to BODY, and the size of the LOOP is
812 returned. */
813
814 unsigned
815 get_loop_body_with_size (const struct loop *loop, basic_block *body,
816 unsigned max_size)
817 {
818 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
819 body, max_size, loop);
820 }
821
822 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
823 order against direction of edges from latch. In particular, if
824 header != latch, latch is the 1-st block. */
825
826 basic_block *
827 get_loop_body (const struct loop *loop)
828 {
829 basic_block *body, bb;
830 unsigned tv = 0;
831
832 gcc_assert (loop->num_nodes);
833
834 body = XNEWVEC (basic_block, loop->num_nodes);
835
836 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
837 {
838 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
839 special-case the fake loop that contains the whole function. */
840 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
841 body[tv++] = loop->header;
842 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
843 FOR_EACH_BB_FN (bb, cfun)
844 body[tv++] = bb;
845 }
846 else
847 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
848
849 gcc_assert (tv == loop->num_nodes);
850 return body;
851 }
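
/* Typical use of get_loop_body (an illustrative sketch, not part of the
   original file); the caller owns the returned array and must free it:

     basic_block *body = get_loop_body (loop);
     for (unsigned i = 0; i < loop->num_nodes; i++)
       if (dump_file)
         fprintf (dump_file, " bb %d", body[i]->index);
     free (body);  */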
852
853 /* Fills dominance descendants inside LOOP of the basic block BB into
854 array TOVISIT from index *TV. */
855
856 static void
857 fill_sons_in_loop (const struct loop *loop, basic_block bb,
858 basic_block *tovisit, int *tv)
859 {
860 basic_block son, postpone = NULL;
861
862 tovisit[(*tv)++] = bb;
863 for (son = first_dom_son (CDI_DOMINATORS, bb);
864 son;
865 son = next_dom_son (CDI_DOMINATORS, son))
866 {
867 if (!flow_bb_inside_loop_p (loop, son))
868 continue;
869
870 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
871 {
872 postpone = son;
873 continue;
874 }
875 fill_sons_in_loop (loop, son, tovisit, tv);
876 }
877
878 if (postpone)
879 fill_sons_in_loop (loop, postpone, tovisit, tv);
880 }
881
882 /* Gets body of a LOOP (that must be different from the outermost loop)
883 sorted by dominance relation. Additionally, if a basic block s dominates
884 the latch, then only blocks dominated by s are after it. */
885
886 basic_block *
887 get_loop_body_in_dom_order (const struct loop *loop)
888 {
889 basic_block *tovisit;
890 int tv;
891
892 gcc_assert (loop->num_nodes);
893
894 tovisit = XNEWVEC (basic_block, loop->num_nodes);
895
896 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
897
898 tv = 0;
899 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
900
901 gcc_assert (tv == (int) loop->num_nodes);
902
903 return tovisit;
904 }
905
906 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
907
908 basic_block *
909 get_loop_body_in_custom_order (const struct loop *loop,
910 int (*bb_comparator) (const void *, const void *))
911 {
912 basic_block *bbs = get_loop_body (loop);
913
914 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
915
916 return bbs;
917 }
918
919 /* Get body of a LOOP in breadth first sort order. */
920
921 basic_block *
922 get_loop_body_in_bfs_order (const struct loop *loop)
923 {
924 basic_block *blocks;
925 basic_block bb;
926 bitmap visited;
927 unsigned int i = 0;
928 unsigned int vc = 1;
929
930 gcc_assert (loop->num_nodes);
931 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
932
933 blocks = XNEWVEC (basic_block, loop->num_nodes);
934 visited = BITMAP_ALLOC (NULL);
935
936 bb = loop->header;
937 while (i < loop->num_nodes)
938 {
939 edge e;
940 edge_iterator ei;
941
942 if (bitmap_set_bit (visited, bb->index))
943 /* This basic block is now visited */
944 blocks[i++] = bb;
945
946 FOR_EACH_EDGE (e, ei, bb->succs)
947 {
948 if (flow_bb_inside_loop_p (loop, e->dest))
949 {
950 if (bitmap_set_bit (visited, e->dest->index))
951 blocks[i++] = e->dest;
952 }
953 }
954
955 gcc_assert (i > vc);
956
957 bb = blocks[vc++];
958 }
959
960 BITMAP_FREE (visited);
961 return blocks;
962 }
963
964 /* Hash function for struct loop_exit. */
965
966 hashval_t
967 loop_exit_hasher::hash (loop_exit *exit)
968 {
969 return htab_hash_pointer (exit->e);
970 }
971
972 /* Equality function for struct loop_exit. Compares with edge. */
973
974 bool
975 loop_exit_hasher::equal (loop_exit *exit, edge e)
976 {
977 return exit->e == e;
978 }
979
980 /* Frees the list of loop exit descriptions EX. */
981
982 void
983 loop_exit_hasher::remove (loop_exit *exit)
984 {
985 loop_exit *next;
986 for (; exit; exit = next)
987 {
988 next = exit->next_e;
989
990 exit->next->prev = exit->prev;
991 exit->prev->next = exit->next;
992
993 ggc_free (exit);
994 }
995 }
996
997 /* Returns the list of records for E as an exit of a loop. */
998
999 static struct loop_exit *
1000 get_exit_descriptions (edge e)
1001 {
1002 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
1003 }
1004
1005 /* Updates the lists of loop exits in that E appears.
1006 If REMOVED is true, E is being removed, and we
1007 just remove it from the lists of exits.
1008 If NEW_EDGE is true and E is not a loop exit, we
1009 do not try to remove it from loop exit lists. */
1010
1011 void
1012 rescan_loop_exit (edge e, bool new_edge, bool removed)
1013 {
1014 struct loop_exit *exits = NULL, *exit;
1015 struct loop *aloop, *cloop;
1016
1017 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1018 return;
1019
1020 if (!removed
1021 && e->src->loop_father != NULL
1022 && e->dest->loop_father != NULL
1023 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1024 {
1025 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1026 for (aloop = e->src->loop_father;
1027 aloop != cloop;
1028 aloop = loop_outer (aloop))
1029 {
1030 exit = ggc_alloc<loop_exit> ();
1031 exit->e = e;
1032
1033 exit->next = aloop->exits->next;
1034 exit->prev = aloop->exits;
1035 exit->next->prev = exit;
1036 exit->prev->next = exit;
1037
1038 exit->next_e = exits;
1039 exits = exit;
1040 }
1041 }
1042
1043 if (!exits && new_edge)
1044 return;
1045
1046 loop_exit **slot
1047 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1048 exits ? INSERT : NO_INSERT);
1049 if (!slot)
1050 return;
1051
1052 if (exits)
1053 {
1054 if (*slot)
1055 loop_exit_hasher::remove (*slot);
1056 *slot = exits;
1057 }
1058 else
1059 current_loops->exits->clear_slot (slot);
1060 }
1061
1062 /* For each loop, record list of exit edges, and start maintaining these
1063 lists. */
1064
1065 void
1066 record_loop_exits (void)
1067 {
1068 basic_block bb;
1069 edge_iterator ei;
1070 edge e;
1071
1072 if (!current_loops)
1073 return;
1074
1075 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1076 return;
1077 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1078
1079 gcc_assert (current_loops->exits == NULL);
1080 current_loops->exits
1081 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1082
1083 FOR_EACH_BB_FN (bb, cfun)
1084 {
1085 FOR_EACH_EDGE (e, ei, bb->succs)
1086 {
1087 rescan_loop_exit (e, true, false);
1088 }
1089 }
1090 }
1091
1092 /* Dumps information about the exit in *SLOT to FILE.
1093 Callback for htab_traverse. */
1094
1095 int
1096 dump_recorded_exit (loop_exit **slot, FILE *file)
1097 {
1098 struct loop_exit *exit = *slot;
1099 unsigned n = 0;
1100 edge e = exit->e;
1101
1102 for (; exit != NULL; exit = exit->next_e)
1103 n++;
1104
1105 fprintf (file, "Edge %d->%d exits %u loops\n",
1106 e->src->index, e->dest->index, n);
1107
1108 return 1;
1109 }
1110
1111 /* Dumps the recorded exits of loops to FILE. */
1112
1113 extern void dump_recorded_exits (FILE *);
1114 void
1115 dump_recorded_exits (FILE *file)
1116 {
1117 if (!current_loops->exits)
1118 return;
1119 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1120 }
1121
1122 /* Releases lists of loop exits. */
1123
1124 void
1125 release_recorded_exits (void)
1126 {
1127 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
1128 current_loops->exits->empty ();
1129 current_loops->exits = NULL;
1130 loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
1131 }
1132
1133 /* Returns the list of the exit edges of a LOOP. */
1134
1135 vec<edge>
1136 get_loop_exit_edges (const struct loop *loop)
1137 {
1138 vec<edge> edges = vNULL;
1139 edge e;
1140 unsigned i;
1141 basic_block *body;
1142 edge_iterator ei;
1143 struct loop_exit *exit;
1144
1145 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1146
1147 /* If we maintain the lists of exits, use them. Otherwise we must
1148 scan the body of the loop. */
1149 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1150 {
1151 for (exit = loop->exits->next; exit->e; exit = exit->next)
1152 edges.safe_push (exit->e);
1153 }
1154 else
1155 {
1156 body = get_loop_body (loop);
1157 for (i = 0; i < loop->num_nodes; i++)
1158 FOR_EACH_EDGE (e, ei, body[i]->succs)
1159 {
1160 if (!flow_bb_inside_loop_p (loop, e->dest))
1161 edges.safe_push (e);
1162 }
1163 free (body);
1164 }
1165
1166 return edges;
1167 }
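
/* Illustrative use of get_loop_exit_edges (not part of the original file);
   the returned vector must be released by the caller:

     vec<edge> exits = get_loop_exit_edges (loop);
     unsigned i;
     edge ex;
     FOR_EACH_VEC_ELT (exits, i, ex)
       if (dump_file)
         fprintf (dump_file, "exit %d -> %d\n",
                  ex->src->index, ex->dest->index);
     exits.release ();  */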
1168
1169 /* Counts the number of conditional branches inside LOOP. */
1170
1171 unsigned
1172 num_loop_branches (const struct loop *loop)
1173 {
1174 unsigned i, n;
1175 basic_block * body;
1176
1177 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1178
1179 body = get_loop_body (loop);
1180 n = 0;
1181 for (i = 0; i < loop->num_nodes; i++)
1182 if (EDGE_COUNT (body[i]->succs) >= 2)
1183 n++;
1184 free (body);
1185
1186 return n;
1187 }
1188
1189 /* Adds basic block BB to LOOP. */
1190 void
1191 add_bb_to_loop (basic_block bb, struct loop *loop)
1192 {
1193 unsigned i;
1194 loop_p ploop;
1195 edge_iterator ei;
1196 edge e;
1197
1198 gcc_assert (bb->loop_father == NULL);
1199 bb->loop_father = loop;
1200 loop->num_nodes++;
1201 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1202 ploop->num_nodes++;
1203
1204 FOR_EACH_EDGE (e, ei, bb->succs)
1205 {
1206 rescan_loop_exit (e, true, false);
1207 }
1208 FOR_EACH_EDGE (e, ei, bb->preds)
1209 {
1210 rescan_loop_exit (e, true, false);
1211 }
1212 }
1213
1214 /* Remove basic block BB from loops. */
1215 void
1216 remove_bb_from_loops (basic_block bb)
1217 {
1218 unsigned i;
1219 struct loop *loop = bb->loop_father;
1220 loop_p ploop;
1221 edge_iterator ei;
1222 edge e;
1223
1224 gcc_assert (loop != NULL);
1225 loop->num_nodes--;
1226 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1227 ploop->num_nodes--;
1228 bb->loop_father = NULL;
1229
1230 FOR_EACH_EDGE (e, ei, bb->succs)
1231 {
1232 rescan_loop_exit (e, false, true);
1233 }
1234 FOR_EACH_EDGE (e, ei, bb->preds)
1235 {
1236 rescan_loop_exit (e, false, true);
1237 }
1238 }
1239
1240 /* Finds nearest common ancestor in loop tree for given loops. */
1241 struct loop *
1242 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1243 {
1244 unsigned sdepth, ddepth;
1245
1246 if (!loop_s) return loop_d;
1247 if (!loop_d) return loop_s;
1248
1249 sdepth = loop_depth (loop_s);
1250 ddepth = loop_depth (loop_d);
1251
1252 if (sdepth < ddepth)
1253 loop_d = (*loop_d->superloops)[sdepth];
1254 else if (sdepth > ddepth)
1255 loop_s = (*loop_s->superloops)[ddepth];
1256
1257 while (loop_s != loop_d)
1258 {
1259 loop_s = loop_outer (loop_s);
1260 loop_d = loop_outer (loop_d);
1261 }
1262 return loop_s;
1263 }
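
/* Illustrative example (not part of the original file): for the loop nest

     A  (depth 1)
       B  (depth 2)
         C  (depth 3)
       D  (depth 2)

   find_common_loop (C, D) first replaces C by its depth-2 superloop B and
   then walks both loops outwards in lockstep until they meet, returning A.  */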
1264
1265 /* Removes LOOP from structures and frees its data. */
1266
1267 void
1268 delete_loop (struct loop *loop)
1269 {
1270 /* Remove the loop from structure. */
1271 flow_loop_tree_node_remove (loop);
1272
1273 /* Remove loop from loops array. */
1274 (*current_loops->larray)[loop->num] = NULL;
1275
1276 /* Free loop data. */
1277 flow_loop_free (loop);
1278 }
1279
1280 /* Cancels the LOOP; it must be innermost one. */
1281
1282 static void
1283 cancel_loop (struct loop *loop)
1284 {
1285 basic_block *bbs;
1286 unsigned i;
1287 struct loop *outer = loop_outer (loop);
1288
1289 gcc_assert (!loop->inner);
1290
1291 /* Move blocks up one level (they should be removed as soon as possible). */
1292 bbs = get_loop_body (loop);
1293 for (i = 0; i < loop->num_nodes; i++)
1294 bbs[i]->loop_father = outer;
1295
1296 free (bbs);
1297 delete_loop (loop);
1298 }
1299
1300 /* Cancels LOOP and all its subloops. */
1301 void
1302 cancel_loop_tree (struct loop *loop)
1303 {
1304 while (loop->inner)
1305 cancel_loop_tree (loop->inner);
1306 cancel_loop (loop);
1307 }
1308
1309 /* Checks that information about loops is correct
1310 -- sizes of loops are all right
1311 -- results of get_loop_body really belong to the loop
1312 -- loop headers have just a single entry edge and a single latch edge
1313 -- loop latches have only a single successor that is the header of their loop
1314 -- irreducible loops are correctly marked
1315 -- the cached loop depth and loop father of each bb are correct
1316 */
1317 DEBUG_FUNCTION void
1318 verify_loop_structure (void)
1319 {
1320 unsigned *sizes, i, j;
1321 sbitmap irreds;
1322 basic_block bb, *bbs;
1323 struct loop *loop;
1324 int err = 0;
1325 edge e;
1326 unsigned num = number_of_loops (cfun);
1327 struct loop_exit *exit, *mexit;
1328 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1329 sbitmap visited;
1330
1331 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1332 {
1333 error ("loop verification on loop tree that needs fixup");
1334 err = 1;
1335 }
1336
1337 /* We need up-to-date dominators, compute or verify them. */
1338 if (!dom_available)
1339 calculate_dominance_info (CDI_DOMINATORS);
1340 else
1341 verify_dominators (CDI_DOMINATORS);
1342
1343 /* Check the loop tree root. */
1344 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1345 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1346 || (current_loops->tree_root->num_nodes
1347 != (unsigned) n_basic_blocks_for_fn (cfun)))
1348 {
1349 error ("corrupt loop tree root");
1350 err = 1;
1351 }
1352
1353 /* Check the headers. */
1354 FOR_EACH_BB_FN (bb, cfun)
1355 if (bb_loop_header_p (bb))
1356 {
1357 if (bb->loop_father->header == NULL)
1358 {
1359 error ("loop with header %d marked for removal", bb->index);
1360 err = 1;
1361 }
1362 else if (bb->loop_father->header != bb)
1363 {
1364 error ("loop with header %d not in loop tree", bb->index);
1365 err = 1;
1366 }
1367 }
1368 else if (bb->loop_father->header == bb)
1369 {
1370 error ("non-loop with header %d not marked for removal", bb->index);
1371 err = 1;
1372 }
1373
1374 /* Check the recorded loop father and sizes of loops. */
1375 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1376 bitmap_clear (visited);
1377 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1378 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1379 {
1380 unsigned n;
1381
1382 if (loop->header == NULL)
1383 {
1384 error ("removed loop %d in loop tree", loop->num);
1385 err = 1;
1386 continue;
1387 }
1388
1389 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1390 if (loop->num_nodes != n)
1391 {
1392 error ("size of loop %d should be %d, not %d",
1393 loop->num, n, loop->num_nodes);
1394 err = 1;
1395 }
1396
1397 for (j = 0; j < n; j++)
1398 {
1399 bb = bbs[j];
1400
1401 if (!flow_bb_inside_loop_p (loop, bb))
1402 {
1403 error ("bb %d does not belong to loop %d",
1404 bb->index, loop->num);
1405 err = 1;
1406 }
1407
1408 /* Ignore this block if it is in an inner loop. */
1409 if (bitmap_bit_p (visited, bb->index))
1410 continue;
1411 bitmap_set_bit (visited, bb->index);
1412
1413 if (bb->loop_father != loop)
1414 {
1415 error ("bb %d has father loop %d, should be loop %d",
1416 bb->index, bb->loop_father->num, loop->num);
1417 err = 1;
1418 }
1419 }
1420 }
1421 free (bbs);
1422 sbitmap_free (visited);
1423
1424 /* Check headers and latches. */
1425 FOR_EACH_LOOP (loop, 0)
1426 {
1427 i = loop->num;
1428 if (loop->header == NULL)
1429 continue;
1430 if (!bb_loop_header_p (loop->header))
1431 {
1432 error ("loop %d%'s header is not a loop header", i);
1433 err = 1;
1434 }
1435 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1436 && EDGE_COUNT (loop->header->preds) != 2)
1437 {
1438 error ("loop %d%'s header does not have exactly 2 entries", i);
1439 err = 1;
1440 }
1441 if (loop->latch)
1442 {
1443 if (!find_edge (loop->latch, loop->header))
1444 {
1445 error ("loop %d%'s latch does not have an edge to its header", i);
1446 err = 1;
1447 }
1448 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1449 {
1450 error ("loop %d%'s latch is not dominated by its header", i);
1451 err = 1;
1452 }
1453 }
1454 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1455 {
1456 if (!single_succ_p (loop->latch))
1457 {
1458 error ("loop %d%'s latch does not have exactly 1 successor", i);
1459 err = 1;
1460 }
1461 if (single_succ (loop->latch) != loop->header)
1462 {
1463 error ("loop %d%'s latch does not have header as successor", i);
1464 err = 1;
1465 }
1466 if (loop->latch->loop_father != loop)
1467 {
1468 error ("loop %d%'s latch does not belong directly to it", i);
1469 err = 1;
1470 }
1471 }
1472 if (loop->header->loop_father != loop)
1473 {
1474 error ("loop %d%'s header does not belong directly to it", i);
1475 err = 1;
1476 }
1477 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1478 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1479 {
1480 error ("loop %d%'s latch is marked as part of irreducible region", i);
1481 err = 1;
1482 }
1483 }
1484
1485 /* Check irreducible loops. */
1486 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1487 {
1488 /* Record old info. */
1489 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1490 FOR_EACH_BB_FN (bb, cfun)
1491 {
1492 edge_iterator ei;
1493 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1494 bitmap_set_bit (irreds, bb->index);
1495 else
1496 bitmap_clear_bit (irreds, bb->index);
1497 FOR_EACH_EDGE (e, ei, bb->succs)
1498 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1499 e->flags |= EDGE_ALL_FLAGS + 1;
1500 }
1501
1502 /* Recount it. */
1503 mark_irreducible_loops ();
1504
1505 /* Compare. */
1506 FOR_EACH_BB_FN (bb, cfun)
1507 {
1508 edge_iterator ei;
1509
1510 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1511 && !bitmap_bit_p (irreds, bb->index))
1512 {
1513 error ("basic block %d should be marked irreducible", bb->index);
1514 err = 1;
1515 }
1516 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1517 && bitmap_bit_p (irreds, bb->index))
1518 {
1519 error ("basic block %d should not be marked irreducible", bb->index);
1520 err = 1;
1521 }
1522 FOR_EACH_EDGE (e, ei, bb->succs)
1523 {
1524 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1525 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1526 {
1527 error ("edge from %d to %d should be marked irreducible",
1528 e->src->index, e->dest->index);
1529 err = 1;
1530 }
1531 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1532 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1533 {
1534 error ("edge from %d to %d should not be marked irreducible",
1535 e->src->index, e->dest->index);
1536 err = 1;
1537 }
1538 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1539 }
1540 }
1541 free (irreds);
1542 }
1543
1544 /* Check the recorded loop exits. */
1545 FOR_EACH_LOOP (loop, 0)
1546 {
1547 if (!loop->exits || loop->exits->e != NULL)
1548 {
1549 error ("corrupted head of the exits list of loop %d",
1550 loop->num);
1551 err = 1;
1552 }
1553 else
1554 {
1555 /* Check that the list forms a cycle, and all elements except
1556 for the head are nonnull. */
1557 for (mexit = loop->exits, exit = mexit->next, i = 0;
1558 exit->e && exit != mexit;
1559 exit = exit->next)
1560 {
1561 if (i++ & 1)
1562 mexit = mexit->next;
1563 }
1564
1565 if (exit != loop->exits)
1566 {
1567 error ("corrupted exits list of loop %d", loop->num);
1568 err = 1;
1569 }
1570 }
1571
1572 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1573 {
1574 if (loop->exits->next != loop->exits)
1575 {
1576 error ("nonempty exits list of loop %d, but exits are not recorded",
1577 loop->num);
1578 err = 1;
1579 }
1580 }
1581 }
1582
1583 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1584 {
1585 unsigned n_exits = 0, eloops;
1586
1587 sizes = XCNEWVEC (unsigned, num);
1588 memset (sizes, 0, sizeof (unsigned) * num);
1589 FOR_EACH_BB_FN (bb, cfun)
1590 {
1591 edge_iterator ei;
1592 if (bb->loop_father == current_loops->tree_root)
1593 continue;
1594 FOR_EACH_EDGE (e, ei, bb->succs)
1595 {
1596 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1597 continue;
1598
1599 n_exits++;
1600 exit = get_exit_descriptions (e);
1601 if (!exit)
1602 {
1603 error ("exit %d->%d not recorded",
1604 e->src->index, e->dest->index);
1605 err = 1;
1606 }
1607 eloops = 0;
1608 for (; exit; exit = exit->next_e)
1609 eloops++;
1610
1611 for (loop = bb->loop_father;
1612 loop != e->dest->loop_father
1613 /* When a loop exit is also an entry edge which
1614 can happen when avoiding CFG manipulations
1615 then the last loop exited is the outer loop
1616 of the loop entered. */
1617 && loop != loop_outer (e->dest->loop_father);
1618 loop = loop_outer (loop))
1619 {
1620 eloops--;
1621 sizes[loop->num]++;
1622 }
1623
1624 if (eloops != 0)
1625 {
1626 error ("wrong list of exited loops for edge %d->%d",
1627 e->src->index, e->dest->index);
1628 err = 1;
1629 }
1630 }
1631 }
1632
1633 if (n_exits != current_loops->exits->elements ())
1634 {
1635 error ("too many loop exits recorded");
1636 err = 1;
1637 }
1638
1639 FOR_EACH_LOOP (loop, 0)
1640 {
1641 eloops = 0;
1642 for (exit = loop->exits->next; exit->e; exit = exit->next)
1643 eloops++;
1644 if (eloops != sizes[loop->num])
1645 {
1646 error ("%d exits recorded for loop %d (having %d exits)",
1647 eloops, loop->num, sizes[loop->num]);
1648 err = 1;
1649 }
1650 }
1651
1652 free (sizes);
1653 }
1654
1655 gcc_assert (!err);
1656
1657 if (!dom_available)
1658 free_dominance_info (CDI_DOMINATORS);
1659 }
1660
1661 /* Returns latch edge of LOOP. */
1662 edge
1663 loop_latch_edge (const struct loop *loop)
1664 {
1665 return find_edge (loop->latch, loop->header);
1666 }
1667
1668 /* Returns preheader edge of LOOP. */
1669 edge
1670 loop_preheader_edge (const struct loop *loop)
1671 {
1672 edge e;
1673 edge_iterator ei;
1674
1675 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1676
1677 FOR_EACH_EDGE (e, ei, loop->header->preds)
1678 if (e->src != loop->latch)
1679 break;
1680
1681 return e;
1682 }
1683
1684 /* Returns true if E is an exit of LOOP. */
1685
1686 bool
1687 loop_exit_edge_p (const struct loop *loop, const_edge e)
1688 {
1689 return (flow_bb_inside_loop_p (loop, e->src)
1690 && !flow_bb_inside_loop_p (loop, e->dest));
1691 }
1692
1693 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1694 or more than one exit. If loop exits are not recorded, NULL
1695 is always returned. */
1696
1697 edge
1698 single_exit (const struct loop *loop)
1699 {
1700 struct loop_exit *exit = loop->exits->next;
1701
1702 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1703 return NULL;
1704
1705 if (exit->e && exit->next == loop->exits)
1706 return exit->e;
1707 else
1708 return NULL;
1709 }
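
/* Illustrative sketch (not part of the original file): single_exit only
   answers while exit lists are maintained, so a typical caller does

     record_loop_exits ();
     edge e = single_exit (loop);
     if (e && dump_file)
       fprintf (dump_file, "loop %d has the single exit %d -> %d\n",
                loop->num, e->src->index, e->dest->index);

   and gets NULL both for multiple-exit (or exit-less) loops and when
   LOOPS_HAVE_RECORDED_EXITS is not set.  */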
1710
1711 /* Returns true when BB has an incoming edge exiting LOOP. */
1712
1713 bool
1714 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1715 {
1716 edge e;
1717 edge_iterator ei;
1718
1719 FOR_EACH_EDGE (e, ei, bb->preds)
1720 if (loop_exit_edge_p (loop, e))
1721 return true;
1722
1723 return false;
1724 }
1725
1726 /* Returns true when BB has an outgoing edge exiting LOOP. */
1727
1728 bool
1729 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1730 {
1731 edge e;
1732 edge_iterator ei;
1733
1734 FOR_EACH_EDGE (e, ei, bb->succs)
1735 if (loop_exit_edge_p (loop, e))
1736 return true;
1737
1738 return false;
1739 }
1740
1741 /* Return location corresponding to the loop control condition if possible. */
1742
1743 location_t
1744 get_loop_location (struct loop *loop)
1745 {
1746 rtx_insn *insn = NULL;
1747 struct niter_desc *desc = NULL;
1748 edge exit;
1749
1750 /* For a for or while loop, we would like to return the location
1751 of the for or while statement, if possible. To do this, look
1752 for the branch guarding the loop back-edge. */
1753
1754 /* If this is a simple loop with an in_edge, then the loop control
1755 branch is typically at the end of its source. */
1756 desc = get_simple_loop_desc (loop);
1757 if (desc->in_edge)
1758 {
1759 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1760 {
1761 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1762 return INSN_LOCATION (insn);
1763 }
1764 }
1765 /* If loop has a single exit, then the loop control branch
1766 must be at the end of its source. */
1767 if ((exit = single_exit (loop)))
1768 {
1769 FOR_BB_INSNS_REVERSE (exit->src, insn)
1770 {
1771 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1772 return INSN_LOCATION (insn);
1773 }
1774 }
1775 /* Next check the latch, to see if it is non-empty. */
1776 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1777 {
1778 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1779 return INSN_LOCATION (insn);
1780 }
1781 /* Finally, if none of the above identifies the loop control branch,
1782 return the first location in the loop header. */
1783 FOR_BB_INSNS (loop->header, insn)
1784 {
1785 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1786 return INSN_LOCATION (insn);
1787 }
1788 /* If all else fails, simply return the current function location. */
1789 return DECL_SOURCE_LOCATION (current_function_decl);
1790 }
1791
1792 /* Records that every statement in LOOP is executed I_BOUND times.
1793 REALISTIC is true if I_BOUND is expected to be close to the real number
1794 of iterations. UPPER is true if we are sure the loop iterates at most
1795 I_BOUND times. */
1796
1797 void
1798 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1799 bool realistic, bool upper)
1800 {
1801 /* Update the bounds only when there is no previous estimation, or when the
1802 current estimation is smaller. */
1803 if (upper
1804 && (!loop->any_upper_bound
1805 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1806 {
1807 loop->any_upper_bound = true;
1808 loop->nb_iterations_upper_bound = i_bound;
1809 }
1810 if (realistic
1811 && (!loop->any_estimate
1812 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1813 {
1814 loop->any_estimate = true;
1815 loop->nb_iterations_estimate = i_bound;
1816 }
1817
1818 /* If an upper bound is smaller than the realistic estimate of the
1819 number of iterations, use the upper bound instead. */
1820 if (loop->any_upper_bound
1821 && loop->any_estimate
1822 && wi::ltu_p (loop->nb_iterations_upper_bound,
1823 loop->nb_iterations_estimate))
1824 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1825 }
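
/* Worked example for record_niter_bound (illustrative only), starting from a
   loop with no bounds recorded:

     record_niter_bound (loop, 100, false, true);
       -> the upper bound becomes 100
     record_niter_bound (loop, 250, true, false);
       -> the estimate would become 250 but is immediately capped to the
          recorded upper bound, i.e. 100
     record_niter_bound (loop, 40, false, true);
       -> the upper bound drops to 40 and the estimate is capped to 40 too

   Bounds larger than the ones already recorded are ignored.  */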
1826
1827 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1828 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1829 on the number of iterations of LOOP could not be derived, returns -1. */
1830
1831 HOST_WIDE_INT
1832 get_estimated_loop_iterations_int (struct loop *loop)
1833 {
1834 widest_int nit;
1835 HOST_WIDE_INT hwi_nit;
1836
1837 if (!get_estimated_loop_iterations (loop, &nit))
1838 return -1;
1839
1840 if (!wi::fits_shwi_p (nit))
1841 return -1;
1842 hwi_nit = nit.to_shwi ();
1843
1844 return hwi_nit < 0 ? -1 : hwi_nit;
1845 }
1846
1847 /* Returns an upper bound on the number of executions of statements
1848 in the LOOP. For statements before the loop exit, this exceeds
1849 the number of executions of the latch by one. */
1850
1851 HOST_WIDE_INT
1852 max_stmt_executions_int (struct loop *loop)
1853 {
1854 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1855 HOST_WIDE_INT snit;
1856
1857 if (nit == -1)
1858 return -1;
1859
1860 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1861
1862 /* If the computation overflows, return -1. */
1863 return snit < 0 ? -1 : snit;
1864 }
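
/* Illustrative example (not part of the original file): if the latch of LOOP
   runs at most 7 times, get_max_loop_iterations_int returns 7 and
   max_stmt_executions_int returns 8, because the statements before the exit
   test execute once more than the latch.  If the increment overflows a
   HOST_WIDE_INT, -1 ("unknown") is returned instead.  */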
1865
1866 /* Sets NIT to the estimated number of executions of the latch of the
1867 LOOP. If we have no reliable estimate, the function returns false, otherwise
1868 returns true. */
1869
1870 bool
1871 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1872 {
1873 /* Even if the bound is not recorded, possibly we can derive one from
1874 profile. */
1875 if (!loop->any_estimate)
1876 {
1877 if (loop->header->count)
1878 {
1879 *nit = gcov_type_to_wide_int
1880 (expected_loop_iterations_unbounded (loop) + 1);
1881 return true;
1882 }
1883 return false;
1884 }
1885
1886 *nit = loop->nb_iterations_estimate;
1887 return true;
1888 }
1889
1890 /* Sets NIT to an upper bound for the maximum number of executions of the
1891 latch of the LOOP. If we have no reliable estimate, the function returns
1892 false, otherwise returns true. */
1893
1894 bool
1895 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1896 {
1897 if (!loop->any_upper_bound)
1898 return false;
1899
1900 *nit = loop->nb_iterations_upper_bound;
1901 return true;
1902 }
1903
1904 /* Similar to get_max_loop_iterations, but returns the estimate only
1905 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1906 on the number of iterations of LOOP could not be derived, returns -1. */
1907
1908 HOST_WIDE_INT
1909 get_max_loop_iterations_int (struct loop *loop)
1910 {
1911 widest_int nit;
1912 HOST_WIDE_INT hwi_nit;
1913
1914 if (!get_max_loop_iterations (loop, &nit))
1915 return -1;
1916
1917 if (!wi::fits_shwi_p (nit))
1918 return -1;
1919 hwi_nit = nit.to_shwi ();
1920
1921 return hwi_nit < 0 ? -1 : hwi_nit;
1922 }
1923
1924 /* Returns the loop depth of the loop BB belongs to. */
1925
1926 int
1927 bb_loop_depth (const_basic_block bb)
1928 {
1929 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1930 }
1931
1932 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1933
1934 void
1935 mark_loop_for_removal (loop_p loop)
1936 {
1937 if (loop->header == NULL)
1938 return;
1939 loop->former_header = loop->header;
1940 loop->header = NULL;
1941 loop->latch = NULL;
1942 loops_state_set (LOOPS_NEED_FIXUP);
1943 }