gcc/cfgloop.c
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "symtab.h"
26 #include "hard-reg-set.h"
27 #include "input.h"
28 #include "function.h"
29 #include "predict.h"
30 #include "dominance.h"
31 #include "cfg.h"
32 #include "cfganal.h"
33 #include "basic-block.h"
34 #include "cfgloop.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "tree.h"
38 #include "fold-const.h"
39 #include "tree-ssa-alias.h"
40 #include "internal-fn.h"
41 #include "gimple-expr.h"
42 #include "is-a.h"
43 #include "gimple.h"
44 #include "gimple-iterator.h"
45 #include "gimple-ssa.h"
46 #include "dumpfile.h"
47
48 static void flow_loops_cfg_dump (FILE *);
49 \f
50 /* Dump loop related CFG information. */
51
52 static void
53 flow_loops_cfg_dump (FILE *file)
54 {
55 basic_block bb;
56
57 if (!file)
58 return;
59
60 FOR_EACH_BB_FN (bb, cfun)
61 {
62 edge succ;
63 edge_iterator ei;
64
65 fprintf (file, ";; %d succs { ", bb->index);
66 FOR_EACH_EDGE (succ, ei, bb->succs)
67 fprintf (file, "%d ", succ->dest->index);
68 fprintf (file, "}\n");
69 }
70 }
71
72 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
73
74 bool
75 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
76 {
77 unsigned odepth = loop_depth (outer);
78
79 return (loop_depth (loop) > odepth
80 && (*loop->superloops)[odepth] == outer);
81 }
82
83 /* Returns the superloop of LOOP at depth DEPTH (indexed from zero);
84 LOOP itself is returned when DEPTH equals its own depth. */
85
86 struct loop *
87 superloop_at_depth (struct loop *loop, unsigned depth)
88 {
89 unsigned ldepth = loop_depth (loop);
90
91 gcc_assert (depth <= ldepth);
92
93 if (depth == ldepth)
94 return loop;
95
96 return (*loop->superloops)[depth];
97 }
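
/* A minimal usage sketch (the helper name is hypothetical, not part of
   cfgloop.c): print the chain of superloops of LOOP from the outermost
   real loop down to LOOP itself.  Each superloop_at_depth call is a
   constant-time lookup in the superloops vector filled by
   establish_preds below.  */

static void
example_dump_superloop_chain (FILE *file, struct loop *loop)
{
  unsigned d;

  for (d = 1; d <= loop_depth (loop); d++)
    fprintf (file, " %d", superloop_at_depth (loop, d)->num);
  fprintf (file, "\n");
}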
98
99 /* Returns the list of the latch edges of LOOP. */
100
101 static vec<edge>
102 get_loop_latch_edges (const struct loop *loop)
103 {
104 edge_iterator ei;
105 edge e;
106 vec<edge> ret = vNULL;
107
108 FOR_EACH_EDGE (e, ei, loop->header->preds)
109 {
110 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
111 ret.safe_push (e);
112 }
113
114 return ret;
115 }
116
117 /* Dump the loop information specified by LOOP to the stream FILE
118 using auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
119
120 void
121 flow_loop_dump (const struct loop *loop, FILE *file,
122 void (*loop_dump_aux) (const struct loop *, FILE *, int),
123 int verbose)
124 {
125 basic_block *bbs;
126 unsigned i;
127 vec<edge> latches;
128 edge e;
129
130 if (! loop || ! loop->header)
131 return;
132
133 fprintf (file, ";;\n;; Loop %d\n", loop->num);
134
135 fprintf (file, ";; header %d, ", loop->header->index);
136 if (loop->latch)
137 fprintf (file, "latch %d\n", loop->latch->index);
138 else
139 {
140 fprintf (file, "multiple latches:");
141 latches = get_loop_latch_edges (loop);
142 FOR_EACH_VEC_ELT (latches, i, e)
143 fprintf (file, " %d", e->src->index);
144 latches.release ();
145 fprintf (file, "\n");
146 }
147
148 fprintf (file, ";; depth %d, outer %ld\n",
149 loop_depth (loop), (long) (loop_outer (loop)
150 ? loop_outer (loop)->num : -1));
151
152 fprintf (file, ";; nodes:");
153 bbs = get_loop_body (loop);
154 for (i = 0; i < loop->num_nodes; i++)
155 fprintf (file, " %d", bbs[i]->index);
156 free (bbs);
157 fprintf (file, "\n");
158
159 if (loop_dump_aux)
160 loop_dump_aux (loop, file, verbose);
161 }
162
163 /* Dump the loop information about loops to the stream FILE,
164 using auxiliary dump callback function LOOP_DUMP_AUX if non-null. */
165
166 void
167 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
168 {
169 struct loop *loop;
170
171 if (!current_loops || ! file)
172 return;
173
174 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
175
176 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
177 {
178 flow_loop_dump (loop, file, loop_dump_aux, verbose);
179 }
180
181 if (verbose)
182 flow_loops_cfg_dump (file);
183 }
184
185 /* Free data allocated for LOOP. */
186
187 void
188 flow_loop_free (struct loop *loop)
189 {
190 struct loop_exit *exit, *next;
191
192 vec_free (loop->superloops);
193
194 /* Break the list of the loop exit records. They will be freed when the
195 corresponding edge is rescanned or removed, and this avoids
196 accessing the (already released) head of the list stored in the
197 loop structure. */
198 for (exit = loop->exits->next; exit != loop->exits; exit = next)
199 {
200 next = exit->next;
201 exit->next = exit;
202 exit->prev = exit;
203 }
204
205 ggc_free (loop->exits);
206 ggc_free (loop);
207 }
208
209 /* Free all the memory allocated for LOOPS. */
210
211 void
212 flow_loops_free (struct loops *loops)
213 {
214 if (loops->larray)
215 {
216 unsigned i;
217 loop_p loop;
218
219 /* Free the loop descriptors. */
220 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
221 {
222 if (!loop)
223 continue;
224
225 flow_loop_free (loop);
226 }
227
228 vec_free (loops->larray);
229 }
230 }
231
232 /* Find the nodes contained within the LOOP with header HEADER.
233 Return the number of nodes within the loop. */
234
235 int
236 flow_loop_nodes_find (basic_block header, struct loop *loop)
237 {
238 vec<basic_block> stack = vNULL;
239 int num_nodes = 1;
240 edge latch;
241 edge_iterator latch_ei;
242
243 header->loop_father = loop;
244
245 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
246 {
247 if (latch->src->loop_father == loop
248 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
249 continue;
250
251 num_nodes++;
252 stack.safe_push (latch->src);
253 latch->src->loop_father = loop;
254
255 while (!stack.is_empty ())
256 {
257 basic_block node;
258 edge e;
259 edge_iterator ei;
260
261 node = stack.pop ();
262
263 FOR_EACH_EDGE (e, ei, node->preds)
264 {
265 basic_block ancestor = e->src;
266
267 if (ancestor->loop_father != loop)
268 {
269 ancestor->loop_father = loop;
270 num_nodes++;
271 stack.safe_push (ancestor);
272 }
273 }
274 }
275 }
276 stack.release ();
277
278 return num_nodes;
279 }
280
281 /* Records the vector of superloops of the loop LOOP, whose immediate
282 superloop is FATHER. */
283
284 static void
285 establish_preds (struct loop *loop, struct loop *father)
286 {
287 loop_p ploop;
288 unsigned depth = loop_depth (father) + 1;
289 unsigned i;
290
291 loop->superloops = 0;
292 vec_alloc (loop->superloops, depth);
293 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
294 loop->superloops->quick_push (ploop);
295 loop->superloops->quick_push (father);
296
297 for (ploop = loop->inner; ploop; ploop = ploop->next)
298 establish_preds (ploop, loop);
299 }
300
301 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
302 added loop. If LOOP has some children, take care that their
303 pred fields are initialized correctly. */
304
305 void
306 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
307 {
308 loop->next = father->inner;
309 father->inner = loop;
310
311 establish_preds (loop, father);
312 }
313
314 /* Remove LOOP from the loop hierarchy tree. */
315
316 void
317 flow_loop_tree_node_remove (struct loop *loop)
318 {
319 struct loop *prev, *father;
320
321 father = loop_outer (loop);
322
323 /* Remove loop from the list of sons. */
324 if (father->inner == loop)
325 father->inner = loop->next;
326 else
327 {
328 for (prev = father->inner; prev->next != loop; prev = prev->next)
329 continue;
330 prev->next = loop->next;
331 }
332
333 loop->superloops = NULL;
334 }
335
336 /* Allocates and returns new loop structure. */
337
338 struct loop *
339 alloc_loop (void)
340 {
341 struct loop *loop = ggc_cleared_alloc<struct loop> ();
342
343 loop->exits = ggc_cleared_alloc<loop_exit> ();
344 loop->exits->next = loop->exits->prev = loop->exits;
345 loop->can_be_parallel = false;
346 loop->nb_iterations_upper_bound = 0;
347 loop->nb_iterations_estimate = 0;
348 return loop;
349 }
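
/* The exits field set up above is a cyclic list with a sentinel node whose
   e field stays NULL; an empty list has next and prev pointing back at the
   sentinel.  A sketch of the traversal idiom used throughout this file
   (the helper name is hypothetical):  */

static unsigned
example_count_recorded_exits (const struct loop *loop)
{
  unsigned n = 0;
  struct loop_exit *exit;

  /* The walk stops at the sentinel, whose e field is NULL.  */
  for (exit = loop->exits->next; exit->e; exit = exit->next)
    n++;
  return n;
}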
350
351 /* Initializes the loops structure LOOPS, reserving space for NUM_LOOPS loops
352 (including the root of the loop tree). */
353
354 void
355 init_loops_structure (struct function *fn,
356 struct loops *loops, unsigned num_loops)
357 {
358 struct loop *root;
359
360 memset (loops, 0, sizeof *loops);
361 vec_alloc (loops->larray, num_loops);
362
363 /* Dummy loop containing whole function. */
364 root = alloc_loop ();
365 root->num_nodes = n_basic_blocks_for_fn (fn);
366 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
367 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
368 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
369 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
370
371 loops->larray->quick_push (root);
372 loops->tree_root = root;
373 }
374
375 /* Returns whether HEADER is a loop header. */
376
377 bool
378 bb_loop_header_p (basic_block header)
379 {
380 edge_iterator ei;
381 edge e;
382
383 /* If we have an abnormal predecessor, do not consider the
384 loop (not worth the problems). */
385 if (bb_has_abnormal_pred (header))
386 return false;
387
388 /* Look for back edges where a predecessor is dominated
389 by this block. A natural loop has a single entry
390 node (header) that dominates all the nodes in the
391 loop. It also has single back edge to the header
392 from a latch node. */
393 FOR_EACH_EDGE (e, ei, header->preds)
394 {
395 basic_block latch = e->src;
396 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
397 && dominated_by_p (CDI_DOMINATORS, latch, header))
398 return true;
399 }
400
401 return false;
402 }
403
404 /* Find all the natural loops in the function, record them in the LOOPS structure
405 and recalculate the loop_father information in the basic block structures.
406 If LOOPS is non-NULL then the loop structures for already recorded loops
407 will be re-used and their number will not change. We assume that no
408 stale loops exist in LOOPS.
409 When LOOPS is NULL it is allocated and re-built from scratch.
410 Return the built LOOPS structure. */
411
412 struct loops *
413 flow_loops_find (struct loops *loops)
414 {
415 bool from_scratch = (loops == NULL);
416 int *rc_order;
417 int b;
418 unsigned i;
419
420 /* Ensure that the dominators are computed. */
421 calculate_dominance_info (CDI_DOMINATORS);
422
423 if (!loops)
424 {
425 loops = ggc_cleared_alloc<struct loops> ();
426 init_loops_structure (cfun, loops, 1);
427 }
428
429 /* Ensure that loop exits were released. */
430 gcc_assert (loops->exits == NULL);
431
432 /* Taking care of this degenerate case makes the rest of
433 this code simpler. */
434 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
435 return loops;
436
437 /* The root loop node contains all basic-blocks. */
438 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
439
440 /* Compute depth first search order of the CFG so that outer
441 natural loops will be found before inner natural loops. */
442 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
443 pre_and_rev_post_order_compute (NULL, rc_order, false);
444
445 /* Gather all loop headers in reverse completion order and allocate
446 loop structures for loops that are not already present. */
447 auto_vec<loop_p> larray (loops->larray->length ());
448 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
449 {
450 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
451 if (bb_loop_header_p (header))
452 {
453 struct loop *loop;
454
455 /* The current active loop tree has valid loop-fathers for
456 header blocks. */
457 if (!from_scratch
458 && header->loop_father->header == header)
459 {
460 loop = header->loop_father;
461 /* If we found an existing loop remove it from the
462 loop tree. It is going to be inserted again
463 below. */
464 flow_loop_tree_node_remove (loop);
465 }
466 else
467 {
468 /* Otherwise allocate a new loop structure for the loop. */
469 loop = alloc_loop ();
470 /* ??? We could re-use unused loop slots here. */
471 loop->num = loops->larray->length ();
472 vec_safe_push (loops->larray, loop);
473 loop->header = header;
474
475 if (!from_scratch
476 && dump_file && (dump_flags & TDF_DETAILS))
477 fprintf (dump_file, "flow_loops_find: discovered new "
478 "loop %d with header %d\n",
479 loop->num, header->index);
480 }
481 /* Reset latch, we recompute it below. */
482 loop->latch = NULL;
483 larray.safe_push (loop);
484 }
485
486 /* Make blocks part of the loop root node at start. */
487 header->loop_father = loops->tree_root;
488 }
489
490 free (rc_order);
491
492 /* Now iterate over the loops found, insert them into the loop tree
493 and assign basic-block ownership. */
494 for (i = 0; i < larray.length (); ++i)
495 {
496 struct loop *loop = larray[i];
497 basic_block header = loop->header;
498 edge_iterator ei;
499 edge e;
500
501 flow_loop_tree_node_add (header->loop_father, loop);
502 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
503
504 /* Look for the latch for this header block, if it has just a
505 single one. */
506 FOR_EACH_EDGE (e, ei, header->preds)
507 {
508 basic_block latch = e->src;
509
510 if (flow_bb_inside_loop_p (loop, latch))
511 {
512 if (loop->latch != NULL)
513 {
514 /* More than one latch edge. */
515 loop->latch = NULL;
516 break;
517 }
518 loop->latch = latch;
519 }
520 }
521 }
522
523 return loops;
524 }
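
/* A minimal sketch of building a loop tree from scratch and installing it
   for the current function.  The helper name is hypothetical and
   set_loops_for_fn comes from function.h; real passes normally go through
   loop_optimizer_init instead.  */

static void
example_build_loop_tree (void)
{
  /* Passing NULL allocates a fresh loops structure; flow_loops_find
     computes dominators itself if they are missing.  */
  struct loops *loops = flow_loops_find (NULL);
  set_loops_for_fn (cfun, loops);
}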
525
526 /* Ratio of frequencies of edges so that one or more latch edges is
527 considered to belong to an inner loop with the same header. */
528 #define HEAVY_EDGE_RATIO 8
529
530 /* Minimum number of samples for which we apply the
531 find_subloop_latch_edge_by_profile heuristics. */
532 #define HEAVY_EDGE_MIN_SAMPLES 10
533
534 /* If the profile info is available, finds an edge in LATCHES that is much more
535 frequent than the remaining edges. Returns such an edge, or NULL if we do
536 not find one.
537
538 We do not use guessed profile here, only the measured one. The guessed
539 profile is usually too flat and unreliable for this (and it is mostly based
540 on the loop structure of the program, so it does not make much sense to
541 derive the loop structure from it). */
542
543 static edge
544 find_subloop_latch_edge_by_profile (vec<edge> latches)
545 {
546 unsigned i;
547 edge e, me = NULL;
548 gcov_type mcount = 0, tcount = 0;
549
550 FOR_EACH_VEC_ELT (latches, i, e)
551 {
552 if (e->count > mcount)
553 {
554 me = e;
555 mcount = e->count;
556 }
557 tcount += e->count;
558 }
559
560 if (tcount < HEAVY_EDGE_MIN_SAMPLES
561 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
562 return NULL;
563
564 if (dump_file)
565 fprintf (dump_file,
566 "Found latch edge %d -> %d using profile information.\n",
567 me->src->index, me->dest->index);
568 return me;
569 }
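
/* For example, with HEAVY_EDGE_RATIO == 8 the test above accepts the
   heaviest edge only when it carries at least 7/8 of the total latch
   count: counts {70, 5, 5} give tcount = 80, mcount = 70 and
   (80 - 70) * 8 = 80, which is not greater than 80, so the edge is
   returned; counts {60, 10, 10} give (80 - 60) * 8 = 160 > 80, so NULL
   is returned.  */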
570
571 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
572 on the structure of induction variables. Returns this edge, or NULL if we
573 do not find any.
574
575 We are quite conservative, and look just for an obvious simple innermost
576 loop (which is the case where we would lose the most performance by not
577 disambiguating the loop). More precisely, we look for the following
578 situation: The source of the chosen latch edge dominates sources of all
579 the other latch edges. Additionally, the header does not contain a phi node
580 such that the argument from the chosen edge is equal to the argument from
581 another edge. */
582
583 static edge
584 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
585 {
586 edge e, latch = latches[0];
587 unsigned i;
588 gphi *phi;
589 gphi_iterator psi;
590 tree lop;
591 basic_block bb;
592
593 /* Find the candidate for the latch edge. */
594 for (i = 1; latches.iterate (i, &e); i++)
595 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
596 latch = e;
597
598 /* Verify that it dominates all the latch edges. */
599 FOR_EACH_VEC_ELT (latches, i, e)
600 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
601 return NULL;
602
603 /* Check for a phi node that would deny that this is a latch edge of
604 a subloop. */
605 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
606 {
607 phi = psi.phi ();
608 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
609
610 /* Ignore the values that are not changed inside the subloop. */
611 if (TREE_CODE (lop) != SSA_NAME
612 || SSA_NAME_DEF_STMT (lop) == phi)
613 continue;
614 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
615 if (!bb || !flow_bb_inside_loop_p (loop, bb))
616 continue;
617
618 FOR_EACH_VEC_ELT (latches, i, e)
619 if (e != latch
620 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
621 return NULL;
622 }
623
624 if (dump_file)
625 fprintf (dump_file,
626 "Found latch edge %d -> %d using iv structure.\n",
627 latch->src->index, latch->dest->index);
628 return latch;
629 }
630
631 /* If we can determine that one of the several latch edges of LOOP behaves
632 as a latch edge of a separate subloop, returns this edge. Otherwise
633 returns NULL. */
634
635 static edge
636 find_subloop_latch_edge (struct loop *loop)
637 {
638 vec<edge> latches = get_loop_latch_edges (loop);
639 edge latch = NULL;
640
641 if (latches.length () > 1)
642 {
643 latch = find_subloop_latch_edge_by_profile (latches);
644
645 if (!latch
646 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
647 should use cfghook for this, but it is hard to imagine it would
648 be useful elsewhere. */
649 && current_ir_type () == IR_GIMPLE)
650 latch = find_subloop_latch_edge_by_ivs (loop, latches);
651 }
652
653 latches.release ();
654 return latch;
655 }
656
657 /* Callback for make_forwarder_block. Returns true if the edge E is marked
658 in the set MFB_REIS_SET. */
659
660 static hash_set<edge> *mfb_reis_set;
661 static bool
662 mfb_redirect_edges_in_set (edge e)
663 {
664 return mfb_reis_set->contains (e);
665 }
666
667 /* Creates a subloop of LOOP with latch edge LATCH. */
668
669 static void
670 form_subloop (struct loop *loop, edge latch)
671 {
672 edge_iterator ei;
673 edge e, new_entry;
674 struct loop *new_loop;
675
676 mfb_reis_set = new hash_set<edge>;
677 FOR_EACH_EDGE (e, ei, loop->header->preds)
678 {
679 if (e != latch)
680 mfb_reis_set->add (e);
681 }
682 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
683 NULL);
684 delete mfb_reis_set;
685
686 loop->header = new_entry->src;
687
688 /* Find the blocks and subloops that belong to the new loop, and add it to
689 the appropriate place in the loop tree. */
690 new_loop = alloc_loop ();
691 new_loop->header = new_entry->dest;
692 new_loop->latch = latch->src;
693 add_loop (new_loop, loop);
694 }
695
696 /* Make all the latch edges of LOOP go to a single forwarder block --
697 a new latch of LOOP. */
698
699 static void
700 merge_latch_edges (struct loop *loop)
701 {
702 vec<edge> latches = get_loop_latch_edges (loop);
703 edge latch, e;
704 unsigned i;
705
706 gcc_assert (latches.length () > 0);
707
708 if (latches.length () == 1)
709 loop->latch = latches[0]->src;
710 else
711 {
712 if (dump_file)
713 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
714
715 mfb_reis_set = new hash_set<edge>;
716 FOR_EACH_VEC_ELT (latches, i, e)
717 mfb_reis_set->add (e);
718 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
719 NULL);
720 delete mfb_reis_set;
721
722 loop->header = latch->dest;
723 loop->latch = latch->src;
724 }
725
726 latches.release ();
727 }
728
729 /* LOOP may have several latch edges. Transform it into (possibly several)
730 loops with single latch edge. */
731
732 static void
733 disambiguate_multiple_latches (struct loop *loop)
734 {
735 edge e;
736
737 /* We eliminate the multiple latches by splitting the header into the forwarder
738 block F and the rest R, and redirecting the edges. There are two cases:
739
740 1) If there is a latch edge E that corresponds to a subloop (we guess
741 that based on profile -- if it is taken much more often than the
742 remaining edges; and on trees, using the information about induction
743 variables of the loops), we redirect E to R, all the remaining edges to
744 F, then rescan the loops and try again for the outer loop.
745 2) If there is no such edge, we redirect all latch edges to F, and the
746 entry edges to R, thus making F the single latch of the loop. */
747
748 if (dump_file)
749 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
750 loop->num);
751
752 /* During latch merging, we may need to redirect the entry edges to a new
753 block. This would cause problems if the entry edge was the one from the
754 entry block. To avoid having to handle this case specially, split
755 such an entry edge. */
756 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
757 if (e)
758 split_edge (e);
759
760 while (1)
761 {
762 e = find_subloop_latch_edge (loop);
763 if (!e)
764 break;
765
766 form_subloop (loop, e);
767 }
768
769 merge_latch_edges (loop);
770 }
771
772 /* Split loops with multiple latch edges. */
773
774 void
775 disambiguate_loops_with_multiple_latches (void)
776 {
777 struct loop *loop;
778
779 FOR_EACH_LOOP (loop, 0)
780 {
781 if (!loop->latch)
782 disambiguate_multiple_latches (loop);
783 }
784 }
785
786 /* Return nonzero if basic block BB belongs to LOOP. */
787 bool
788 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
789 {
790 struct loop *source_loop;
791
792 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
793 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
794 return 0;
795
796 source_loop = bb->loop_father;
797 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
798 }
799
800 /* Enumeration predicate for get_loop_body_with_size. */
801 static bool
802 glb_enum_p (const_basic_block bb, const void *glb_loop)
803 {
804 const struct loop *const loop = (const struct loop *) glb_loop;
805 return (bb != loop->header
806 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
807 }
808
809 /* Gets the basic blocks of a LOOP. The header is the 0-th block, the rest are
810 in DFS order against the direction of edges from the latch. In particular, if
811 header != latch, the latch is the 1-st block. LOOP cannot be the fake
812 loop tree root, and its size must be at most MAX_SIZE. The blocks
813 in the LOOP body are stored to BODY, and the size of the LOOP is
814 returned. */
815
816 unsigned
817 get_loop_body_with_size (const struct loop *loop, basic_block *body,
818 unsigned max_size)
819 {
820 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
821 body, max_size, loop);
822 }
823
824 /* Gets the basic blocks of a LOOP. The header is the 0-th block, the rest are
825 in DFS order against the direction of edges from the latch. In particular, if
826 header != latch, the latch is the 1-st block. */
827
828 basic_block *
829 get_loop_body (const struct loop *loop)
830 {
831 basic_block *body, bb;
832 unsigned tv = 0;
833
834 gcc_assert (loop->num_nodes);
835
836 body = XNEWVEC (basic_block, loop->num_nodes);
837
838 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
839 {
840 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
841 special-case the fake loop that contains the whole function. */
842 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
843 body[tv++] = loop->header;
844 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
845 FOR_EACH_BB_FN (bb, cfun)
846 body[tv++] = bb;
847 }
848 else
849 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
850
851 gcc_assert (tv == loop->num_nodes);
852 return body;
853 }
854
855 /* Fills dominance descendants inside LOOP of the basic block BB into
856 array TOVISIT from index *TV. */
857
858 static void
859 fill_sons_in_loop (const struct loop *loop, basic_block bb,
860 basic_block *tovisit, int *tv)
861 {
862 basic_block son, postpone = NULL;
863
864 tovisit[(*tv)++] = bb;
865 for (son = first_dom_son (CDI_DOMINATORS, bb);
866 son;
867 son = next_dom_son (CDI_DOMINATORS, son))
868 {
869 if (!flow_bb_inside_loop_p (loop, son))
870 continue;
871
872 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
873 {
874 postpone = son;
875 continue;
876 }
877 fill_sons_in_loop (loop, son, tovisit, tv);
878 }
879
880 if (postpone)
881 fill_sons_in_loop (loop, postpone, tovisit, tv);
882 }
883
884 /* Gets body of a LOOP (that must be different from the outermost loop)
885 sorted by dominance relation. Additionally, if a basic block s dominates
886 the latch, then only blocks dominated by s are after it. */
887
888 basic_block *
889 get_loop_body_in_dom_order (const struct loop *loop)
890 {
891 basic_block *tovisit;
892 int tv;
893
894 gcc_assert (loop->num_nodes);
895
896 tovisit = XNEWVEC (basic_block, loop->num_nodes);
897
898 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
899
900 tv = 0;
901 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
902
903 gcc_assert (tv == (int) loop->num_nodes);
904
905 return tovisit;
906 }
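
/* A minimal usage sketch (the helper and its callback are hypothetical,
   not part of cfgloop.c): visiting the body in this order guarantees that
   a block is processed only after all of its dominators inside the loop.
   The returned array is heap-allocated and must be freed by the caller.  */

static void
example_walk_loop_in_dom_order (struct loop *loop,
                                void (*visit) (basic_block))
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    visit (body[i]);
  free (body);
}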
907
908 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
909
910 basic_block *
911 get_loop_body_in_custom_order (const struct loop *loop,
912 int (*bb_comparator) (const void *, const void *))
913 {
914 basic_block *bbs = get_loop_body (loop);
915
916 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
917
918 return bbs;
919 }
920
921 /* Get body of a LOOP in breadth first sort order. */
922
923 basic_block *
924 get_loop_body_in_bfs_order (const struct loop *loop)
925 {
926 basic_block *blocks;
927 basic_block bb;
928 bitmap visited;
929 unsigned int i = 0;
930 unsigned int vc = 1;
931
932 gcc_assert (loop->num_nodes);
933 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
934
935 blocks = XNEWVEC (basic_block, loop->num_nodes);
936 visited = BITMAP_ALLOC (NULL);
937
938 bb = loop->header;
939 while (i < loop->num_nodes)
940 {
941 edge e;
942 edge_iterator ei;
943
944 if (bitmap_set_bit (visited, bb->index))
945 /* This basic block is now visited */
946 blocks[i++] = bb;
947
948 FOR_EACH_EDGE (e, ei, bb->succs)
949 {
950 if (flow_bb_inside_loop_p (loop, e->dest))
951 {
952 if (bitmap_set_bit (visited, e->dest->index))
953 blocks[i++] = e->dest;
954 }
955 }
956
957 gcc_assert (i >= vc);
958
959 bb = blocks[vc++];
960 }
961
962 BITMAP_FREE (visited);
963 return blocks;
964 }
965
966 /* Hash function for struct loop_exit. */
967
968 hashval_t
969 loop_exit_hasher::hash (loop_exit *exit)
970 {
971 return htab_hash_pointer (exit->e);
972 }
973
974 /* Equality function for struct loop_exit. Compares with edge. */
975
976 bool
977 loop_exit_hasher::equal (loop_exit *exit, edge e)
978 {
979 return exit->e == e;
980 }
981
982 /* Frees the list of loop exit descriptions EX. */
983
984 void
985 loop_exit_hasher::remove (loop_exit *exit)
986 {
987 loop_exit *next;
988 for (; exit; exit = next)
989 {
990 next = exit->next_e;
991
992 exit->next->prev = exit->prev;
993 exit->prev->next = exit->next;
994
995 ggc_free (exit);
996 }
997 }
998
999 /* Returns the list of records for E as an exit of a loop. */
1000
1001 static struct loop_exit *
1002 get_exit_descriptions (edge e)
1003 {
1004 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
1005 }
1006
1007 /* Updates the lists of loop exits in which E appears.
1008 If REMOVED is true, E is being removed, and we
1009 just remove it from the lists of exits.
1010 If NEW_EDGE is true and E is not a loop exit, we
1011 do not try to remove it from loop exit lists. */
1012
1013 void
1014 rescan_loop_exit (edge e, bool new_edge, bool removed)
1015 {
1016 struct loop_exit *exits = NULL, *exit;
1017 struct loop *aloop, *cloop;
1018
1019 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1020 return;
1021
1022 if (!removed
1023 && e->src->loop_father != NULL
1024 && e->dest->loop_father != NULL
1025 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1026 {
1027 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1028 for (aloop = e->src->loop_father;
1029 aloop != cloop;
1030 aloop = loop_outer (aloop))
1031 {
1032 exit = ggc_alloc<loop_exit> ();
1033 exit->e = e;
1034
1035 exit->next = aloop->exits->next;
1036 exit->prev = aloop->exits;
1037 exit->next->prev = exit;
1038 exit->prev->next = exit;
1039
1040 exit->next_e = exits;
1041 exits = exit;
1042 }
1043 }
1044
1045 if (!exits && new_edge)
1046 return;
1047
1048 loop_exit **slot
1049 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1050 exits ? INSERT : NO_INSERT);
1051 if (!slot)
1052 return;
1053
1054 if (exits)
1055 {
1056 if (*slot)
1057 loop_exit_hasher::remove (*slot);
1058 *slot = exits;
1059 }
1060 else
1061 current_loops->exits->clear_slot (slot);
1062 }
1063
1064 /* For each loop, record list of exit edges, and start maintaining these
1065 lists. */
1066
1067 void
1068 record_loop_exits (void)
1069 {
1070 basic_block bb;
1071 edge_iterator ei;
1072 edge e;
1073
1074 if (!current_loops)
1075 return;
1076
1077 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1078 return;
1079 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1080
1081 gcc_assert (current_loops->exits == NULL);
1082 current_loops->exits
1083 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1084
1085 FOR_EACH_BB_FN (bb, cfun)
1086 {
1087 FOR_EACH_EDGE (e, ei, bb->succs)
1088 {
1089 rescan_loop_exit (e, true, false);
1090 }
1091 }
1092 }
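
/* A minimal sketch (hypothetical helper) of the usual consumer pattern:
   once exits are recorded, queries such as single_exit below are answered
   from the hash table instead of scanning the loop body.  */

static edge
example_unique_exit (const struct loop *loop)
{
  /* A no-op if the exit lists are already maintained.  */
  record_loop_exits ();
  return single_exit (loop);
}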
1093
1094 /* Dumps information about the exit in *SLOT to FILE.
1095 Callback for htab_traverse. */
1096
1097 int
1098 dump_recorded_exit (loop_exit **slot, FILE *file)
1099 {
1100 struct loop_exit *exit = *slot;
1101 unsigned n = 0;
1102 edge e = exit->e;
1103
1104 for (; exit != NULL; exit = exit->next_e)
1105 n++;
1106
1107 fprintf (file, "Edge %d->%d exits %u loops\n",
1108 e->src->index, e->dest->index, n);
1109
1110 return 1;
1111 }
1112
1113 /* Dumps the recorded exits of loops to FILE. */
1114
1115 extern void dump_recorded_exits (FILE *);
1116 void
1117 dump_recorded_exits (FILE *file)
1118 {
1119 if (!current_loops->exits)
1120 return;
1121 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1122 }
1123
1124 /* Releases lists of loop exits. */
1125
1126 void
1127 release_recorded_exits (void)
1128 {
1129 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
1130 current_loops->exits->empty ();
1131 current_loops->exits = NULL;
1132 loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
1133 }
1134
1135 /* Returns the list of the exit edges of a LOOP. */
1136
1137 vec<edge>
1138 get_loop_exit_edges (const struct loop *loop)
1139 {
1140 vec<edge> edges = vNULL;
1141 edge e;
1142 unsigned i;
1143 basic_block *body;
1144 edge_iterator ei;
1145 struct loop_exit *exit;
1146
1147 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1148
1149 /* If we maintain the lists of exits, use them. Otherwise we must
1150 scan the body of the loop. */
1151 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1152 {
1153 for (exit = loop->exits->next; exit->e; exit = exit->next)
1154 edges.safe_push (exit->e);
1155 }
1156 else
1157 {
1158 body = get_loop_body (loop);
1159 for (i = 0; i < loop->num_nodes; i++)
1160 FOR_EACH_EDGE (e, ei, body[i]->succs)
1161 {
1162 if (!flow_bb_inside_loop_p (loop, e->dest))
1163 edges.safe_push (e);
1164 }
1165 free (body);
1166 }
1167
1168 return edges;
1169 }
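
/* A minimal usage sketch (hypothetical helper): the returned vector is
   owned by the caller and must be released.  */

static unsigned
example_count_exit_edges (const struct loop *loop)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  unsigned n = exits.length ();

  exits.release ();
  return n;
}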
1170
1171 /* Counts the number of conditional branches inside LOOP. */
1172
1173 unsigned
1174 num_loop_branches (const struct loop *loop)
1175 {
1176 unsigned i, n;
1177 basic_block * body;
1178
1179 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1180
1181 body = get_loop_body (loop);
1182 n = 0;
1183 for (i = 0; i < loop->num_nodes; i++)
1184 if (EDGE_COUNT (body[i]->succs) >= 2)
1185 n++;
1186 free (body);
1187
1188 return n;
1189 }
1190
1191 /* Adds basic block BB to LOOP. */
1192 void
1193 add_bb_to_loop (basic_block bb, struct loop *loop)
1194 {
1195 unsigned i;
1196 loop_p ploop;
1197 edge_iterator ei;
1198 edge e;
1199
1200 gcc_assert (bb->loop_father == NULL);
1201 bb->loop_father = loop;
1202 loop->num_nodes++;
1203 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1204 ploop->num_nodes++;
1205
1206 FOR_EACH_EDGE (e, ei, bb->succs)
1207 {
1208 rescan_loop_exit (e, true, false);
1209 }
1210 FOR_EACH_EDGE (e, ei, bb->preds)
1211 {
1212 rescan_loop_exit (e, true, false);
1213 }
1214 }
1215
1216 /* Remove basic block BB from loops. */
1217 void
1218 remove_bb_from_loops (basic_block bb)
1219 {
1220 unsigned i;
1221 struct loop *loop = bb->loop_father;
1222 loop_p ploop;
1223 edge_iterator ei;
1224 edge e;
1225
1226 gcc_assert (loop != NULL);
1227 loop->num_nodes--;
1228 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1229 ploop->num_nodes--;
1230 bb->loop_father = NULL;
1231
1232 FOR_EACH_EDGE (e, ei, bb->succs)
1233 {
1234 rescan_loop_exit (e, false, true);
1235 }
1236 FOR_EACH_EDGE (e, ei, bb->preds)
1237 {
1238 rescan_loop_exit (e, false, true);
1239 }
1240 }
1241
1242 /* Finds the nearest common ancestor in the loop tree for the given loops. */
1243 struct loop *
1244 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1245 {
1246 unsigned sdepth, ddepth;
1247
1248 if (!loop_s) return loop_d;
1249 if (!loop_d) return loop_s;
1250
1251 sdepth = loop_depth (loop_s);
1252 ddepth = loop_depth (loop_d);
1253
1254 if (sdepth < ddepth)
1255 loop_d = (*loop_d->superloops)[sdepth];
1256 else if (sdepth > ddepth)
1257 loop_s = (*loop_s->superloops)[ddepth];
1258
1259 while (loop_s != loop_d)
1260 {
1261 loop_s = loop_outer (loop_s);
1262 loop_d = loop_outer (loop_d);
1263 }
1264 return loop_s;
1265 }
1266
1267 /* Removes LOOP from structures and frees its data. */
1268
1269 void
1270 delete_loop (struct loop *loop)
1271 {
1272 /* Remove the loop from structure. */
1273 flow_loop_tree_node_remove (loop);
1274
1275 /* Remove loop from loops array. */
1276 (*current_loops->larray)[loop->num] = NULL;
1277
1278 /* Free loop data. */
1279 flow_loop_free (loop);
1280 }
1281
1282 /* Cancels the LOOP; it must be an innermost one. */
1283
1284 static void
1285 cancel_loop (struct loop *loop)
1286 {
1287 basic_block *bbs;
1288 unsigned i;
1289 struct loop *outer = loop_outer (loop);
1290
1291 gcc_assert (!loop->inner);
1292
1293 /* Move blocks up one level (they should be removed as soon as possible). */
1294 bbs = get_loop_body (loop);
1295 for (i = 0; i < loop->num_nodes; i++)
1296 bbs[i]->loop_father = outer;
1297
1298 free (bbs);
1299 delete_loop (loop);
1300 }
1301
1302 /* Cancels LOOP and all its subloops. */
1303 void
1304 cancel_loop_tree (struct loop *loop)
1305 {
1306 while (loop->inner)
1307 cancel_loop_tree (loop->inner);
1308 cancel_loop (loop);
1309 }
1310
1311 /* Checks that information about loops is correct
1312 -- sizes of loops are all right
1313 -- results of get_loop_body really belong to the loop
1314 -- loop headers have just a single entry edge and a single latch edge
1315 -- loop latches have only a single successor, which is the header of their loop
1316 -- irreducible loops are correctly marked
1317 -- the cached loop depth and loop father of each bb is correct
1318 */
1319 DEBUG_FUNCTION void
1320 verify_loop_structure (void)
1321 {
1322 unsigned *sizes, i, j;
1323 sbitmap irreds;
1324 basic_block bb, *bbs;
1325 struct loop *loop;
1326 int err = 0;
1327 edge e;
1328 unsigned num = number_of_loops (cfun);
1329 struct loop_exit *exit, *mexit;
1330 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1331 sbitmap visited;
1332
1333 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1334 {
1335 error ("loop verification on loop tree that needs fixup");
1336 err = 1;
1337 }
1338
1339 /* We need up-to-date dominators, compute or verify them. */
1340 if (!dom_available)
1341 calculate_dominance_info (CDI_DOMINATORS);
1342 else
1343 verify_dominators (CDI_DOMINATORS);
1344
1345 /* Check the loop tree root. */
1346 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1347 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1348 || (current_loops->tree_root->num_nodes
1349 != (unsigned) n_basic_blocks_for_fn (cfun)))
1350 {
1351 error ("corrupt loop tree root");
1352 err = 1;
1353 }
1354
1355 /* Check the headers. */
1356 FOR_EACH_BB_FN (bb, cfun)
1357 if (bb_loop_header_p (bb))
1358 {
1359 if (bb->loop_father->header == NULL)
1360 {
1361 error ("loop with header %d marked for removal", bb->index);
1362 err = 1;
1363 }
1364 else if (bb->loop_father->header != bb)
1365 {
1366 error ("loop with header %d not in loop tree", bb->index);
1367 err = 1;
1368 }
1369 }
1370 else if (bb->loop_father->header == bb)
1371 {
1372 error ("non-loop with header %d not marked for removal", bb->index);
1373 err = 1;
1374 }
1375
1376 /* Check the recorded loop father and sizes of loops. */
1377 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1378 bitmap_clear (visited);
1379 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1380 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1381 {
1382 unsigned n;
1383
1384 if (loop->header == NULL)
1385 {
1386 error ("removed loop %d in loop tree", loop->num);
1387 err = 1;
1388 continue;
1389 }
1390
1391 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1392 if (loop->num_nodes != n)
1393 {
1394 error ("size of loop %d should be %d, not %d",
1395 loop->num, n, loop->num_nodes);
1396 err = 1;
1397 }
1398
1399 for (j = 0; j < n; j++)
1400 {
1401 bb = bbs[j];
1402
1403 if (!flow_bb_inside_loop_p (loop, bb))
1404 {
1405 error ("bb %d does not belong to loop %d",
1406 bb->index, loop->num);
1407 err = 1;
1408 }
1409
1410 /* Ignore this block if it is in an inner loop. */
1411 if (bitmap_bit_p (visited, bb->index))
1412 continue;
1413 bitmap_set_bit (visited, bb->index);
1414
1415 if (bb->loop_father != loop)
1416 {
1417 error ("bb %d has father loop %d, should be loop %d",
1418 bb->index, bb->loop_father->num, loop->num);
1419 err = 1;
1420 }
1421 }
1422 }
1423 free (bbs);
1424 sbitmap_free (visited);
1425
1426 /* Check headers and latches. */
1427 FOR_EACH_LOOP (loop, 0)
1428 {
1429 i = loop->num;
1430 if (loop->header == NULL)
1431 continue;
1432 if (!bb_loop_header_p (loop->header))
1433 {
1434 error ("loop %d%'s header is not a loop header", i);
1435 err = 1;
1436 }
1437 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1438 && EDGE_COUNT (loop->header->preds) != 2)
1439 {
1440 error ("loop %d%'s header does not have exactly 2 entries", i);
1441 err = 1;
1442 }
1443 if (loop->latch)
1444 {
1445 if (!find_edge (loop->latch, loop->header))
1446 {
1447 error ("loop %d%'s latch does not have an edge to its header", i);
1448 err = 1;
1449 }
1450 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1451 {
1452 error ("loop %d%'s latch is not dominated by its header", i);
1453 err = 1;
1454 }
1455 }
1456 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1457 {
1458 if (!single_succ_p (loop->latch))
1459 {
1460 error ("loop %d%'s latch does not have exactly 1 successor", i);
1461 err = 1;
1462 }
1463 if (single_succ (loop->latch) != loop->header)
1464 {
1465 error ("loop %d%'s latch does not have header as successor", i);
1466 err = 1;
1467 }
1468 if (loop->latch->loop_father != loop)
1469 {
1470 error ("loop %d%'s latch does not belong directly to it", i);
1471 err = 1;
1472 }
1473 }
1474 if (loop->header->loop_father != loop)
1475 {
1476 error ("loop %d%'s header does not belong directly to it", i);
1477 err = 1;
1478 }
1479 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1480 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1481 {
1482 error ("loop %d%'s latch is marked as part of irreducible region", i);
1483 err = 1;
1484 }
1485 }
1486
1487 /* Check irreducible loops. */
1488 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1489 {
1490 /* Record old info. */
1491 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1492 FOR_EACH_BB_FN (bb, cfun)
1493 {
1494 edge_iterator ei;
1495 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1496 bitmap_set_bit (irreds, bb->index);
1497 else
1498 bitmap_clear_bit (irreds, bb->index);
1499 FOR_EACH_EDGE (e, ei, bb->succs)
1500 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1501 e->flags |= EDGE_ALL_FLAGS + 1;
1502 }
1503
1504 /* Recount it. */
1505 mark_irreducible_loops ();
1506
1507 /* Compare. */
1508 FOR_EACH_BB_FN (bb, cfun)
1509 {
1510 edge_iterator ei;
1511
1512 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1513 && !bitmap_bit_p (irreds, bb->index))
1514 {
1515 error ("basic block %d should be marked irreducible", bb->index);
1516 err = 1;
1517 }
1518 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1519 && bitmap_bit_p (irreds, bb->index))
1520 {
1521 error ("basic block %d should not be marked irreducible", bb->index);
1522 err = 1;
1523 }
1524 FOR_EACH_EDGE (e, ei, bb->succs)
1525 {
1526 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1527 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1528 {
1529 error ("edge from %d to %d should be marked irreducible",
1530 e->src->index, e->dest->index);
1531 err = 1;
1532 }
1533 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1534 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1535 {
1536 error ("edge from %d to %d should not be marked irreducible",
1537 e->src->index, e->dest->index);
1538 err = 1;
1539 }
1540 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1541 }
1542 }
1543 free (irreds);
1544 }
1545
1546 /* Check the recorded loop exits. */
1547 FOR_EACH_LOOP (loop, 0)
1548 {
1549 if (!loop->exits || loop->exits->e != NULL)
1550 {
1551 error ("corrupted head of the exits list of loop %d",
1552 loop->num);
1553 err = 1;
1554 }
1555 else
1556 {
1557 /* Check that the list forms a cycle, and all elements except
1558 for the head are nonnull. */
1559 for (mexit = loop->exits, exit = mexit->next, i = 0;
1560 exit->e && exit != mexit;
1561 exit = exit->next)
1562 {
1563 if (i++ & 1)
1564 mexit = mexit->next;
1565 }
1566
1567 if (exit != loop->exits)
1568 {
1569 error ("corrupted exits list of loop %d", loop->num);
1570 err = 1;
1571 }
1572 }
1573
1574 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1575 {
1576 if (loop->exits->next != loop->exits)
1577 {
1578 error ("nonempty exits list of loop %d, but exits are not recorded",
1579 loop->num);
1580 err = 1;
1581 }
1582 }
1583 }
1584
1585 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1586 {
1587 unsigned n_exits = 0, eloops;
1588
1589 sizes = XCNEWVEC (unsigned, num);
1590 memset (sizes, 0, sizeof (unsigned) * num);
1591 FOR_EACH_BB_FN (bb, cfun)
1592 {
1593 edge_iterator ei;
1594 if (bb->loop_father == current_loops->tree_root)
1595 continue;
1596 FOR_EACH_EDGE (e, ei, bb->succs)
1597 {
1598 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1599 continue;
1600
1601 n_exits++;
1602 exit = get_exit_descriptions (e);
1603 if (!exit)
1604 {
1605 error ("exit %d->%d not recorded",
1606 e->src->index, e->dest->index);
1607 err = 1;
1608 }
1609 eloops = 0;
1610 for (; exit; exit = exit->next_e)
1611 eloops++;
1612
1613 for (loop = bb->loop_father;
1614 loop != e->dest->loop_father
1615 /* When a loop exit is also an entry edge which
1616 can happen when avoiding CFG manipulations
1617 then the last loop exited is the outer loop
1618 of the loop entered. */
1619 && loop != loop_outer (e->dest->loop_father);
1620 loop = loop_outer (loop))
1621 {
1622 eloops--;
1623 sizes[loop->num]++;
1624 }
1625
1626 if (eloops != 0)
1627 {
1628 error ("wrong list of exited loops for edge %d->%d",
1629 e->src->index, e->dest->index);
1630 err = 1;
1631 }
1632 }
1633 }
1634
1635 if (n_exits != current_loops->exits->elements ())
1636 {
1637 error ("too many loop exits recorded");
1638 err = 1;
1639 }
1640
1641 FOR_EACH_LOOP (loop, 0)
1642 {
1643 eloops = 0;
1644 for (exit = loop->exits->next; exit->e; exit = exit->next)
1645 eloops++;
1646 if (eloops != sizes[loop->num])
1647 {
1648 error ("%d exits recorded for loop %d (having %d exits)",
1649 eloops, loop->num, sizes[loop->num]);
1650 err = 1;
1651 }
1652 }
1653
1654 free (sizes);
1655 }
1656
1657 gcc_assert (!err);
1658
1659 if (!dom_available)
1660 free_dominance_info (CDI_DOMINATORS);
1661 }
1662
1663 /* Returns latch edge of LOOP. */
1664 edge
1665 loop_latch_edge (const struct loop *loop)
1666 {
1667 return find_edge (loop->latch, loop->header);
1668 }
1669
1670 /* Returns preheader edge of LOOP. */
1671 edge
1672 loop_preheader_edge (const struct loop *loop)
1673 {
1674 edge e;
1675 edge_iterator ei;
1676
1677 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1678
1679 FOR_EACH_EDGE (e, ei, loop->header->preds)
1680 if (e->src != loop->latch)
1681 break;
1682
1683 return e;
1684 }
1685
1686 /* Returns true if E is an exit of LOOP. */
1687
1688 bool
1689 loop_exit_edge_p (const struct loop *loop, const_edge e)
1690 {
1691 return (flow_bb_inside_loop_p (loop, e->src)
1692 && !flow_bb_inside_loop_p (loop, e->dest));
1693 }
1694
1695 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1696 or more than one exit. If loops do not have the exits recorded, NULL
1697 is always returned. */
1698
1699 edge
1700 single_exit (const struct loop *loop)
1701 {
1702 struct loop_exit *exit = loop->exits->next;
1703
1704 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1705 return NULL;
1706
1707 if (exit->e && exit->next == loop->exits)
1708 return exit->e;
1709 else
1710 return NULL;
1711 }
1712
1713 /* Returns true when BB has an incoming edge exiting LOOP. */
1714
1715 bool
1716 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1717 {
1718 edge e;
1719 edge_iterator ei;
1720
1721 FOR_EACH_EDGE (e, ei, bb->preds)
1722 if (loop_exit_edge_p (loop, e))
1723 return true;
1724
1725 return false;
1726 }
1727
1728 /* Returns true when BB has an outgoing edge exiting LOOP. */
1729
1730 bool
1731 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1732 {
1733 edge e;
1734 edge_iterator ei;
1735
1736 FOR_EACH_EDGE (e, ei, bb->succs)
1737 if (loop_exit_edge_p (loop, e))
1738 return true;
1739
1740 return false;
1741 }
1742
1743 /* Return location corresponding to the loop control condition if possible. */
1744
1745 location_t
1746 get_loop_location (struct loop *loop)
1747 {
1748 rtx_insn *insn = NULL;
1749 struct niter_desc *desc = NULL;
1750 edge exit;
1751
1752 /* For a for or while loop, we would like to return the location
1753 of the for or while statement, if possible. To do this, look
1754 for the branch guarding the loop back-edge. */
1755
1756 /* If this is a simple loop with an in_edge, then the loop control
1757 branch is typically at the end of its source. */
1758 desc = get_simple_loop_desc (loop);
1759 if (desc->in_edge)
1760 {
1761 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1762 {
1763 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1764 return INSN_LOCATION (insn);
1765 }
1766 }
1767 /* If loop has a single exit, then the loop control branch
1768 must be at the end of its source. */
1769 if ((exit = single_exit (loop)))
1770 {
1771 FOR_BB_INSNS_REVERSE (exit->src, insn)
1772 {
1773 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1774 return INSN_LOCATION (insn);
1775 }
1776 }
1777 /* Next check the latch, to see if it is non-empty. */
1778 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1779 {
1780 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1781 return INSN_LOCATION (insn);
1782 }
1783 /* Finally, if none of the above identifies the loop control branch,
1784 return the first location in the loop header. */
1785 FOR_BB_INSNS (loop->header, insn)
1786 {
1787 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1788 return INSN_LOCATION (insn);
1789 }
1790 /* If all else fails, simply return the current function location. */
1791 return DECL_SOURCE_LOCATION (current_function_decl);
1792 }
1793
1794 /* Records that every statement in LOOP is executed I_BOUND times.
1795 REALISTIC is true if I_BOUND is expected to be close to the real number
1796 of iterations. UPPER is true if we are sure the loop iterates at most
1797 I_BOUND times. */
1798
1799 void
1800 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1801 bool realistic, bool upper)
1802 {
1803 /* Update the bounds only when there is no previous estimation, or when the
1804 current estimation is smaller. */
1805 if (upper
1806 && (!loop->any_upper_bound
1807 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1808 {
1809 loop->any_upper_bound = true;
1810 loop->nb_iterations_upper_bound = i_bound;
1811 }
1812 if (realistic
1813 && (!loop->any_estimate
1814 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1815 {
1816 loop->any_estimate = true;
1817 loop->nb_iterations_estimate = i_bound;
1818 }
1819
1820 /* If an upper bound is smaller than the realistic estimate of the
1821 number of iterations, use the upper bound instead. */
1822 if (loop->any_upper_bound
1823 && loop->any_estimate
1824 && wi::ltu_p (loop->nb_iterations_upper_bound,
1825 loop->nb_iterations_estimate))
1826 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1827 }
1828
1829 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1830 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1831 on the number of iterations of LOOP could not be derived, returns -1. */
1832
1833 HOST_WIDE_INT
1834 get_estimated_loop_iterations_int (struct loop *loop)
1835 {
1836 widest_int nit;
1837 HOST_WIDE_INT hwi_nit;
1838
1839 if (!get_estimated_loop_iterations (loop, &nit))
1840 return -1;
1841
1842 if (!wi::fits_shwi_p (nit))
1843 return -1;
1844 hwi_nit = nit.to_shwi ();
1845
1846 return hwi_nit < 0 ? -1 : hwi_nit;
1847 }
1848
1849 /* Returns an upper bound on the number of executions of statements
1850 in the LOOP. For statements before the loop exit, this exceeds
1851 the number of executions of the latch by one. */
1852
1853 HOST_WIDE_INT
1854 max_stmt_executions_int (struct loop *loop)
1855 {
1856 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1857 HOST_WIDE_INT snit;
1858
1859 if (nit == -1)
1860 return -1;
1861
1862 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1863
1864 /* If the computation overflows, return -1. */
1865 return snit < 0 ? -1 : snit;
1866 }
1867
1868 /* Sets NIT to the estimated number of executions of the latch of the
1869 LOOP. If we have no reliable estimate, the function returns false, otherwise
1870 returns true. */
1871
1872 bool
1873 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1874 {
1875 /* Even if the bound is not recorded, possibly we can derive one from
1876 the profile. */
1877 if (!loop->any_estimate)
1878 {
1879 if (loop->header->count)
1880 {
1881 *nit = gcov_type_to_wide_int
1882 (expected_loop_iterations_unbounded (loop) + 1);
1883 return true;
1884 }
1885 return false;
1886 }
1887
1888 *nit = loop->nb_iterations_estimate;
1889 return true;
1890 }
1891
1892 /* Sets NIT to an upper bound for the maximum number of executions of the
1893 latch of the LOOP. If we have no reliable estimate, the function returns
1894 false, otherwise returns true. */
1895
1896 bool
1897 get_max_loop_iterations (struct loop *loop, widest_int *nit)
1898 {
1899 if (!loop->any_upper_bound)
1900 return false;
1901
1902 *nit = loop->nb_iterations_upper_bound;
1903 return true;
1904 }
1905
1906 /* Similar to get_max_loop_iterations, but returns the estimate only
1907 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1908 on the number of iterations of LOOP could not be derived, returns -1. */
1909
1910 HOST_WIDE_INT
1911 get_max_loop_iterations_int (struct loop *loop)
1912 {
1913 widest_int nit;
1914 HOST_WIDE_INT hwi_nit;
1915
1916 if (!get_max_loop_iterations (loop, &nit))
1917 return -1;
1918
1919 if (!wi::fits_shwi_p (nit))
1920 return -1;
1921 hwi_nit = nit.to_shwi ();
1922
1923 return hwi_nit < 0 ? -1 : hwi_nit;
1924 }
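
/* A minimal sketch (hypothetical helper; the threshold parameter is purely
   illustrative) of how these bounds are typically consumed: a transform
   asks for the upper bound on latch executions and gives up when it is
   unknown (-1) or too large.  */

static bool
example_small_trip_count_p (struct loop *loop, HOST_WIDE_INT max_iter)
{
  HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);

  return nit >= 0 && nit <= max_iter;
}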
1925
1926 /* Returns the loop depth of the loop BB belongs to. */
1927
1928 int
1929 bb_loop_depth (const_basic_block bb)
1930 {
1931 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1932 }
1933
1934 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1935
1936 void
1937 mark_loop_for_removal (loop_p loop)
1938 {
1939 if (loop->header == NULL)
1940 return;
1941 loop->former_header = loop->header;
1942 loop->header = NULL;
1943 loop->latch = NULL;
1944 loops_state_set (LOOPS_NEED_FIXUP);
1945 }