Dump profile-based number of iterations
gcc/cfgloop.c
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "gimple-ssa.h"
29 #include "diagnostic-core.h"
30 #include "cfganal.h"
31 #include "cfgloop.h"
32 #include "gimple-iterator.h"
33 #include "dumpfile.h"
34
35 static void flow_loops_cfg_dump (FILE *);
36 \f
37 /* Dump loop related CFG information. */
38
39 static void
40 flow_loops_cfg_dump (FILE *file)
41 {
42 basic_block bb;
43
44 if (!file)
45 return;
46
47 FOR_EACH_BB_FN (bb, cfun)
48 {
49 edge succ;
50 edge_iterator ei;
51
52 fprintf (file, ";; %d succs { ", bb->index);
53 FOR_EACH_EDGE (succ, ei, bb->succs)
54 fprintf (file, "%d ", succ->dest->index);
55 fprintf (file, "}\n");
56 }
57 }
58
59 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
60
61 bool
62 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
63 {
64 unsigned odepth = loop_depth (outer);
65
66 return (loop_depth (loop) > odepth
67 && (*loop->superloops)[odepth] == outer);
68 }
69
 70 /* Returns the superloop of LOOP at depth DEPTH (indexed from zero, with the
 71 dummy loop tree root at depth zero). DEPTH must not exceed loop_depth (LOOP). */
72
73 struct loop *
74 superloop_at_depth (struct loop *loop, unsigned depth)
75 {
76 unsigned ldepth = loop_depth (loop);
77
78 gcc_assert (depth <= ldepth);
79
80 if (depth == ldepth)
81 return loop;
82
83 return (*loop->superloops)[depth];
84 }
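/* Illustrative relationship between the two helpers above: for any loop L
   and any DEPTH <= loop_depth (L), superloop_at_depth (L, DEPTH) returns an
   ancestor in the loop tree for which flow_loop_nested_p holds (or L itself
   when DEPTH == loop_depth (L)), e.g.

     struct loop *anc = superloop_at_depth (loop, depth);
     gcc_assert (anc == loop || flow_loop_nested_p (anc, loop));

   In particular, depth 0 yields the dummy loop tree root.  */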
85
86 /* Returns the list of the latch edges of LOOP. */
87
88 static vec<edge>
89 get_loop_latch_edges (const struct loop *loop)
90 {
91 edge_iterator ei;
92 edge e;
93 vec<edge> ret = vNULL;
94
95 FOR_EACH_EDGE (e, ei, loop->header->preds)
96 {
97 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
98 ret.safe_push (e);
99 }
100
101 return ret;
102 }
103
104 /* Dump the loop information specified by LOOP to the stream FILE
105 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
106
107 void
108 flow_loop_dump (const struct loop *loop, FILE *file,
109 void (*loop_dump_aux) (const struct loop *, FILE *, int),
110 int verbose)
111 {
112 basic_block *bbs;
113 unsigned i;
114 vec<edge> latches;
115 edge e;
116
117 if (! loop || ! loop->header)
118 return;
119
120 fprintf (file, ";;\n;; Loop %d\n", loop->num);
121
122 fprintf (file, ";; header %d, ", loop->header->index);
123 if (loop->latch)
124 fprintf (file, "latch %d\n", loop->latch->index);
125 else
126 {
127 fprintf (file, "multiple latches:");
128 latches = get_loop_latch_edges (loop);
129 FOR_EACH_VEC_ELT (latches, i, e)
130 fprintf (file, " %d", e->src->index);
131 latches.release ();
132 fprintf (file, "\n");
133 }
134
135 fprintf (file, ";; depth %d, outer %ld\n",
136 loop_depth (loop), (long) (loop_outer (loop)
137 ? loop_outer (loop)->num : -1));
138
139 if (loop->latch)
140 {
141 bool read_profile_p;
142 gcov_type nit = expected_loop_iterations_unbounded (loop, &read_profile_p);
143 if (read_profile_p && !loop->any_estimate)
 144 fprintf (file, ";; profile-based iteration count: %" PRIu64 "\n", (uint64_t) nit);
145 }
146
147 fprintf (file, ";; nodes:");
148 bbs = get_loop_body (loop);
149 for (i = 0; i < loop->num_nodes; i++)
150 fprintf (file, " %d", bbs[i]->index);
151 free (bbs);
152 fprintf (file, "\n");
153
154 if (loop_dump_aux)
155 loop_dump_aux (loop, file, verbose);
156 }
157
158 /* Dump the loop information about loops to the stream FILE,
159 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
160
161 void
162 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
163 {
164 struct loop *loop;
165
166 if (!current_loops || ! file)
167 return;
168
169 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
170
171 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
172 {
173 flow_loop_dump (loop, file, loop_dump_aux, verbose);
174 }
175
176 if (verbose)
177 flow_loops_cfg_dump (file);
178 }
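/* Debugging hint: flow_loops_dump is convenient to invoke by hand from a
   debugger (exact syntax may vary), e.g.

     (gdb) call flow_loops_dump (stderr, 0, 1)

   which dumps every recorded loop of the current function to stderr and,
   since VERBOSE is nonzero, the per-block successor lists as well.  */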
179
180 /* Free data allocated for LOOP. */
181
182 void
183 flow_loop_free (struct loop *loop)
184 {
185 struct loop_exit *exit, *next;
186
187 vec_free (loop->superloops);
188
189 /* Break the list of the loop exit records. They will be freed when the
190 corresponding edge is rescanned or removed, and this avoids
191 accessing the (already released) head of the list stored in the
192 loop structure. */
193 for (exit = loop->exits->next; exit != loop->exits; exit = next)
194 {
195 next = exit->next;
196 exit->next = exit;
197 exit->prev = exit;
198 }
199
200 ggc_free (loop->exits);
201 ggc_free (loop);
202 }
203
204 /* Free all the memory allocated for LOOPS. */
205
206 void
207 flow_loops_free (struct loops *loops)
208 {
209 if (loops->larray)
210 {
211 unsigned i;
212 loop_p loop;
213
214 /* Free the loop descriptors. */
215 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
216 {
217 if (!loop)
218 continue;
219
220 flow_loop_free (loop);
221 }
222
223 vec_free (loops->larray);
224 }
225 }
226
227 /* Find the nodes contained within the LOOP with header HEADER.
228 Return the number of nodes within the loop. */
229
230 int
231 flow_loop_nodes_find (basic_block header, struct loop *loop)
232 {
233 vec<basic_block> stack = vNULL;
234 int num_nodes = 1;
235 edge latch;
236 edge_iterator latch_ei;
237
238 header->loop_father = loop;
239
240 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
241 {
242 if (latch->src->loop_father == loop
243 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
244 continue;
245
246 num_nodes++;
247 stack.safe_push (latch->src);
248 latch->src->loop_father = loop;
249
250 while (!stack.is_empty ())
251 {
252 basic_block node;
253 edge e;
254 edge_iterator ei;
255
256 node = stack.pop ();
257
258 FOR_EACH_EDGE (e, ei, node->preds)
259 {
260 basic_block ancestor = e->src;
261
262 if (ancestor->loop_father != loop)
263 {
264 ancestor->loop_father = loop;
265 num_nodes++;
266 stack.safe_push (ancestor);
267 }
268 }
269 }
270 }
271 stack.release ();
272
273 return num_nodes;
274 }
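/* Note on the algorithm above: flow_loop_nodes_find performs a backward
   depth-first walk seeded by the latch predecessors of the header that are
   dominated by it, following predecessor edges and claiming every block it
   reaches for LOOP.  The header is claimed up front, so the walk never
   escapes through it, and blocks already owned by LOOP end a branch of the
   walk; each block is therefore visited at most once and the cost is linear
   in the size of the loop.  */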
275
276 /* Records the vector of superloops of the loop LOOP, whose immediate
277 superloop is FATHER. */
278
279 static void
280 establish_preds (struct loop *loop, struct loop *father)
281 {
282 loop_p ploop;
283 unsigned depth = loop_depth (father) + 1;
284 unsigned i;
285
286 loop->superloops = 0;
287 vec_alloc (loop->superloops, depth);
288 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
289 loop->superloops->quick_push (ploop);
290 loop->superloops->quick_push (father);
291
292 for (ploop = loop->inner; ploop; ploop = ploop->next)
293 establish_preds (ploop, loop);
294 }
295
 296 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
 297 added loop. If LOOP has some children, make sure that their
 298 superloop vectors are initialized correctly. */
299
300 void
301 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
302 {
303 loop->next = father->inner;
304 father->inner = loop;
305
306 establish_preds (loop, father);
307 }
308
309 /* Remove LOOP from the loop hierarchy tree. */
310
311 void
312 flow_loop_tree_node_remove (struct loop *loop)
313 {
314 struct loop *prev, *father;
315
316 father = loop_outer (loop);
317
318 /* Remove loop from the list of sons. */
319 if (father->inner == loop)
320 father->inner = loop->next;
321 else
322 {
323 for (prev = father->inner; prev->next != loop; prev = prev->next)
324 continue;
325 prev->next = loop->next;
326 }
327
328 loop->superloops = NULL;
329 }
330
331 /* Allocates and returns new loop structure. */
332
333 struct loop *
334 alloc_loop (void)
335 {
336 struct loop *loop = ggc_cleared_alloc<struct loop> ();
337
338 loop->exits = ggc_cleared_alloc<loop_exit> ();
339 loop->exits->next = loop->exits->prev = loop->exits;
340 loop->can_be_parallel = false;
341 loop->nb_iterations_upper_bound = 0;
342 loop->nb_iterations_likely_upper_bound = 0;
343 loop->nb_iterations_estimate = 0;
344 return loop;
345 }
346
 347 /* Initializes the loops structure LOOPS, reserving space for NUM_LOOPS loops
348 (including the root of the loop tree). */
349
350 void
351 init_loops_structure (struct function *fn,
352 struct loops *loops, unsigned num_loops)
353 {
354 struct loop *root;
355
356 memset (loops, 0, sizeof *loops);
357 vec_alloc (loops->larray, num_loops);
358
359 /* Dummy loop containing whole function. */
360 root = alloc_loop ();
361 root->num_nodes = n_basic_blocks_for_fn (fn);
362 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
363 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
364 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
365 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
366
367 loops->larray->quick_push (root);
368 loops->tree_root = root;
369 }
370
371 /* Returns whether HEADER is a loop header. */
372
373 bool
374 bb_loop_header_p (basic_block header)
375 {
376 edge_iterator ei;
377 edge e;
378
379 /* If we have an abnormal predecessor, do not consider the
380 loop (not worth the problems). */
381 if (bb_has_abnormal_pred (header))
382 return false;
383
384 /* Look for back edges where a predecessor is dominated
385 by this block. A natural loop has a single entry
386 node (header) that dominates all the nodes in the
387 loop. It also has single back edge to the header
388 from a latch node. */
389 FOR_EACH_EDGE (e, ei, header->preds)
390 {
391 basic_block latch = e->src;
392 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
393 && dominated_by_p (CDI_DOMINATORS, latch, header))
394 return true;
395 }
396
397 return false;
398 }
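/* Illustrative example for bb_loop_header_p: in the CFG

     ENTRY -> A -> B -> C -> EXIT
                   ^    |
                   +----+

   block C is dominated by B and the edge C -> B is a back edge, so
   bb_loop_header_p (B) returns true, whereas it returns false for A
   (no back edge) and for C (its predecessor B is not dominated by C).  */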
399
 400 /* Find all the natural loops in the function, save them in the LOOPS structure,
 401 and recalculate the loop_father information in the basic block structures.
402 If LOOPS is non-NULL then the loop structures for already recorded loops
403 will be re-used and their number will not change. We assume that no
404 stale loops exist in LOOPS.
405 When LOOPS is NULL it is allocated and re-built from scratch.
406 Return the built LOOPS structure. */
407
408 struct loops *
409 flow_loops_find (struct loops *loops)
410 {
411 bool from_scratch = (loops == NULL);
412 int *rc_order;
413 int b;
414 unsigned i;
415
416 /* Ensure that the dominators are computed. */
417 calculate_dominance_info (CDI_DOMINATORS);
418
419 if (!loops)
420 {
421 loops = ggc_cleared_alloc<struct loops> ();
422 init_loops_structure (cfun, loops, 1);
423 }
424
425 /* Ensure that loop exits were released. */
426 gcc_assert (loops->exits == NULL);
427
428 /* Taking care of this degenerate case makes the rest of
429 this code simpler. */
430 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
431 return loops;
432
433 /* The root loop node contains all basic-blocks. */
434 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
435
436 /* Compute depth first search order of the CFG so that outer
437 natural loops will be found before inner natural loops. */
438 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
439 pre_and_rev_post_order_compute (NULL, rc_order, false);
440
441 /* Gather all loop headers in reverse completion order and allocate
442 loop structures for loops that are not already present. */
443 auto_vec<loop_p> larray (loops->larray->length ());
444 for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
445 {
446 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
447 if (bb_loop_header_p (header))
448 {
449 struct loop *loop;
450
451 /* The current active loop tree has valid loop-fathers for
452 header blocks. */
453 if (!from_scratch
454 && header->loop_father->header == header)
455 {
456 loop = header->loop_father;
457 /* If we found an existing loop remove it from the
458 loop tree. It is going to be inserted again
459 below. */
460 flow_loop_tree_node_remove (loop);
461 }
462 else
463 {
464 /* Otherwise allocate a new loop structure for the loop. */
465 loop = alloc_loop ();
466 /* ??? We could re-use unused loop slots here. */
467 loop->num = loops->larray->length ();
468 vec_safe_push (loops->larray, loop);
469 loop->header = header;
470
471 if (!from_scratch
472 && dump_file && (dump_flags & TDF_DETAILS))
473 fprintf (dump_file, "flow_loops_find: discovered new "
474 "loop %d with header %d\n",
475 loop->num, header->index);
476 }
477 /* Reset latch, we recompute it below. */
478 loop->latch = NULL;
479 larray.safe_push (loop);
480 }
481
482 /* Make blocks part of the loop root node at start. */
483 header->loop_father = loops->tree_root;
484 }
485
486 free (rc_order);
487
488 /* Now iterate over the loops found, insert them into the loop tree
489 and assign basic-block ownership. */
490 for (i = 0; i < larray.length (); ++i)
491 {
492 struct loop *loop = larray[i];
493 basic_block header = loop->header;
494 edge_iterator ei;
495 edge e;
496
497 flow_loop_tree_node_add (header->loop_father, loop);
498 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
499
500 /* Look for the latch for this header block, if it has just a
501 single one. */
502 FOR_EACH_EDGE (e, ei, header->preds)
503 {
504 basic_block latch = e->src;
505
506 if (flow_bb_inside_loop_p (loop, latch))
507 {
508 if (loop->latch != NULL)
509 {
510 /* More than one latch edge. */
511 loop->latch = NULL;
512 break;
513 }
514 loop->latch = latch;
515 }
516 }
517 }
518
519 return loops;
520 }
521
 522 /* Ratio of edge frequencies used to decide that one of the latch edges
 523 belongs to an inner loop with the same header. */
524 #define HEAVY_EDGE_RATIO 8
525
 526 /* Minimum number of samples for which we apply the
 527 find_subloop_latch_edge_by_profile heuristic. */
528 #define HEAVY_EDGE_MIN_SAMPLES 10
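/* Worked example for the two parameters above: with HEAVY_EDGE_RATIO 8,
   find_subloop_latch_edge_by_profile accepts the heaviest latch edge only
   when all the remaining latch edges together carry at most 1/8 of the
   total count.  E.g. for two latch edges with counts 700 and 50,
   tcount = 750 and mcount = 700, so (750 - 700) * 8 = 400 <= 750 and the
   edge with count 700 is returned; for counts 700 and 200,
   (900 - 700) * 8 = 1600 > 900 and NULL is returned instead.  */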
529
 530 /* If the profile info is available, finds an edge in LATCHES that is much more
531 frequent than the remaining edges. Returns such an edge, or NULL if we do
532 not find one.
533
534 We do not use guessed profile here, only the measured one. The guessed
535 profile is usually too flat and unreliable for this (and it is mostly based
536 on the loop structure of the program, so it does not make much sense to
537 derive the loop structure from it). */
538
539 static edge
540 find_subloop_latch_edge_by_profile (vec<edge> latches)
541 {
542 unsigned i;
543 edge e, me = NULL;
544 gcov_type mcount = 0, tcount = 0;
545
546 FOR_EACH_VEC_ELT (latches, i, e)
547 {
548 if (e->count > mcount)
549 {
550 me = e;
551 mcount = e->count;
552 }
553 tcount += e->count;
554 }
555
556 if (tcount < HEAVY_EDGE_MIN_SAMPLES
557 || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
558 return NULL;
559
560 if (dump_file)
561 fprintf (dump_file,
562 "Found latch edge %d -> %d using profile information.\n",
563 me->src->index, me->dest->index);
564 return me;
565 }
566
 567 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
568 on the structure of induction variables. Returns this edge, or NULL if we
569 do not find any.
570
571 We are quite conservative, and look just for an obvious simple innermost
572 loop (which is the case where we would lose the most performance by not
573 disambiguating the loop). More precisely, we look for the following
574 situation: The source of the chosen latch edge dominates sources of all
575 the other latch edges. Additionally, the header does not contain a phi node
576 such that the argument from the chosen edge is equal to the argument from
577 another edge. */
578
579 static edge
580 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
581 {
582 edge e, latch = latches[0];
583 unsigned i;
584 gphi *phi;
585 gphi_iterator psi;
586 tree lop;
587 basic_block bb;
588
589 /* Find the candidate for the latch edge. */
590 for (i = 1; latches.iterate (i, &e); i++)
591 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
592 latch = e;
593
594 /* Verify that it dominates all the latch edges. */
595 FOR_EACH_VEC_ELT (latches, i, e)
596 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
597 return NULL;
598
599 /* Check for a phi node that would deny that this is a latch edge of
600 a subloop. */
601 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
602 {
603 phi = psi.phi ();
604 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
605
606 /* Ignore the values that are not changed inside the subloop. */
607 if (TREE_CODE (lop) != SSA_NAME
608 || SSA_NAME_DEF_STMT (lop) == phi)
609 continue;
610 bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
611 if (!bb || !flow_bb_inside_loop_p (loop, bb))
612 continue;
613
614 FOR_EACH_VEC_ELT (latches, i, e)
615 if (e != latch
616 && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
617 return NULL;
618 }
619
620 if (dump_file)
621 fprintf (dump_file,
622 "Found latch edge %d -> %d using iv structure.\n",
623 latch->src->index, latch->dest->index);
624 return latch;
625 }
626
627 /* If we can determine that one of the several latch edges of LOOP behaves
628 as a latch edge of a separate subloop, returns this edge. Otherwise
629 returns NULL. */
630
631 static edge
632 find_subloop_latch_edge (struct loop *loop)
633 {
634 vec<edge> latches = get_loop_latch_edges (loop);
635 edge latch = NULL;
636
637 if (latches.length () > 1)
638 {
639 latch = find_subloop_latch_edge_by_profile (latches);
640
641 if (!latch
642 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
643 should use cfghook for this, but it is hard to imagine it would
644 be useful elsewhere. */
645 && current_ir_type () == IR_GIMPLE)
646 latch = find_subloop_latch_edge_by_ivs (loop, latches);
647 }
648
649 latches.release ();
650 return latch;
651 }
652
653 /* Callback for make_forwarder_block. Returns true if the edge E is marked
654 in the set MFB_REIS_SET. */
655
656 static hash_set<edge> *mfb_reis_set;
657 static bool
658 mfb_redirect_edges_in_set (edge e)
659 {
660 return mfb_reis_set->contains (e);
661 }
662
663 /* Creates a subloop of LOOP with latch edge LATCH. */
664
665 static void
666 form_subloop (struct loop *loop, edge latch)
667 {
668 edge_iterator ei;
669 edge e, new_entry;
670 struct loop *new_loop;
671
672 mfb_reis_set = new hash_set<edge>;
673 FOR_EACH_EDGE (e, ei, loop->header->preds)
674 {
675 if (e != latch)
676 mfb_reis_set->add (e);
677 }
678 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
679 NULL);
680 delete mfb_reis_set;
681
682 loop->header = new_entry->src;
683
684 /* Find the blocks and subloops that belong to the new loop, and add it to
685 the appropriate place in the loop tree. */
686 new_loop = alloc_loop ();
687 new_loop->header = new_entry->dest;
688 new_loop->latch = latch->src;
689 add_loop (new_loop, loop);
690 }
691
 692 /* Make all the latch edges of LOOP go to a single forwarder block --
693 a new latch of LOOP. */
694
695 static void
696 merge_latch_edges (struct loop *loop)
697 {
698 vec<edge> latches = get_loop_latch_edges (loop);
699 edge latch, e;
700 unsigned i;
701
702 gcc_assert (latches.length () > 0);
703
704 if (latches.length () == 1)
705 loop->latch = latches[0]->src;
706 else
707 {
708 if (dump_file)
709 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
710
711 mfb_reis_set = new hash_set<edge>;
712 FOR_EACH_VEC_ELT (latches, i, e)
713 mfb_reis_set->add (e);
714 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
715 NULL);
716 delete mfb_reis_set;
717
718 loop->header = latch->dest;
719 loop->latch = latch->src;
720 }
721
722 latches.release ();
723 }
724
725 /* LOOP may have several latch edges. Transform it into (possibly several)
 726 loops with a single latch edge. */
727
728 static void
729 disambiguate_multiple_latches (struct loop *loop)
730 {
731 edge e;
732
733 /* We eliminate the multiple latches by splitting the header to the forwarder
734 block F and the rest R, and redirecting the edges. There are two cases:
735
736 1) If there is a latch edge E that corresponds to a subloop (we guess
737 that based on profile -- if it is taken much more often than the
738 remaining edges; and on trees, using the information about induction
739 variables of the loops), we redirect E to R, all the remaining edges to
740 F, then rescan the loops and try again for the outer loop.
741 2) If there is no such edge, we redirect all latch edges to F, and the
742 entry edges to R, thus making F the single latch of the loop. */
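  /* An illustrative sketch of case 2), for a header H with entry edge e
     and latch edges l1, l2:

       before:   e -> H,   l1 -> H,   l2 -> H
       after:    e -> R,   l1 -> F,   l2 -> F,   F -> R

     R keeps the role of loop header and single entry point, while the
     forwarder F collects the back edges and becomes the single latch.
     Case 1) is analogous, except that the guessed subloop latch edge is
     the one redirected to R, so R heads a new inner loop whose latch is
     that edge's source, and F heads the outer loop.  */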
743
744 if (dump_file)
745 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
746 loop->num);
747
748 /* During latch merging, we may need to redirect the entry edges to a new
749 block. This would cause problems if the entry edge was the one from the
750 entry block. To avoid having to handle this case specially, split
751 such entry edge. */
752 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
753 if (e)
754 split_edge (e);
755
756 while (1)
757 {
758 e = find_subloop_latch_edge (loop);
759 if (!e)
760 break;
761
762 form_subloop (loop, e);
763 }
764
765 merge_latch_edges (loop);
766 }
767
768 /* Split loops with multiple latch edges. */
769
770 void
771 disambiguate_loops_with_multiple_latches (void)
772 {
773 struct loop *loop;
774
775 FOR_EACH_LOOP (loop, 0)
776 {
777 if (!loop->latch)
778 disambiguate_multiple_latches (loop);
779 }
780 }
781
782 /* Return nonzero if basic block BB belongs to LOOP. */
783 bool
784 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
785 {
786 struct loop *source_loop;
787
788 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
789 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
790 return 0;
791
792 source_loop = bb->loop_father;
793 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
794 }
795
796 /* Enumeration predicate for get_loop_body_with_size. */
797 static bool
798 glb_enum_p (const_basic_block bb, const void *glb_loop)
799 {
800 const struct loop *const loop = (const struct loop *) glb_loop;
801 return (bb != loop->header
802 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
803 }
804
 805 /* Gets the basic blocks of a LOOP. The header is the 0th block, the rest are in dfs
 806 order against the direction of edges from the latch. In particular, if
 807 header != latch, the latch is the first block. LOOP cannot be the fake
808 loop tree root, and its size must be at most MAX_SIZE. The blocks
809 in the LOOP body are stored to BODY, and the size of the LOOP is
810 returned. */
811
812 unsigned
813 get_loop_body_with_size (const struct loop *loop, basic_block *body,
814 unsigned max_size)
815 {
816 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
817 body, max_size, loop);
818 }
819
 820 /* Gets the basic blocks of a LOOP. The header is the 0th block, the rest are in dfs
 821 order against the direction of edges from the latch. In particular, if
 822 header != latch, the latch is the first block. */
823
824 basic_block *
825 get_loop_body (const struct loop *loop)
826 {
827 basic_block *body, bb;
828 unsigned tv = 0;
829
830 gcc_assert (loop->num_nodes);
831
832 body = XNEWVEC (basic_block, loop->num_nodes);
833
834 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
835 {
836 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
837 special-case the fake loop that contains the whole function. */
838 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
839 body[tv++] = loop->header;
840 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
841 FOR_EACH_BB_FN (bb, cfun)
842 body[tv++] = bb;
843 }
844 else
845 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
846
847 gcc_assert (tv == loop->num_nodes);
848 return body;
849 }
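/* Usage sketch for get_loop_body: the caller owns the returned array and
   must free it, e.g. (process_block being a hypothetical per-block action)

     basic_block *body = get_loop_body (loop);
     for (unsigned i = 0; i < loop->num_nodes; i++)
       process_block (body[i]);
     free (body);

   flow_loop_dump and num_loop_branches in this file follow the same
   pattern.  */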
850
851 /* Fills dominance descendants inside LOOP of the basic block BB into
852 array TOVISIT from index *TV. */
853
854 static void
855 fill_sons_in_loop (const struct loop *loop, basic_block bb,
856 basic_block *tovisit, int *tv)
857 {
858 basic_block son, postpone = NULL;
859
860 tovisit[(*tv)++] = bb;
861 for (son = first_dom_son (CDI_DOMINATORS, bb);
862 son;
863 son = next_dom_son (CDI_DOMINATORS, son))
864 {
865 if (!flow_bb_inside_loop_p (loop, son))
866 continue;
867
868 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
869 {
870 postpone = son;
871 continue;
872 }
873 fill_sons_in_loop (loop, son, tovisit, tv);
874 }
875
876 if (postpone)
877 fill_sons_in_loop (loop, postpone, tovisit, tv);
878 }
879
880 /* Gets body of a LOOP (that must be different from the outermost loop)
881 sorted by dominance relation. Additionally, if a basic block s dominates
 882 the latch, then only blocks dominated by s can appear after it. */
883
884 basic_block *
885 get_loop_body_in_dom_order (const struct loop *loop)
886 {
887 basic_block *tovisit;
888 int tv;
889
890 gcc_assert (loop->num_nodes);
891
892 tovisit = XNEWVEC (basic_block, loop->num_nodes);
893
894 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
895
896 tv = 0;
897 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
898
899 gcc_assert (tv == (int) loop->num_nodes);
900
901 return tovisit;
902 }
903
904 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
905
906 basic_block *
907 get_loop_body_in_custom_order (const struct loop *loop,
908 int (*bb_comparator) (const void *, const void *))
909 {
910 basic_block *bbs = get_loop_body (loop);
911
912 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
913
914 return bbs;
915 }
916
917 /* Get body of a LOOP in breadth first sort order. */
918
919 basic_block *
920 get_loop_body_in_bfs_order (const struct loop *loop)
921 {
922 basic_block *blocks;
923 basic_block bb;
924 bitmap visited;
925 unsigned int i = 1;
926 unsigned int vc = 0;
927
928 gcc_assert (loop->num_nodes);
929 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
930
931 blocks = XNEWVEC (basic_block, loop->num_nodes);
932 visited = BITMAP_ALLOC (NULL);
933 blocks[0] = loop->header;
934 bitmap_set_bit (visited, loop->header->index);
935 while (i < loop->num_nodes)
936 {
937 edge e;
938 edge_iterator ei;
939 gcc_assert (i > vc);
940 bb = blocks[vc++];
941
942 FOR_EACH_EDGE (e, ei, bb->succs)
943 {
944 if (flow_bb_inside_loop_p (loop, e->dest))
945 {
946 /* This bb is now visited. */
947 if (bitmap_set_bit (visited, e->dest->index))
948 blocks[i++] = e->dest;
949 }
950 }
951 }
952
953 BITMAP_FREE (visited);
954 return blocks;
955 }
956
957 /* Hash function for struct loop_exit. */
958
959 hashval_t
960 loop_exit_hasher::hash (loop_exit *exit)
961 {
962 return htab_hash_pointer (exit->e);
963 }
964
965 /* Equality function for struct loop_exit. Compares with edge. */
966
967 bool
968 loop_exit_hasher::equal (loop_exit *exit, edge e)
969 {
970 return exit->e == e;
971 }
972
 973 /* Frees the list of loop exit descriptions starting at EXIT. */
974
975 void
976 loop_exit_hasher::remove (loop_exit *exit)
977 {
978 loop_exit *next;
979 for (; exit; exit = next)
980 {
981 next = exit->next_e;
982
983 exit->next->prev = exit->prev;
984 exit->prev->next = exit->next;
985
986 ggc_free (exit);
987 }
988 }
989
990 /* Returns the list of records for E as an exit of a loop. */
991
992 static struct loop_exit *
993 get_exit_descriptions (edge e)
994 {
995 return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
996 }
997
 998 /* Updates the lists of loop exits in which E appears.
999 If REMOVED is true, E is being removed, and we
1000 just remove it from the lists of exits.
1001 If NEW_EDGE is true and E is not a loop exit, we
1002 do not try to remove it from loop exit lists. */
1003
1004 void
1005 rescan_loop_exit (edge e, bool new_edge, bool removed)
1006 {
1007 struct loop_exit *exits = NULL, *exit;
1008 struct loop *aloop, *cloop;
1009
1010 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1011 return;
1012
1013 if (!removed
1014 && e->src->loop_father != NULL
1015 && e->dest->loop_father != NULL
1016 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1017 {
1018 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1019 for (aloop = e->src->loop_father;
1020 aloop != cloop;
1021 aloop = loop_outer (aloop))
1022 {
1023 exit = ggc_alloc<loop_exit> ();
1024 exit->e = e;
1025
1026 exit->next = aloop->exits->next;
1027 exit->prev = aloop->exits;
1028 exit->next->prev = exit;
1029 exit->prev->next = exit;
1030
1031 exit->next_e = exits;
1032 exits = exit;
1033 }
1034 }
1035
1036 if (!exits && new_edge)
1037 return;
1038
1039 loop_exit **slot
1040 = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1041 exits ? INSERT : NO_INSERT);
1042 if (!slot)
1043 return;
1044
1045 if (exits)
1046 {
1047 if (*slot)
1048 loop_exit_hasher::remove (*slot);
1049 *slot = exits;
1050 }
1051 else
1052 current_loops->exits->clear_slot (slot);
1053 }
1054
1055 /* For each loop, record list of exit edges, and start maintaining these
1056 lists. */
1057
1058 void
1059 record_loop_exits (void)
1060 {
1061 basic_block bb;
1062 edge_iterator ei;
1063 edge e;
1064
1065 if (!current_loops)
1066 return;
1067
1068 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1069 return;
1070 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1071
1072 gcc_assert (current_loops->exits == NULL);
1073 current_loops->exits
1074 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1075
1076 FOR_EACH_BB_FN (bb, cfun)
1077 {
1078 FOR_EACH_EDGE (e, ei, bb->succs)
1079 {
1080 rescan_loop_exit (e, true, false);
1081 }
1082 }
1083 }
1084
1085 /* Dumps information about the exit in *SLOT to FILE.
1086 Callback for htab_traverse. */
1087
1088 int
1089 dump_recorded_exit (loop_exit **slot, FILE *file)
1090 {
1091 struct loop_exit *exit = *slot;
1092 unsigned n = 0;
1093 edge e = exit->e;
1094
1095 for (; exit != NULL; exit = exit->next_e)
1096 n++;
1097
1098 fprintf (file, "Edge %d->%d exits %u loops\n",
1099 e->src->index, e->dest->index, n);
1100
1101 return 1;
1102 }
1103
1104 /* Dumps the recorded exits of loops to FILE. */
1105
1106 extern void dump_recorded_exits (FILE *);
1107 void
1108 dump_recorded_exits (FILE *file)
1109 {
1110 if (!current_loops->exits)
1111 return;
1112 current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
1113 }
1114
1115 /* Releases lists of loop exits. */
1116
1117 void
1118 release_recorded_exits (function *fn)
1119 {
1120 gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
1121 loops_for_fn (fn)->exits->empty ();
1122 loops_for_fn (fn)->exits = NULL;
1123 loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
1124 }
1125
1126 /* Returns the list of the exit edges of a LOOP. */
1127
1128 vec<edge>
1129 get_loop_exit_edges (const struct loop *loop)
1130 {
1131 vec<edge> edges = vNULL;
1132 edge e;
1133 unsigned i;
1134 basic_block *body;
1135 edge_iterator ei;
1136 struct loop_exit *exit;
1137
1138 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1139
1140 /* If we maintain the lists of exits, use them. Otherwise we must
1141 scan the body of the loop. */
1142 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1143 {
1144 for (exit = loop->exits->next; exit->e; exit = exit->next)
1145 edges.safe_push (exit->e);
1146 }
1147 else
1148 {
1149 body = get_loop_body (loop);
1150 for (i = 0; i < loop->num_nodes; i++)
1151 FOR_EACH_EDGE (e, ei, body[i]->succs)
1152 {
1153 if (!flow_bb_inside_loop_p (loop, e->dest))
1154 edges.safe_push (e);
1155 }
1156 free (body);
1157 }
1158
1159 return edges;
1160 }
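/* Usage sketch for get_loop_exit_edges: the returned vector must be
   released by the caller, e.g.

     vec<edge> exits = get_loop_exit_edges (loop);
     unsigned i;
     edge ex;
     FOR_EACH_VEC_ELT (exits, i, ex)
       if (ex->flags & EDGE_ABNORMAL)
         break;
     exits.release ();

   where the EDGE_ABNORMAL test merely stands in for whatever per-exit
   check a caller might need.  */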
1161
1162 /* Counts the number of conditional branches inside LOOP. */
1163
1164 unsigned
1165 num_loop_branches (const struct loop *loop)
1166 {
1167 unsigned i, n;
1168 basic_block * body;
1169
1170 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1171
1172 body = get_loop_body (loop);
1173 n = 0;
1174 for (i = 0; i < loop->num_nodes; i++)
1175 if (EDGE_COUNT (body[i]->succs) >= 2)
1176 n++;
1177 free (body);
1178
1179 return n;
1180 }
1181
1182 /* Adds basic block BB to LOOP. */
1183 void
1184 add_bb_to_loop (basic_block bb, struct loop *loop)
1185 {
1186 unsigned i;
1187 loop_p ploop;
1188 edge_iterator ei;
1189 edge e;
1190
1191 gcc_assert (bb->loop_father == NULL);
1192 bb->loop_father = loop;
1193 loop->num_nodes++;
1194 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1195 ploop->num_nodes++;
1196
1197 FOR_EACH_EDGE (e, ei, bb->succs)
1198 {
1199 rescan_loop_exit (e, true, false);
1200 }
1201 FOR_EACH_EDGE (e, ei, bb->preds)
1202 {
1203 rescan_loop_exit (e, true, false);
1204 }
1205 }
1206
1207 /* Remove basic block BB from loops. */
1208 void
1209 remove_bb_from_loops (basic_block bb)
1210 {
1211 unsigned i;
1212 struct loop *loop = bb->loop_father;
1213 loop_p ploop;
1214 edge_iterator ei;
1215 edge e;
1216
1217 gcc_assert (loop != NULL);
1218 loop->num_nodes--;
1219 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1220 ploop->num_nodes--;
1221 bb->loop_father = NULL;
1222
1223 FOR_EACH_EDGE (e, ei, bb->succs)
1224 {
1225 rescan_loop_exit (e, false, true);
1226 }
1227 FOR_EACH_EDGE (e, ei, bb->preds)
1228 {
1229 rescan_loop_exit (e, false, true);
1230 }
1231 }
1232
1233 /* Finds nearest common ancestor in loop tree for given loops. */
1234 struct loop *
1235 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1236 {
1237 unsigned sdepth, ddepth;
1238
1239 if (!loop_s) return loop_d;
1240 if (!loop_d) return loop_s;
1241
1242 sdepth = loop_depth (loop_s);
1243 ddepth = loop_depth (loop_d);
1244
1245 if (sdepth < ddepth)
1246 loop_d = (*loop_d->superloops)[sdepth];
1247 else if (sdepth > ddepth)
1248 loop_s = (*loop_s->superloops)[ddepth];
1249
1250 while (loop_s != loop_d)
1251 {
1252 loop_s = loop_outer (loop_s);
1253 loop_d = loop_outer (loop_d);
1254 }
1255 return loop_s;
1256 }
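/* Illustrative behaviour of find_common_loop: if one argument is nested
   inside the other, the deeper loop is first lifted to the depth of the
   shallower one via its superloops vector, so the outer loop itself is
   returned; for two sibling loops the walk continues upward in lockstep
   and returns their nearest common ancestor, in the worst case the dummy
   loop tree root.  */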
1257
1258 /* Removes LOOP from structures and frees its data. */
1259
1260 void
1261 delete_loop (struct loop *loop)
1262 {
1263 /* Remove the loop from structure. */
1264 flow_loop_tree_node_remove (loop);
1265
1266 /* Remove loop from loops array. */
1267 (*current_loops->larray)[loop->num] = NULL;
1268
1269 /* Free loop data. */
1270 flow_loop_free (loop);
1271 }
1272
 1273 /* Cancels the LOOP; it must be an innermost one. */
1274
1275 static void
1276 cancel_loop (struct loop *loop)
1277 {
1278 basic_block *bbs;
1279 unsigned i;
1280 struct loop *outer = loop_outer (loop);
1281
1282 gcc_assert (!loop->inner);
1283
1284 /* Move blocks up one level (they should be removed as soon as possible). */
1285 bbs = get_loop_body (loop);
1286 for (i = 0; i < loop->num_nodes; i++)
1287 bbs[i]->loop_father = outer;
1288
1289 free (bbs);
1290 delete_loop (loop);
1291 }
1292
1293 /* Cancels LOOP and all its subloops. */
1294 void
1295 cancel_loop_tree (struct loop *loop)
1296 {
1297 while (loop->inner)
1298 cancel_loop_tree (loop->inner);
1299 cancel_loop (loop);
1300 }
1301
 1302 /* Checks that the information about loops is correct:
 1303 -- the sizes of loops are all right
 1304 -- the results of get_loop_body really belong to the loop
 1305 -- each loop header has just a single entry edge and a single latch edge
 1306 -- each loop latch has only a single successor, which is the header of its loop
 1307 -- irreducible loops are correctly marked
 1308 -- the cached loop depth and loop father of each bb are correct
 1309 */
1310 DEBUG_FUNCTION void
1311 verify_loop_structure (void)
1312 {
1313 unsigned *sizes, i, j;
1314 sbitmap irreds;
1315 basic_block bb, *bbs;
1316 struct loop *loop;
1317 int err = 0;
1318 edge e;
1319 unsigned num = number_of_loops (cfun);
1320 struct loop_exit *exit, *mexit;
1321 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1322 sbitmap visited;
1323
1324 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1325 {
1326 error ("loop verification on loop tree that needs fixup");
1327 err = 1;
1328 }
1329
1330 /* We need up-to-date dominators, compute or verify them. */
1331 if (!dom_available)
1332 calculate_dominance_info (CDI_DOMINATORS);
1333 else
1334 verify_dominators (CDI_DOMINATORS);
1335
1336 /* Check the loop tree root. */
1337 if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1338 || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
1339 || (current_loops->tree_root->num_nodes
1340 != (unsigned) n_basic_blocks_for_fn (cfun)))
1341 {
1342 error ("corrupt loop tree root");
1343 err = 1;
1344 }
1345
1346 /* Check the headers. */
1347 FOR_EACH_BB_FN (bb, cfun)
1348 if (bb_loop_header_p (bb))
1349 {
1350 if (bb->loop_father->header == NULL)
1351 {
1352 error ("loop with header %d marked for removal", bb->index);
1353 err = 1;
1354 }
1355 else if (bb->loop_father->header != bb)
1356 {
1357 error ("loop with header %d not in loop tree", bb->index);
1358 err = 1;
1359 }
1360 }
1361 else if (bb->loop_father->header == bb)
1362 {
1363 error ("non-loop with header %d not marked for removal", bb->index);
1364 err = 1;
1365 }
1366
1367 /* Check the recorded loop father and sizes of loops. */
1368 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1369 bitmap_clear (visited);
1370 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1371 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1372 {
1373 unsigned n;
1374
1375 if (loop->header == NULL)
1376 {
1377 error ("removed loop %d in loop tree", loop->num);
1378 err = 1;
1379 continue;
1380 }
1381
1382 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1383 if (loop->num_nodes != n)
1384 {
1385 error ("size of loop %d should be %d, not %d",
1386 loop->num, n, loop->num_nodes);
1387 err = 1;
1388 }
1389
1390 for (j = 0; j < n; j++)
1391 {
1392 bb = bbs[j];
1393
1394 if (!flow_bb_inside_loop_p (loop, bb))
1395 {
1396 error ("bb %d does not belong to loop %d",
1397 bb->index, loop->num);
1398 err = 1;
1399 }
1400
1401 /* Ignore this block if it is in an inner loop. */
1402 if (bitmap_bit_p (visited, bb->index))
1403 continue;
1404 bitmap_set_bit (visited, bb->index);
1405
1406 if (bb->loop_father != loop)
1407 {
1408 error ("bb %d has father loop %d, should be loop %d",
1409 bb->index, bb->loop_father->num, loop->num);
1410 err = 1;
1411 }
1412 }
1413 }
1414 free (bbs);
1415 sbitmap_free (visited);
1416
1417 /* Check headers and latches. */
1418 FOR_EACH_LOOP (loop, 0)
1419 {
1420 i = loop->num;
1421 if (loop->header == NULL)
1422 continue;
1423 if (!bb_loop_header_p (loop->header))
1424 {
1425 error ("loop %d%'s header is not a loop header", i);
1426 err = 1;
1427 }
1428 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1429 && EDGE_COUNT (loop->header->preds) != 2)
1430 {
1431 error ("loop %d%'s header does not have exactly 2 entries", i);
1432 err = 1;
1433 }
1434 if (loop->latch)
1435 {
1436 if (!find_edge (loop->latch, loop->header))
1437 {
1438 error ("loop %d%'s latch does not have an edge to its header", i);
1439 err = 1;
1440 }
1441 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1442 {
1443 error ("loop %d%'s latch is not dominated by its header", i);
1444 err = 1;
1445 }
1446 }
1447 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1448 {
1449 if (!single_succ_p (loop->latch))
1450 {
1451 error ("loop %d%'s latch does not have exactly 1 successor", i);
1452 err = 1;
1453 }
1454 if (single_succ (loop->latch) != loop->header)
1455 {
1456 error ("loop %d%'s latch does not have header as successor", i);
1457 err = 1;
1458 }
1459 if (loop->latch->loop_father != loop)
1460 {
1461 error ("loop %d%'s latch does not belong directly to it", i);
1462 err = 1;
1463 }
1464 }
1465 if (loop->header->loop_father != loop)
1466 {
1467 error ("loop %d%'s header does not belong directly to it", i);
1468 err = 1;
1469 }
1470 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1471 && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1472 {
1473 error ("loop %d%'s latch is marked as part of irreducible region", i);
1474 err = 1;
1475 }
1476 }
1477
1478 /* Check irreducible loops. */
1479 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1480 {
1481 /* Record old info. */
1482 irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1483 FOR_EACH_BB_FN (bb, cfun)
1484 {
1485 edge_iterator ei;
1486 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1487 bitmap_set_bit (irreds, bb->index);
1488 else
1489 bitmap_clear_bit (irreds, bb->index);
1490 FOR_EACH_EDGE (e, ei, bb->succs)
1491 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1492 e->flags |= EDGE_ALL_FLAGS + 1;
1493 }
1494
1495 /* Recount it. */
1496 mark_irreducible_loops ();
1497
1498 /* Compare. */
1499 FOR_EACH_BB_FN (bb, cfun)
1500 {
1501 edge_iterator ei;
1502
1503 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1504 && !bitmap_bit_p (irreds, bb->index))
1505 {
1506 error ("basic block %d should be marked irreducible", bb->index);
1507 err = 1;
1508 }
1509 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1510 && bitmap_bit_p (irreds, bb->index))
1511 {
1512 error ("basic block %d should not be marked irreducible", bb->index);
1513 err = 1;
1514 }
1515 FOR_EACH_EDGE (e, ei, bb->succs)
1516 {
1517 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1518 && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1519 {
1520 error ("edge from %d to %d should be marked irreducible",
1521 e->src->index, e->dest->index);
1522 err = 1;
1523 }
1524 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1525 && (e->flags & (EDGE_ALL_FLAGS + 1)))
1526 {
1527 error ("edge from %d to %d should not be marked irreducible",
1528 e->src->index, e->dest->index);
1529 err = 1;
1530 }
1531 e->flags &= ~(EDGE_ALL_FLAGS + 1);
1532 }
1533 }
1534 free (irreds);
1535 }
1536
1537 /* Check the recorded loop exits. */
1538 FOR_EACH_LOOP (loop, 0)
1539 {
1540 if (!loop->exits || loop->exits->e != NULL)
1541 {
1542 error ("corrupted head of the exits list of loop %d",
1543 loop->num);
1544 err = 1;
1545 }
1546 else
1547 {
1548 /* Check that the list forms a cycle, and all elements except
1549 for the head are nonnull. */
1550 for (mexit = loop->exits, exit = mexit->next, i = 0;
1551 exit->e && exit != mexit;
1552 exit = exit->next)
1553 {
1554 if (i++ & 1)
1555 mexit = mexit->next;
1556 }
1557
1558 if (exit != loop->exits)
1559 {
1560 error ("corrupted exits list of loop %d", loop->num);
1561 err = 1;
1562 }
1563 }
1564
1565 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1566 {
1567 if (loop->exits->next != loop->exits)
1568 {
1569 error ("nonempty exits list of loop %d, but exits are not recorded",
1570 loop->num);
1571 err = 1;
1572 }
1573 }
1574 }
1575
1576 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1577 {
1578 unsigned n_exits = 0, eloops;
1579
1580 sizes = XCNEWVEC (unsigned, num);
1581 memset (sizes, 0, sizeof (unsigned) * num);
1582 FOR_EACH_BB_FN (bb, cfun)
1583 {
1584 edge_iterator ei;
1585 if (bb->loop_father == current_loops->tree_root)
1586 continue;
1587 FOR_EACH_EDGE (e, ei, bb->succs)
1588 {
1589 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1590 continue;
1591
1592 n_exits++;
1593 exit = get_exit_descriptions (e);
1594 if (!exit)
1595 {
1596 error ("exit %d->%d not recorded",
1597 e->src->index, e->dest->index);
1598 err = 1;
1599 }
1600 eloops = 0;
1601 for (; exit; exit = exit->next_e)
1602 eloops++;
1603
1604 for (loop = bb->loop_father;
1605 loop != e->dest->loop_father
1606 /* When a loop exit is also an entry edge which
1607 can happen when avoiding CFG manipulations
1608 then the last loop exited is the outer loop
1609 of the loop entered. */
1610 && loop != loop_outer (e->dest->loop_father);
1611 loop = loop_outer (loop))
1612 {
1613 eloops--;
1614 sizes[loop->num]++;
1615 }
1616
1617 if (eloops != 0)
1618 {
1619 error ("wrong list of exited loops for edge %d->%d",
1620 e->src->index, e->dest->index);
1621 err = 1;
1622 }
1623 }
1624 }
1625
1626 if (n_exits != current_loops->exits->elements ())
1627 {
1628 error ("too many loop exits recorded");
1629 err = 1;
1630 }
1631
1632 FOR_EACH_LOOP (loop, 0)
1633 {
1634 eloops = 0;
1635 for (exit = loop->exits->next; exit->e; exit = exit->next)
1636 eloops++;
1637 if (eloops != sizes[loop->num])
1638 {
1639 error ("%d exits recorded for loop %d (having %d exits)",
1640 eloops, loop->num, sizes[loop->num]);
1641 err = 1;
1642 }
1643 }
1644
1645 free (sizes);
1646 }
1647
1648 gcc_assert (!err);
1649
1650 if (!dom_available)
1651 free_dominance_info (CDI_DOMINATORS);
1652 }
1653
1654 /* Returns latch edge of LOOP. */
1655 edge
1656 loop_latch_edge (const struct loop *loop)
1657 {
1658 return find_edge (loop->latch, loop->header);
1659 }
1660
1661 /* Returns preheader edge of LOOP. */
1662 edge
1663 loop_preheader_edge (const struct loop *loop)
1664 {
1665 edge e;
1666 edge_iterator ei;
1667
1668 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1669
1670 FOR_EACH_EDGE (e, ei, loop->header->preds)
1671 if (e->src != loop->latch)
1672 break;
1673
1674 return e;
1675 }
1676
1677 /* Returns true if E is an exit of LOOP. */
1678
1679 bool
1680 loop_exit_edge_p (const struct loop *loop, const_edge e)
1681 {
1682 return (flow_bb_inside_loop_p (loop, e->src)
1683 && !flow_bb_inside_loop_p (loop, e->dest));
1684 }
1685
1686 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1687 or more than one exit. If loops do not have the exits recorded, NULL
 1688 is always returned. */
1689
1690 edge
1691 single_exit (const struct loop *loop)
1692 {
1693 struct loop_exit *exit = loop->exits->next;
1694
1695 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1696 return NULL;
1697
1698 if (exit->e && exit->next == loop->exits)
1699 return exit->e;
1700 else
1701 return NULL;
1702 }
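/* Usage sketch for single_exit: passes that can only handle single-exit
   loops typically guard on it, roughly

     edge exit = single_exit (loop);
     if (!exit)
       return false;

   where the NULL result covers loops with no exit, loops with several
   exits, and the case where LOOPS_HAVE_RECORDED_EXITS is not in effect.  */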
1703
1704 /* Returns true when BB has an incoming edge exiting LOOP. */
1705
1706 bool
1707 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1708 {
1709 edge e;
1710 edge_iterator ei;
1711
1712 FOR_EACH_EDGE (e, ei, bb->preds)
1713 if (loop_exit_edge_p (loop, e))
1714 return true;
1715
1716 return false;
1717 }
1718
1719 /* Returns true when BB has an outgoing edge exiting LOOP. */
1720
1721 bool
1722 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1723 {
1724 edge e;
1725 edge_iterator ei;
1726
1727 FOR_EACH_EDGE (e, ei, bb->succs)
1728 if (loop_exit_edge_p (loop, e))
1729 return true;
1730
1731 return false;
1732 }
1733
1734 /* Return location corresponding to the loop control condition if possible. */
1735
1736 location_t
1737 get_loop_location (struct loop *loop)
1738 {
1739 rtx_insn *insn = NULL;
1740 struct niter_desc *desc = NULL;
1741 edge exit;
1742
1743 /* For a for or while loop, we would like to return the location
1744 of the for or while statement, if possible. To do this, look
1745 for the branch guarding the loop back-edge. */
1746
1747 /* If this is a simple loop with an in_edge, then the loop control
1748 branch is typically at the end of its source. */
1749 desc = get_simple_loop_desc (loop);
1750 if (desc->in_edge)
1751 {
1752 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1753 {
1754 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1755 return INSN_LOCATION (insn);
1756 }
1757 }
1758 /* If loop has a single exit, then the loop control branch
1759 must be at the end of its source. */
1760 if ((exit = single_exit (loop)))
1761 {
1762 FOR_BB_INSNS_REVERSE (exit->src, insn)
1763 {
1764 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1765 return INSN_LOCATION (insn);
1766 }
1767 }
1768 /* Next check the latch, to see if it is non-empty. */
1769 FOR_BB_INSNS_REVERSE (loop->latch, insn)
1770 {
1771 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1772 return INSN_LOCATION (insn);
1773 }
1774 /* Finally, if none of the above identifies the loop control branch,
1775 return the first location in the loop header. */
1776 FOR_BB_INSNS (loop->header, insn)
1777 {
1778 if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1779 return INSN_LOCATION (insn);
1780 }
1781 /* If all else fails, simply return the current function location. */
1782 return DECL_SOURCE_LOCATION (current_function_decl);
1783 }
1784
1785 /* Records that every statement in LOOP is executed I_BOUND times.
1786 REALISTIC is true if I_BOUND is expected to be close to the real number
1787 of iterations. UPPER is true if we are sure the loop iterates at most
1788 I_BOUND times. */
1789
1790 void
1791 record_niter_bound (struct loop *loop, const widest_int &i_bound,
1792 bool realistic, bool upper)
1793 {
1794 /* Update the bounds only when there is no previous estimation, or when the
1795 current estimation is smaller. */
1796 if (upper
1797 && (!loop->any_upper_bound
1798 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1799 {
1800 loop->any_upper_bound = true;
1801 loop->nb_iterations_upper_bound = i_bound;
1802 if (!loop->any_likely_upper_bound)
1803 {
1804 loop->any_likely_upper_bound = true;
1805 loop->nb_iterations_likely_upper_bound = i_bound;
1806 }
1807 }
1808 if (realistic
1809 && (!loop->any_estimate
1810 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1811 {
1812 loop->any_estimate = true;
1813 loop->nb_iterations_estimate = i_bound;
1814 }
1815 if (!realistic
1816 && (!loop->any_likely_upper_bound
1817 || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
1818 {
1819 loop->any_likely_upper_bound = true;
1820 loop->nb_iterations_likely_upper_bound = i_bound;
1821 }
1822
1823 /* If an upper bound is smaller than the realistic estimate of the
1824 number of iterations, use the upper bound instead. */
1825 if (loop->any_upper_bound
1826 && loop->any_estimate
1827 && wi::ltu_p (loop->nb_iterations_upper_bound,
1828 loop->nb_iterations_estimate))
1829 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1830 if (loop->any_upper_bound
1831 && loop->any_likely_upper_bound
1832 && wi::ltu_p (loop->nb_iterations_upper_bound,
1833 loop->nb_iterations_likely_upper_bound))
1834 loop->nb_iterations_likely_upper_bound = loop->nb_iterations_upper_bound;
1835 }
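/* Worked example for record_niter_bound: starting with no recorded bounds,
   a call with I_BOUND = 100, REALISTIC = false, UPPER = true sets both the
   upper bound and the likely upper bound to 100.  A later call with
   I_BOUND = 8, REALISTIC = true, UPPER = false records an estimate of 8 and
   leaves the upper bounds alone, while a subsequent call with I_BOUND = 4
   and UPPER = true also drags the estimate and the likely upper bound down
   to 4 through the final adjustments above.  */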
1836
1837 /* Similar to get_estimated_loop_iterations, but returns the estimate only
 1838 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1839 on the number of iterations of LOOP could not be derived, returns -1. */
1840
1841 HOST_WIDE_INT
1842 get_estimated_loop_iterations_int (struct loop *loop)
1843 {
1844 widest_int nit;
1845 HOST_WIDE_INT hwi_nit;
1846
1847 if (!get_estimated_loop_iterations (loop, &nit))
1848 return -1;
1849
1850 if (!wi::fits_shwi_p (nit))
1851 return -1;
1852 hwi_nit = nit.to_shwi ();
1853
1854 return hwi_nit < 0 ? -1 : hwi_nit;
1855 }
1856
1857 /* Returns an upper bound on the number of executions of statements
1858 in the LOOP. For statements before the loop exit, this exceeds
 1859 the number of executions of the latch by one. */
1860
1861 HOST_WIDE_INT
1862 max_stmt_executions_int (struct loop *loop)
1863 {
1864 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1865 HOST_WIDE_INT snit;
1866
1867 if (nit == -1)
1868 return -1;
1869
1870 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1871
1872 /* If the computation overflows, return -1. */
1873 return snit < 0 ? -1 : snit;
1874 }
1875
 1876 /* Returns a likely upper bound on the number of executions of statements
 1877 in the LOOP. For statements before the loop exit, this exceeds
 1878 the number of executions of the latch by one. */
1879
1880 HOST_WIDE_INT
1881 likely_max_stmt_executions_int (struct loop *loop)
1882 {
1883 HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
1884 HOST_WIDE_INT snit;
1885
1886 if (nit == -1)
1887 return -1;
1888
1889 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1890
1891 /* If the computation overflows, return -1. */
1892 return snit < 0 ? -1 : snit;
1893 }
1894
1895 /* Sets NIT to the estimated number of executions of the latch of the
1896 LOOP. If we have no reliable estimate, the function returns false, otherwise
1897 returns true. */
1898
1899 bool
1900 get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
1901 {
 1902 /* Even if the bound is not recorded, we may be able to derive one from the
 1903 profile. */
1904 if (!loop->any_estimate)
1905 {
1906 if (loop->header->count)
1907 {
1908 *nit = gcov_type_to_wide_int
1909 (expected_loop_iterations_unbounded (loop) + 1);
1910 return true;
1911 }
1912 return false;
1913 }
1914
1915 *nit = loop->nb_iterations_estimate;
1916 return true;
1917 }
1918
1919 /* Sets NIT to an upper bound for the maximum number of executions of the
1920 latch of the LOOP. If we have no reliable estimate, the function returns
1921 false, otherwise returns true. */
1922
1923 bool
1924 get_max_loop_iterations (const struct loop *loop, widest_int *nit)
1925 {
1926 if (!loop->any_upper_bound)
1927 return false;
1928
1929 *nit = loop->nb_iterations_upper_bound;
1930 return true;
1931 }
1932
1933 /* Similar to get_max_loop_iterations, but returns the estimate only
 1934 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1935 on the number of iterations of LOOP could not be derived, returns -1. */
1936
1937 HOST_WIDE_INT
1938 get_max_loop_iterations_int (const struct loop *loop)
1939 {
1940 widest_int nit;
1941 HOST_WIDE_INT hwi_nit;
1942
1943 if (!get_max_loop_iterations (loop, &nit))
1944 return -1;
1945
1946 if (!wi::fits_shwi_p (nit))
1947 return -1;
1948 hwi_nit = nit.to_shwi ();
1949
1950 return hwi_nit < 0 ? -1 : hwi_nit;
1951 }
1952
1953 /* Sets NIT to an upper bound for the maximum number of executions of the
1954 latch of the LOOP. If we have no reliable estimate, the function returns
1955 false, otherwise returns true. */
1956
1957 bool
1958 get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
1959 {
1960 if (!loop->any_likely_upper_bound)
1961 return false;
1962
1963 *nit = loop->nb_iterations_likely_upper_bound;
1964 return true;
1965 }
1966
 1967 /* Similar to get_likely_max_loop_iterations, but returns the estimate only
 1968 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
1969 on the number of iterations of LOOP could not be derived, returns -1. */
1970
1971 HOST_WIDE_INT
1972 get_likely_max_loop_iterations_int (struct loop *loop)
1973 {
1974 widest_int nit;
1975 HOST_WIDE_INT hwi_nit;
1976
1977 if (!get_likely_max_loop_iterations (loop, &nit))
1978 return -1;
1979
1980 if (!wi::fits_shwi_p (nit))
1981 return -1;
1982 hwi_nit = nit.to_shwi ();
1983
1984 return hwi_nit < 0 ? -1 : hwi_nit;
1985 }
1986
1987 /* Returns the loop depth of the loop BB belongs to. */
1988
1989 int
1990 bb_loop_depth (const_basic_block bb)
1991 {
1992 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1993 }
1994
1995 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1996
1997 void
1998 mark_loop_for_removal (loop_p loop)
1999 {
2000 if (loop->header == NULL)
2001 return;
2002 loop->former_header = loop->header;
2003 loop->header = NULL;
2004 loop->latch = NULL;
2005 loops_state_set (LOOPS_NEED_FIXUP);
2006 }