i386.c (make_resolver_func): Update.
[gcc.git] / gcc / cfgloop.c
1 /* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "gimple-ssa.h"
29 #include "diagnostic-core.h"
30 #include "cfganal.h"
31 #include "cfgloop.h"
32 #include "gimple-iterator.h"
33 #include "dumpfile.h"
34
35 static void flow_loops_cfg_dump (FILE *);
36 \f
37 /* Dump loop related CFG information. */
38
39 static void
40 flow_loops_cfg_dump (FILE *file)
41 {
42 basic_block bb;
43
44 if (!file)
45 return;
46
47 FOR_EACH_BB_FN (bb, cfun)
48 {
49 edge succ;
50 edge_iterator ei;
51
52 fprintf (file, ";; %d succs { ", bb->index);
53 FOR_EACH_EDGE (succ, ei, bb->succs)
54 fprintf (file, "%d ", succ->dest->index);
55 fprintf (file, "}\n");
56 }
57 }
58
59 /* Return nonzero if the nodes of LOOP are a subset of OUTER. */
60
61 bool
62 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
63 {
64 unsigned odepth = loop_depth (outer);
65
66 return (loop_depth (loop) > odepth
67 && (*loop->superloops)[odepth] == outer);
68 }
69
70 /* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
71 loops within LOOP. */
72
73 struct loop *
74 superloop_at_depth (struct loop *loop, unsigned depth)
75 {
76 unsigned ldepth = loop_depth (loop);
77
78 gcc_assert (depth <= ldepth);
79
80 if (depth == ldepth)
81 return loop;
82
83 return (*loop->superloops)[depth];
84 }
85
86 /* Returns the list of the latch edges of LOOP. */
87
88 static vec<edge>
89 get_loop_latch_edges (const struct loop *loop)
90 {
91 edge_iterator ei;
92 edge e;
93 vec<edge> ret = vNULL;
94
95 FOR_EACH_EDGE (e, ei, loop->header->preds)
96 {
97 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
98 ret.safe_push (e);
99 }
100
101 return ret;
102 }
103
104 /* Dump the loop information specified by LOOP to the stream FILE
105 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
106
void
flow_loop_dump (const struct loop *loop, FILE *file,
		void (*loop_dump_aux) (const struct loop *, FILE *, int),
		int verbose)
{
  basic_block *bbs;
  unsigned i;
  vec<edge> latches;
  edge e;

  /* Nothing to print for a missing or headerless loop.  */
  if (! loop || ! loop->header)
    return;

  fprintf (file, ";;\n;; Loop %d\n", loop->num);

  fprintf (file, ";; header %d, ", loop->header->index);
  if (loop->latch)
    fprintf (file, "latch %d\n", loop->latch->index);
  else
    {
      /* No single latch; list the source of every latch edge instead.  */
      fprintf (file, "multiple latches:");
      latches = get_loop_latch_edges (loop);
      FOR_EACH_VEC_ELT (latches, i, e)
	fprintf (file, " %d", e->src->index);
      latches.release ();
      fprintf (file, "\n");
    }

  /* -1 denotes the loop tree root, which has no outer loop.  */
  fprintf (file, ";; depth %d, outer %ld\n",
	   loop_depth (loop), (long) (loop_outer (loop)
				      ? loop_outer (loop)->num : -1));

  if (loop->latch)
    {
      bool read_profile_p;
      gcov_type nit = expected_loop_iterations_unbounded (loop, &read_profile_p);
      /* Only print a count actually read from the profile and not
	 superseded by a recorded estimate.  */
      if (read_profile_p && !loop->any_estimate)
	fprintf (file, ";; profile-based iteration count: %" PRIu64 "\n",
		 (uint64_t) nit);
    }

  fprintf (file, ";; nodes:");
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    fprintf (file, " %d", bbs[i]->index);
  free (bbs);
  fprintf (file, "\n");

  /* Let the caller append extra, pass-specific information.  */
  if (loop_dump_aux)
    loop_dump_aux (loop, file, verbose);
}
158
159 /* Dump the loop information about loops to the stream FILE,
160 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
161
162 void
163 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
164 {
165 struct loop *loop;
166
167 if (!current_loops || ! file)
168 return;
169
170 fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
171
172 FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
173 {
174 flow_loop_dump (loop, file, loop_dump_aux, verbose);
175 }
176
177 if (verbose)
178 flow_loops_cfg_dump (file);
179 }
180
181 /* Free data allocated for LOOP. */
182
void
flow_loop_free (struct loop *loop)
{
  struct loop_exit *exit, *next;

  vec_free (loop->superloops);

  /* Break the list of the loop exit records.  They will be freed when the
     corresponding edge is rescanned or removed, and this avoids
     accessing the (already released) head of the list stored in the
     loop structure.  */
  for (exit = loop->exits->next; exit != loop->exits; exit = next)
    {
      next = exit->next;
      /* Turn each record into a singleton circular list, so a later
	 unlink through its next/prev pointers stays within the record
	 itself.  */
      exit->next = exit;
      exit->prev = exit;
    }

  /* Both the dummy exit head and the loop itself are GC-allocated.  */
  ggc_free (loop->exits);
  ggc_free (loop);
}
204
205 /* Free all the memory allocated for LOOPS. */
206
207 void
208 flow_loops_free (struct loops *loops)
209 {
210 if (loops->larray)
211 {
212 unsigned i;
213 loop_p loop;
214
215 /* Free the loop descriptors. */
216 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
217 {
218 if (!loop)
219 continue;
220
221 flow_loop_free (loop);
222 }
223
224 vec_free (loops->larray);
225 }
226 }
227
228 /* Find the nodes contained within the LOOP with header HEADER.
229 Return the number of nodes within the loop. */
230
231 int
232 flow_loop_nodes_find (basic_block header, struct loop *loop)
233 {
234 vec<basic_block> stack = vNULL;
235 int num_nodes = 1;
236 edge latch;
237 edge_iterator latch_ei;
238
239 header->loop_father = loop;
240
241 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
242 {
243 if (latch->src->loop_father == loop
244 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
245 continue;
246
247 num_nodes++;
248 stack.safe_push (latch->src);
249 latch->src->loop_father = loop;
250
251 while (!stack.is_empty ())
252 {
253 basic_block node;
254 edge e;
255 edge_iterator ei;
256
257 node = stack.pop ();
258
259 FOR_EACH_EDGE (e, ei, node->preds)
260 {
261 basic_block ancestor = e->src;
262
263 if (ancestor->loop_father != loop)
264 {
265 ancestor->loop_father = loop;
266 num_nodes++;
267 stack.safe_push (ancestor);
268 }
269 }
270 }
271 }
272 stack.release ();
273
274 return num_nodes;
275 }
276
277 /* Records the vector of superloops of the loop LOOP, whose immediate
278 superloop is FATHER. */
279
280 static void
281 establish_preds (struct loop *loop, struct loop *father)
282 {
283 loop_p ploop;
284 unsigned depth = loop_depth (father) + 1;
285 unsigned i;
286
287 loop->superloops = 0;
288 vec_alloc (loop->superloops, depth);
289 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
290 loop->superloops->quick_push (ploop);
291 loop->superloops->quick_push (father);
292
293 for (ploop = loop->inner; ploop; ploop = ploop->next)
294 establish_preds (ploop, loop);
295 }
296
297 /* Add LOOP to the loop hierarchy tree where FATHER is father of the
298 added loop. If LOOP has some children, take care of that their
299 pred field will be initialized correctly. */
300
void
flow_loop_tree_node_add (struct loop *father, struct loop *loop)
{
  /* Link LOOP in as the first entry of FATHER's list of children.  */
  loop->next = father->inner;
  father->inner = loop;

  /* Rebuild the superloop vectors of LOOP and all its descendants.  */
  establish_preds (loop, father);
}
309
310 /* Remove LOOP from the loop hierarchy tree. */
311
312 void
313 flow_loop_tree_node_remove (struct loop *loop)
314 {
315 struct loop *prev, *father;
316
317 father = loop_outer (loop);
318
319 /* Remove loop from the list of sons. */
320 if (father->inner == loop)
321 father->inner = loop->next;
322 else
323 {
324 for (prev = father->inner; prev->next != loop; prev = prev->next)
325 continue;
326 prev->next = loop->next;
327 }
328
329 loop->superloops = NULL;
330 }
331
332 /* Allocates and returns new loop structure. */
333
334 struct loop *
335 alloc_loop (void)
336 {
337 struct loop *loop = ggc_cleared_alloc<struct loop> ();
338
339 loop->exits = ggc_cleared_alloc<loop_exit> ();
340 loop->exits->next = loop->exits->prev = loop->exits;
341 loop->can_be_parallel = false;
342 loop->constraints = 0;
343 loop->nb_iterations_upper_bound = 0;
344 loop->nb_iterations_likely_upper_bound = 0;
345 loop->nb_iterations_estimate = 0;
346 return loop;
347 }
348
349 /* Initializes loops structure LOOPS, reserving place for NUM_LOOPS loops
350 (including the root of the loop tree). */
351
352 void
353 init_loops_structure (struct function *fn,
354 struct loops *loops, unsigned num_loops)
355 {
356 struct loop *root;
357
358 memset (loops, 0, sizeof *loops);
359 vec_alloc (loops->larray, num_loops);
360
361 /* Dummy loop containing whole function. */
362 root = alloc_loop ();
363 root->num_nodes = n_basic_blocks_for_fn (fn);
364 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
365 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
366 ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
367 EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
368
369 loops->larray->quick_push (root);
370 loops->tree_root = root;
371 }
372
373 /* Returns whether HEADER is a loop header. */
374
375 bool
376 bb_loop_header_p (basic_block header)
377 {
378 edge_iterator ei;
379 edge e;
380
381 /* If we have an abnormal predecessor, do not consider the
382 loop (not worth the problems). */
383 if (bb_has_abnormal_pred (header))
384 return false;
385
386 /* Look for back edges where a predecessor is dominated
387 by this block. A natural loop has a single entry
388 node (header) that dominates all the nodes in the
389 loop. It also has single back edge to the header
390 from a latch node. */
391 FOR_EACH_EDGE (e, ei, header->preds)
392 {
393 basic_block latch = e->src;
394 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
395 && dominated_by_p (CDI_DOMINATORS, latch, header))
396 return true;
397 }
398
399 return false;
400 }
401
402 /* Find all the natural loops in the function and save in LOOPS structure and
403 recalculate loop_father information in basic block structures.
404 If LOOPS is non-NULL then the loop structures for already recorded loops
405 will be re-used and their number will not change. We assume that no
406 stale loops exist in LOOPS.
407 When LOOPS is NULL it is allocated and re-built from scratch.
408 Return the built LOOPS structure. */
409
struct loops *
flow_loops_find (struct loops *loops)
{
  bool from_scratch = (loops == NULL);
  int *rc_order;
  int b;
  unsigned i;

  /* Ensure that the dominators are computed.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (!loops)
    {
      loops = ggc_cleared_alloc<struct loops> ();
      init_loops_structure (cfun, loops, 1);
    }

  /* Ensure that loop exits were released.  */
  gcc_assert (loops->exits == NULL);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return loops;

  /* The root loop node contains all basic-blocks.  */
  loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);

  /* Compute depth first search order of the CFG so that outer
     natural loops will be found before inner natural loops.  */
  rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  pre_and_rev_post_order_compute (NULL, rc_order, false);

  /* Gather all loop headers in reverse completion order and allocate
     loop structures for loops that are not already present.  */
  auto_vec<loop_p> larray (loops->larray->length ());
  for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
    {
      basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
      if (bb_loop_header_p (header))
	{
	  struct loop *loop;

	  /* The current active loop tree has valid loop-fathers for
	     header blocks.  */
	  if (!from_scratch
	      && header->loop_father->header == header)
	    {
	      loop = header->loop_father;
	      /* If we found an existing loop remove it from the
		 loop tree.  It is going to be inserted again
		 below.  */
	      flow_loop_tree_node_remove (loop);
	    }
	  else
	    {
	      /* Otherwise allocate a new loop structure for the loop.  */
	      loop = alloc_loop ();
	      /* ??? We could re-use unused loop slots here.  */
	      loop->num = loops->larray->length ();
	      vec_safe_push (loops->larray, loop);
	      loop->header = header;

	      if (!from_scratch
		  && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "flow_loops_find: discovered new "
			 "loop %d with header %d\n",
			 loop->num, header->index);
	    }
	  /* Reset latch, we recompute it below.  */
	  loop->latch = NULL;
	  larray.safe_push (loop);
	}

      /* Make blocks part of the loop root node at start.  */
      header->loop_father = loops->tree_root;
    }

  free (rc_order);

  /* Now iterate over the loops found, insert them into the loop tree
     and assign basic-block ownership.  Headers were visited in reverse
     completion order, so outer loops are processed before inner ones.  */
  for (i = 0; i < larray.length (); ++i)
    {
      struct loop *loop = larray[i];
      basic_block header = loop->header;
      edge_iterator ei;
      edge e;

      flow_loop_tree_node_add (header->loop_father, loop);
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);

      /* Look for the latch for this header block, if it has just a
	 single one.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  basic_block latch = e->src;

	  if (flow_bb_inside_loop_p (loop, latch))
	    {
	      if (loop->latch != NULL)
		{
		  /* More than one latch edge.  */
		  loop->latch = NULL;
		  break;
		}
	      loop->latch = latch;
	    }
	}
    }

  return loops;
}
523
/* Ratio of frequencies of edges so that one of more latch edges is
   considered to belong to inner loop with same header.  */
#define HEAVY_EDGE_RATIO 8

/* Minimum number of samples for that we apply
   find_subloop_latch_edge_by_profile heuristics.  */
#define HEAVY_EDGE_MIN_SAMPLES 10

/* If the profile info is available, finds an edge in LATCHES that much more
   frequent than the remaining edges.  Returns such an edge, or NULL if we do
   not find one.

   We do not use guessed profile here, only the measured one.  The guessed
   profile is usually too flat and unreliable for this (and it is mostly based
   on the loop structure of the program, so it does not make much sense to
   derive the loop structure from it).  */

static edge
find_subloop_latch_edge_by_profile (vec<edge> latches)
{
  unsigned i;
  edge e, me = NULL;
  profile_count mcount = profile_count::zero (), tcount = profile_count::zero ();

  /* Find the latch edge with the maximum count and sum all counts.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    {
      if (e->count > mcount)
	{
	  me = e;
	  mcount = e->count;
	}
      tcount += e->count;
    }

  /* Give up without an initialized (measured) profile, with fewer than
     HEAVY_EDGE_MIN_SAMPLES samples in total, or when the remaining
     edges amount to more than 1/HEAVY_EDGE_RATIO of the total, i.e. the
     heaviest edge is not dominant enough.  */
  if (!tcount.initialized_p () || tcount < HEAVY_EDGE_MIN_SAMPLES
      || (tcount - mcount).apply_scale (HEAVY_EDGE_RATIO, 1) > tcount)
    return NULL;

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using profile information.\n",
	     me->src->index, me->dest->index);
  return me;
}
568
569 /* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
570 on the structure of induction variables. Returns this edge, or NULL if we
571 do not find any.
572
573 We are quite conservative, and look just for an obvious simple innermost
574 loop (which is the case where we would lose the most performance by not
575 disambiguating the loop). More precisely, we look for the following
576 situation: The source of the chosen latch edge dominates sources of all
577 the other latch edges. Additionally, the header does not contain a phi node
578 such that the argument from the chosen edge is equal to the argument from
579 another edge. */
580
static edge
find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
  edge e, latch = latches[0];
  unsigned i;
  gphi *phi;
  gphi_iterator psi;
  tree lop;
  basic_block bb;

  /* Find the candidate for the latch edge: the one whose source is
     dominated by no other latch-edge source.  */
  for (i = 1; latches.iterate (i, &e); i++)
    if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
      latch = e;

  /* Verify that it dominates all the latch edges.  */
  FOR_EACH_VEC_ELT (latches, i, e)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
      return NULL;

  /* Check for a phi node that would deny that this is a latch edge of
     a subloop.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = psi.phi ();
      lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);

      /* Ignore the values that are not changed inside the subloop.  */
      if (TREE_CODE (lop) != SSA_NAME
	  || SSA_NAME_DEF_STMT (lop) == phi)
	continue;
      bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
      if (!bb || !flow_bb_inside_loop_p (loop, bb))
	continue;

      /* If another latch edge feeds the same value into this phi, the
	 candidate does not look like a separate subloop latch.  */
      FOR_EACH_VEC_ELT (latches, i, e)
	if (e != latch
	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
	  return NULL;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Found latch edge %d -> %d using iv structure.\n",
	     latch->src->index, latch->dest->index);
  return latch;
}
628
629 /* If we can determine that one of the several latch edges of LOOP behaves
630 as a latch edge of a separate subloop, returns this edge. Otherwise
631 returns NULL. */
632
633 static edge
634 find_subloop_latch_edge (struct loop *loop)
635 {
636 vec<edge> latches = get_loop_latch_edges (loop);
637 edge latch = NULL;
638
639 if (latches.length () > 1)
640 {
641 latch = find_subloop_latch_edge_by_profile (latches);
642
643 if (!latch
644 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
645 should use cfghook for this, but it is hard to imagine it would
646 be useful elsewhere. */
647 && current_ir_type () == IR_GIMPLE)
648 latch = find_subloop_latch_edge_by_ivs (loop, latches);
649 }
650
651 latches.release ();
652 return latch;
653 }
654
655 /* Callback for make_forwarder_block. Returns true if the edge E is marked
656 in the set MFB_REIS_SET. */
657
/* Set of edges that make_forwarder_block should redirect; set up by the
   caller before invoking make_forwarder_block.  */
static hash_set<edge> *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
  return mfb_reis_set->contains (e);
}
664
665 /* Creates a subloop of LOOP with latch edge LATCH. */
666
667 static void
668 form_subloop (struct loop *loop, edge latch)
669 {
670 edge_iterator ei;
671 edge e, new_entry;
672 struct loop *new_loop;
673
674 mfb_reis_set = new hash_set<edge>;
675 FOR_EACH_EDGE (e, ei, loop->header->preds)
676 {
677 if (e != latch)
678 mfb_reis_set->add (e);
679 }
680 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
681 NULL);
682 delete mfb_reis_set;
683
684 loop->header = new_entry->src;
685
686 /* Find the blocks and subloops that belong to the new loop, and add it to
687 the appropriate place in the loop tree. */
688 new_loop = alloc_loop ();
689 new_loop->header = new_entry->dest;
690 new_loop->latch = latch->src;
691 add_loop (new_loop, loop);
692 }
693
694 /* Make all the latch edges of LOOP to go to a single forwarder block --
695 a new latch of LOOP. */
696
697 static void
698 merge_latch_edges (struct loop *loop)
699 {
700 vec<edge> latches = get_loop_latch_edges (loop);
701 edge latch, e;
702 unsigned i;
703
704 gcc_assert (latches.length () > 0);
705
706 if (latches.length () == 1)
707 loop->latch = latches[0]->src;
708 else
709 {
710 if (dump_file)
711 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
712
713 mfb_reis_set = new hash_set<edge>;
714 FOR_EACH_VEC_ELT (latches, i, e)
715 mfb_reis_set->add (e);
716 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
717 NULL);
718 delete mfb_reis_set;
719
720 loop->header = latch->dest;
721 loop->latch = latch->src;
722 }
723
724 latches.release ();
725 }
726
727 /* LOOP may have several latch edges. Transform it into (possibly several)
728 loops with single latch edge. */
729
static void
disambiguate_multiple_latches (struct loop *loop)
{
  edge e;

  /* We eliminate the multiple latches by splitting the header to the forwarder
     block F and the rest R, and redirecting the edges.  There are two cases:

     1) If there is a latch edge E that corresponds to a subloop (we guess
        that based on profile -- if it is taken much more often than the
	remaining edges; and on trees, using the information about induction
	variables of the loops), we redirect E to R, all the remaining edges to
	F, then rescan the loops and try again for the outer loop.
     2) If there is no such edge, we redirect all latch edges to F, and the
	entry edges to R, thus making F the single latch of the loop.  */

  if (dump_file)
    fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
	     loop->num);

  /* During latch merging, we may need to redirect the entry edges to a new
     block.  This would cause problems if the entry edge was the one from the
     entry block.  To avoid having to handle this case specially, split
     such entry edge.  */
  e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
  if (e)
    split_edge (e);

  /* Peel off subloops (case 1) until no latch edge stands out.  */
  while (1)
    {
      e = find_subloop_latch_edge (loop);
      if (!e)
	break;

      form_subloop (loop, e);
    }

  /* Case 2: merge whatever latch edges remain into a single one.  */
  merge_latch_edges (loop);
}
769
770 /* Split loops with multiple latch edges. */
771
772 void
773 disambiguate_loops_with_multiple_latches (void)
774 {
775 struct loop *loop;
776
777 FOR_EACH_LOOP (loop, 0)
778 {
779 if (!loop->latch)
780 disambiguate_multiple_latches (loop);
781 }
782 }
783
784 /* Return nonzero if basic block BB belongs to LOOP. */
785 bool
786 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
787 {
788 struct loop *source_loop;
789
790 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
791 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
792 return 0;
793
794 source_loop = bb->loop_father;
795 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
796 }
797
798 /* Enumeration predicate for get_loop_body_with_size. */
799 static bool
800 glb_enum_p (const_basic_block bb, const void *glb_loop)
801 {
802 const struct loop *const loop = (const struct loop *) glb_loop;
803 return (bb != loop->header
804 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
805 }
806
807 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
808 order against direction of edges from latch. Specially, if
809 header != latch, latch is the 1-st block. LOOP cannot be the fake
810 loop tree root, and its size must be at most MAX_SIZE. The blocks
811 in the LOOP body are stored to BODY, and the size of the LOOP is
812 returned. */
813
unsigned
get_loop_body_with_size (const struct loop *loop, basic_block *body,
			 unsigned max_size)
{
  /* Enumerate from the header; glb_enum_p admits exactly the blocks
     dominated by the header (other than the header itself), i.e. the
     loop body.  */
  return dfs_enumerate_from (loop->header, 1, glb_enum_p,
			     body, max_size, loop);
}
821
822 /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
823 order against direction of edges from latch. Specially, if
824 header != latch, latch is the 1-st block. */
825
826 basic_block *
827 get_loop_body (const struct loop *loop)
828 {
829 basic_block *body, bb;
830 unsigned tv = 0;
831
832 gcc_assert (loop->num_nodes);
833
834 body = XNEWVEC (basic_block, loop->num_nodes);
835
836 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
837 {
838 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
839 special-case the fake loop that contains the whole function. */
840 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
841 body[tv++] = loop->header;
842 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
843 FOR_EACH_BB_FN (bb, cfun)
844 body[tv++] = bb;
845 }
846 else
847 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
848
849 gcc_assert (tv == loop->num_nodes);
850 return body;
851 }
852
853 /* Fills dominance descendants inside LOOP of the basic block BB into
854 array TOVISIT from index *TV. */
855
856 static void
857 fill_sons_in_loop (const struct loop *loop, basic_block bb,
858 basic_block *tovisit, int *tv)
859 {
860 basic_block son, postpone = NULL;
861
862 tovisit[(*tv)++] = bb;
863 for (son = first_dom_son (CDI_DOMINATORS, bb);
864 son;
865 son = next_dom_son (CDI_DOMINATORS, son))
866 {
867 if (!flow_bb_inside_loop_p (loop, son))
868 continue;
869
870 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
871 {
872 postpone = son;
873 continue;
874 }
875 fill_sons_in_loop (loop, son, tovisit, tv);
876 }
877
878 if (postpone)
879 fill_sons_in_loop (loop, postpone, tovisit, tv);
880 }
881
882 /* Gets body of a LOOP (that must be different from the outermost loop)
883 sorted by dominance relation. Additionally, if a basic block s dominates
884 the latch, then only blocks dominated by s are be after it. */
885
886 basic_block *
887 get_loop_body_in_dom_order (const struct loop *loop)
888 {
889 basic_block *tovisit;
890 int tv;
891
892 gcc_assert (loop->num_nodes);
893
894 tovisit = XNEWVEC (basic_block, loop->num_nodes);
895
896 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
897
898 tv = 0;
899 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
900
901 gcc_assert (tv == (int) loop->num_nodes);
902
903 return tovisit;
904 }
905
906 /* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
907
908 basic_block *
909 get_loop_body_in_custom_order (const struct loop *loop,
910 int (*bb_comparator) (const void *, const void *))
911 {
912 basic_block *bbs = get_loop_body (loop);
913
914 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
915
916 return bbs;
917 }
918
919 /* Get body of a LOOP in breadth first sort order. */
920
921 basic_block *
922 get_loop_body_in_bfs_order (const struct loop *loop)
923 {
924 basic_block *blocks;
925 basic_block bb;
926 unsigned int i = 1;
927 unsigned int vc = 0;
928
929 gcc_assert (loop->num_nodes);
930 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
931
932 blocks = XNEWVEC (basic_block, loop->num_nodes);
933 auto_bitmap visited;
934 blocks[0] = loop->header;
935 bitmap_set_bit (visited, loop->header->index);
936 while (i < loop->num_nodes)
937 {
938 edge e;
939 edge_iterator ei;
940 gcc_assert (i > vc);
941 bb = blocks[vc++];
942
943 FOR_EACH_EDGE (e, ei, bb->succs)
944 {
945 if (flow_bb_inside_loop_p (loop, e->dest))
946 {
947 /* This bb is now visited. */
948 if (bitmap_set_bit (visited, e->dest->index))
949 blocks[i++] = e->dest;
950 }
951 }
952 }
953
954 return blocks;
955 }
956
957 /* Hash function for struct loop_exit. */
958
hashval_t
loop_exit_hasher::hash (loop_exit *exit)
{
  /* Exit records are keyed by their edge pointer.  */
  return htab_hash_pointer (exit->e);
}
964
965 /* Equality function for struct loop_exit. Compares with edge. */
966
bool
loop_exit_hasher::equal (loop_exit *exit, edge e)
{
  /* An exit record matches exactly the edge it describes.  */
  return exit->e == e;
}
972
973 /* Frees the list of loop exit descriptions EX. */
974
void
loop_exit_hasher::remove (loop_exit *exit)
{
  loop_exit *next;
  /* Walk the chain of records for this edge (one per exited loop),
     unlinking each from its loop's circular exit list before freeing.  */
  for (; exit; exit = next)
    {
      next = exit->next_e;

      exit->next->prev = exit->prev;
      exit->prev->next = exit->next;

      ggc_free (exit);
    }
}
989
990 /* Returns the list of records for E as an exit of a loop. */
991
static struct loop_exit *
get_exit_descriptions (edge e)
{
  /* Look up the chain of exit records hashed under edge E.  */
  return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
}
997
998 /* Updates the lists of loop exits in that E appears.
999 If REMOVED is true, E is being removed, and we
1000 just remove it from the lists of exits.
1001 If NEW_EDGE is true and E is not a loop exit, we
1002 do not try to remove it from loop exit lists. */
1003
void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
  struct loop_exit *exits = NULL, *exit;
  struct loop *aloop, *cloop;

  /* Exit lists are only maintained when this flag is set.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    return;

  if (!removed
      && e->src->loop_father != NULL
      && e->dest->loop_father != NULL
      && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
    {
      /* E leaves its source's loop: create one exit record per loop
	 between the source's loop and the common ancestor of source
	 and destination.  */
      cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
      for (aloop = e->src->loop_father;
	   aloop != cloop;
	   aloop = loop_outer (aloop))
	{
	  exit = ggc_alloc<loop_exit> ();
	  exit->e = e;

	  /* Insert at the head of ALOOP's circular exit list.  */
	  exit->next = aloop->exits->next;
	  exit->prev = aloop->exits;
	  exit->next->prev = exit;
	  exit->prev->next = exit;

	  /* Thread the records for this edge together via next_e.  */
	  exit->next_e = exits;
	  exits = exit;
	}
    }

  /* A fresh edge that is not an exit needs no hash-table entry.  */
  if (!exits && new_edge)
    return;

  loop_exit **slot
    = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
						 exits ? INSERT : NO_INSERT);
  if (!slot)
    return;

  if (exits)
    {
      /* Replace any stale records for E with the new chain.  */
      if (*slot)
	loop_exit_hasher::remove (*slot);
      *slot = exits;
    }
  else
    current_loops->exits->clear_slot (slot);
}
1054
1055 /* For each loop, record list of exit edges, and start maintaining these
1056 lists. */
1057
1058 void
1059 record_loop_exits (void)
1060 {
1061 basic_block bb;
1062 edge_iterator ei;
1063 edge e;
1064
1065 if (!current_loops)
1066 return;
1067
1068 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1069 return;
1070 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1071
1072 gcc_assert (current_loops->exits == NULL);
1073 current_loops->exits
1074 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
1075
1076 FOR_EACH_BB_FN (bb, cfun)
1077 {
1078 FOR_EACH_EDGE (e, ei, bb->succs)
1079 {
1080 rescan_loop_exit (e, true, false);
1081 }
1082 }
1083 }
1084
1085 /* Dumps information about the exit in *SLOT to FILE.
1086 Callback for htab_traverse. */
1087
1088 int
1089 dump_recorded_exit (loop_exit **slot, FILE *file)
1090 {
1091 struct loop_exit *exit = *slot;
1092 unsigned n = 0;
1093 edge e = exit->e;
1094
1095 for (; exit != NULL; exit = exit->next_e)
1096 n++;
1097
1098 fprintf (file, "Edge %d->%d exits %u loops\n",
1099 e->src->index, e->dest->index, n);
1100
1101 return 1;
1102 }
1103
1104 /* Dumps the recorded exits of loops to FILE. */
1105
extern void dump_recorded_exits (FILE *);
void
dump_recorded_exits (FILE *file)
{
  /* Nothing to dump when exit lists are not recorded.  */
  if (!current_loops->exits)
    return;
  /* Print one line per recorded exit edge.  */
  current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
}
1114
1115 /* Releases lists of loop exits. */
1116
void
release_recorded_exits (function *fn)
{
  gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
  /* Empty the table, then drop the reference; the table is
     GC-allocated.  */
  loops_for_fn (fn)->exits->empty ();
  loops_for_fn (fn)->exits = NULL;
  loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
}
1125
1126 /* Returns the list of the exit edges of a LOOP. */
1127
1128 vec<edge>
1129 get_loop_exit_edges (const struct loop *loop)
1130 {
1131 vec<edge> edges = vNULL;
1132 edge e;
1133 unsigned i;
1134 basic_block *body;
1135 edge_iterator ei;
1136 struct loop_exit *exit;
1137
1138 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1139
1140 /* If we maintain the lists of exits, use them. Otherwise we must
1141 scan the body of the loop. */
1142 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1143 {
1144 for (exit = loop->exits->next; exit->e; exit = exit->next)
1145 edges.safe_push (exit->e);
1146 }
1147 else
1148 {
1149 body = get_loop_body (loop);
1150 for (i = 0; i < loop->num_nodes; i++)
1151 FOR_EACH_EDGE (e, ei, body[i]->succs)
1152 {
1153 if (!flow_bb_inside_loop_p (loop, e->dest))
1154 edges.safe_push (e);
1155 }
1156 free (body);
1157 }
1158
1159 return edges;
1160 }
1161
1162 /* Counts the number of conditional branches inside LOOP. */
1163
1164 unsigned
1165 num_loop_branches (const struct loop *loop)
1166 {
1167 unsigned i, n;
1168 basic_block * body;
1169
1170 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1171
1172 body = get_loop_body (loop);
1173 n = 0;
1174 for (i = 0; i < loop->num_nodes; i++)
1175 if (EDGE_COUNT (body[i]->succs) >= 2)
1176 n++;
1177 free (body);
1178
1179 return n;
1180 }
1181
1182 /* Adds basic block BB to LOOP. */
1183 void
1184 add_bb_to_loop (basic_block bb, struct loop *loop)
1185 {
1186 unsigned i;
1187 loop_p ploop;
1188 edge_iterator ei;
1189 edge e;
1190
1191 gcc_assert (bb->loop_father == NULL);
1192 bb->loop_father = loop;
1193 loop->num_nodes++;
1194 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1195 ploop->num_nodes++;
1196
1197 FOR_EACH_EDGE (e, ei, bb->succs)
1198 {
1199 rescan_loop_exit (e, true, false);
1200 }
1201 FOR_EACH_EDGE (e, ei, bb->preds)
1202 {
1203 rescan_loop_exit (e, true, false);
1204 }
1205 }
1206
1207 /* Remove basic block BB from loops. */
1208 void
1209 remove_bb_from_loops (basic_block bb)
1210 {
1211 unsigned i;
1212 struct loop *loop = bb->loop_father;
1213 loop_p ploop;
1214 edge_iterator ei;
1215 edge e;
1216
1217 gcc_assert (loop != NULL);
1218 loop->num_nodes--;
1219 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1220 ploop->num_nodes--;
1221 bb->loop_father = NULL;
1222
1223 FOR_EACH_EDGE (e, ei, bb->succs)
1224 {
1225 rescan_loop_exit (e, false, true);
1226 }
1227 FOR_EACH_EDGE (e, ei, bb->preds)
1228 {
1229 rescan_loop_exit (e, false, true);
1230 }
1231 }
1232
1233 /* Finds nearest common ancestor in loop tree for given loops. */
1234 struct loop *
1235 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1236 {
1237 unsigned sdepth, ddepth;
1238
1239 if (!loop_s) return loop_d;
1240 if (!loop_d) return loop_s;
1241
1242 sdepth = loop_depth (loop_s);
1243 ddepth = loop_depth (loop_d);
1244
1245 if (sdepth < ddepth)
1246 loop_d = (*loop_d->superloops)[sdepth];
1247 else if (sdepth > ddepth)
1248 loop_s = (*loop_s->superloops)[ddepth];
1249
1250 while (loop_s != loop_d)
1251 {
1252 loop_s = loop_outer (loop_s);
1253 loop_d = loop_outer (loop_d);
1254 }
1255 return loop_s;
1256 }
1257
/* Removes LOOP from structures and frees its data.  */

void
delete_loop (struct loop *loop)
{
  /* Remove the loop from structure.  */
  flow_loop_tree_node_remove (loop);

  /* Remove loop from loops array.  The slot is set to NULL (rather than
     compacted) so the numbers of the remaining loops stay stable.  */
  (*current_loops->larray)[loop->num] = NULL;

  /* Free loop data.  */
  flow_loop_free (loop);
}
1272
/* Cancels the LOOP; it must be innermost one.  The loop's blocks are
   reparented to the enclosing loop before the loop itself is deleted.  */

static void
cancel_loop (struct loop *loop)
{
  basic_block *bbs;
  unsigned i;
  struct loop *outer = loop_outer (loop);

  /* Subloops would be left with dangling parents otherwise.  */
  gcc_assert (!loop->inner);

  /* Move blocks up one level (they should be removed as soon as possible).  */
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    bbs[i]->loop_father = outer;

  free (bbs);
  delete_loop (loop);
}
1292
/* Cancels LOOP and all its subloops.  */
void
cancel_loop_tree (struct loop *loop)
{
  /* Cancel subloops first; cancel_loop asserts the loop is innermost.  */
  while (loop->inner)
    cancel_loop_tree (loop->inner);
  cancel_loop (loop);
}
1301
/* Checks that information about loops is correct
     -- sizes of loops are all right
     -- results of get_loop_body really belong to the loop
     -- loop header have just single entry edge and single latch edge
     -- loop latches have only single successor that is header of their loop
     -- irreducible loops are correctly marked
     -- the cached loop depth and loop father of each bb is correct
   Aborts (via gcc_assert) if any check fails.  */
DEBUG_FUNCTION void
verify_loop_structure (void)
{
  unsigned *sizes, i, j;
  basic_block bb, *bbs;
  struct loop *loop;
  int err = 0;
  edge e;
  unsigned num = number_of_loops (cfun);
  struct loop_exit *exit, *mexit;
  bool dom_available = dom_info_available_p (CDI_DOMINATORS);

  if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    {
      error ("loop verification on loop tree that needs fixup");
      err = 1;
    }

  /* We need up-to-date dominators, compute or verify them.  */
  if (!dom_available)
    calculate_dominance_info (CDI_DOMINATORS);
  else
    verify_dominators (CDI_DOMINATORS);

  /* Check the loop tree root.  */
  if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
      || (current_loops->tree_root->num_nodes
	  != (unsigned) n_basic_blocks_for_fn (cfun)))
    {
      error ("corrupt loop tree root");
      err = 1;
    }

  /* Check the headers: every block that looks like a loop header must be
     the header of its loop_father, and vice versa.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (bb_loop_header_p (bb))
      {
	if (bb->loop_father->header == NULL)
	  {
	    error ("loop with header %d marked for removal", bb->index);
	    err = 1;
	  }
	else if (bb->loop_father->header != bb)
	  {
	    error ("loop with header %d not in loop tree", bb->index);
	    err = 1;
	  }
      }
    else if (bb->loop_father->header == bb)
      {
	error ("non-loop with header %d not marked for removal", bb->index);
	err = 1;
      }

  /* Check the recorded loop father and sizes of loops.  Iterating from
     innermost outward lets VISITED mask blocks already claimed by an
     inner loop, so each block's loop_father is checked exactly once.  */
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      unsigned n;

      if (loop->header == NULL)
	{
	  error ("removed loop %d in loop tree", loop->num);
	  err = 1;
	  continue;
	}

      n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
      if (loop->num_nodes != n)
	{
	  error ("size of loop %d should be %d, not %d",
		 loop->num, n, loop->num_nodes);
	  err = 1;
	}

      for (j = 0; j < n; j++)
	{
	  bb = bbs[j];

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      error ("bb %d does not belong to loop %d",
		     bb->index, loop->num);
	      err = 1;
	    }

	  /* Ignore this block if it is in an inner loop.  */
	  if (bitmap_bit_p (visited, bb->index))
	    continue;
	  bitmap_set_bit (visited, bb->index);

	  if (bb->loop_father != loop)
	    {
	      error ("bb %d has father loop %d, should be loop %d",
		     bb->index, bb->loop_father->num, loop->num);
	      err = 1;
	    }
	}
    }
  free (bbs);

  /* Check headers and latches.  */
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->num;
      if (loop->header == NULL)
	continue;
      if (!bb_loop_header_p (loop->header))
	{
	  error ("loop %d%'s header is not a loop header", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
	  && EDGE_COUNT (loop->header->preds) != 2)
	{
	  error ("loop %d%'s header does not have exactly 2 entries", i);
	  err = 1;
	}
      if (loop->latch)
	{
	  if (!find_edge (loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch does not have an edge to its header", i);
	      err = 1;
	    }
	  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
	    {
	      error ("loop %d%'s latch is not dominated by its header", i);
	      err = 1;
	    }
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
	{
	  if (!single_succ_p (loop->latch))
	    {
	      error ("loop %d%'s latch does not have exactly 1 successor", i);
	      err = 1;
	    }
	  if (single_succ (loop->latch) != loop->header)
	    {
	      error ("loop %d%'s latch does not have header as successor", i);
	      err = 1;
	    }
	  if (loop->latch->loop_father != loop)
	    {
	      error ("loop %d%'s latch does not belong directly to it", i);
	      err = 1;
	    }
	}
      if (loop->header->loop_father != loop)
	{
	  error ("loop %d%'s header does not belong directly to it", i);
	  err = 1;
	}
      if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
	  && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
	{
	  error ("loop %d%'s latch is marked as part of irreducible region", i);
	  err = 1;
	}
    }

  /* Check irreducible loops.  */
  if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    {
      /* Record old info.  Edges are tagged with the otherwise-unused
	 bit EDGE_ALL_FLAGS + 1 as scratch state.  */
      auto_sbitmap irreds (last_basic_block_for_fn (cfun));
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    bitmap_set_bit (irreds, bb->index);
	  else
	    bitmap_clear_bit (irreds, bb->index);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	      e->flags |= EDGE_ALL_FLAGS + 1;
	}

      /* Recount it.  */
      mark_irreducible_loops ();

      /* Compare.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;

	  if ((bb->flags & BB_IRREDUCIBLE_LOOP)
	      && !bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should be marked irreducible", bb->index);
	      err = 1;
	    }
	  else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
	      && bitmap_bit_p (irreds, bb->index))
	    {
	      error ("basic block %d should not be marked irreducible", bb->index);
	      err = 1;
	    }
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
		  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
		       && (e->flags & (EDGE_ALL_FLAGS + 1)))
		{
		  error ("edge from %d to %d should not be marked irreducible",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      /* Clear the scratch bit again.  */
	      e->flags &= ~(EDGE_ALL_FLAGS + 1);
	    }
	}
    }

  /* Check the recorded loop exits.  */
  FOR_EACH_LOOP (loop, 0)
    {
      if (!loop->exits || loop->exits->e != NULL)
	{
	  error ("corrupted head of the exits list of loop %d",
		 loop->num);
	  err = 1;
	}
      else
	{
	  /* Check that the list forms a cycle, and all elements except
	     for the head are nonnull.  MEXIT advances at half speed
	     (tortoise-and-hare) so a non-cyclic corruption cannot loop
	     forever.  */
	  for (mexit = loop->exits, exit = mexit->next, i = 0;
	       exit->e && exit != mexit;
	       exit = exit->next)
	    {
	      if (i++ & 1)
		mexit = mexit->next;
	    }

	  if (exit != loop->exits)
	    {
	      error ("corrupted exits list of loop %d", loop->num);
	      err = 1;
	    }
	}

      if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
	{
	  if (loop->exits->next != loop->exits)
	    {
	      error ("nonempty exits list of loop %d, but exits are not recorded",
		     loop->num);
	      err = 1;
	    }
	}
    }

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    {
      unsigned n_exits = 0, eloops;

      /* NOTE(review): XCNEWVEC already zero-initializes, so this memset
	 is redundant — candidate for removal.  */
      sizes = XCNEWVEC (unsigned, num);
      memset (sizes, 0, sizeof (unsigned) * num);
      FOR_EACH_BB_FN (bb, cfun)
	{
	  edge_iterator ei;
	  if (bb->loop_father == current_loops->tree_root)
	    continue;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
		continue;

	      n_exits++;
	      exit = get_exit_descriptions (e);
	      if (!exit)
		{
		  error ("exit %d->%d not recorded",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	      eloops = 0;
	      for (; exit; exit = exit->next_e)
		eloops++;

	      for (loop = bb->loop_father;
		   loop != e->dest->loop_father
		   /* When a loop exit is also an entry edge which
		      can happen when avoiding CFG manipulations
		      then the last loop exited is the outer loop
		      of the loop entered.  */
		   && loop != loop_outer (e->dest->loop_father);
		   loop = loop_outer (loop))
		{
		  eloops--;
		  sizes[loop->num]++;
		}

	      if (eloops != 0)
		{
		  error ("wrong list of exited loops for edge %d->%d",
			 e->src->index, e->dest->index);
		  err = 1;
		}
	    }
	}

      if (n_exits != current_loops->exits->elements ())
	{
	  error ("too many loop exits recorded");
	  err = 1;
	}

      FOR_EACH_LOOP (loop, 0)
	{
	  eloops = 0;
	  for (exit = loop->exits->next; exit->e; exit = exit->next)
	    eloops++;
	  if (eloops != sizes[loop->num])
	    {
	      error ("%d exits recorded for loop %d (having %d exits)",
		     eloops, loop->num, sizes[loop->num]);
	      err = 1;
	    }
	}

      free (sizes);
    }

  gcc_assert (!err);

  /* Restore the dominance info to its entry state.  */
  if (!dom_available)
    free_dominance_info (CDI_DOMINATORS);
}
1649
1650 /* Returns latch edge of LOOP. */
1651 edge
1652 loop_latch_edge (const struct loop *loop)
1653 {
1654 return find_edge (loop->latch, loop->header);
1655 }
1656
1657 /* Returns preheader edge of LOOP. */
1658 edge
1659 loop_preheader_edge (const struct loop *loop)
1660 {
1661 edge e;
1662 edge_iterator ei;
1663
1664 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1665
1666 FOR_EACH_EDGE (e, ei, loop->header->preds)
1667 if (e->src != loop->latch)
1668 break;
1669
1670 return e;
1671 }
1672
1673 /* Returns true if E is an exit of LOOP. */
1674
1675 bool
1676 loop_exit_edge_p (const struct loop *loop, const_edge e)
1677 {
1678 return (flow_bb_inside_loop_p (loop, e->src)
1679 && !flow_bb_inside_loop_p (loop, e->dest));
1680 }
1681
1682 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1683 or more than one exit. If loops do not have the exits recorded, NULL
1684 is returned always. */
1685
1686 edge
1687 single_exit (const struct loop *loop)
1688 {
1689 struct loop_exit *exit = loop->exits->next;
1690
1691 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1692 return NULL;
1693
1694 if (exit->e && exit->next == loop->exits)
1695 return exit->e;
1696 else
1697 return NULL;
1698 }
1699
1700 /* Returns true when BB has an incoming edge exiting LOOP. */
1701
1702 bool
1703 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1704 {
1705 edge e;
1706 edge_iterator ei;
1707
1708 FOR_EACH_EDGE (e, ei, bb->preds)
1709 if (loop_exit_edge_p (loop, e))
1710 return true;
1711
1712 return false;
1713 }
1714
1715 /* Returns true when BB has an outgoing edge exiting LOOP. */
1716
1717 bool
1718 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1719 {
1720 edge e;
1721 edge_iterator ei;
1722
1723 FOR_EACH_EDGE (e, ei, bb->succs)
1724 if (loop_exit_edge_p (loop, e))
1725 return true;
1726
1727 return false;
1728 }
1729
/* Return location corresponding to the loop control condition if possible.
   Falls back, in order: the in_edge source block, the single-exit source
   block, the latch, the header, and finally the function's location.  */

location_t
get_loop_location (struct loop *loop)
{
  rtx_insn *insn = NULL;
  struct niter_desc *desc = NULL;
  edge exit;

  /* For a for or while loop, we would like to return the location
     of the for or while statement, if possible.  To do this, look
     for the branch guarding the loop back-edge.  */

  /* If this is a simple loop with an in_edge, then the loop control
     branch is typically at the end of its source.  */
  desc = get_simple_loop_desc (loop);
  if (desc->in_edge)
    {
      FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* If loop has a single exit, then the loop control branch
     must be at the end of its source.  */
  if ((exit = single_exit (loop)))
    {
      FOR_BB_INSNS_REVERSE (exit->src, insn)
        {
          if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
            return INSN_LOCATION (insn);
        }
    }
  /* Next check the latch, to see if it is non-empty.  */
  FOR_BB_INSNS_REVERSE (loop->latch, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* Finally, if none of the above identifies the loop control branch,
     return the first location in the loop header.  */
  FOR_BB_INSNS (loop->header, insn)
    {
      if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
        return INSN_LOCATION (insn);
    }
  /* If all else fails, simply return the current function location.  */
  return DECL_SOURCE_LOCATION (current_function_decl);
}
1780
/* Records that every statement in LOOP is executed I_BOUND times.
   REALISTIC is true if I_BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   I_BOUND times.  Updates the three cached bounds (upper, likely upper,
   estimate) and keeps them mutually consistent.  */

void
record_niter_bound (struct loop *loop, const widest_int &i_bound,
		    bool realistic, bool upper)
{
  /* Update the bounds only when there is no previous estimation, or when the
     current estimation is smaller.  */
  if (upper
      && (!loop->any_upper_bound
	  || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = i_bound;
      /* A sure upper bound also serves as a likely upper bound if we
	 had none so far.  */
      if (!loop->any_likely_upper_bound)
	{
	  loop->any_likely_upper_bound = true;
	  loop->nb_iterations_likely_upper_bound = i_bound;
	}
    }
  if (realistic
      && (!loop->any_estimate
	  || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
    {
      loop->any_estimate = true;
      loop->nb_iterations_estimate = i_bound;
    }
  if (!realistic
      && (!loop->any_likely_upper_bound
	  || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
    {
      loop->any_likely_upper_bound = true;
      loop->nb_iterations_likely_upper_bound = i_bound;
    }

  /* If an upper bound is smaller than the realistic estimate of the
     number of iterations, use the upper bound instead.  */
  if (loop->any_upper_bound
      && loop->any_estimate
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_estimate))
    loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
  if (loop->any_upper_bound
      && loop->any_likely_upper_bound
      && wi::ltu_p (loop->nb_iterations_upper_bound,
		    loop->nb_iterations_likely_upper_bound))
    loop->nb_iterations_likely_upper_bound = loop->nb_iterations_upper_bound;
}
1832
1833 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1834 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1835 on the number of iterations of LOOP could not be derived, returns -1. */
1836
1837 HOST_WIDE_INT
1838 get_estimated_loop_iterations_int (struct loop *loop)
1839 {
1840 widest_int nit;
1841 HOST_WIDE_INT hwi_nit;
1842
1843 if (!get_estimated_loop_iterations (loop, &nit))
1844 return -1;
1845
1846 if (!wi::fits_shwi_p (nit))
1847 return -1;
1848 hwi_nit = nit.to_shwi ();
1849
1850 return hwi_nit < 0 ? -1 : hwi_nit;
1851 }
1852
1853 /* Returns an upper bound on the number of executions of statements
1854 in the LOOP. For statements before the loop exit, this exceeds
1855 the number of execution of the latch by one. */
1856
1857 HOST_WIDE_INT
1858 max_stmt_executions_int (struct loop *loop)
1859 {
1860 HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1861 HOST_WIDE_INT snit;
1862
1863 if (nit == -1)
1864 return -1;
1865
1866 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1867
1868 /* If the computation overflows, return -1. */
1869 return snit < 0 ? -1 : snit;
1870 }
1871
1872 /* Returns an likely upper bound on the number of executions of statements
1873 in the LOOP. For statements before the loop exit, this exceeds
1874 the number of execution of the latch by one. */
1875
1876 HOST_WIDE_INT
1877 likely_max_stmt_executions_int (struct loop *loop)
1878 {
1879 HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
1880 HOST_WIDE_INT snit;
1881
1882 if (nit == -1)
1883 return -1;
1884
1885 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1886
1887 /* If the computation overflows, return -1. */
1888 return snit < 0 ? -1 : snit;
1889 }
1890
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns false, otherwise
   returns true.  */

bool
get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* Even if the bound is not recorded, possibly we can derive one from
     profile.  */
  if (!loop->any_estimate)
    {
      if (loop->header->count.reliable_p ())
	{
	  *nit = gcov_type_to_wide_int
		   (expected_loop_iterations_unbounded (loop) + 1);
	  return true;
	}
      return false;
    }

  *nit = loop->nb_iterations_estimate;
  return true;
}
1914
1915 /* Sets NIT to an upper bound for the maximum number of executions of the
1916 latch of the LOOP. If we have no reliable estimate, the function returns
1917 false, otherwise returns true. */
1918
1919 bool
1920 get_max_loop_iterations (const struct loop *loop, widest_int *nit)
1921 {
1922 if (!loop->any_upper_bound)
1923 return false;
1924
1925 *nit = loop->nb_iterations_upper_bound;
1926 return true;
1927 }
1928
1929 /* Similar to get_max_loop_iterations, but returns the estimate only
1930 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1931 on the number of iterations of LOOP could not be derived, returns -1. */
1932
1933 HOST_WIDE_INT
1934 get_max_loop_iterations_int (const struct loop *loop)
1935 {
1936 widest_int nit;
1937 HOST_WIDE_INT hwi_nit;
1938
1939 if (!get_max_loop_iterations (loop, &nit))
1940 return -1;
1941
1942 if (!wi::fits_shwi_p (nit))
1943 return -1;
1944 hwi_nit = nit.to_shwi ();
1945
1946 return hwi_nit < 0 ? -1 : hwi_nit;
1947 }
1948
/* Sets NIT to a likely upper bound for the maximum number of executions
   of the latch of the LOOP.  If we have no reliable estimate, the function
   returns false, otherwise returns true.  */

bool
get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
{
  if (!loop->any_likely_upper_bound)
    return false;

  *nit = loop->nb_iterations_likely_upper_bound;
  return true;
}
1962
1963 /* Similar to get_max_loop_iterations, but returns the estimate only
1964 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1965 on the number of iterations of LOOP could not be derived, returns -1. */
1966
1967 HOST_WIDE_INT
1968 get_likely_max_loop_iterations_int (struct loop *loop)
1969 {
1970 widest_int nit;
1971 HOST_WIDE_INT hwi_nit;
1972
1973 if (!get_likely_max_loop_iterations (loop, &nit))
1974 return -1;
1975
1976 if (!wi::fits_shwi_p (nit))
1977 return -1;
1978 hwi_nit = nit.to_shwi ();
1979
1980 return hwi_nit < 0 ? -1 : hwi_nit;
1981 }
1982
1983 /* Returns the loop depth of the loop BB belongs to. */
1984
1985 int
1986 bb_loop_depth (const_basic_block bb)
1987 {
1988 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1989 }
1990
1991 /* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
1992
1993 void
1994 mark_loop_for_removal (loop_p loop)
1995 {
1996 if (loop->header == NULL)
1997 return;
1998 loop->former_header = loop->header;
1999 loop->header = NULL;
2000 loop->latch = NULL;
2001 loops_state_set (LOOPS_NEED_FIXUP);
2002 }