New syntax for -fsanitize-recover.
[gcc.git] / gcc / cfgloop.h
1 /* Natural loop functions
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #ifndef GCC_CFGLOOP_H
21 #define GCC_CFGLOOP_H
22
23 #include "double-int.h"
24 #include "wide-int.h"
25 #include "bitmap.h"
26 #include "sbitmap.h"
27 #include "hashtab.h"
28 #include "hash-set.h"
29 #include "vec.h"
30 #include "machmode.h"
31 #include "tm.h"
32 #include "hard-reg-set.h"
33 #include "input.h"
34 #include "function.h"
35
/* Structure to hold decision about unrolling/peeling.  */
enum lpt_dec
{
  /* Do not transform the loop.  */
  LPT_NONE,
  /* Unroll by a factor known at compile time.  */
  LPT_UNROLL_CONSTANT,
  /* Unroll, with the iteration count computed at run time.  */
  LPT_UNROLL_RUNTIME,
  /* Unroll without knowing the iteration behavior.  */
  LPT_UNROLL_STUPID
};
44
/* The chosen unrolling/peeling transformation for a loop.  */
struct GTY (()) lpt_decision {
  /* The kind of transformation selected (see enum lpt_dec).  */
  enum lpt_dec decision;
  /* The factor associated with the decision, e.g. how many times to
     unroll.  */
  unsigned times;
};
49
/* The type of extend applied to an IV.  */
enum iv_extend_code
{
  /* Sign extension.  */
  IV_SIGN_EXTEND,
  /* Zero extension.  */
  IV_ZERO_EXTEND,
  /* The kind of extension is not known.  */
  IV_UNKNOWN_EXTEND
};
57
/* The structure describing a bound on number of iterations of a loop.  */

struct GTY ((chain_next ("%h.next"))) nb_iter_bound {
  /* The statement STMT is executed at most ...  */
  gimple stmt;

  /* ... BOUND + 1 times (BOUND must be an unsigned constant).
     The + 1 is added for the following reasons:

     a) 0 would otherwise be unused, while we would need to care more about
        overflows (as MAX + 1 is sometimes produced as the estimate on number
	of executions of STMT).
     b) it is consistent with the result of number_of_iterations_exit.  */
  widest_int bound;

  /* True if the statement will cause the loop to be left the (at most)
     BOUND + 1-st time it is executed, that is, all the statements after it
     are executed at most BOUND times.  */
  bool is_exit;

  /* The next bound in the list.  */
  struct nb_iter_bound *next;
};
81
/* Description of the loop exit.  An exit edge appears both in the cyclic
   per-loop list and in the per-edge list used by the EXITS hash table of
   struct loops.  */

struct GTY ((for_user)) loop_exit {
  /* The exit edge.  */
  edge e;

  /* Previous and next exit in the list of the exits of the loop.  */
  struct loop_exit *prev;
  struct loop_exit *next;

  /* Next element in the list of loops from that E exits.  */
  struct loop_exit *next_e;
};
95
/* Hasher for the EXITS hash table in struct loops; loop_exit entries are
   hashed and compared by the exit edge they describe.  */
struct loop_exit_hasher : ggc_hasher<loop_exit *>
{
  typedef edge compare_type;

  static hashval_t hash (loop_exit *);
  static bool equal (loop_exit *, edge);
  static void remove (loop_exit *);
};
104
105 typedef struct loop *loop_p;
106
/* An integer estimation of the number of iterations.  Estimate_state
   describes what is the state of the estimation.  */
enum loop_estimation
{
  /* Estimate was not computed yet.  */
  EST_NOT_COMPUTED,
  /* Estimate is ready.  */
  EST_AVAILABLE,
  /* Number of states; keep last.  */
  EST_LAST
};
117
/* Structure to hold information for each natural loop.  */
struct GTY ((chain_next ("%h.next"))) loop {
  /* Index into loops array.  */
  int num;

  /* Number of loop insns.  */
  unsigned ninsns;

  /* Basic block of loop header.  */
  basic_block header;

  /* Basic block of loop latch.  */
  basic_block latch;

  /* For loop unrolling/peeling decision.  */
  struct lpt_decision lpt_decision;

  /* Average number of executed insns per iteration.  */
  unsigned av_ninsns;

  /* Number of blocks contained within the loop.  */
  unsigned num_nodes;

  /* Superloops of the loop, starting with the outermost loop.  */
  vec<loop_p, va_gc> *superloops;

  /* The first inner (child) loop or NULL if innermost loop.  */
  struct loop *inner;

  /* Link to the next (sibling) loop.  */
  struct loop *next;

  /* Auxiliary info specific to a pass.  */
  PTR GTY ((skip (""))) aux;

  /* The number of times the latch of the loop is executed.  This can be an
     INTEGER_CST, or a symbolic expression representing the number of
     iterations like "N - 1", or a COND_EXPR containing the runtime
     conditions under which the number of iterations is non zero.

     Don't access this field directly: number_of_latch_executions
     computes and caches the computed information in this field.  */
  tree nb_iterations;

  /* An integer guaranteed to be greater or equal to nb_iterations.  Only
     valid if any_upper_bound is true.  */
  widest_int nb_iterations_upper_bound;

  /* An integer giving an estimate on nb_iterations.  Unlike
     nb_iterations_upper_bound, there is no guarantee that it is at least
     nb_iterations.  */
  widest_int nb_iterations_estimate;

  /* True if nb_iterations_upper_bound is valid.  */
  bool any_upper_bound;
  /* True if nb_iterations_estimate is valid.  */
  bool any_estimate;

  /* True if the loop can be parallel.  */
  bool can_be_parallel;

  /* True if -Waggressive-loop-optimizations warned about this loop
     already.  */
  bool warned_aggressive_loop_optimizations;

  /* An integer estimation of the number of iterations.  Estimate_state
     describes what is the state of the estimation.  */
  enum loop_estimation estimate_state;

  /* If > 0, an integer, where the user asserted that for any
     I in [ 0, nb_iterations ) and for any J in
     [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
     of the loop can be safely evaluated concurrently.  */
  int safelen;

  /* True if this loop should never be vectorized.  */
  bool dont_vectorize;

  /* True if we should try harder to vectorize this loop.  */
  bool force_vectorize;

  /* For SIMD loops, this is a unique identifier of the loop, referenced
     by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
     builtins.  */
  tree simduid;

  /* Upper bound on number of iterations of a loop.  */
  struct nb_iter_bound *bounds;

  /* Head of the cyclic list of the exits of the loop.  */
  struct loop_exit *exits;

  /* Number of iteration analysis data for RTL.  */
  struct niter_desc *simple_loop_desc;

  /* For sanity checking during loop fixup we record here the former
     loop header for loops marked for removal.  Note that this prevents
     the basic-block from being collected but its index can still be
     reused.  */
  basic_block former_header;
};
217
/* Flags for state of loop structure.  Each flag records a property the
   current loop structure satisfies; query them with
   loops_state_satisfies_p and update them with loops_state_set /
   loops_state_clear.  */
enum
{
  LOOPS_HAVE_PREHEADERS = 1,
  LOOPS_HAVE_SIMPLE_LATCHES = 2,
  LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
  LOOPS_HAVE_RECORDED_EXITS = 8,
  LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
  LOOP_CLOSED_SSA = 32,
  LOOPS_NEED_FIXUP = 64,
  LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
};
230
231 #define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
232 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
233 #define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
234
/* Structure to hold CFG information about natural loops within a function.  */
struct GTY (()) loops {
  /* State of loops; a mask of the LOOPS_* / LOOP_* flags above.  */
  int state;

  /* Array of the loops.  */
  vec<loop_p, va_gc> *larray;

  /* Maps edges to the list of their descriptions as loop exits.  Edges
     whose sources or destinations have loop_father == NULL (which may
     happen during the cfg manipulations) should not appear in EXITS.  */
  hash_table<loop_exit_hasher> *GTY(()) exits;

  /* Pointer to root of loop hierarchy tree.  */
  struct loop *tree_root;
};
251
252 /* Loop recognition. */
253 bool bb_loop_header_p (basic_block);
254 void init_loops_structure (struct function *, struct loops *, unsigned);
255 extern struct loops *flow_loops_find (struct loops *);
256 extern void disambiguate_loops_with_multiple_latches (void);
257 extern void flow_loops_free (struct loops *);
258 extern void flow_loops_dump (FILE *,
259 void (*)(const struct loop *, FILE *, int), int);
260 extern void flow_loop_dump (const struct loop *, FILE *,
261 void (*)(const struct loop *, FILE *, int), int);
262 struct loop *alloc_loop (void);
263 extern void flow_loop_free (struct loop *);
264 int flow_loop_nodes_find (basic_block, struct loop *);
265 unsigned fix_loop_structure (bitmap changed_bbs);
266 bool mark_irreducible_loops (void);
267 void release_recorded_exits (void);
268 void record_loop_exits (void);
269 void rescan_loop_exit (edge, bool, bool);
270
271 /* Loop data structure manipulation/querying. */
272 extern void flow_loop_tree_node_add (struct loop *, struct loop *);
273 extern void flow_loop_tree_node_remove (struct loop *);
274 extern void place_new_loop (struct function *, struct loop *);
275 extern void add_loop (struct loop *, struct loop *);
276 extern bool flow_loop_nested_p (const struct loop *, const struct loop *);
277 extern bool flow_bb_inside_loop_p (const struct loop *, const_basic_block);
278 extern struct loop * find_common_loop (struct loop *, struct loop *);
279 struct loop *superloop_at_depth (struct loop *, unsigned);
280 struct eni_weights_d;
281 extern int num_loop_insns (const struct loop *);
282 extern int average_num_loop_insns (const struct loop *);
283 extern unsigned get_loop_level (const struct loop *);
284 extern bool loop_exit_edge_p (const struct loop *, const_edge);
285 extern bool loop_exits_to_bb_p (struct loop *, basic_block);
286 extern bool loop_exits_from_bb_p (struct loop *, basic_block);
287 extern void mark_loop_exit_edges (void);
288 extern location_t get_loop_location (struct loop *loop);
289
290 /* Loops & cfg manipulation. */
291 extern basic_block *get_loop_body (const struct loop *);
292 extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
293 unsigned);
294 extern basic_block *get_loop_body_in_dom_order (const struct loop *);
295 extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
296 extern basic_block *get_loop_body_in_custom_order (const struct loop *,
297 int (*) (const void *, const void *));
298
299 extern vec<edge> get_loop_exit_edges (const struct loop *);
300 extern edge single_exit (const struct loop *);
301 extern edge single_likely_exit (struct loop *loop);
302 extern unsigned num_loop_branches (const struct loop *);
303
304 extern edge loop_preheader_edge (const struct loop *);
305 extern edge loop_latch_edge (const struct loop *);
306
307 extern void add_bb_to_loop (basic_block, struct loop *);
308 extern void remove_bb_from_loops (basic_block);
309
310 extern void cancel_loop_tree (struct loop *);
311 extern void delete_loop (struct loop *);
312
313 enum
314 {
315 CP_SIMPLE_PREHEADERS = 1,
316 CP_FALLTHRU_PREHEADERS = 2
317 };
318
319 basic_block create_preheader (struct loop *, int);
320 extern void create_preheaders (int);
321 extern void force_single_succ_latches (void);
322
323 extern void verify_loop_structure (void);
324
325 /* Loop analysis. */
326 extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
327 gcov_type expected_loop_iterations_unbounded (const struct loop *);
328 extern unsigned expected_loop_iterations (const struct loop *);
329 extern rtx doloop_condition_get (rtx);
330
331
332 /* Loop manipulation. */
333 extern bool can_duplicate_loop_p (const struct loop *loop);
334
335 #define DLTHE_FLAG_UPDATE_FREQ 1 /* Update frequencies in
336 duplicate_loop_to_header_edge. */
337 #define DLTHE_RECORD_COPY_NUMBER 2 /* Record copy number in the aux
338 field of newly create BB. */
339 #define DLTHE_FLAG_COMPLETTE_PEEL 4 /* Update frequencies expecting
340 a complete peeling. */
341
342 extern edge create_empty_if_region_on_edge (edge, tree);
343 extern struct loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
344 tree *, tree *, struct loop *);
345 extern struct loop * duplicate_loop (struct loop *, struct loop *);
346 extern void copy_loop_info (struct loop *loop, struct loop *target);
347 extern void duplicate_subloops (struct loop *, struct loop *);
348 extern bool duplicate_loop_to_header_edge (struct loop *, edge,
349 unsigned, sbitmap, edge,
350 vec<edge> *, int);
351 extern struct loop *loopify (edge, edge,
352 basic_block, edge, edge, bool,
353 unsigned, unsigned);
354 struct loop * loop_version (struct loop *, void *,
355 basic_block *, unsigned, unsigned, unsigned, bool);
356 extern bool remove_path (edge);
357 extern void unloop (struct loop *, bool *, bitmap);
358 extern void scale_loop_frequencies (struct loop *, int, int);
359 void mark_loop_for_removal (loop_p);
360
361
362 /* Induction variable analysis. */
363
364 /* The description of induction variable. The things are a bit complicated
365 due to need to handle subregs and extends. The value of the object described
366 by it can be obtained as follows (all computations are done in extend_mode):
367
368 Value in i-th iteration is
369 delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
370
371 If first_special is true, the value in the first iteration is
372 delta + mult * base
373
374 If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
375 subreg_{mode} (base + i * step)
376
377 The get_iv_value function can be used to obtain these expressions.
378
379 ??? Add a third mode field that would specify the mode in that inner
380 computation is done, which would enable it to be different from the
381 outer one? */
382
/* The description of an induction variable; see the big comment above for
   how the fields combine into the IV's value in each iteration.  */
struct rtx_iv
{
  /* Its base and step (mode of base and step is supposed to be extend_mode,
     see the description above).  */
  rtx base, step;

  /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
     or IV_UNKNOWN_EXTEND).  */
  enum iv_extend_code extend;

  /* Operations applied in the extended mode.  */
  rtx delta, mult;

  /* The mode it is extended to.  */
  enum machine_mode extend_mode;

  /* The mode the variable iterates in.  */
  enum machine_mode mode;

  /* Whether the first iteration needs to be handled specially.  */
  unsigned first_special : 1;
};
405
/* The description of an exit from the loop and of the number of iterations
   till we take the exit.  */

struct GTY(()) niter_desc
{
  /* The edge out of the loop.  */
  edge out_edge;

  /* The other edge leading from the condition.  */
  edge in_edge;

  /* True if we are able to say anything about number of iterations of the
     loop.  */
  bool simple_p;

  /* True if the loop iterates the constant number of times.  */
  bool const_iter;

  /* Number of iterations if constant.  */
  uint64_t niter;

  /* Assumptions under which the rest of the information is valid.  */
  rtx assumptions;

  /* Assumptions under which the loop ends before reaching the latch,
     even if value of niter_expr says otherwise.  */
  rtx noloop_assumptions;

  /* Condition under which the loop is infinite.  */
  rtx infinite;

  /* Whether the comparison is signed.  */
  bool signed_p;

  /* The mode in which niter_expr should be computed.  */
  enum machine_mode mode;

  /* The number of iterations of the loop.  */
  rtx niter_expr;
};
446
447 extern void iv_analysis_loop_init (struct loop *);
448 extern bool iv_analyze (rtx_insn *, rtx, struct rtx_iv *);
449 extern bool iv_analyze_result (rtx_insn *, rtx, struct rtx_iv *);
450 extern bool iv_analyze_expr (rtx_insn *, rtx, enum machine_mode,
451 struct rtx_iv *);
452 extern rtx get_iv_value (struct rtx_iv *, rtx);
453 extern bool biv_p (rtx_insn *, rtx);
454 extern void find_simple_exit (struct loop *, struct niter_desc *);
455 extern void iv_analysis_done (void);
456
457 extern struct niter_desc *get_simple_loop_desc (struct loop *loop);
458 extern void free_simple_loop_desc (struct loop *loop);
459
/* Returns the cached RTL number-of-iterations description of LOOP
   (the simple_loop_desc field; see get_simple_loop_desc).  */
static inline struct niter_desc *
simple_loop_desc (struct loop *loop)
{
  return loop->simple_loop_desc;
}
465
466 /* Accessors for the loop structures. */
467
/* Returns the loop with index NUM from FNs loop tree.  NUM must be a
   valid index into the loops array.  */

static inline struct loop *
get_loop (struct function *fn, unsigned num)
{
  return (*loops_for_fn (fn)->larray)[num];
}
475
/* Returns the number of superloops of LOOP, i.e. its depth in the loop
   tree.  */

static inline unsigned
loop_depth (const struct loop *loop)
{
  return vec_safe_length (loop->superloops);
}
483
484 /* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
485 loop. */
486
487 static inline struct loop *
488 loop_outer (const struct loop *loop)
489 {
490 unsigned n = vec_safe_length (loop->superloops);
491
492 if (n == 0)
493 return NULL;
494
495 return (*loop->superloops)[n - 1];
496 }
497
/* Returns true if LOOP has at least one exit edge.  */

static inline bool
loop_has_exit_edges (const struct loop *loop)
{
  /* EXITS heads a cyclic list; a NULL edge in the first element means
     the list is empty.  */
  return loop->exits->next->e != NULL;
}
505
506 /* Returns the list of loops in FN. */
507
508 inline vec<loop_p, va_gc> *
509 get_loops (struct function *fn)
510 {
511 struct loops *loops = loops_for_fn (fn);
512 if (!loops)
513 return NULL;
514
515 return loops->larray;
516 }
517
518 /* Returns the number of loops in FN (including the removed
519 ones and the fake loop that forms the root of the loop tree). */
520
521 static inline unsigned
522 number_of_loops (struct function *fn)
523 {
524 struct loops *loops = loops_for_fn (fn);
525 if (!loops)
526 return 0;
527
528 return vec_safe_length (loops->larray);
529 }
530
/* Returns true if state of the loops satisfies all properties
   described by FLAGS.  Note that unlike loops_state_clear, this
   dereferences current_loops unconditionally, so it must be non-NULL.  */

static inline bool
loops_state_satisfies_p (unsigned flags)
{
  return (current_loops->state & flags) == flags;
}
539
/* Adds FLAGS to the state of the current function's loops (ORs them into
   the state mask).  */

static inline void
loops_state_set (unsigned flags)
{
  current_loops->state |= flags;
}
547
548 /* Clears FLAGS from the loops state. */
549
550 static inline void
551 loops_state_clear (unsigned flags)
552 {
553 if (!current_loops)
554 return;
555 current_loops->state &= ~flags;
556 }
557
558 /* Loop iterators. */
559
/* Flags for loop iteration.  Passed to the loop_iterator constructor and
   the FOR_EACH_LOOP macro.  */

enum li_flags
{
  LI_INCLUDE_ROOT = 1,		/* Include the fake root of the loop tree.  */
  LI_FROM_INNERMOST = 2,	/* Iterate over the loops in the reverse order,
				   starting from innermost ones.  */
  LI_ONLY_INNERMOST = 4		/* Iterate only over innermost loops.  */
};
569
/* The iterator for loops.  Holds a worklist of loop numbers computed by
   the constructor; used via the FOR_EACH_LOOP macro below.  */

struct loop_iterator
{
  loop_iterator (loop_p *loop, unsigned flags);
  ~loop_iterator ();

  /* Returns the next loop to visit, or NULL when the worklist is
     exhausted.  */
  inline loop_p next ();

  /* The list of loops to visit, as loop numbers.  */
  vec<int> to_visit;

  /* The index of the actual loop.  */
  unsigned idx;
};
585
586 inline loop_p
587 loop_iterator::next ()
588 {
589 int anum;
590
591 while (this->to_visit.iterate (this->idx, &anum))
592 {
593 this->idx++;
594 loop_p loop = get_loop (cfun, anum);
595 if (loop)
596 return loop;
597 }
598
599 return NULL;
600 }
601
/* Constructs a loop iterator over the loops of CFUN according to FLAGS
   (a mask of the LI_* values above) and stores the first loop to visit
   in *LOOP (NULL if there is none).  */
inline
loop_iterator::loop_iterator (loop_p *loop, unsigned flags)
{
  struct loop *aloop;
  unsigned i;
  int mn;

  this->idx = 0;
  /* With no loop structure there is nothing to iterate over.  */
  if (!current_loops)
    {
      this->to_visit.create (0);
      *loop = NULL;
      return;
    }

  this->to_visit.create (number_of_loops (cfun));
  /* MN is the smallest loop number to record; loop 0 is the fake root of
     the loop tree, included only with LI_INCLUDE_ROOT.  */
  mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;

  if (flags & LI_ONLY_INNERMOST)
    {
      /* Innermost loops are exactly those with no children.  */
      for (i = 0; vec_safe_iterate (current_loops->larray, i, &aloop); i++)
	if (aloop != NULL
	    && aloop->inner == NULL
	    && aloop->num >= mn)
	  this->to_visit.quick_push (aloop->num);
    }
  else if (flags & LI_FROM_INNERMOST)
    {
      /* Push the loops to LI->TO_VISIT in postorder.  */
      for (aloop = current_loops->tree_root;
	   aloop->inner != NULL;
	   aloop = aloop->inner)
	continue;

      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  if (aloop->next)
	    {
	      /* Descend to the deepest first child of the next sibling.  */
	      for (aloop = aloop->next;
		   aloop->inner != NULL;
		   aloop = aloop->inner)
		continue;
	    }
	  else if (!loop_outer (aloop))
	    break;
	  else
	    aloop = loop_outer (aloop);
	}
    }
  else
    {
      /* Push the loops to LI->TO_VISIT in preorder.  */
      aloop = current_loops->tree_root;
      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  if (aloop->inner != NULL)
	    aloop = aloop->inner;
	  else
	    {
	      /* Climb up until a sibling exists or we reach the root.  */
	      while (aloop != NULL && aloop->next == NULL)
		aloop = loop_outer (aloop);
	      if (aloop == NULL)
		break;
	      aloop = aloop->next;
	    }
	}
    }

  /* Prime *LOOP with the first loop in the worklist.  */
  *loop = this->next ();
}
678
/* Releases the worklist allocated by the constructor.  */
inline
loop_iterator::~loop_iterator ()
{
  this->to_visit.release ();
}
684
685 #define FOR_EACH_LOOP(LOOP, FLAGS) \
686 for (loop_iterator li(&(LOOP), FLAGS); \
687 (LOOP); \
688 (LOOP) = li.next ())
689
/* The properties of the target.  Accessed through the target_* macros
   below; with SWITCHABLE_TARGET there is one instance per target.  */
struct target_cfgloop {
  /* Number of available registers.  */
  unsigned x_target_avail_regs;

  /* Number of available registers that are call-clobbered.  */
  unsigned x_target_clobbered_regs;

  /* Number of registers reserved for temporary expressions.  */
  unsigned x_target_res_regs;

  /* The cost for register when there still is some reserve, but we are
     approaching the number of available registers.  */
  unsigned x_target_reg_cost[2];

  /* The cost for register when we need to spill.  */
  unsigned x_target_spill_cost[2];
};
708
709 extern struct target_cfgloop default_target_cfgloop;
710 #if SWITCHABLE_TARGET
711 extern struct target_cfgloop *this_target_cfgloop;
712 #else
713 #define this_target_cfgloop (&default_target_cfgloop)
714 #endif
715
716 #define target_avail_regs \
717 (this_target_cfgloop->x_target_avail_regs)
718 #define target_clobbered_regs \
719 (this_target_cfgloop->x_target_clobbered_regs)
720 #define target_res_regs \
721 (this_target_cfgloop->x_target_res_regs)
722 #define target_reg_cost \
723 (this_target_cfgloop->x_target_reg_cost)
724 #define target_spill_cost \
725 (this_target_cfgloop->x_target_spill_cost)
726
727 /* Register pressure estimation for induction variable optimizations & loop
728 invariant motion. */
729 extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
730 extern void init_set_costs (void);
731
732 /* Loop optimizer initialization. */
733 extern void loop_optimizer_init (unsigned);
734 extern void loop_optimizer_finalize (void);
735
/* Optimization passes.  Flags controlling the unrolling passes.  */
enum
{
  UAP_UNROLL = 1,	/* Enables unrolling of loops if it seems profitable.  */
  UAP_UNROLL_ALL = 2	/* Enables unrolling of all loops.  */
};
742
743 extern void doloop_optimize_loops (void);
744 extern void move_loop_invariants (void);
745 extern void scale_loop_profile (struct loop *loop, int scale, gcov_type iteration_bound);
746 extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
747
748 /* Returns the outermost loop of the loop nest that contains LOOP.*/
749 static inline struct loop *
750 loop_outermost (struct loop *loop)
751 {
752 unsigned n = vec_safe_length (loop->superloops);
753
754 if (n <= 1)
755 return loop;
756
757 return (*loop->superloops)[1];
758 }
759
760 extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
761 extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
762 extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *);
763 extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
764 extern bool get_max_loop_iterations (struct loop *loop, widest_int *nit);
765 extern int bb_loop_depth (const_basic_block);
766
/* Converts VAL to widest_int.  */

static inline widest_int
gcov_type_to_wide_int (gcov_type val)
{
  HOST_WIDE_INT a[2];

  /* Low half.  */
  a[0] = (unsigned HOST_WIDE_INT) val;
  /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
     the size of type.  */
  val >>= HOST_BITS_PER_WIDE_INT - 1;
  val >>= 1;
  /* High half.  */
  a[1] = (unsigned HOST_WIDE_INT) val;

  return widest_int::from_array (a, 2);
}
783 #endif /* GCC_CFGLOOP_H */