1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* Inlining decision heuristics
23
24    We separate inlining decisions from the inliner itself and store them
25    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
26    documentation for the particular representation of inline plans in the
27    callgraph.
28
29 There are three major parts of this file:
30
31 cgraph_mark_inline implementation
32
33      This function marks a given call inline and performs the necessary
34      modifications of the cgraph (production of the clones and updating of
35      overall statistics).
36
37 inlining heuristics limits
38
39      These functions check that a particular inlining is allowed by the
40      limits specified by the user (allowed function growth, overall unit
41      growth and so on).
42
43 inlining heuristics
44
45      This is the implementation of the IPA pass that aims to get as much
46      benefit from inlining as possible while obeying the limits checked above.
47
48      The implementation of the particular heuristics is separated from
49      the rest of the code to make it easier to replace it with a more
50      complicated implementation in the future.  The rest of the inlining
51      code acts as a library aimed at modifying the callgraph and verifying
52      that the parameters on code size growth fit.
53
54      To mark a given call inline, use the cgraph_mark_inline function;
55      the verification is performed by cgraph_default_inline_p and
56      cgraph_check_inline_limits.
57
58      The heuristic implements a simple knapsack-style algorithm: order all
59      functions by their "profitability" (estimated by code size growth) and
60      inline them in priority order (see the standalone sketch after this comment).
61
62      cgraph_decide_inlining implements the heuristics taking the whole
63      callgraph into account, while cgraph_decide_inlining_incrementally
64      considers only one function at a time and is used in non-unit-at-a-time mode.
65
66 The inliner itself is split into several passes:
67
68 pass_inline_parameters
69
70    This pass computes the local properties of functions used by the inliner:
71    estimated function body size, whether the function is inlinable at all,
72    and stack frame consumption.
73
74    Before executing any of the inliner passes, this local pass has to be
75    applied to each function in the callgraph (i.e. run as a subpass of some
76    earlier IPA pass).  The results are made out of date by any optimization
77    applied to the function body.
78
79 pass_early_inlining
80
81    A simple local inlining pass that inlines callees into the current
82    function.  This pass does no global whole-compilation-unit analysis, so
83    when it is allowed to do inlining that expands code size it might result
84    in unbounded growth of the whole unit.
85
86    This is the main inlining pass in non-unit-at-a-time mode.
87
88    With unit-at-a-time the pass is run during conversion into SSA form.
89    Only functions already converted into SSA form are inlined, so the
90    conversion must happen in topological order on the callgraph (that is
91    maintained by the pass manager).  The functions are early optimized
92    after inlining, so the early inliner sees the unoptimized function
93    itself, but all considered callees are already optimized, allowing it to
94    unfold the abstraction penalty on C++ effectively and cheaply.
95
96 pass_ipa_early_inlining
97
98    With profiling, the early inlining is also necessary to reduce
99    instrumentation costs on programs with a high abstraction penalty (doing
100   many redundant calls).  This can't happen in parallel with early
101   optimization and profile instrumentation, because we would end up
102   re-instrumenting already instrumented function bodies we brought in via
103   inlining.
104
105   To avoid this, this pass is executed as an IPA pass before profiling.  It
106   is a simple wrapper around pass_early_inlining that ensures inlining happens first.
107
108 pass_ipa_inline
109
110   This is the main pass implementing a simple greedy algorithm for inlining
111   of small functions (which may grow the compilation unit overall) and for
112   inlining of functions called once.  The pass computes just the so-called
113   inline plan (the representation of the inlining to be done in the callgraph)
114   and, unlike early inlining, does not perform the inlining itself.
115
116 pass_apply_inline
117
118   This pass performs the actual inlining decided by pass_ipa_inline on a
119   given function.  Possibly the function body before inlining is saved when
120   it is needed for further inlining later.
121 */
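/* The knapsack-style priority ordering described above can be illustrated
   with a minimal, self-contained sketch.  Everything below is hypothetical
   (toy types, made-up sizes and badness values, not GCC data structures) and
   only shows the shape of the greedy loop: order candidate edges by badness,
   inline while the overall size budget permits, and stop once the budget is
   exhausted.  It is not part of the real inliner and is excluded from the
   build with #if 0.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_edge
{
  const char *name;	/* "caller->callee", for dumping only.  */
  int growth;		/* Estimated insn growth if this edge is inlined.  */
  int badness;		/* Smaller is better, as in cgraph_edge_badness.  */
};

static int
toy_badness_cmp (const void *a, const void *b)
{
  return ((const struct toy_edge *) a)->badness
	 - ((const struct toy_edge *) b)->badness;
}

int
main (void)
{
  /* Hypothetical candidates; negative growth means inlining shrinks code.  */
  struct toy_edge cand[] = {
    { "a->b", -5, -5 * 256 },
    { "a->c", 12, 12 * 256 },
    { "b->d", 40, 40 * 256 },
  };
  size_t n = sizeof cand / sizeof cand[0], i;
  int overall = 100;		/* Current unit size in insns.  */
  int budget = 130;		/* Analogue of max_insns in this file.  */

  qsort (cand, n, sizeof cand[0], toy_badness_cmp);
  for (i = 0; i < n && overall <= budget; i++)
    {
      overall += cand[i].growth;
      printf ("inlining %s, unit now %i insns\n", cand[i].name, overall);
    }
  return 0;
}
#endif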
122
123 #include "config.h"
124 #include "system.h"
125 #include "coretypes.h"
126 #include "tm.h"
127 #include "tree.h"
128 #include "tree-inline.h"
129 #include "langhooks.h"
130 #include "flags.h"
131 #include "cgraph.h"
132 #include "diagnostic.h"
133 #include "timevar.h"
134 #include "params.h"
135 #include "fibheap.h"
136 #include "intl.h"
137 #include "tree-pass.h"
138 #include "hashtab.h"
139 #include "coverage.h"
140 #include "ggc.h"
141 #include "tree-flow.h"
142
143 /* Modes the incremental inliner operates in:
144
145    In ALWAYS_INLINE mode, only functions marked
146    always_inline are inlined.  This mode is used after detecting a cycle
147    during flattening.
148
149    In SIZE mode, only functions that reduce the function body size after
150    inlining are inlined; this is used during early inlining.
151
152    In SPEED mode, all small functions are inlined.  This might result in
153    unbounded growth of the compilation unit and is used only in
154    non-unit-at-a-time mode.
155
156    In ALL mode, everything is inlined.  This is used during flattening.  */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE,
161 INLINE_SPEED,
162 INLINE_ALL
163 };
164 static bool
165 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
166 int);
167
168
169 /* Statistics we collect about inlining algorithm. */
170 static int ncalls_inlined;
171 static int nfunctions_inlined;
172 static int initial_insns;
173 static int overall_insns;
174 static int max_insns;
175 static gcov_type max_count;
176
177 /* Estimate size of the function after inlining WHAT into TO. */
178
179 static int
180 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
181 struct cgraph_node *what)
182 {
183 int size;
184 tree fndecl = what->decl, arg;
185 int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
186
187 for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
188 call_insns += estimate_move_cost (TREE_TYPE (arg));
189 size = (what->global.insns - call_insns) * times + to->global.insns;
190 gcc_assert (size >= 0);
191 return size;
192 }
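/* As a worked example of the estimate above (all numbers hypothetical):
   if WHAT has 40 insns, --param inline-call-cost is 16 and its two arguments
   each cost 1 insn to move, then call_insns = 16 + 1 + 1 = 18, so inlining
   one such call into a 100-insn caller is estimated as
   (40 - 18) * 1 + 100 = 122 insns.  */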
193
194 /* E is expected to be an edge being inlined.  Clone the destination node
195    of the edge and redirect it to the new clone.
196    DUPLICATE is used for bookkeeping on whether we are actually creating
197    new clones or re-using the node originally representing the out-of-line
198    function call.  */
199 void
200 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_original)
201 {
202 HOST_WIDE_INT peak;
203 if (duplicate)
204 {
205 /* We may eliminate the need for out-of-line copy to be output.
206 In that case just go ahead and re-use it. */
207 if (!e->callee->callers->next_caller
208 && !e->callee->needed
209 && flag_unit_at_a_time)
210 {
211 gcc_assert (!e->callee->global.inlined_to);
212 if (DECL_SAVED_TREE (e->callee->decl))
213 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
214 duplicate = false;
215 }
216 else
217 {
218 struct cgraph_node *n;
219 n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
220 update_original);
221 cgraph_redirect_edge_callee (e, n);
222 }
223 }
224
225 if (e->caller->global.inlined_to)
226 e->callee->global.inlined_to = e->caller->global.inlined_to;
227 else
228 e->callee->global.inlined_to = e->caller;
229 e->callee->global.stack_frame_offset
230 = e->caller->global.stack_frame_offset + e->caller->local.estimated_self_stack_size;
231 peak = e->callee->global.stack_frame_offset + e->callee->local.estimated_self_stack_size;
232 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
233 e->callee->global.inlined_to->global.estimated_stack_size = peak;
234
235 /* Recursively clone all bodies. */
236 for (e = e->callee->callees; e; e = e->next_callee)
237 if (!e->inline_failed)
238 cgraph_clone_inlined_nodes (e, duplicate, update_original);
239 }
240
241 /* Mark edge E as inlined and update the callgraph accordingly.
242    UPDATE_ORIGINAL specifies whether the profile of the original function
243    should be updated.  */
244
245 void
246 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
247 {
248 int old_insns = 0, new_insns = 0;
249 struct cgraph_node *to = NULL, *what;
250
251 if (e->callee->inline_decl)
252 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
253
254 gcc_assert (e->inline_failed);
255 e->inline_failed = NULL;
256
257 if (!e->callee->global.inlined && flag_unit_at_a_time)
258 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
259 e->callee->global.inlined = true;
260
261 cgraph_clone_inlined_nodes (e, true, update_original);
262
263 what = e->callee;
264
265 /* Now update size of caller and all functions caller is inlined into. */
266 for (;e && !e->inline_failed; e = e->caller->callers)
267 {
268 old_insns = e->caller->global.insns;
269 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
270 what);
271 gcc_assert (new_insns >= 0);
272 to = e->caller;
273 to->global.insns = new_insns;
274 }
275 gcc_assert (what->global.inlined_to == to);
276 if (new_insns > old_insns)
277 overall_insns += new_insns - old_insns;
278 ncalls_inlined++;
279 }
280
281 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
282    Return the following unredirected edge in the list of callers
283    of EDGE->CALLEE.  */
284
285 static struct cgraph_edge *
286 cgraph_mark_inline (struct cgraph_edge *edge)
287 {
288 struct cgraph_node *to = edge->caller;
289 struct cgraph_node *what = edge->callee;
290 struct cgraph_edge *e, *next;
291 int times = 0;
292
293 /* Look for all calls, mark them inline and clone recursively
294 all inlined functions. */
295 for (e = what->callers; e; e = next)
296 {
297 next = e->next_caller;
298 if (e->caller == to && e->inline_failed)
299 {
300 cgraph_mark_inline_edge (e, true);
301 if (e == edge)
302 edge = next;
303 times++;
304 }
305 }
306 gcc_assert (times);
307 return edge;
308 }
309
310 /* Estimate the growth caused by inlining NODE into all callees. */
311
312 static int
313 cgraph_estimate_growth (struct cgraph_node *node)
314 {
315 int growth = 0;
316 struct cgraph_edge *e;
317 if (node->global.estimated_growth != INT_MIN)
318 return node->global.estimated_growth;
319
320 for (e = node->callers; e; e = e->next_caller)
321 if (e->inline_failed)
322 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
323 - e->caller->global.insns);
324
325   /* ??? Wrong for self-recursive functions or cases where we decide not to
326      inline for different reasons, but it is not a big deal as in that case
327      we will keep the body around, but we will also avoid some inlining.  */
328 if (!node->needed && !DECL_EXTERNAL (node->decl))
329 growth -= node->global.insns;
330
331 node->global.estimated_growth = growth;
332 return growth;
333 }
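/* Worked example for the growth estimate (hypothetical numbers, continuing
   the example above): each remaining call site of the 40-insn function grows
   its caller by 40 - 18 = 22 insns; three such call sites give 3 * 22 = 66,
   and if the offline body can then be eliminated the estimated growth is
   66 - 40 = 26 insns.  */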
334
335 /* Return false when inlining WHAT into TO is not a good idea
336    as it would cause too large a growth of function bodies.
337    When ONE_ONLY is true, assume that only one call site is going
338    to be inlined, otherwise figure out how many call sites in
339    TO call WHAT and verify that all can be inlined.  */
341
342 static bool
343 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
344 const char **reason, bool one_only)
345 {
346 int times = 0;
347 struct cgraph_edge *e;
348 int newsize;
349 int limit;
350 HOST_WIDE_INT stack_size_limit, inlined_stack;
351
352 if (one_only)
353 times = 1;
354 else
355 for (e = to->callees; e; e = e->next_callee)
356 if (e->callee == what)
357 times++;
358
359 if (to->global.inlined_to)
360 to = to->global.inlined_to;
361
362   /* When inlining a large function body called once into a small function,
363      take the inlined function as the base for limiting the growth.  */
364 if (to->local.self_insns > what->local.self_insns)
365 limit = to->local.self_insns;
366 else
367 limit = what->local.self_insns;
368
369 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
370
371 /* Check the size after inlining against the function limits. But allow
372 the function to shrink if it went over the limits by forced inlining. */
373 newsize = cgraph_estimate_size_after_inlining (times, to, what);
374 if (newsize >= to->global.insns
375 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
376 && newsize > limit)
377 {
378 if (reason)
379 *reason = N_("--param large-function-growth limit reached");
380 return false;
381 }
382
383 stack_size_limit = to->local.estimated_self_stack_size;
384
385 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
386
387 inlined_stack = (to->global.stack_frame_offset
388 + to->local.estimated_self_stack_size
389 + what->global.estimated_stack_size);
390 if (inlined_stack > stack_size_limit
391 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
392 {
393 if (reason)
394 *reason = N_("--param large-stack-frame-growth limit reached");
395 return false;
396 }
397 return true;
398 }
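/* Worked example for the large-function-growth check (hypothetical numbers):
   with a 300-insn caller, a 40-insn callee and --param large-function-growth
   set to 100, the limit is 300 + 300 * 100 / 100 = 600 insns; the inlining is
   rejected only if the estimated new size does not shrink the caller, exceeds
   --param large-function-insns and also exceeds 600.  */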
399
400 /* Return true when function N is small enough to be inlined. */
401
402 bool
403 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
404 {
405 tree decl = n->decl;
406
407 if (n->inline_decl)
408 decl = n->inline_decl;
409 if (!DECL_INLINE (decl))
410 {
411 if (reason)
412 *reason = N_("function not inlinable");
413 return false;
414 }
415
416 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
417 {
418 if (reason)
419 *reason = N_("function body not available");
420 return false;
421 }
422
423 if (DECL_DECLARED_INLINE_P (decl))
424 {
425 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
426 {
427 if (reason)
428 *reason = N_("--param max-inline-insns-single limit reached");
429 return false;
430 }
431 }
432 else
433 {
434 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
435 {
436 if (reason)
437 *reason = N_("--param max-inline-insns-auto limit reached");
438 return false;
439 }
440 }
441
442 return true;
443 }
444
445 /* Return true when inlining WHAT would create recursive inlining.
446    We call recursive inlining all cases where the same function appears
447    more than once in a single recursion nest path in the inline graph.  */
448
449 static bool
450 cgraph_recursive_inlining_p (struct cgraph_node *to,
451 struct cgraph_node *what,
452 const char **reason)
453 {
454 bool recursive;
455 if (to->global.inlined_to)
456 recursive = what->decl == to->global.inlined_to->decl;
457 else
458 recursive = what->decl == to->decl;
459   /* Marking a recursive function inline has sane semantics and thus we
460      should not warn about it.  */
461 if (recursive && reason)
462 *reason = (what->local.disregard_inline_limits
463 ? N_("recursive inlining") : "");
464 return recursive;
465 }
466
467 /* Return true if the call can be hot. */
468 static bool
469 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
470 {
471 if (profile_info && flag_branch_probabilities
472 && (edge->count
473 <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
474 return false;
475 return true;
476 }
477
478 /* A cost model driving the inlining heuristics in such a way that edges
479    with the smallest badness are inlined first.  After each inlining is
480    performed, the costs of all caller edges of the nodes affected are
481    recomputed so the metrics may accurately depend on values such as the
482    number of inlinable callers of the function or the function body size.
483
484    With profiling we use the number of executions of each edge to drive
485    the cost.  We should also distinguish hot and cold calls, where the
486    cold calls are inlined only when code size is improved overall.  */
488
489 static int
490 cgraph_edge_badness (struct cgraph_edge *edge)
491 {
492 if (max_count)
493 {
494 int growth =
495 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
496 growth -= edge->caller->global.insns;
497
498       /* Always prefer inlining that saves code size.  */
499 if (growth <= 0)
500 return INT_MIN - growth;
501 return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
502 }
503 else
504 {
505 int nest = MIN (edge->loop_nest, 8);
506 int badness = cgraph_estimate_growth (edge->callee) * 256;
507
508 /* Decrease badness if call is nested. */
509 if (badness > 0)
510 badness >>= nest;
511 else
512 badness <<= nest;
513
514       /* Make recursive inlining always happen after other inlining is done.  */
515 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
516 return badness + 1;
517 else
518 return badness;
519 }
520 }
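/* Worked example for the profile-feedback branch of cgraph_edge_badness
   (hypothetical numbers, assuming a 32-bit int with INT_MIN == -2147483648):
   an edge executed max_count / 2 times with an estimated growth of 16 insns
   gets badness (INT_MIN / 2) / 16 == -67108864, while an edge executed only
   max_count / 8 times with the same growth gets -16777216; the hotter edge
   has the smaller key and is extracted from the heap first.  */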
521
522 /* Recompute heap nodes for each caller edge of NODE.  */
523
524 static void
525 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
526 bitmap updated_nodes)
527 {
528 struct cgraph_edge *edge;
529 const char *failed_reason;
530
531 if (!node->local.inlinable || node->local.disregard_inline_limits
532 || node->global.inlined_to)
533 return;
534 if (bitmap_bit_p (updated_nodes, node->uid))
535 return;
536 bitmap_set_bit (updated_nodes, node->uid);
537 node->global.estimated_growth = INT_MIN;
538
539 if (!node->local.inlinable)
540 return;
541 /* Prune out edges we won't inline into anymore. */
542 if (!cgraph_default_inline_p (node, &failed_reason))
543 {
544 for (edge = node->callers; edge; edge = edge->next_caller)
545 if (edge->aux)
546 {
547 fibheap_delete_node (heap, edge->aux);
548 edge->aux = NULL;
549 if (edge->inline_failed)
550 edge->inline_failed = failed_reason;
551 }
552 return;
553 }
554
555 for (edge = node->callers; edge; edge = edge->next_caller)
556 if (edge->inline_failed)
557 {
558 int badness = cgraph_edge_badness (edge);
559 if (edge->aux)
560 {
561 fibnode_t n = edge->aux;
562 gcc_assert (n->data == edge);
563 if (n->key == badness)
564 continue;
565
566 /* fibheap_replace_key only increase the keys. */
567 if (fibheap_replace_key (heap, n, badness))
568 continue;
569 fibheap_delete_node (heap, edge->aux);
570 }
571 edge->aux = fibheap_insert (heap, badness, edge);
572 }
573 }
574
575 /* Recompute heap nodes for the caller edges of each callee of NODE.  */
576
577 static void
578 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
579 bitmap updated_nodes)
580 {
581 struct cgraph_edge *e;
582 node->global.estimated_growth = INT_MIN;
583
584 for (e = node->callees; e; e = e->next_callee)
585 if (e->inline_failed)
586 update_caller_keys (heap, e->callee, updated_nodes);
587 else if (!e->inline_failed)
588 update_callee_keys (heap, e->callee, updated_nodes);
589 }
590
591 /* Enqueue all recursive calls from NODE into the priority queue, depending
592    on how likely we want to recursively inline the call.  */
593
594 static void
595 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
596 fibheap_t heap)
597 {
598 static int priority;
599 struct cgraph_edge *e;
600 for (e = where->callees; e; e = e->next_callee)
601 if (e->callee == node)
602 {
603       /* When profile feedback is available, prioritize by the expected
604	  number of calls.  Without profile feedback we maintain a simple
605	  queue to order candidates by recursion depth.  */
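      /* Note: the division below scales the (typically 64-bit) profile
	 counts down to at most 2^24 buckets, each ceil (max_count / 2^24)
	 counts wide, so that the negated value stays a small heap key;
	 hotter edges get smaller keys and leave the min-heap first.  */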
606 fibheap_insert (heap,
607 !max_count ? priority++
608 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
609 e);
610 }
611 for (e = where->callees; e; e = e->next_callee)
612 if (!e->inline_failed)
613 lookup_recursive_calls (node, e->callee, heap);
614 }
615
616 /* Decide on recursive inlining: in case the function has recursive calls,
617    inline until the body size reaches the given limit.  */
618
619 static bool
620 cgraph_decide_recursive_inlining (struct cgraph_node *node)
621 {
622 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
623 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
624 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
625 fibheap_t heap;
626 struct cgraph_edge *e;
627 struct cgraph_node *master_clone, *next;
628 int depth = 0;
629 int n = 0;
630
631 if (DECL_DECLARED_INLINE_P (node->decl))
632 {
633 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
634 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
635 }
636
637 /* Make sure that function is small enough to be considered for inlining. */
638 if (!max_depth
639 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
640 return false;
641 heap = fibheap_new ();
642 lookup_recursive_calls (node, node, heap);
643 if (fibheap_empty (heap))
644 {
645 fibheap_delete (heap);
646 return false;
647 }
648
649 if (dump_file)
650 fprintf (dump_file,
651 " Performing recursive inlining on %s\n",
652 cgraph_node_name (node));
653
654 /* We need original clone to copy around. */
655 master_clone = cgraph_clone_node (node, node->count, 1, false);
656 master_clone->needed = true;
657 for (e = master_clone->callees; e; e = e->next_callee)
658 if (!e->inline_failed)
659 cgraph_clone_inlined_nodes (e, true, false);
660
661   /* Do the inlining and update the list of recursive calls during the process.  */
662 while (!fibheap_empty (heap)
663 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
664 <= limit))
665 {
666 struct cgraph_edge *curr = fibheap_extract_min (heap);
667 struct cgraph_node *cnode;
668
669 depth = 1;
670 for (cnode = curr->caller;
671 cnode->global.inlined_to; cnode = cnode->callers->caller)
672 if (node->decl == curr->callee->decl)
673 depth++;
674 if (depth > max_depth)
675 {
676 if (dump_file)
677 fprintf (dump_file,
678 " maxmal depth reached\n");
679 continue;
680 }
681
682 if (max_count)
683 {
684 if (!cgraph_maybe_hot_edge_p (curr))
685 {
686 if (dump_file)
687 fprintf (dump_file, " Not inlining cold call\n");
688 continue;
689 }
690 if (curr->count * 100 / node->count < probability)
691 {
692 if (dump_file)
693 fprintf (dump_file,
694 " Probability of edge is too small\n");
695 continue;
696 }
697 }
698
699 if (dump_file)
700 {
701 fprintf (dump_file,
702 " Inlining call of depth %i", depth);
703 if (node->count)
704 {
705 fprintf (dump_file, " called approx. %.2f times per call",
706 (double)curr->count / node->count);
707 }
708 fprintf (dump_file, "\n");
709 }
710 cgraph_redirect_edge_callee (curr, master_clone);
711 cgraph_mark_inline_edge (curr, false);
712 lookup_recursive_calls (node, curr->callee, heap);
713 n++;
714 }
715 if (!fibheap_empty (heap) && dump_file)
716 fprintf (dump_file, " Recursive inlining growth limit met.\n");
717
718 fibheap_delete (heap);
719 if (dump_file)
720 fprintf (dump_file,
721 "\n Inlined %i times, body grown from %i to %i insns\n", n,
722 master_clone->global.insns, node->global.insns);
723
724   /* Remove the master clone we used for inlining.  We rely on the fact
725      that clones inlined into the master clone get queued just before the
726      master clone so we don't need recursion.  */
727 for (node = cgraph_nodes; node != master_clone;
728 node = next)
729 {
730 next = node->next;
731 if (node->global.inlined_to == master_clone)
732 cgraph_remove_node (node);
733 }
734 cgraph_remove_node (master_clone);
735   /* FIXME: Recursive inlining actually reduces the number of calls to the
736      function.  At this place we should probably walk the function and
737      inline clones and compensate the counts accordingly.  This probably
738      doesn't matter much in practice.  */
739 return n > 0;
740 }
741
742 /* Set inline_failed for all callers of given function to REASON. */
743
744 static void
745 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
746 {
747 struct cgraph_edge *e;
748
749 if (dump_file)
750 fprintf (dump_file, "Inlining failed: %s\n", reason);
751 for (e = node->callers; e; e = e->next_caller)
752 if (e->inline_failed)
753 e->inline_failed = reason;
754 }
755
756 /* We use a greedy algorithm for inlining of small functions:
757    all inline candidates are put into a prioritized heap ordered by the
758    estimated growth of the overall number of instructions, and the
759    estimates are updated after each inlining is performed.  */
762
763 static void
764 cgraph_decide_inlining_of_small_functions (void)
765 {
766 struct cgraph_node *node;
767 struct cgraph_edge *edge;
768 const char *failed_reason;
769 fibheap_t heap = fibheap_new ();
770 bitmap updated_nodes = BITMAP_ALLOC (NULL);
771
772 if (dump_file)
773 fprintf (dump_file, "\nDeciding on smaller functions:\n");
774
775 /* Put all inline candidates into the heap. */
776
777 for (node = cgraph_nodes; node; node = node->next)
778 {
779 if (!node->local.inlinable || !node->callers
780 || node->local.disregard_inline_limits)
781 continue;
782 if (dump_file)
783 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
784
785 node->global.estimated_growth = INT_MIN;
786 if (!cgraph_default_inline_p (node, &failed_reason))
787 {
788 cgraph_set_inline_failed (node, failed_reason);
789 continue;
790 }
791
792 for (edge = node->callers; edge; edge = edge->next_caller)
793 if (edge->inline_failed)
794 {
795 gcc_assert (!edge->aux);
796 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
797 }
798 }
799 while (overall_insns <= max_insns && (edge = fibheap_extract_min (heap)))
800 {
801 int old_insns = overall_insns;
802 struct cgraph_node *where;
803 int growth =
804 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
805
806 growth -= edge->caller->global.insns;
807
808 if (dump_file)
809 {
810 fprintf (dump_file,
811 "\nConsidering %s with %i insns\n",
812 cgraph_node_name (edge->callee),
813 edge->callee->global.insns);
814 fprintf (dump_file,
815 " to be inlined into %s\n"
816 " Estimated growth after inlined into all callees is %+i insns.\n"
817 " Estimated badness is %i.\n",
818 cgraph_node_name (edge->caller),
819 cgraph_estimate_growth (edge->callee),
820 cgraph_edge_badness (edge));
821 if (edge->count)
822 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
823 }
824 gcc_assert (edge->aux);
825 edge->aux = NULL;
826 if (!edge->inline_failed)
827 continue;
828
829	  /* When we do not have profile info ready, we do not weight in any
830	     way the position of the call in the procedure itself.  This means
831	     that if a call of function A from function B seems profitable to
832	     inline, the recursive call of function A in the inline copy of A
833	     in B will look profitable too and we end up inlining until we
834	     reach the maximal function growth.  This is not a good idea, so
835	     prohibit the recursive inlining.
836
837	     ??? When the frequencies are taken into account we might not need this restriction.  */
838 if (!max_count)
839 {
840 where = edge->caller;
841 while (where->global.inlined_to)
842 {
843 if (where->decl == edge->callee->decl)
844 break;
845 where = where->callers->caller;
846 }
847 if (where->global.inlined_to)
848 {
849 edge->inline_failed
850 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
851 if (dump_file)
852 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
853 continue;
854 }
855 }
856
857 if (!cgraph_maybe_hot_edge_p (edge) && growth > 0)
858 {
859 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
860 &edge->inline_failed))
861 {
862 edge->inline_failed =
863 N_("call is unlikely");
864 if (dump_file)
865 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
866 }
867 continue;
868 }
869 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
870 {
871 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
872 &edge->inline_failed))
873 {
874 if (dump_file)
875 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
876 }
877 continue;
878 }
879 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
880 &edge->inline_failed))
881 {
882 where = edge->caller;
883 if (where->global.inlined_to)
884 where = where->global.inlined_to;
885 if (!cgraph_decide_recursive_inlining (where))
886 continue;
887 update_callee_keys (heap, where, updated_nodes);
888 }
889 else
890 {
891 struct cgraph_node *callee;
892 if (!cgraph_check_inline_limits (edge->caller, edge->callee,
893 &edge->inline_failed, true))
894 {
895 if (dump_file)
896 fprintf (dump_file, " Not inlining into %s:%s.\n",
897 cgraph_node_name (edge->caller), edge->inline_failed);
898 continue;
899 }
900 callee = edge->callee;
901 cgraph_mark_inline_edge (edge, true);
902 update_callee_keys (heap, callee, updated_nodes);
903 }
904 where = edge->caller;
905 if (where->global.inlined_to)
906 where = where->global.inlined_to;
907
908       /* Our profitability metric can depend on local properties
909	 such as the number of inlinable calls and the size of the function
910	 body.  After inlining, these properties might change for the function
911	 we inlined into (since its body size changed) and for the functions
912	 called by the function we inlined (since the number of its inlinable
913	 callers might change).  */
914 update_caller_keys (heap, where, updated_nodes);
915 bitmap_clear (updated_nodes);
916
917 if (dump_file)
918 {
919 fprintf (dump_file,
920 " Inlined into %s which now has %i insns,"
921 "net change of %+i insns.\n",
922 cgraph_node_name (edge->caller),
923 edge->caller->global.insns,
924 overall_insns - old_insns);
925 }
926 }
927 while ((edge = fibheap_extract_min (heap)) != NULL)
928 {
929 gcc_assert (edge->aux);
930 edge->aux = NULL;
931 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
932 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
933 &edge->inline_failed))
934 edge->inline_failed = N_("--param inline-unit-growth limit reached");
935 }
936 fibheap_delete (heap);
937 BITMAP_FREE (updated_nodes);
938 }
939
940 /* Decide on the inlining.  We do so in topological order to avoid the
941    expense of updating data structures.  */
942
943 static unsigned int
944 cgraph_decide_inlining (void)
945 {
946 struct cgraph_node *node;
947 int nnodes;
948 struct cgraph_node **order =
949 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
950 int old_insns = 0;
951 int i;
952
953 max_count = 0;
954 for (node = cgraph_nodes; node; node = node->next)
955 if (node->analyzed && (node->needed || node->reachable))
956 {
957 struct cgraph_edge *e;
958
959 initial_insns += node->local.self_insns;
960 gcc_assert (node->local.self_insns == node->global.insns);
961 for (e = node->callees; e; e = e->next_callee)
962 if (max_count < e->count)
963 max_count = e->count;
964 }
965 overall_insns = initial_insns;
966 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
967
968 max_insns = overall_insns;
969 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
970 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
971
972 max_insns = ((HOST_WIDEST_INT) max_insns
973 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
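  /* For example (hypothetical numbers): a unit with 50000 initial insns and
     --param inline-unit-growth=30 gets max_insns = 50000 * 130 / 100 = 65000;
     a unit smaller than --param large-unit-insns is first rounded up to that
     value before the percentage is applied.  */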
974
975 nnodes = cgraph_postorder (order);
976
977 if (dump_file)
978 fprintf (dump_file,
979 "\nDeciding on inlining. Starting with %i insns.\n",
980 initial_insns);
981
982 for (node = cgraph_nodes; node; node = node->next)
983 node->aux = 0;
984
985 if (dump_file)
986 fprintf (dump_file, "\nInlining always_inline functions:\n");
987
988 /* In the first pass mark all always_inline edges. Do this with a priority
989 so none of our later choices will make this impossible. */
990 for (i = nnodes - 1; i >= 0; i--)
991 {
992 struct cgraph_edge *e, *next;
993
994 node = order[i];
995
996 /* Handle nodes to be flattened, but don't update overall unit size. */
997 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
998 {
999 int old_overall_insns = overall_insns;
1000 if (dump_file)
1001 fprintf (dump_file,
1002 "Flattening %s\n", cgraph_node_name (node));
1003 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1004 overall_insns = old_overall_insns;
1005 }
1006
1007 if (!node->local.disregard_inline_limits)
1008 continue;
1009 if (dump_file)
1010 fprintf (dump_file,
1011 "\nConsidering %s %i insns (always inline)\n",
1012 cgraph_node_name (node), node->global.insns);
1013 old_insns = overall_insns;
1014 for (e = node->callers; e; e = next)
1015 {
1016 next = e->next_caller;
1017 if (!e->inline_failed)
1018 continue;
1019 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1020 &e->inline_failed))
1021 continue;
1022 cgraph_mark_inline_edge (e, true);
1023 if (dump_file)
1024 fprintf (dump_file,
1025 " Inlined into %s which now has %i insns.\n",
1026 cgraph_node_name (e->caller),
1027 e->caller->global.insns);
1028 }
1029       /* Inlining a self-recursive function might introduce new calls to
1030	  itself that we didn't see in the loop above.  Fill in the proper
1031	  reason why inlining failed.  */
1032 for (e = node->callers; e; e = e->next_caller)
1033 if (e->inline_failed)
1034 e->inline_failed = N_("recursive inlining");
1035 if (dump_file)
1036 fprintf (dump_file,
1037 " Inlined for a net change of %+i insns.\n",
1038 overall_insns - old_insns);
1039 }
1040
1041 if (!flag_really_no_inline)
1042 cgraph_decide_inlining_of_small_functions ();
1043
1044 if (!flag_really_no_inline
1045 && flag_inline_functions_called_once)
1046 {
1047 if (dump_file)
1048 fprintf (dump_file, "\nDeciding on functions called once:\n");
1049
1050 /* And finally decide what functions are called once. */
1051
1052 for (i = nnodes - 1; i >= 0; i--)
1053 {
1054 node = order[i];
1055
1056 if (node->callers && !node->callers->next_caller && !node->needed
1057 && node->local.inlinable && node->callers->inline_failed
1058 && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl))
1059 {
1060 if (dump_file)
1061 {
1062 fprintf (dump_file,
1063 "\nConsidering %s %i insns.\n",
1064 cgraph_node_name (node), node->global.insns);
1065 fprintf (dump_file,
1066 " Called once from %s %i insns.\n",
1067 cgraph_node_name (node->callers->caller),
1068 node->callers->caller->global.insns);
1069 }
1070
1071 old_insns = overall_insns;
1072
1073 if (cgraph_check_inline_limits (node->callers->caller, node,
1074 NULL, false))
1075 {
1076 cgraph_mark_inline (node->callers);
1077 if (dump_file)
1078 fprintf (dump_file,
1079 " Inlined into %s which now has %i insns"
1080 " for a net change of %+i insns.\n",
1081 cgraph_node_name (node->callers->caller),
1082 node->callers->caller->global.insns,
1083 overall_insns - old_insns);
1084 }
1085 else
1086 {
1087 if (dump_file)
1088 fprintf (dump_file,
1089 " Inline limit reached, not inlined.\n");
1090 }
1091 }
1092 }
1093 }
1094
1095 if (dump_file)
1096 fprintf (dump_file,
1097 "\nInlined %i calls, eliminated %i functions, "
1098 "%i insns turned to %i insns.\n\n",
1099 ncalls_inlined, nfunctions_inlined, initial_insns,
1100 overall_insns);
1101 free (order);
1102 return 0;
1103 }
1104
1105 /* Try to inline edge E from the incremental inliner.  MODE specifies the
1106    mode of the inliner.
1107
1108    We detect cycles by storing the mode of the inliner into the cgraph_node
1109    the last time we visited it in the recursion.  In general, when the mode
1110    is set, we have recursive inlining, but as a special case we want to try
1111    harder to inline ALWAYS_INLINE functions: consider the callgraph
1112    a->b->c->b, with a being flattened and b being always_inline.  Flattening
1113    'a' will collapse a->b->c before hitting the cycle.  To accommodate
1114    always_inline, we however need to inline a->b->c->b.
1115
1116    So after hitting a cycle the first time, we switch into ALWAYS_INLINE
1117    mode and stop inlining only after hitting a cycle again while already in ALWAYS_INLINE mode.  */
1118 static bool
1119 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1120 {
1121 struct cgraph_node *callee = e->callee;
1122 enum inlining_mode callee_mode = (size_t) callee->aux;
1123 bool always_inline = e->callee->local.disregard_inline_limits;
1124
1125   /* Have we hit a cycle?  */
1126 if (callee_mode)
1127 {
1128       /* This is the first time we have hit the cycle, we are not yet in
1129	  ALWAYS_INLINE-only mode, and the callee is always_inline; switch to ALWAYS_INLINE mode.  */
1130 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1131 mode = INLINE_ALWAYS_INLINE;
1132       /* Otherwise it is time to give up.  */
1133 else
1134 {
1135 if (dump_file)
1136 {
1137 indent_to (dump_file, depth);
1138 fprintf (dump_file,
1139 "Not inlining %s into %s to avoid cycle.\n",
1140 cgraph_node_name (callee),
1141 cgraph_node_name (e->caller));
1142 }
1143 e->inline_failed = (e->callee->local.disregard_inline_limits
1144 ? N_("recursive inlining") : "");
1145 return false;
1146 }
1147 }
1148
1149 callee->aux = (void *)(size_t) mode;
1150 if (dump_file)
1151 {
1152 indent_to (dump_file, depth);
1153 fprintf (dump_file, " Inlining %s into %s.\n",
1154 cgraph_node_name (e->callee),
1155 cgraph_node_name (e->caller));
1156 }
1157 cgraph_mark_inline (e);
1158
1159 /* In order to fully inline always_inline functions at -O0, we need to
1160 recurse here, since the inlined functions might not be processed by
1161 incremental inlining at all yet.
1162
1163 Also flattening needs to be done recursively. */
1164
1165 if (!flag_unit_at_a_time || mode == INLINE_ALL || always_inline)
1166 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1167 callee->aux = (void *)(size_t) callee_mode;
1168 return true;
1169 }
1170
1171 /* Decide on the inlining. We do so in the topological order to avoid
1172 expenses on updating data structures.
1173 DEPTH is depth of recursion, used only for debug output. */
1174
1175 static bool
1176 cgraph_decide_inlining_incrementally (struct cgraph_node *node, enum inlining_mode mode,
1177 int depth)
1178 {
1179 struct cgraph_edge *e;
1180 bool inlined = false;
1181 const char *failed_reason;
1182 enum inlining_mode old_mode;
1183
1184 #ifdef ENABLE_CHECKING
1185 verify_cgraph_node (node);
1186 #endif
1187
1188 old_mode = (size_t)node->aux;
1189
1190 if (mode != INLINE_ALWAYS_INLINE
1191 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1192 {
1193 if (dump_file)
1194 fprintf (dump_file, " Flattening %s\n", cgraph_node_name (node));
1195 mode = INLINE_ALL;
1196 }
1197
1198 node->aux = (void *)(size_t) mode;
1199
1200 /* First of all look for always inline functions. */
1201 for (e = node->callees; e; e = e->next_callee)
1202 {
1203 if (dump_file && e->callee->local.inlinable
1204 && (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1205 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl))))
1206 {
1207 fprintf (dump_file, " Ignoring %s: SSA form not computed yet.\n",
1208 cgraph_node_name (e->callee));
1209 }
1210 if ((e->callee->local.disregard_inline_limits
1211 || (mode == INLINE_ALL && e->callee->local.inlinable))
1212 && e->inline_failed
1213 && (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1214 == gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1215 && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
1216 /* ??? It is possible that renaming variable removed the function body
1217 in duplicate_decls. See gcc.c-torture/compile/20011119-2.c */
1218 && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
1219 {
1220 inlined |= try_inline (e, mode, depth);
1221 }
1222 }
1223
1224 /* Now do the automatic inlining. */
1225 if (!flag_really_no_inline && mode != INLINE_ALL
1226 && mode != INLINE_ALWAYS_INLINE)
1227 for (e = node->callees; e; e = e->next_callee)
1228 if (e->callee->local.inlinable
1229 && e->inline_failed
1230 && !e->callee->local.disregard_inline_limits
1231 && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed)
1232 && (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1233 == gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1234 && (mode != INLINE_SIZE
1235 || (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1236 <= e->caller->global.insns))
1237 && cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1238 false)
1239 && (DECL_SAVED_TREE (e->callee->decl) || e->callee->inline_decl))
1240 {
1241 if (cgraph_default_inline_p (e->callee, &failed_reason))
1242 inlined |= try_inline (e, mode, depth);
1243 else if (!flag_unit_at_a_time)
1244 e->inline_failed = failed_reason;
1245 }
1246 node->aux = (void *)(size_t) old_mode;
1247 return inlined;
1248 }
1249
1250 /* When inlining shall be performed. */
1251 static bool
1252 cgraph_gate_inlining (void)
1253 {
1254 return flag_inline_trees;
1255 }
1256
1257 struct tree_opt_pass pass_ipa_inline =
1258 {
1259 "inline", /* name */
1260 cgraph_gate_inlining, /* gate */
1261 cgraph_decide_inlining, /* execute */
1262 NULL, /* sub */
1263 NULL, /* next */
1264 0, /* static_pass_number */
1265 TV_INLINE_HEURISTICS, /* tv_id */
1266 0, /* properties_required */
1267 PROP_cfg, /* properties_provided */
1268 0, /* properties_destroyed */
1269   TODO_remove_functions,		/* todo_flags_start */
1270 TODO_dump_cgraph | TODO_dump_func
1271 | TODO_remove_functions, /* todo_flags_finish */
1272 0 /* letter */
1273 };
1274
1275 /* Because inlining might remove no-longer reachable nodes, we need to
1276    keep the array visible to the garbage collector to avoid reading
1277    collected-out nodes.  */
1278 static int nnodes;
1279 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1280
1281 /* Do inlining of small functions. Doing so early helps profiling and other
1282 passes to be somewhat more effective and avoids some code duplication in
1283 later real inlining pass for testcases with very many function calls. */
1284 static unsigned int
1285 cgraph_early_inlining (void)
1286 {
1287 struct cgraph_node *node = cgraph_node (current_function_decl);
1288 unsigned int todo = 0;
1289
1290 if (sorrycount || errorcount)
1291 return 0;
1292 if (cgraph_decide_inlining_incrementally (node,
1293 flag_unit_at_a_time
1294 ? INLINE_SIZE : INLINE_SPEED, 0))
1295 {
1296 timevar_push (TV_INTEGRATION);
1297 todo = optimize_inline_calls (current_function_decl);
1298 timevar_pop (TV_INTEGRATION);
1299 }
1300 return todo;
1301 }
1302
1303 /* When inlining shall be performed. */
1304 static bool
1305 cgraph_gate_early_inlining (void)
1306 {
1307 return flag_inline_trees && flag_early_inlining;
1308 }
1309
1310 struct tree_opt_pass pass_early_inline =
1311 {
1312 "einline", /* name */
1313 cgraph_gate_early_inlining, /* gate */
1314 cgraph_early_inlining, /* execute */
1315 NULL, /* sub */
1316 NULL, /* next */
1317 0, /* static_pass_number */
1318 TV_INLINE_HEURISTICS, /* tv_id */
1319 0, /* properties_required */
1320 PROP_cfg, /* properties_provided */
1321 0, /* properties_destroyed */
1322 0, /* todo_flags_start */
1323 TODO_dump_func, /* todo_flags_finish */
1324 0 /* letter */
1325 };
1326
1327 /* When inlining shall be performed. */
1328 static bool
1329 cgraph_gate_ipa_early_inlining (void)
1330 {
1331 return (flag_inline_trees && flag_early_inlining
1332 && (flag_branch_probabilities || flag_test_coverage
1333 || profile_arc_flag));
1334 }
1335
1336 /* IPA pass wrapper for the early inlining pass.  We need to run early
1337    inlining before tree profiling, so we have a stand-alone IPA pass for doing so.  */
1338 struct tree_opt_pass pass_ipa_early_inline =
1339 {
1340 "einline_ipa", /* name */
1341 cgraph_gate_ipa_early_inlining, /* gate */
1342 NULL, /* execute */
1343 NULL, /* sub */
1344 NULL, /* next */
1345 0, /* static_pass_number */
1346 TV_INLINE_HEURISTICS, /* tv_id */
1347 0, /* properties_required */
1348 PROP_cfg, /* properties_provided */
1349 0, /* properties_destroyed */
1350 0, /* todo_flags_start */
1351 TODO_dump_cgraph, /* todo_flags_finish */
1352 0 /* letter */
1353 };
1354
1355 /* Compute parameters of functions used by inliner. */
1356 static unsigned int
1357 compute_inline_parameters (void)
1358 {
1359 struct cgraph_node *node = cgraph_node (current_function_decl);
1360
1361 gcc_assert (!node->global.inlined_to);
1362 node->local.estimated_self_stack_size = estimated_stack_frame_size ();
1363 node->global.estimated_stack_size = node->local.estimated_self_stack_size;
1364 node->global.stack_frame_offset = 0;
1365 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1366 node->local.self_insns = estimate_num_insns (current_function_decl);
1367 if (node->local.inlinable)
1368 node->local.disregard_inline_limits
1369 = lang_hooks.tree_inlining.disregard_inline_limits (current_function_decl);
1370 if (flag_really_no_inline && !node->local.disregard_inline_limits)
1371 node->local.inlinable = 0;
1372 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1373 node->global.insns = node->local.self_insns;
1374 return 0;
1375 }
1376
1377 /* When inlining shall be performed. */
1378 static bool
1379 gate_inline_passes (void)
1380 {
1381 return flag_inline_trees;
1382 }
1383
1384 struct tree_opt_pass pass_inline_parameters =
1385 {
1386 NULL, /* name */
1387 gate_inline_passes, /* gate */
1388 compute_inline_parameters, /* execute */
1389 NULL, /* sub */
1390 NULL, /* next */
1391 0, /* static_pass_number */
1392 TV_INLINE_HEURISTICS, /* tv_id */
1393 0, /* properties_required */
1394 PROP_cfg, /* properties_provided */
1395 0, /* properties_destroyed */
1396 0, /* todo_flags_start */
1397 0, /* todo_flags_finish */
1398 0 /* letter */
1399 };
1400
1401 /* Apply inline plan to the function. */
1402 static unsigned int
1403 apply_inline (void)
1404 {
1405 unsigned int todo = 0;
1406 struct cgraph_edge *e;
1407 struct cgraph_node *node = cgraph_node (current_function_decl);
1408
1409 /* Even when not optimizing, ensure that always_inline functions get inlined.
1410 */
1411 if (!optimize)
1412 cgraph_decide_inlining_incrementally (node, INLINE_SPEED, 0);
1413
1414 /* We might need the body of this function so that we can expand
1415 it inline somewhere else. */
1416 if (cgraph_preserve_function_body_p (current_function_decl))
1417 save_inline_function_body (node);
1418
1419 for (e = node->callees; e; e = e->next_callee)
1420 if (!e->inline_failed || warn_inline)
1421 break;
1422 if (e)
1423 {
1424 timevar_push (TV_INTEGRATION);
1425 todo = optimize_inline_calls (current_function_decl);
1426 timevar_pop (TV_INTEGRATION);
1427 }
1428 /* In non-unit-at-a-time we must mark all referenced functions as needed. */
1429 if (!flag_unit_at_a_time)
1430 {
1431 struct cgraph_edge *e;
1432 for (e = node->callees; e; e = e->next_callee)
1433 if (e->callee->analyzed)
1434 cgraph_mark_needed_node (e->callee);
1435 }
1436 return todo | execute_fixup_cfg ();
1437 }
1438
1439 struct tree_opt_pass pass_apply_inline =
1440 {
1441 "apply_inline", /* name */
1442 NULL, /* gate */
1443 apply_inline, /* execute */
1444 NULL, /* sub */
1445 NULL, /* next */
1446 0, /* static_pass_number */
1447 TV_INLINE_HEURISTICS, /* tv_id */
1448 0, /* properties_required */
1449 PROP_cfg, /* properties_provided */
1450 0, /* properties_destroyed */
1451 0, /* todo_flags_start */
1452 TODO_dump_func | TODO_verify_flow
1453 | TODO_verify_stmts, /* todo_flags_finish */
1454 0 /* letter */
1455 };
1456
1457 #include "gt-ipa-inline.h"