1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store the
   decisions inside the callgraph as a so-called inline plan.  Refer to the
   cgraph.c documentation about the particular representation of inline
   plans in the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inlined and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from
     the rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the parameters on code size growth fit.

     To mark a given call inlined, use cgraph_mark_inline; the verification
     is performed by cgraph_default_inline_p and cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: the estimated function body size, whether the function is
     inlinable at all, and the stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis, so when it
     is allowed to do inlining that expands code size, it might result in
     unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, early inlining is also necessary to reduce the
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures the
     first inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit, and inlining of functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph); unlike early inlining, it does
     not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.
 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141
/* Mode the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
152 enum inlining_mode {
153 INLINE_NONE = 0,
154 INLINE_ALWAYS_INLINE,
155 INLINE_SIZE,
156 INLINE_ALL
157 };
158 static bool
159 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
160 int);
161
162
163 /* Statistics we collect about inlining algorithm. */
164 static int ncalls_inlined;
165 static int nfunctions_inlined;
166 static int overall_insns;
167 static gcov_type max_count;
168
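/* Return the inline summary that the inliner keeps for NODE in its local
   function information.  */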
169 static inline struct inline_summary *
170 inline_summary (struct cgraph_node *node)
171 {
172 return &node->local.inline_summary;
173 }
174
175 /* Estimate size of the function after inlining WHAT into TO. */
176
177 static int
178 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
179 struct cgraph_node *what)
180 {
181 int size;
182 tree fndecl = what->decl, arg;
183 int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
184
185 for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
186 call_insns += estimate_move_cost (TREE_TYPE (arg));
187 size = (what->global.insns - call_insns) * times + to->global.insns;
188 gcc_assert (size >= 0);
189 return size;
190 }
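
/* Illustrative example (hypothetical numbers): with an inline call cost of
   16 insns, a callee WHAT of 100 insns taking two arguments whose estimated
   move cost is 1 insn each, and a caller TO of 50 insns, a single inlined
   call is estimated as (100 - (16 + 1 + 1)) * 1 + 50 = 132 insns.  */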
191
/* E is expected to be an edge being inlined.  Clone the destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using the node originally representing the out-of-line
   function call.  UPDATE_ORIGINAL specifies whether the profile of the
   original function should be updated.  */
197 void
198 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
199 bool update_original)
200 {
201 HOST_WIDE_INT peak;
202
203 if (duplicate)
204 {
205 /* We may eliminate the need for out-of-line copy to be output.
206 In that case just go ahead and re-use it. */
207 if (!e->callee->callers->next_caller
208 && !e->callee->needed
209 && !cgraph_new_nodes)
210 {
211 gcc_assert (!e->callee->global.inlined_to);
212 if (gimple_body (e->callee->decl))
213 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
214 duplicate = false;
215 }
216 else
217 {
218 struct cgraph_node *n;
219 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
220 update_original);
221 cgraph_redirect_edge_callee (e, n);
222 }
223 }
224
225 if (e->caller->global.inlined_to)
226 e->callee->global.inlined_to = e->caller->global.inlined_to;
227 else
228 e->callee->global.inlined_to = e->caller;
229 e->callee->global.stack_frame_offset
230 = e->caller->global.stack_frame_offset
231 + inline_summary (e->caller)->estimated_self_stack_size;
232 peak = e->callee->global.stack_frame_offset
233 + inline_summary (e->callee)->estimated_self_stack_size;
234 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
235 e->callee->global.inlined_to->global.estimated_stack_size = peak;
236
237 /* Recursively clone all bodies. */
238 for (e = e->callee->callees; e; e = e->next_callee)
239 if (!e->inline_failed)
240 cgraph_clone_inlined_nodes (e, duplicate, update_original);
241 }
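
/* Illustrative example (hypothetical sizes): if the caller's stack frame
   offset is 0 and its estimated self stack size is 48 bytes, the inlined
   callee's frame is placed at offset 48; with a callee self stack size of
   32 bytes the peak becomes 80 bytes, and the root of the inline tree
   raises its estimated stack size to 80 if that exceeds its previous
   estimate.  */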
242
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  */
246
247 void
248 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
249 {
250 int old_insns = 0, new_insns = 0;
251 struct cgraph_node *to = NULL, *what;
252
253 if (e->callee->inline_decl)
254 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
255
256 gcc_assert (e->inline_failed);
257 e->inline_failed = NULL;
258
259 if (!e->callee->global.inlined)
260 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
261 e->callee->global.inlined = true;
262
263 cgraph_clone_inlined_nodes (e, true, update_original);
264
265 what = e->callee;
266
267 /* Now update size of caller and all functions caller is inlined into. */
268 for (;e && !e->inline_failed; e = e->caller->callers)
269 {
270 old_insns = e->caller->global.insns;
271 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
272 what);
273 gcc_assert (new_insns >= 0);
274 to = e->caller;
275 to->global.insns = new_insns;
276 }
277 gcc_assert (what->global.inlined_to == to);
278 if (new_insns > old_insns)
279 overall_insns += new_insns - old_insns;
280 ncalls_inlined++;
281 }
282
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */
286
287 static struct cgraph_edge *
288 cgraph_mark_inline (struct cgraph_edge *edge)
289 {
290 struct cgraph_node *to = edge->caller;
291 struct cgraph_node *what = edge->callee;
292 struct cgraph_edge *e, *next;
293
294 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
295 /* Look for all calls, mark them inline and clone recursively
296 all inlined functions. */
297 for (e = what->callers; e; e = next)
298 {
299 next = e->next_caller;
300 if (e->caller == to && e->inline_failed)
301 {
302 cgraph_mark_inline_edge (e, true);
303 if (e == edge)
304 edge = next;
305 }
306 }
307
308 return edge;
309 }
310
/* Estimate the growth caused by inlining NODE into all of its callers.  */
312
313 static int
314 cgraph_estimate_growth (struct cgraph_node *node)
315 {
316 int growth = 0;
317 struct cgraph_edge *e;
318 if (node->global.estimated_growth != INT_MIN)
319 return node->global.estimated_growth;
320
321 for (e = node->callers; e; e = e->next_caller)
322 if (e->inline_failed)
323 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
324 - e->caller->global.insns);
325
  /* ??? Wrong for self-recursive functions or cases where we decide not to
     inline for different reasons, but it is not a big deal as in that case
     we will keep the body around, but we will also avoid some inlining.  */
329 if (!node->needed && !DECL_EXTERNAL (node->decl))
330 growth -= node->global.insns;
331
332 node->global.estimated_growth = growth;
333 return growth;
334 }
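
/* Illustrative example (hypothetical numbers): a 20-insn NODE with two
   remaining call sites, each of which would grow its caller by 20 insns
   minus the per-call overhead, contributes roughly twice that amount to the
   unit growth; if the offline copy can be eliminated (NODE is not needed
   and not DECL_EXTERNAL), NODE's own 20 insns are subtracted from the
   estimate.  */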
335
/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large a growth of the function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined; otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.
   */
342
343 static bool
344 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
345 const char **reason, bool one_only)
346 {
347 int times = 0;
348 struct cgraph_edge *e;
349 int newsize;
350 int limit;
351 HOST_WIDE_INT stack_size_limit, inlined_stack;
352
353 if (one_only)
354 times = 1;
355 else
356 for (e = to->callees; e; e = e->next_callee)
357 if (e->callee == what)
358 times++;
359
360 if (to->global.inlined_to)
361 to = to->global.inlined_to;
362
  /* When inlining a large function body called once into a small function,
     take the inlined function as the base for limiting the growth.  */
365 if (inline_summary (to)->self_insns > inline_summary(what)->self_insns)
366 limit = inline_summary (to)->self_insns;
367 else
368 limit = inline_summary (what)->self_insns;
369
370 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
371
372 /* Check the size after inlining against the function limits. But allow
373 the function to shrink if it went over the limits by forced inlining. */
374 newsize = cgraph_estimate_size_after_inlining (times, to, what);
375 if (newsize >= to->global.insns
376 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
377 && newsize > limit)
378 {
379 if (reason)
380 *reason = N_("--param large-function-growth limit reached");
381 return false;
382 }
383
384 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
385
386 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
387
388 inlined_stack = (to->global.stack_frame_offset
389 + inline_summary (to)->estimated_self_stack_size
390 + what->global.estimated_stack_size);
391 if (inlined_stack > stack_size_limit
392 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
393 {
394 if (reason)
395 *reason = N_("--param large-stack-frame-growth limit reached");
396 return false;
397 }
398 return true;
399 }
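
/* Illustrative example (hypothetical parameters): with
   --param large-function-growth=100 and a 300-insn base (the larger of TO
   and WHAT), the limit is 600 insns; the inlining is rejected only when the
   estimated result does not shrink TO, exceeds --param large-function-insns
   and exceeds that 600-insn limit.  An analogous check bounds stack frame
   growth.  */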
400
401 /* Return true when function N is small enough to be inlined. */
402
403 bool
404 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
405 {
406 tree decl = n->decl;
407
408 if (n->inline_decl)
409 decl = n->inline_decl;
410 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
411 {
412 if (reason)
413 *reason = N_("function not inline candidate");
414 return false;
415 }
416
417 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
418 {
419 if (reason)
420 *reason = N_("function body not available");
421 return false;
422 }
423
424 if (DECL_DECLARED_INLINE_P (decl))
425 {
426 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
427 {
428 if (reason)
429 *reason = N_("--param max-inline-insns-single limit reached");
430 return false;
431 }
432 }
433 else
434 {
435 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
436 {
437 if (reason)
438 *reason = N_("--param max-inline-insns-auto limit reached");
439 return false;
440 }
441 }
442
443 return true;
444 }
445
/* Return true when inlining WHAT into TO would create recursive inlining.
   We call recursive inlining all cases where the same function appears more
   than once in a single recursion nest path in the inline graph.  */
449
450 static bool
451 cgraph_recursive_inlining_p (struct cgraph_node *to,
452 struct cgraph_node *what,
453 const char **reason)
454 {
455 bool recursive;
456 if (to->global.inlined_to)
457 recursive = what->decl == to->global.inlined_to->decl;
458 else
459 recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn about it.  */
462 if (recursive && reason)
463 *reason = (what->local.disregard_inline_limits
464 ? N_("recursive inlining") : "");
465 return recursive;
466 }
467
468 /* Return true if the call can be hot. */
469 static bool
470 cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
471 {
472 if (profile_info && flag_branch_probabilities
473 && (edge->count
474 <= profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION)))
475 return false;
476 if (lookup_attribute ("cold", DECL_ATTRIBUTES (edge->callee->decl))
477 || lookup_attribute ("cold", DECL_ATTRIBUTES (edge->caller->decl)))
478 return false;
479 if (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl)))
480 return true;
481 if (flag_guess_branch_prob
482 && edge->frequency < (CGRAPH_FREQ_MAX
483 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
484 return false;
485 return true;
486 }
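
/* Illustrative example (hypothetical parameters): with guessed branch
   probabilities and --param hot-bb-frequency-fraction=1000, an edge whose
   frequency is below CGRAPH_FREQ_MAX / 1000 is treated as cold; the
   "cold"/"hot" attributes and, with profile feedback, the execution-count
   threshold are consulted before this frequency estimate.  */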
487
/* A cost model driving the inlining heuristics so that edges with the
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so
   the metrics may accurately depend on values such as the number of
   inlinable callers of the function or the function body size.  */
493
494 static int
495 cgraph_edge_badness (struct cgraph_edge *edge)
496 {
497 int badness;
498 int growth =
499 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
500
501 growth -= edge->caller->global.insns;
502
  /* Always prefer inlining that saves code size.  */
504 if (growth <= 0)
505 badness = INT_MIN - growth;
506
  /* When profiling is available, base priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
509 else if (max_count)
510 badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
511
  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of inlined
     calls.  This is not too accurate since, while the call might be frequent
     within the function, the function itself may be infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so functions called
     fewer times but of the same size get priority).  */
521 else if (flag_guess_branch_prob)
522 {
523 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
524 int growth =
525 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
526 growth -= edge->caller->global.insns;
527 badness = growth * 256;
528
529 /* Decrease badness if call is nested. */
530 /* Compress the range so we don't overflow. */
531 if (div > 256)
532 div = 256 + ceil_log2 (div) - 8;
533 if (div < 1)
534 div = 1;
535 if (badness > 0)
536 badness /= div;
537 badness += cgraph_estimate_growth (edge->callee);
538 }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
543 else
544 {
545 int nest = MIN (edge->loop_nest, 8);
546 badness = cgraph_estimate_growth (edge->callee) * 256;
547
548 /* Decrease badness if call is nested. */
549 if (badness > 0)
550 badness >>= nest;
551 else
552 {
553 badness <<= nest;
554 }
555 }
  /* Make recursive inlining always happen after other inlining is done.  */
557 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
558 return badness + 1;
559 else
560 return badness;
561 }
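
/* Illustrative example (hypothetical numbers): without profile feedback but
   with guessed probabilities, an edge whose inlining grows the caller by 10
   insns and that runs at half of CGRAPH_FREQ_BASE gets a badness of roughly
   10 * 256 / 50 plus the callee's overall estimated growth; edges with
   smaller badness are extracted from the heap, and thus inlined, first.  */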
562
/* Recompute the heap nodes for each caller edge of NODE.  */
564
565 static void
566 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
567 bitmap updated_nodes)
568 {
569 struct cgraph_edge *edge;
570 const char *failed_reason;
571
572 if (!node->local.inlinable || node->local.disregard_inline_limits
573 || node->global.inlined_to)
574 return;
575 if (bitmap_bit_p (updated_nodes, node->uid))
576 return;
577 bitmap_set_bit (updated_nodes, node->uid);
578 node->global.estimated_growth = INT_MIN;
579
580 if (!node->local.inlinable)
581 return;
582 /* Prune out edges we won't inline into anymore. */
583 if (!cgraph_default_inline_p (node, &failed_reason))
584 {
585 for (edge = node->callers; edge; edge = edge->next_caller)
586 if (edge->aux)
587 {
588 fibheap_delete_node (heap, (fibnode_t) edge->aux);
589 edge->aux = NULL;
590 if (edge->inline_failed)
591 edge->inline_failed = failed_reason;
592 }
593 return;
594 }
595
596 for (edge = node->callers; edge; edge = edge->next_caller)
597 if (edge->inline_failed)
598 {
599 int badness = cgraph_edge_badness (edge);
600 if (edge->aux)
601 {
602 fibnode_t n = (fibnode_t) edge->aux;
603 gcc_assert (n->data == edge);
604 if (n->key == badness)
605 continue;
606
          /* fibheap_replace_key can only decrease keys; when the badness
             has increased, remove the node and re-insert it with the new
             key.  */
608 if (fibheap_replace_key (heap, n, badness))
609 continue;
610 fibheap_delete_node (heap, (fibnode_t) edge->aux);
611 }
612 edge->aux = fibheap_insert (heap, badness, edge);
613 }
614 }
615
/* Recompute the heap nodes for the caller edges of each of NODE's callees.  */
617
618 static void
619 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
620 bitmap updated_nodes)
621 {
622 struct cgraph_edge *e;
623 node->global.estimated_growth = INT_MIN;
624
625 for (e = node->callees; e; e = e->next_callee)
626 if (e->inline_failed)
627 update_caller_keys (heap, e->callee, updated_nodes);
628 else if (!e->inline_failed)
629 update_callee_keys (heap, e->callee, updated_nodes);
630 }
631
/* Enqueue all recursive calls from NODE into the priority queue HEAP,
   ordered by how desirable it is to recursively inline the call.  */
634
635 static void
636 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
637 fibheap_t heap)
638 {
639 static int priority;
640 struct cgraph_edge *e;
641 for (e = where->callees; e; e = e->next_callee)
642 if (e->callee == node)
643 {
      /* When profile feedback is available, prioritize by the expected
         number of calls.  Without profile feedback we maintain a simple
         queue to order candidates by recursion depth.  */
647 fibheap_insert (heap,
648 !max_count ? priority++
649 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
650 e);
651 }
652 for (e = where->callees; e; e = e->next_callee)
653 if (!e->inline_failed)
654 lookup_recursive_calls (node, e->callee, heap);
655 }
656
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until the body size reaches the given limit.  If any new
   indirect edges are discovered in the process, add them to NEW_EDGES,
   unless it is NULL.  */
660
661 static bool
662 cgraph_decide_recursive_inlining (struct cgraph_node *node,
663 VEC (cgraph_edge_p, heap) *new_edges)
664 {
665 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
666 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
667 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
668 fibheap_t heap;
669 struct cgraph_edge *e;
670 struct cgraph_node *master_clone, *next;
671 int depth = 0;
672 int n = 0;
673
674 if (optimize_size
675 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
676 return false;
677
678 if (DECL_DECLARED_INLINE_P (node->decl))
679 {
680 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
681 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
682 }
683
684 /* Make sure that function is small enough to be considered for inlining. */
685 if (!max_depth
686 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
687 return false;
688 heap = fibheap_new ();
689 lookup_recursive_calls (node, node, heap);
690 if (fibheap_empty (heap))
691 {
692 fibheap_delete (heap);
693 return false;
694 }
695
696 if (dump_file)
697 fprintf (dump_file,
698 " Performing recursive inlining on %s\n",
699 cgraph_node_name (node));
700
701 /* We need original clone to copy around. */
702 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
703 master_clone->needed = true;
704 for (e = master_clone->callees; e; e = e->next_callee)
705 if (!e->inline_failed)
706 cgraph_clone_inlined_nodes (e, true, false);
707
  /* Do the inlining and update the list of recursive calls during the
     process.  */
709 while (!fibheap_empty (heap)
710 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
711 <= limit))
712 {
713 struct cgraph_edge *curr
714 = (struct cgraph_edge *) fibheap_extract_min (heap);
715 struct cgraph_node *cnode;
716
717 depth = 1;
718 for (cnode = curr->caller;
719 cnode->global.inlined_to; cnode = cnode->callers->caller)
720 if (node->decl == curr->callee->decl)
721 depth++;
722 if (depth > max_depth)
723 {
724 if (dump_file)
725 fprintf (dump_file,
726 " maximal depth reached\n");
727 continue;
728 }
729
730 if (max_count)
731 {
732 if (!cgraph_maybe_hot_edge_p (curr))
733 {
734 if (dump_file)
735 fprintf (dump_file, " Not inlining cold call\n");
736 continue;
737 }
738 if (curr->count * 100 / node->count < probability)
739 {
740 if (dump_file)
741 fprintf (dump_file,
742 " Probability of edge is too small\n");
743 continue;
744 }
745 }
746
747 if (dump_file)
748 {
749 fprintf (dump_file,
750 " Inlining call of depth %i", depth);
751 if (node->count)
752 {
753 fprintf (dump_file, " called approx. %.2f times per call",
754 (double)curr->count / node->count);
755 }
756 fprintf (dump_file, "\n");
757 }
758 cgraph_redirect_edge_callee (curr, master_clone);
759 cgraph_mark_inline_edge (curr, false);
760 if (flag_indirect_inlining)
761 ipa_propagate_indirect_call_infos (curr, new_edges);
762 lookup_recursive_calls (node, curr->callee, heap);
763 n++;
764 }
765 if (!fibheap_empty (heap) && dump_file)
766 fprintf (dump_file, " Recursive inlining growth limit met.\n");
767
768 fibheap_delete (heap);
769 if (dump_file)
770 fprintf (dump_file,
771 "\n Inlined %i times, body grown from %i to %i insns\n", n,
772 master_clone->global.insns, node->global.insns);
773
  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
777 for (node = cgraph_nodes; node != master_clone;
778 node = next)
779 {
780 next = node->next;
781 if (node->global.inlined_to == master_clone)
782 cgraph_remove_node (node);
783 }
784 cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls to the
     function.  At this point we should probably walk the function and its
     inlined clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
789 return n > 0;
790 }
791
792 /* Set inline_failed for all callers of given function to REASON. */
793
794 static void
795 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
796 {
797 struct cgraph_edge *e;
798
799 if (dump_file)
800 fprintf (dump_file, "Inlining failed: %s\n", reason);
801 for (e = node->callers; e; e = e->next_caller)
802 if (e->inline_failed)
803 e->inline_failed = reason;
804 }
805
/* Given the whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */
808 static int
809 compute_max_insns (int insns)
810 {
811 int max_insns = insns;
812 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
813 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
814
815 return ((HOST_WIDEST_INT) max_insns
816 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
817 }
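
/* Illustrative example (hypothetical parameters): with an initial unit size
   of 10000 insns and --param inline-unit-growth=30, inlining of small
   functions may continue until the unit reaches roughly 13000 insns; for
   very small units the --param large-unit-insns value is used as the base
   instead.  */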
818
819 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
820 static void
821 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
822 {
823 while (VEC_length (cgraph_edge_p, new_edges) > 0)
824 {
825 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
826
827 gcc_assert (!edge->aux);
828 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
829 }
830 }
831
832
/* We use a greedy algorithm for inlining small functions:
   All inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated after each inlining is performed.  */
839
840 static void
841 cgraph_decide_inlining_of_small_functions (void)
842 {
843 struct cgraph_node *node;
844 struct cgraph_edge *edge;
845 const char *failed_reason;
846 fibheap_t heap = fibheap_new ();
847 bitmap updated_nodes = BITMAP_ALLOC (NULL);
848 int min_insns, max_insns;
849 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
850
851 if (flag_indirect_inlining)
852 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
853
854 if (dump_file)
855 fprintf (dump_file, "\nDeciding on smaller functions:\n");
856
857 /* Put all inline candidates into the heap. */
858
859 for (node = cgraph_nodes; node; node = node->next)
860 {
861 if (!node->local.inlinable || !node->callers
862 || node->local.disregard_inline_limits)
863 continue;
864 if (dump_file)
865 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
866
867 node->global.estimated_growth = INT_MIN;
868 if (!cgraph_default_inline_p (node, &failed_reason))
869 {
870 cgraph_set_inline_failed (node, failed_reason);
871 continue;
872 }
873
874 for (edge = node->callers; edge; edge = edge->next_caller)
875 if (edge->inline_failed)
876 {
877 gcc_assert (!edge->aux);
878 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
879 }
880 }
881
882 max_insns = compute_max_insns (overall_insns);
883 min_insns = overall_insns;
884
885 while (overall_insns <= max_insns
886 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
887 {
888 int old_insns = overall_insns;
889 struct cgraph_node *where;
890 int growth =
891 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
892 const char *not_good = NULL;
893
894 growth -= edge->caller->global.insns;
895
896 if (dump_file)
897 {
898 fprintf (dump_file,
899 "\nConsidering %s with %i insns\n",
900 cgraph_node_name (edge->callee),
901 edge->callee->global.insns);
902 fprintf (dump_file,
903 " to be inlined into %s\n"
904 " Estimated growth after inlined into all callees is %+i insns.\n"
905 " Estimated badness is %i, frequency %.2f.\n",
906 cgraph_node_name (edge->caller),
907 cgraph_estimate_growth (edge->callee),
908 cgraph_edge_badness (edge),
909 edge->frequency / (double)CGRAPH_FREQ_BASE);
910 if (edge->count)
911 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
912 }
913 gcc_assert (edge->aux);
914 edge->aux = NULL;
915 if (!edge->inline_failed)
916 continue;
917
      /* When we do not have profile information ready, we do not weight the
         position of the call in the procedure itself in any way.  This means
         that if a call to function A from function B seems profitable to
         inline, the recursive call to A in the inlined copy of A in B will
         look profitable too, and we end up inlining until we reach the
         maximal function growth.  This is not a good idea, so prohibit the
         recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.  */
927 if (!max_count)
928 {
929 where = edge->caller;
930 while (where->global.inlined_to)
931 {
932 if (where->decl == edge->callee->decl)
933 break;
934 where = where->callers->caller;
935 }
936 if (where->global.inlined_to)
937 {
938 edge->inline_failed
939 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
940 if (dump_file)
941 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
942 continue;
943 }
944 }
945
946 if (!cgraph_maybe_hot_edge_p (edge))
947 not_good = N_("call is unlikely and code size would grow");
948 if (!flag_inline_functions
949 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
950 not_good = N_("function not declared inline and code size would grow");
951 if (optimize_size)
952 not_good = N_("optimizing for size and code size would grow");
953 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
954 {
955 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
956 &edge->inline_failed))
957 {
958 edge->inline_failed = not_good;
959 if (dump_file)
960 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
961 }
962 continue;
963 }
964 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
965 {
966 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
967 &edge->inline_failed))
968 {
969 if (dump_file)
970 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
971 }
972 continue;
973 }
974 if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
975 {
976 gimple_call_set_cannot_inline (edge->call_stmt, true);
977 edge->inline_failed = N_("target specific option mismatch");
978 if (dump_file)
979 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
980 continue;
981 }
982 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
983 &edge->inline_failed))
984 {
985 where = edge->caller;
986 if (where->global.inlined_to)
987 where = where->global.inlined_to;
988 if (!cgraph_decide_recursive_inlining (where, new_indirect_edges))
989 continue;
990 if (flag_indirect_inlining)
991 add_new_edges_to_heap (heap, new_indirect_edges);
992 update_callee_keys (heap, where, updated_nodes);
993 }
994 else
995 {
996 struct cgraph_node *callee;
997 if (gimple_call_cannot_inline_p (edge->call_stmt)
998 || !cgraph_check_inline_limits (edge->caller, edge->callee,
999 &edge->inline_failed, true))
1000 {
1001 if (dump_file)
1002 fprintf (dump_file, " Not inlining into %s:%s.\n",
1003 cgraph_node_name (edge->caller), edge->inline_failed);
1004 continue;
1005 }
1006 callee = edge->callee;
1007 cgraph_mark_inline_edge (edge, true);
1008 if (flag_indirect_inlining)
1009 {
1010 ipa_propagate_indirect_call_infos (edge, new_indirect_edges);
1011 add_new_edges_to_heap (heap, new_indirect_edges);
1012 }
1013 update_callee_keys (heap, callee, updated_nodes);
1014 }
1015 where = edge->caller;
1016 if (where->global.inlined_to)
1017 where = where->global.inlined_to;
1018
      /* Our profitability metric can depend on local properties
         such as the number of inlinable calls and the size of the function
         body.  After inlining these properties might change for the function
         we inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
1025 update_caller_keys (heap, where, updated_nodes);
1026 bitmap_clear (updated_nodes);
1027
1028 if (dump_file)
1029 {
1030 fprintf (dump_file,
1031 " Inlined into %s which now has %i insns,"
1032 "net change of %+i insns.\n",
1033 cgraph_node_name (edge->caller),
1034 edge->caller->global.insns,
1035 overall_insns - old_insns);
1036 }
1037 if (min_insns > overall_insns)
1038 {
1039 min_insns = overall_insns;
1040 max_insns = compute_max_insns (min_insns);
1041
1042 if (dump_file)
1043 fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
1044 }
1045 }
1046 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1047 {
1048 gcc_assert (edge->aux);
1049 edge->aux = NULL;
1050 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1051 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1052 &edge->inline_failed))
1053 edge->inline_failed = N_("--param inline-unit-growth limit reached");
1054 }
1055
1056 if (new_indirect_edges)
1057 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1058 fibheap_delete (heap);
1059 BITMAP_FREE (updated_nodes);
1060 }
1061
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */
1064
1065 static unsigned int
1066 cgraph_decide_inlining (void)
1067 {
1068 struct cgraph_node *node;
1069 int nnodes;
1070 struct cgraph_node **order =
1071 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1072 int old_insns = 0;
1073 int i;
1074 int initial_insns = 0;
1075
1076 max_count = 0;
1077 for (node = cgraph_nodes; node; node = node->next)
1078 if (node->analyzed && (node->needed || node->reachable))
1079 {
1080 struct cgraph_edge *e;
1081
1082 initial_insns += inline_summary (node)->self_insns;
1083 gcc_assert (inline_summary (node)->self_insns == node->global.insns);
1084 for (e = node->callees; e; e = e->next_callee)
1085 if (max_count < e->count)
1086 max_count = e->count;
1087 }
1088 overall_insns = initial_insns;
1089 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1090
1091 nnodes = cgraph_postorder (order);
1092
1093 if (dump_file)
1094 fprintf (dump_file,
1095 "\nDeciding on inlining. Starting with %i insns.\n",
1096 initial_insns);
1097
1098 for (node = cgraph_nodes; node; node = node->next)
1099 node->aux = 0;
1100
1101 if (dump_file)
1102 fprintf (dump_file, "\nInlining always_inline functions:\n");
1103
1104 /* In the first pass mark all always_inline edges. Do this with a priority
1105 so none of our later choices will make this impossible. */
1106 for (i = nnodes - 1; i >= 0; i--)
1107 {
1108 struct cgraph_edge *e, *next;
1109
1110 node = order[i];
1111
1112 /* Handle nodes to be flattened, but don't update overall unit size. */
1113 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1114 {
1115 if (dump_file)
1116 fprintf (dump_file,
1117 "Flattening %s\n", cgraph_node_name (node));
1118 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1119 }
1120
1121 if (!node->local.disregard_inline_limits)
1122 continue;
1123 if (dump_file)
1124 fprintf (dump_file,
1125 "\nConsidering %s %i insns (always inline)\n",
1126 cgraph_node_name (node), node->global.insns);
1127 old_insns = overall_insns;
1128 for (e = node->callers; e; e = next)
1129 {
1130 next = e->next_caller;
1131 if (!e->inline_failed || gimple_call_cannot_inline_p (e->call_stmt))
1132 continue;
1133 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1134 &e->inline_failed))
1135 continue;
1136 if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
1137 {
1138 gimple_call_set_cannot_inline (e->call_stmt, true);
1139 continue;
1140 }
1141 cgraph_mark_inline_edge (e, true);
1142 if (flag_indirect_inlining)
1143 ipa_propagate_indirect_call_infos (e, NULL);
1144 if (dump_file)
1145 fprintf (dump_file,
1146 " Inlined into %s which now has %i insns.\n",
1147 cgraph_node_name (e->caller),
1148 e->caller->global.insns);
1149 }
      /* Inlining a self-recursive function might introduce new calls to
         itself that we didn't see in the loop above.  Fill in the proper
         reason why the inlining failed.  */
1153 for (e = node->callers; e; e = e->next_caller)
1154 if (e->inline_failed)
1155 e->inline_failed = N_("recursive inlining");
1156 if (dump_file)
1157 fprintf (dump_file,
1158 " Inlined for a net change of %+i insns.\n",
1159 overall_insns - old_insns);
1160 }
1161
1162 if (!flag_really_no_inline)
1163 cgraph_decide_inlining_of_small_functions ();
1164
1165 /* After this point, any edge discovery performed by indirect inlining is no
1166 good so let's give up. */
1167 if (flag_indirect_inlining)
1168 free_all_ipa_structures_after_iinln ();
1169
1170 if (!flag_really_no_inline
1171 && flag_inline_functions_called_once)
1172 {
1173 if (dump_file)
1174 fprintf (dump_file, "\nDeciding on functions called once:\n");
1175
1176 /* And finally decide what functions are called once. */
1177 for (i = nnodes - 1; i >= 0; i--)
1178 {
1179 node = order[i];
1180
1181 if (node->callers
1182 && !node->callers->next_caller
1183 && !node->needed
1184 && node->local.inlinable
1185 && node->callers->inline_failed
1186 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1187 && !DECL_EXTERNAL (node->decl)
1188 && !DECL_COMDAT (node->decl))
1189 {
1190 if (dump_file)
1191 {
1192 fprintf (dump_file,
1193 "\nConsidering %s %i insns.\n",
1194 cgraph_node_name (node), node->global.insns);
1195 fprintf (dump_file,
1196 " Called once from %s %i insns.\n",
1197 cgraph_node_name (node->callers->caller),
1198 node->callers->caller->global.insns);
1199 }
1200
1201 old_insns = overall_insns;
1202
1203 if (cgraph_check_inline_limits (node->callers->caller, node,
1204 NULL, false))
1205 {
1206 cgraph_mark_inline (node->callers);
1207 if (dump_file)
1208 fprintf (dump_file,
1209 " Inlined into %s which now has %i insns"
1210 " for a net change of %+i insns.\n",
1211 cgraph_node_name (node->callers->caller),
1212 node->callers->caller->global.insns,
1213 overall_insns - old_insns);
1214 }
1215 else
1216 {
1217 if (dump_file)
1218 fprintf (dump_file,
1219 " Inline limit reached, not inlined.\n");
1220 }
1221 }
1222 }
1223 }
1224
1225 if (dump_file)
1226 fprintf (dump_file,
1227 "\nInlined %i calls, eliminated %i functions, "
1228 "%i insns turned to %i insns.\n\n",
1229 ncalls_inlined, nfunctions_inlined, initial_insns,
1230 overall_insns);
1231 free (order);
1232 return 0;
1233 }
1234
/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into the cgraph_node
   the last time we visited it in the recursion.  In general, when the mode
   is set, we have recursive inlining, but as a special case, we want to try
   harder to inline ALWAYS_INLINE functions: consider the callgraph
   a->b->c->b, with a being flattened and b being always inline.  Flattening
   'a' will collapse a->b->c before hitting the cycle.  To accommodate
   always_inline, however, we need to inline a->b->c->b.

   So after hitting a cycle for the first time, we switch into ALWAYS_INLINE
   mode and stop inlining only after hitting a cycle again while already in
   ALWAYS_INLINE mode.  */
1248 static bool
1249 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1250 {
1251 struct cgraph_node *callee = e->callee;
1252 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1253 bool always_inline = e->callee->local.disregard_inline_limits;
1254
  /* Have we hit a cycle?  */
1256 if (callee_mode)
1257 {
      /* This is the first time we have hit the cycle (we are not in
         ALWAYS_INLINE-only mode yet) and the function in question is
         always_inline.  */
1260 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1261 {
1262 if (dump_file)
1263 {
1264 indent_to (dump_file, depth);
1265 fprintf (dump_file,
1266 "Hit cycle in %s, switching to always inline only.\n",
1267 cgraph_node_name (callee));
1268 }
1269 mode = INLINE_ALWAYS_INLINE;
1270 }
1271 /* Otherwise it is time to give up. */
1272 else
1273 {
1274 if (dump_file)
1275 {
1276 indent_to (dump_file, depth);
1277 fprintf (dump_file,
1278 "Not inlining %s into %s to avoid cycle.\n",
1279 cgraph_node_name (callee),
1280 cgraph_node_name (e->caller));
1281 }
1282 e->inline_failed = (e->callee->local.disregard_inline_limits
1283 ? N_("recursive inlining") : "");
1284 return false;
1285 }
1286 }
1287
1288 callee->aux = (void *)(size_t) mode;
1289 if (dump_file)
1290 {
1291 indent_to (dump_file, depth);
1292 fprintf (dump_file, " Inlining %s into %s.\n",
1293 cgraph_node_name (e->callee),
1294 cgraph_node_name (e->caller));
1295 }
1296 if (e->inline_failed)
1297 cgraph_mark_inline (e);
1298
1299 /* In order to fully inline always_inline functions, we need to
1300 recurse here, since the inlined functions might not be processed by
1301 incremental inlining at all yet.
1302
1303 Also flattening needs to be done recursively. */
1304
1305 if (mode == INLINE_ALL || always_inline)
1306 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1307 callee->aux = (void *)(size_t) callee_mode;
1308 return true;
1309 }
1310
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */
1314
1315 static bool
1316 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1317 enum inlining_mode mode,
1318 int depth)
1319 {
1320 struct cgraph_edge *e;
1321 bool inlined = false;
1322 const char *failed_reason;
1323 enum inlining_mode old_mode;
1324
1325 #ifdef ENABLE_CHECKING
1326 verify_cgraph_node (node);
1327 #endif
1328
1329 old_mode = (enum inlining_mode) (size_t)node->aux;
1330
1331 if (mode != INLINE_ALWAYS_INLINE
1332 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1333 {
1334 if (dump_file)
1335 {
1336 indent_to (dump_file, depth);
1337 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1338 }
1339 mode = INLINE_ALL;
1340 }
1341
1342 node->aux = (void *)(size_t) mode;
1343
1344 /* First of all look for always inline functions. */
1345 for (e = node->callees; e; e = e->next_callee)
1346 {
1347 if (!e->callee->local.disregard_inline_limits
1348 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1349 continue;
1350 if (gimple_call_cannot_inline_p (e->call_stmt))
1351 continue;
1352 /* When the edge is already inlined, we just need to recurse into
1353 it in order to fully flatten the leaves. */
1354 if (!e->inline_failed && mode == INLINE_ALL)
1355 {
1356 inlined |= try_inline (e, mode, depth);
1357 continue;
1358 }
1359 if (dump_file)
1360 {
1361 indent_to (dump_file, depth);
1362 fprintf (dump_file,
1363 "Considering to always inline inline candidate %s.\n",
1364 cgraph_node_name (e->callee));
1365 }
1366 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1367 {
1368 if (dump_file)
1369 {
1370 indent_to (dump_file, depth);
1371 fprintf (dump_file, "Not inlining: recursive call.\n");
1372 }
1373 continue;
1374 }
1375 if (!tree_can_inline_p (node->decl, e->callee->decl))
1376 {
1377 gimple_call_set_cannot_inline (e->call_stmt, true);
1378 if (dump_file)
1379 {
1380 indent_to (dump_file, depth);
1381 fprintf (dump_file,
1382 "Not inlining: Target specific option mismatch.\n");
1383 }
1384 continue;
1385 }
1386 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1387 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1388 {
1389 if (dump_file)
1390 {
1391 indent_to (dump_file, depth);
1392 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1393 }
1394 continue;
1395 }
1396 if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
1397 {
1398 if (dump_file)
1399 {
1400 indent_to (dump_file, depth);
1401 fprintf (dump_file,
1402 "Not inlining: Function body no longer available.\n");
1403 }
1404 continue;
1405 }
1406 inlined |= try_inline (e, mode, depth);
1407 }
1408
1409 /* Now do the automatic inlining. */
1410 if (!flag_really_no_inline
1411 && mode != INLINE_ALL
1412 && mode != INLINE_ALWAYS_INLINE)
1413 for (e = node->callees; e; e = e->next_callee)
1414 {
1415 if (!e->callee->local.inlinable
1416 || !e->inline_failed
1417 || e->callee->local.disregard_inline_limits)
1418 continue;
1419 if (dump_file)
1420 fprintf (dump_file, "Considering inline candidate %s.\n",
1421 cgraph_node_name (e->callee));
1422 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1423 {
1424 if (dump_file)
1425 {
1426 indent_to (dump_file, depth);
1427 fprintf (dump_file, "Not inlining: recursive call.\n");
1428 }
1429 continue;
1430 }
1431 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1432 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1433 {
1434 if (dump_file)
1435 {
1436 indent_to (dump_file, depth);
1437 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1438 }
1439 continue;
1440 }
      /* When the function body would grow and inlining the function won't
         eliminate the need for an offline copy of the function, don't
         inline.  */
1444 if ((mode == INLINE_SIZE
1445 || (!flag_inline_functions
1446 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1447 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1448 > e->caller->global.insns)
1449 && cgraph_estimate_growth (e->callee) > 0)
1450 {
1451 if (dump_file)
1452 {
1453 indent_to (dump_file, depth);
1454 fprintf (dump_file,
1455 "Not inlining: code size would grow by %i insns.\n",
1456 cgraph_estimate_size_after_inlining (1, e->caller,
1457 e->callee)
1458 - e->caller->global.insns);
1459 }
1460 continue;
1461 }
1462 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1463 false)
1464 || gimple_call_cannot_inline_p (e->call_stmt))
1465 {
1466 if (dump_file)
1467 {
1468 indent_to (dump_file, depth);
1469 fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
1470 }
1471 continue;
1472 }
1473 if (!gimple_body (e->callee->decl) && !e->callee->inline_decl)
1474 {
1475 if (dump_file)
1476 {
1477 indent_to (dump_file, depth);
1478 fprintf (dump_file,
1479 "Not inlining: Function body no longer available.\n");
1480 }
1481 continue;
1482 }
1483 if (!tree_can_inline_p (node->decl, e->callee->decl))
1484 {
1485 gimple_call_set_cannot_inline (e->call_stmt, true);
1486 if (dump_file)
1487 {
1488 indent_to (dump_file, depth);
1489 fprintf (dump_file,
1490 "Not inlining: Target specific option mismatch.\n");
1491 }
1492 continue;
1493 }
1494 if (cgraph_default_inline_p (e->callee, &failed_reason))
1495 inlined |= try_inline (e, mode, depth);
1496 }
1497 node->aux = (void *)(size_t) old_mode;
1498 return inlined;
1499 }
1500
/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
1504 static int nnodes;
1505 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1506
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes be somewhat more effective and avoids some code duplication in the
   later real inlining pass for testcases with very many function calls.  */
1510 static unsigned int
1511 cgraph_early_inlining (void)
1512 {
1513 struct cgraph_node *node = cgraph_node (current_function_decl);
1514 unsigned int todo = 0;
1515
1516 if (sorrycount || errorcount)
1517 return 0;
1518 if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
1519 {
1520 timevar_push (TV_INTEGRATION);
1521 todo = optimize_inline_calls (current_function_decl);
1522 timevar_pop (TV_INTEGRATION);
1523 }
1524 return todo;
1525 }
1526
/* Return true when early inlining shall be performed.  */
1528 static bool
1529 cgraph_gate_early_inlining (void)
1530 {
1531 return flag_early_inlining;
1532 }
1533
1534 struct gimple_opt_pass pass_early_inline =
1535 {
1536 {
1537 GIMPLE_PASS,
1538 "einline", /* name */
1539 cgraph_gate_early_inlining, /* gate */
1540 cgraph_early_inlining, /* execute */
1541 NULL, /* sub */
1542 NULL, /* next */
1543 0, /* static_pass_number */
1544 TV_INLINE_HEURISTICS, /* tv_id */
1545 0, /* properties_required */
1546 PROP_cfg, /* properties_provided */
1547 0, /* properties_destroyed */
1548 0, /* todo_flags_start */
1549 TODO_dump_func /* todo_flags_finish */
1550 }
1551 };
1552
/* Return true when early inlining shall be performed as an IPA pass,
   i.e. when profile instrumentation or feedback is enabled.  */
1554 static bool
1555 cgraph_gate_ipa_early_inlining (void)
1556 {
1557 return (flag_early_inlining
1558 && (flag_branch_probabilities || flag_test_coverage
1559 || profile_arc_flag));
1560 }
1561
/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
1564 struct simple_ipa_opt_pass pass_ipa_early_inline =
1565 {
1566 {
1567 SIMPLE_IPA_PASS,
1568 "einline_ipa", /* name */
1569 cgraph_gate_ipa_early_inlining, /* gate */
1570 NULL, /* execute */
1571 NULL, /* sub */
1572 NULL, /* next */
1573 0, /* static_pass_number */
1574 TV_INLINE_HEURISTICS, /* tv_id */
1575 0, /* properties_required */
1576 PROP_cfg, /* properties_provided */
1577 0, /* properties_destroyed */
1578 0, /* todo_flags_start */
1579 TODO_dump_cgraph /* todo_flags_finish */
1580 }
1581 };
1582
1583 /* Compute parameters of functions used by inliner. */
1584 unsigned int
1585 compute_inline_parameters (struct cgraph_node *node)
1586 {
1587 gcc_assert (!node->global.inlined_to);
1588 inline_summary (node)->estimated_self_stack_size
1589 = estimated_stack_frame_size ();
1590 node->global.estimated_stack_size
1591 = inline_summary (node)->estimated_self_stack_size;
1592 node->global.stack_frame_offset = 0;
1593 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1594 inline_summary (node)->self_insns
1595 = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
1596 if (node->local.inlinable && !node->local.disregard_inline_limits)
1597 node->local.disregard_inline_limits
1598 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1599 if (flag_really_no_inline && !node->local.disregard_inline_limits)
1600 node->local.inlinable = 0;
1601 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1602 node->global.insns = inline_summary (node)->self_insns;
1603 return 0;
1604 }
1605
1606
1607 /* Compute parameters of functions used by inliner using
1608 current_function_decl. */
1609 static unsigned int
1610 compute_inline_parameters_for_current (void)
1611 {
1612 compute_inline_parameters (cgraph_node (current_function_decl));
1613 return 0;
1614 }
1615
1616 struct gimple_opt_pass pass_inline_parameters =
1617 {
1618 {
1619 GIMPLE_PASS,
1620 NULL, /* name */
1621 NULL, /* gate */
1622 compute_inline_parameters_for_current,/* execute */
1623 NULL, /* sub */
1624 NULL, /* next */
1625 0, /* static_pass_number */
1626 TV_INLINE_HEURISTICS, /* tv_id */
1627 0, /* properties_required */
1628 PROP_cfg, /* properties_provided */
1629 0, /* properties_destroyed */
1630 0, /* todo_flags_start */
1631 0 /* todo_flags_finish */
1632 }
1633 };
1634
/* This function performs the intraprocedural analysis in NODE that is
   required to inline indirect calls.  */
1637 static void
1638 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1639 {
1640 struct cgraph_edge *cs;
1641
1642 ipa_count_formal_params (node);
1643 ipa_create_param_decls_array (node);
1644 ipa_detect_param_modifications (node);
1645 ipa_analyze_params_uses (node);
1646
1647 if (dump_file)
1648 ipa_print_node_param_flags (dump_file, node);
1649
1650 for (cs = node->callees; cs; cs = cs->next_callee)
1651 {
1652 ipa_count_arguments (cs);
1653 ipa_compute_jump_functions (cs);
1654 }
1655
1656 if (dump_file)
1657 ipa_print_node_jump_functions (dump_file, node);
1658 }
1659
/* Compute the function body sizes and the other inline parameters for all
   analyzed functions in the callgraph.  */
1661 static void
1662 inline_generate_summary (void)
1663 {
1664 struct cgraph_node **order =
1665 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1666 int nnodes = cgraph_postorder (order);
1667 int i;
1668
1669 if (flag_indirect_inlining)
1670 {
1671 ipa_register_cgraph_hooks ();
1672 ipa_check_create_node_params ();
1673 ipa_check_create_edge_args ();
1674 }
1675
1676 for (i = nnodes - 1; i >= 0; i--)
1677 {
1678 struct cgraph_node *node = order[i];
1679
1680 /* Allow possibly removed nodes to be garbage collected. */
1681 order[i] = NULL;
1682 if (node->analyzed && (node->needed || node->reachable))
1683 {
1684 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1685 current_function_decl = node->decl;
1686 compute_inline_parameters (node);
1687
1688 if (flag_indirect_inlining)
1689 inline_indirect_intraprocedural_analysis (node);
1690
1691 pop_cfun ();
1692 }
1693 }
1694
1695 current_function_decl = NULL;
1696 free (order);
1697 return;
1698 }
1699
1700 /* Apply inline plan to function. */
1701 static unsigned int
1702 inline_transform (struct cgraph_node *node)
1703 {
1704 unsigned int todo = 0;
1705 struct cgraph_edge *e;
1706
1707 /* We might need the body of this function so that we can expand
1708 it inline somewhere else. */
1709 if (cgraph_preserve_function_body_p (current_function_decl))
1710 save_inline_function_body (node);
1711
1712 for (e = node->callees; e; e = e->next_callee)
1713 if (!e->inline_failed || warn_inline)
1714 break;
1715
1716 if (e)
1717 {
1718 timevar_push (TV_INTEGRATION);
1719 todo = optimize_inline_calls (current_function_decl);
1720 timevar_pop (TV_INTEGRATION);
1721 }
1722 return todo | execute_fixup_cfg ();
1723 }
1724
1725 struct ipa_opt_pass pass_ipa_inline =
1726 {
1727 {
1728 IPA_PASS,
1729 "inline", /* name */
1730 NULL, /* gate */
1731 cgraph_decide_inlining, /* execute */
1732 NULL, /* sub */
1733 NULL, /* next */
1734 0, /* static_pass_number */
1735 TV_INLINE_HEURISTICS, /* tv_id */
1736 0, /* properties_required */
1737 PROP_cfg, /* properties_provided */
1738 0, /* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
1740 TODO_dump_cgraph | TODO_dump_func
1741 | TODO_remove_functions /* todo_flags_finish */
1742 },
1743 inline_generate_summary, /* generate_summary */
1744 NULL, /* write_summary */
1745 NULL, /* read_summary */
1746 NULL, /* function_read_summary */
1747 0, /* TODOs */
1748 inline_transform, /* function_transform */
1749 NULL, /* variable_transform */
1750 };
1751
1752
1753 #include "gt-ipa-inline.h"