1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 We separate inlining decisions from the inliner itself and store them
24 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
25 documentation for details on how inline plans are represented in the
26 callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32 This function marks a given call inline and performs the necessary
33 modifications of the cgraph (producing clones and updating overall
34 statistics).
35
36 inlining heuristics limits
37
38 These functions check that a particular inlining is allowed by the
39 limits specified by the user (allowed function growth, overall unit
40 growth and so on).
41
42 inlining heuristics
43
44 This is the implementation of the IPA pass that aims to get as much
45 benefit from inlining as possible while obeying the limits checked above.
46
47 The implementation of the particular heuristics is separated from
48 the rest of the code to make it easier to replace with a more
49 sophisticated implementation in the future. The rest of the inlining
50 code acts as a library for modifying the callgraph and verifying that
51 the limits on code size growth are respected.
52 
53 To mark a given call inline, use the cgraph_mark_inline function; the
54 verification is performed by cgraph_default_inline_p and
55 cgraph_check_inline_limits.
56
57 The heuristics implement a simple knapsack-style algorithm that orders
58 all functions by their "profitability" (estimated by code size growth)
59 and inlines them in priority order.
60 
61 cgraph_decide_inlining implements heuristics that take the whole
62 callgraph into account, while cgraph_decide_inlining_incrementally
63 considers only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69 This pass computes the local properties of functions used by the inliner:
70 the estimated function body size, whether the function is inlinable at
71 all, and the stack frame consumption.
72 
73 Before executing any of the inliner passes, this local pass has to be
74 applied to each function in the callgraph (i.e. run as a subpass of some
75 earlier IPA pass). The results are invalidated by any optimization
76 applied to the function body.
77
78 pass_early_inlining
79
80 A simple local inlining pass that inlines callees into the current
81 function. This pass performs no global whole-compilation-unit analysis,
82 so if it were allowed to do inlining that expands code size, it might
83 result in unbounded growth of the whole unit.
84 
85 The pass is run during conversion into SSA form. Only functions already
86 converted into SSA form are inlined, so the conversion must happen in
87 topological order on the callgraph (which is maintained by the pass
88 manager). Functions are early-optimized after inlining, so the early
89 inliner sees the unoptimized function itself, but all considered callees
90 are already optimized, allowing it to remove the C++ abstraction penalty
91 effectively and cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, early inlining is also necessary to reduce
96 instrumentation costs on programs with a high abstraction penalty
97 (making many redundant calls). This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting the already-instrumented function bodies we brought in
100 via inlining.
101 
102 To avoid this, this pass is executed as an IPA pass before profiling. It
103 is a simple wrapper around pass_early_inlining and ensures that inlining happens first.
104
105 pass_ipa_inline
106
107 This is the main pass, implementing a simple greedy algorithm that inlines
108 small functions (which results in overall growth of the compilation unit)
109 and inlines functions called once. The pass computes just the so-called
110 inline plan (a representation of the inlining to be done in the callgraph);
111 unlike early inlining, it does not perform the inlining itself.
112 
113 pass_apply_inline
114 
115 This pass performs the actual inlining according to pass_ipa_inline on the
116 given function. The function body before inlining is saved when it may be
117 needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141
142 /* Modes the incremental inliner operates in:
143 
144 In ALWAYS_INLINE mode, only functions marked
145 always_inline are inlined. This mode is used after detecting a cycle
146 during flattening.
147 
148 In SIZE mode, only functions that reduce the function body size after
149 inlining are inlined; this is used during early inlining.
150 
151 In ALL mode, everything is inlined. This is used during flattening. */
152 enum inlining_mode {
153 INLINE_NONE = 0,
154 INLINE_ALWAYS_INLINE,
155 INLINE_SIZE,
156 INLINE_ALL
157 };
158 static bool
159 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
160 int);
161
162
163 /* Statistics we collect about inlining algorithm. */
164 static int ncalls_inlined;
165 static int nfunctions_inlined;
166 static int overall_insns;
167 static gcov_type max_count;
168
169 /* Holders of ipa cgraph hooks: */
170 static struct cgraph_node_hook_list *function_insertion_hook_holder;
171
172 static inline struct inline_summary *
173 inline_summary (struct cgraph_node *node)
174 {
175 return &node->local.inline_summary;
176 }
177
178 /* Estimate the size of function TO after inlining WHAT into it TIMES times. */
179
180 static int
181 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
182 struct cgraph_node *what)
183 {
184 int size;
185 tree fndecl = what->decl, arg;
186 int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
187
188 for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
189 call_insns += estimate_move_cost (TREE_TYPE (arg));
190 size = (what->global.insns - call_insns) * times + to->global.insns;
191 gcc_assert (size >= 0);
192 return size;
193 }
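
/* Worked example of the estimate above (illustrative numbers only, not taken
   from any real compilation): suppose WHAT has two pointer arguments,
   global.insns == 30, PARAM_INLINE_CALL_COST == 16 and each argument move is
   estimated at 1 insn, so call_insns == 16 + 2 == 18.  Inlining WHAT once
   (TIMES == 1) into a caller TO with global.insns == 100 is then estimated as
   (30 - 18) * 1 + 100 == 112 insns: the callee body is added, but the cost of
   the call itself is assumed to disappear.  */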
194
195 /* E is expected to be an edge being inlined. Clone the destination node
196 of the edge and redirect the edge to the new clone.
197 DUPLICATE is used for bookkeeping on whether we are actually creating new
198 clones or re-using the node originally representing the out-of-line
199 function call. */
200 void
201 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
202 bool update_original)
203 {
204 HOST_WIDE_INT peak;
205
206 if (duplicate)
207 {
208 /* We may eliminate the need for out-of-line copy to be output.
209 In that case just go ahead and re-use it. */
210 if (!e->callee->callers->next_caller
211 && !e->callee->needed
212 && !cgraph_new_nodes)
213 {
214 gcc_assert (!e->callee->global.inlined_to);
215 if (e->callee->analyzed)
216 overall_insns -= e->callee->global.insns, nfunctions_inlined++;
217 duplicate = false;
218 }
219 else
220 {
221 struct cgraph_node *n;
222 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
223 update_original);
224 cgraph_redirect_edge_callee (e, n);
225 }
226 }
227
228 if (e->caller->global.inlined_to)
229 e->callee->global.inlined_to = e->caller->global.inlined_to;
230 else
231 e->callee->global.inlined_to = e->caller;
232 e->callee->global.stack_frame_offset
233 = e->caller->global.stack_frame_offset
234 + inline_summary (e->caller)->estimated_self_stack_size;
235 peak = e->callee->global.stack_frame_offset
236 + inline_summary (e->callee)->estimated_self_stack_size;
237 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
238 e->callee->global.inlined_to->global.estimated_stack_size = peak;
239
240 /* Recursively clone all bodies. */
241 for (e = e->callee->callees; e; e = e->next_callee)
242 if (!e->inline_failed)
243 cgraph_clone_inlined_nodes (e, duplicate, update_original);
244 }
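
/* Stack bookkeeping sketch (hypothetical sizes): if the caller sits at
   stack_frame_offset 0 with an estimated self stack size of 48 bytes and the
   callee's own frame is estimated at 32 bytes, the inlined callee is placed
   at offset 0 + 48 == 48 and the peak becomes 48 + 32 == 80, so the
   estimated_stack_size of the node everything is inlined into is raised to
   80 if it was smaller.  */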
245
246 /* Mark edge E as inlined and update the callgraph accordingly.
247 UPDATE_ORIGINAL specifies whether the profile of the original function
248 should be updated. */
249
250 void
251 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original)
252 {
253 int old_insns = 0, new_insns = 0;
254 struct cgraph_node *to = NULL, *what;
255
256 if (e->callee->inline_decl)
257 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
258
259 gcc_assert (e->inline_failed);
260 e->inline_failed = NULL;
261
262 if (!e->callee->global.inlined)
263 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
264 e->callee->global.inlined = true;
265
266 cgraph_clone_inlined_nodes (e, true, update_original);
267
268 what = e->callee;
269
270 /* Now update size of caller and all functions caller is inlined into. */
271 for (;e && !e->inline_failed; e = e->caller->callers)
272 {
273 old_insns = e->caller->global.insns;
274 new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
275 what);
276 gcc_assert (new_insns >= 0);
277 to = e->caller;
278 to->global.insns = new_insns;
279 }
280 gcc_assert (what->global.inlined_to == to);
281 if (new_insns > old_insns)
282 overall_insns += new_insns - old_insns;
283 ncalls_inlined++;
284 }
285
286 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
287 Return the following unredirected edge in the list of callers
288 of EDGE->CALLEE. */
289
290 static struct cgraph_edge *
291 cgraph_mark_inline (struct cgraph_edge *edge)
292 {
293 struct cgraph_node *to = edge->caller;
294 struct cgraph_node *what = edge->callee;
295 struct cgraph_edge *e, *next;
296
297 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
298 /* Look for all calls, mark them inline and clone recursively
299 all inlined functions. */
300 for (e = what->callers; e; e = next)
301 {
302 next = e->next_caller;
303 if (e->caller == to && e->inline_failed)
304 {
305 cgraph_mark_inline_edge (e, true);
306 if (e == edge)
307 edge = next;
308 }
309 }
310
311 return edge;
312 }
313
314 /* Estimate the growth caused by inlining NODE into all of its callers. */
315
316 static int
317 cgraph_estimate_growth (struct cgraph_node *node)
318 {
319 int growth = 0;
320 struct cgraph_edge *e;
321 bool self_recursive = false;
322
323 if (node->global.estimated_growth != INT_MIN)
324 return node->global.estimated_growth;
325
326 for (e = node->callers; e; e = e->next_caller)
327 {
328 if (e->caller == node)
329 self_recursive = true;
330 if (e->inline_failed)
331 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
332 - e->caller->global.insns);
333 }
334
335 /* ??? Wrong for non-trivially self-recursive functions or cases where
336 we decide not to inline for other reasons, but it is not a big deal:
337 in that case we will keep the body around, but we will also avoid
338 some inlining. */
339 if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
340 growth -= node->global.insns;
341
342 node->global.estimated_growth = growth;
343 return growth;
344 }
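
/* For illustration (made-up numbers): a static function NODE of 20 insns
   with three call sites, each estimated to grow its caller by 12 insns after
   inlining, accumulates growth 3 * 12 == 36; because the body can then be
   eliminated (the node is neither needed nor external nor self-recursive),
   20 insns are subtracted, giving an estimated overall growth of 16 insns.  */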
345
346 /* Return false when inlining WHAT into TO is not a good idea
347 because it would cause too large a growth of the function body.
348 When ONE_ONLY is true, assume that only one call site is going
349 to be inlined; otherwise figure out how many call sites in
350 TO call WHAT and verify that all of them can be inlined. */
352
353 static bool
354 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
355 const char **reason, bool one_only)
356 {
357 int times = 0;
358 struct cgraph_edge *e;
359 int newsize;
360 int limit;
361 HOST_WIDE_INT stack_size_limit, inlined_stack;
362
363 if (one_only)
364 times = 1;
365 else
366 for (e = to->callees; e; e = e->next_callee)
367 if (e->callee == what)
368 times++;
369
370 if (to->global.inlined_to)
371 to = to->global.inlined_to;
372
373 /* When inlining a large function body called once into a small function,
374 take the inlined function as the base for limiting the growth. */
375 if (inline_summary (to)->self_insns > inline_summary(what)->self_insns)
376 limit = inline_summary (to)->self_insns;
377 else
378 limit = inline_summary (what)->self_insns;
379
380 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
381
382 /* Check the size after inlining against the function limits. But allow
383 the function to shrink if it went over the limits by forced inlining. */
384 newsize = cgraph_estimate_size_after_inlining (times, to, what);
385 if (newsize >= to->global.insns
386 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
387 && newsize > limit)
388 {
389 if (reason)
390 *reason = N_("--param large-function-growth limit reached");
391 return false;
392 }
393
394 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
395
396 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
397
398 inlined_stack = (to->global.stack_frame_offset
399 + inline_summary (to)->estimated_self_stack_size
400 + what->global.estimated_stack_size);
401 if (inlined_stack > stack_size_limit
402 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
403 {
404 if (reason)
405 *reason = N_("--param large-stack-frame-growth limit reached");
406 return false;
407 }
408 return true;
409 }
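
/* Example of the growth limit (assuming the default --param
   large-function-growth of 100): inlining a 40-insn WHAT into a 400-insn TO
   uses the larger self size, 400, as the base, so the limit is
   400 + 400 * 100 / 100 == 800 insns.  The inlining is only rejected when the
   estimated new size exceeds this limit, does not shrink TO, and is also
   above --param large-function-insns.  */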
410
411 /* Return true when function N is small enough to be inlined. */
412
413 bool
414 cgraph_default_inline_p (struct cgraph_node *n, const char **reason)
415 {
416 tree decl = n->decl;
417
418 if (n->inline_decl)
419 decl = n->inline_decl;
420 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
421 {
422 if (reason)
423 *reason = N_("function not inline candidate");
424 return false;
425 }
426
427 if (!DECL_STRUCT_FUNCTION (decl)->cfg)
428 {
429 if (reason)
430 *reason = N_("function body not available");
431 return false;
432 }
433
434 if (DECL_DECLARED_INLINE_P (decl))
435 {
436 if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
437 {
438 if (reason)
439 *reason = N_("--param max-inline-insns-single limit reached");
440 return false;
441 }
442 }
443 else
444 {
445 if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
446 {
447 if (reason)
448 *reason = N_("--param max-inline-insns-auto limit reached");
449 return false;
450 }
451 }
452
453 return true;
454 }
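
/* In other words, a function explicitly declared inline is compared against
   --param max-inline-insns-single, while any other candidate is compared
   against the stricter --param max-inline-insns-auto.  For example, with
   hypothetical defaults of 450 and 90, a 200-insn helper would be rejected
   here unless it was declared inline.  */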
455
456 /* Return true when inlining WHAT into TO would create recursive inlining.
457 We call recursive inlining all cases where the same function appears more
458 than once on a single recursion nest path in the inline graph. */
459
460 static bool
461 cgraph_recursive_inlining_p (struct cgraph_node *to,
462 struct cgraph_node *what,
463 const char **reason)
464 {
465 bool recursive;
466 if (to->global.inlined_to)
467 recursive = what->decl == to->global.inlined_to->decl;
468 else
469 recursive = what->decl == to->decl;
470 /* Marking a recursive function inline has sane semantics and thus we
471 should not warn about it. */
472 if (recursive && reason)
473 *reason = (what->local.disregard_inline_limits
474 ? N_("recursive inlining") : "");
475 return recursive;
476 }
477
478 /* A cost model driving the inlining heuristics so that the edges with the
479 smallest badness are inlined first. After each inlining is performed,
480 the costs of all caller edges of the affected nodes are recomputed, so the
481 metrics may accurately depend on values such as the number of inlinable
482 callers of the function or the function body size. */
483
484 static int
485 cgraph_edge_badness (struct cgraph_edge *edge)
486 {
487 int badness;
488 int growth =
489 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
490
491 growth -= edge->caller->global.insns;
492
493 /* Always prefer inlining that saves code size. */
494 if (growth <= 0)
495 badness = INT_MIN - growth;
496
497 /* When profiling is available, base priorities on -(#calls / growth),
498 so we optimize for the overall number of "executed" inlined calls. */
499 else if (max_count)
500 badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
501
502 /* When a function-local profile is available, base priorities on
503 growth / frequency, so we optimize for the overall frequency of inlined
504 calls. This is not too accurate since, while the call might be frequent
505 within the function, the function itself may be infrequent.
506 
507 Another objective to optimize for is the number of different calls
508 inlined. We add the estimated growth after inlining all functions to
509 bias the priorities slightly in this direction (so functions of the same
510 size that are called fewer times get priority). */
511 else if (flag_guess_branch_prob)
512 {
513 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
514 int growth =
515 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
516 growth -= edge->caller->global.insns;
517 badness = growth * 256;
518
519 /* Decrease badness if call is nested. */
520 /* Compress the range so we don't overflow. */
521 if (div > 256)
522 div = 256 + ceil_log2 (div) - 8;
523 if (div < 1)
524 div = 1;
525 if (badness > 0)
526 badness /= div;
527 badness += cgraph_estimate_growth (edge->callee);
528 }
529 /* When a function-local profile is not available or does not give
530 useful information (i.e. the frequency is zero), base the cost on the
531 loop nest and overall size growth, so we optimize for the overall number
532 of functions fully inlined in the program. */
533 else
534 {
535 int nest = MIN (edge->loop_nest, 8);
536 badness = cgraph_estimate_growth (edge->callee) * 256;
537
538 /* Decrease badness if call is nested. */
539 if (badness > 0)
540 badness >>= nest;
541 else
542 {
543 badness <<= nest;
544 }
545 }
546 /* Always make recursive inlining happen after other inlining is done. */
547 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
548 return badness + 1;
549 else
550 return badness;
551 }
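
/* Illustrative badness computation for the function-local-profile case
   (made-up numbers): an edge whose inlining is estimated to grow the caller
   by 10 insns gets badness 10 * 256 == 2560; if the call executes at about
   twice CGRAPH_FREQ_BASE, div == 200 and the badness drops to
   2560 / 200 == 12, to which the callee's overall estimated growth is added.
   Smaller badness means the edge is inlined earlier.  */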
552
553 /* Recompute heap nodes for each caller edge of NODE. */
554
555 static void
556 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
557 bitmap updated_nodes)
558 {
559 struct cgraph_edge *edge;
560 const char *failed_reason;
561
562 if (!node->local.inlinable || node->local.disregard_inline_limits
563 || node->global.inlined_to)
564 return;
565 if (bitmap_bit_p (updated_nodes, node->uid))
566 return;
567 bitmap_set_bit (updated_nodes, node->uid);
568 node->global.estimated_growth = INT_MIN;
569
570 if (!node->local.inlinable)
571 return;
572 /* Prune out edges we won't inline into anymore. */
573 if (!cgraph_default_inline_p (node, &failed_reason))
574 {
575 for (edge = node->callers; edge; edge = edge->next_caller)
576 if (edge->aux)
577 {
578 fibheap_delete_node (heap, (fibnode_t) edge->aux);
579 edge->aux = NULL;
580 if (edge->inline_failed)
581 edge->inline_failed = failed_reason;
582 }
583 return;
584 }
585
586 for (edge = node->callers; edge; edge = edge->next_caller)
587 if (edge->inline_failed)
588 {
589 int badness = cgraph_edge_badness (edge);
590 if (edge->aux)
591 {
592 fibnode_t n = (fibnode_t) edge->aux;
593 gcc_assert (n->data == edge);
594 if (n->key == badness)
595 continue;
596
597 /* fibheap_replace_key only increases the keys. */
598 if (fibheap_replace_key (heap, n, badness))
599 continue;
600 fibheap_delete_node (heap, (fibnode_t) edge->aux);
601 }
602 edge->aux = fibheap_insert (heap, badness, edge);
603 }
604 }
605
606 /* Recompute heap nodes for the caller edges of each of NODE's callees. */
607
608 static void
609 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
610 bitmap updated_nodes)
611 {
612 struct cgraph_edge *e;
613 node->global.estimated_growth = INT_MIN;
614
615 for (e = node->callees; e; e = e->next_callee)
616 if (e->inline_failed)
617 update_caller_keys (heap, e->callee, updated_nodes);
618 else if (!e->inline_failed)
619 update_callee_keys (heap, e->callee, updated_nodes);
620 }
621
622 /* Enqueue all recursive calls from NODE into the priority queue HEAP,
623 ordered by how much we want to recursively inline the call. */
624
625 static void
626 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
627 fibheap_t heap)
628 {
629 static int priority;
630 struct cgraph_edge *e;
631 for (e = where->callees; e; e = e->next_callee)
632 if (e->callee == node)
633 {
634 /* When profile feedback is available, prioritize by the expected number
635 of calls. Without profile feedback we maintain a simple queue
636 to order candidates by recursion depth. */
637 fibheap_insert (heap,
638 !max_count ? priority++
639 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
640 e);
641 }
642 for (e = where->callees; e; e = e->next_callee)
643 if (!e->inline_failed)
644 lookup_recursive_calls (node, e->callee, heap);
645 }
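
/* Note on the key above: with profile feedback the (negated) edge count is
   scaled down by roughly max_count / 2^24 so that the fibheap key stays
   within a safe integer range.  For instance, with a hypothetical max_count
   of 2^30 the divisor is 2^6, and an edge executed 2^28 times gets the key
   -(2^28 / 2^6) == -2^22.  */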
646
647 /* Decide on recursive inlining: in case the function has recursive calls,
648 inline until the body size reaches the given limit. If any new indirect
649 edges are discovered in the process, add them to NEW_EDGES, unless it is NULL. */
650
651 static bool
652 cgraph_decide_recursive_inlining (struct cgraph_node *node,
653 VEC (cgraph_edge_p, heap) *new_edges)
654 {
655 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
656 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
657 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
658 fibheap_t heap;
659 struct cgraph_edge *e;
660 struct cgraph_node *master_clone, *next;
661 int depth = 0;
662 int n = 0;
663
664 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
665 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
666 return false;
667
668 if (DECL_DECLARED_INLINE_P (node->decl))
669 {
670 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
671 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
672 }
673
674 /* Make sure that function is small enough to be considered for inlining. */
675 if (!max_depth
676 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
677 return false;
678 heap = fibheap_new ();
679 lookup_recursive_calls (node, node, heap);
680 if (fibheap_empty (heap))
681 {
682 fibheap_delete (heap);
683 return false;
684 }
685
686 if (dump_file)
687 fprintf (dump_file,
688 " Performing recursive inlining on %s\n",
689 cgraph_node_name (node));
690
691 /* We need original clone to copy around. */
692 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
693 master_clone->needed = true;
694 for (e = master_clone->callees; e; e = e->next_callee)
695 if (!e->inline_failed)
696 cgraph_clone_inlined_nodes (e, true, false);
697
698 /* Do the inlining and update the list of recursive calls in the process. */
699 while (!fibheap_empty (heap)
700 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
701 <= limit))
702 {
703 struct cgraph_edge *curr
704 = (struct cgraph_edge *) fibheap_extract_min (heap);
705 struct cgraph_node *cnode;
706
707 depth = 1;
708 for (cnode = curr->caller;
709 cnode->global.inlined_to; cnode = cnode->callers->caller)
710 if (node->decl == curr->callee->decl)
711 depth++;
712 if (depth > max_depth)
713 {
714 if (dump_file)
715 fprintf (dump_file,
716 " maximal depth reached\n");
717 continue;
718 }
719
720 if (max_count)
721 {
722 if (!cgraph_maybe_hot_edge_p (curr))
723 {
724 if (dump_file)
725 fprintf (dump_file, " Not inlining cold call\n");
726 continue;
727 }
728 if (curr->count * 100 / node->count < probability)
729 {
730 if (dump_file)
731 fprintf (dump_file,
732 " Probability of edge is too small\n");
733 continue;
734 }
735 }
736
737 if (dump_file)
738 {
739 fprintf (dump_file,
740 " Inlining call of depth %i", depth);
741 if (node->count)
742 {
743 fprintf (dump_file, " called approx. %.2f times per call",
744 (double)curr->count / node->count);
745 }
746 fprintf (dump_file, "\n");
747 }
748 cgraph_redirect_edge_callee (curr, master_clone);
749 cgraph_mark_inline_edge (curr, false);
750 if (flag_indirect_inlining)
751 ipa_propagate_indirect_call_infos (curr, new_edges);
752 lookup_recursive_calls (node, curr->callee, heap);
753 n++;
754 }
755 if (!fibheap_empty (heap) && dump_file)
756 fprintf (dump_file, " Recursive inlining growth limit met.\n");
757
758 fibheap_delete (heap);
759 if (dump_file)
760 fprintf (dump_file,
761 "\n Inlined %i times, body grown from %i to %i insns\n", n,
762 master_clone->global.insns, node->global.insns);
763
764 /* Remove the master clone we used for inlining. We rely on the fact that
765 clones inlined into the master clone get queued just before the master
766 clone, so we don't need recursion. */
767 for (node = cgraph_nodes; node != master_clone;
768 node = next)
769 {
770 next = node->next;
771 if (node->global.inlined_to == master_clone)
772 cgraph_remove_node (node);
773 }
774 cgraph_remove_node (master_clone);
775 /* FIXME: Recursive inlining actually reduces the number of calls to the
776 function. At this point we should probably walk the function and the
777 inline clones and compensate the counts accordingly. This probably
778 doesn't matter much in practice. */
779 return n > 0;
780 }
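
/* Example of the probability cutoff above (illustrative counts, assuming the
   default --param min-inline-recursive-probability of 10): a recursive edge
   executed 50 times inside a function entered 1000 times yields
   50 * 100 / 1000 == 5 < 10, so that particular recursive call is skipped
   even though the function as a whole is hot.  */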
781
782 /* Set inline_failed for all callers of given function to REASON. */
783
784 static void
785 cgraph_set_inline_failed (struct cgraph_node *node, const char *reason)
786 {
787 struct cgraph_edge *e;
788
789 if (dump_file)
790 fprintf (dump_file, "Inlining failed: %s\n", reason);
791 for (e = node->callers; e; e = e->next_caller)
792 if (e->inline_failed)
793 e->inline_failed = reason;
794 }
795
796 /* Given the whole-compilation-unit size estimate INSNS, compute how large
797 we can allow the unit to grow. */
798 static int
799 compute_max_insns (int insns)
800 {
801 int max_insns = insns;
802 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
803 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
804
805 return ((HOST_WIDEST_INT) max_insns
806 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
807 }
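
/* For instance, with the default --param inline-unit-growth of 30 (an
   assumption; see params.def for the actual value), a unit currently
   estimated at 10000 insns may grow to 10000 * (100 + 30) / 100 == 13000
   insns, while very small units are first padded up to
   --param large-unit-insns before the percentage is applied.  */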
808
809 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
810 static void
811 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
812 {
813 while (VEC_length (cgraph_edge_p, new_edges) > 0)
814 {
815 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
816
817 gcc_assert (!edge->aux);
818 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
819 }
820 }
821
822
823 /* We use a greedy algorithm for inlining small functions:
824 all inline candidates are put into a prioritized heap based on the
825 estimated growth of the overall number of instructions, and the estimates
826 are updated after each inlining is performed. */
829
830 static void
831 cgraph_decide_inlining_of_small_functions (void)
832 {
833 struct cgraph_node *node;
834 struct cgraph_edge *edge;
835 const char *failed_reason;
836 fibheap_t heap = fibheap_new ();
837 bitmap updated_nodes = BITMAP_ALLOC (NULL);
838 int min_insns, max_insns;
839 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
840
841 if (flag_indirect_inlining)
842 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
843
844 if (dump_file)
845 fprintf (dump_file, "\nDeciding on smaller functions:\n");
846
847 /* Put all inline candidates into the heap. */
848
849 for (node = cgraph_nodes; node; node = node->next)
850 {
851 if (!node->local.inlinable || !node->callers
852 || node->local.disregard_inline_limits)
853 continue;
854 if (dump_file)
855 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
856
857 node->global.estimated_growth = INT_MIN;
858 if (!cgraph_default_inline_p (node, &failed_reason))
859 {
860 cgraph_set_inline_failed (node, failed_reason);
861 continue;
862 }
863
864 for (edge = node->callers; edge; edge = edge->next_caller)
865 if (edge->inline_failed)
866 {
867 gcc_assert (!edge->aux);
868 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
869 }
870 }
871
872 max_insns = compute_max_insns (overall_insns);
873 min_insns = overall_insns;
874
875 while (overall_insns <= max_insns
876 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
877 {
878 int old_insns = overall_insns;
879 struct cgraph_node *where;
880 int growth =
881 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
882 const char *not_good = NULL;
883
884 growth -= edge->caller->global.insns;
885
886 if (dump_file)
887 {
888 fprintf (dump_file,
889 "\nConsidering %s with %i insns\n",
890 cgraph_node_name (edge->callee),
891 edge->callee->global.insns);
892 fprintf (dump_file,
893 " to be inlined into %s\n"
894 " Estimated growth after inlined into all callees is %+i insns.\n"
895 " Estimated badness is %i, frequency %.2f.\n",
896 cgraph_node_name (edge->caller),
897 cgraph_estimate_growth (edge->callee),
898 cgraph_edge_badness (edge),
899 edge->frequency / (double)CGRAPH_FREQ_BASE);
900 if (edge->count)
901 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
902 }
903 gcc_assert (edge->aux);
904 edge->aux = NULL;
905 if (!edge->inline_failed)
906 continue;
907
908 /* When we don't have profile info ready, we don't weight in any way the
909 position of the call in the procedure itself. This means that if a call
910 of function A from function B seems profitable to inline, the recursive
911 call of function A in the inline copy of A in B will look profitable too,
912 and we end up inlining until we reach the maximal function growth. This
913 is not a good idea, so prohibit the recursive inlining.
914 
915 ??? When the frequencies are taken into account we might not need this
916 restriction.
917 
918 We need to be careful here; in some testcases, e.g. directives.c in
919 libcpp, we can estimate a self-recursive function to have negative growth
920 from inlining it completely. */
922 if (!edge->count)
923 {
924 where = edge->caller;
925 while (where->global.inlined_to)
926 {
927 if (where->decl == edge->callee->decl)
928 break;
929 where = where->callers->caller;
930 }
931 if (where->global.inlined_to)
932 {
933 edge->inline_failed
934 = (edge->callee->local.disregard_inline_limits ? N_("recursive inlining") : "");
935 if (dump_file)
936 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
937 continue;
938 }
939 }
940
941 if (!cgraph_maybe_hot_edge_p (edge))
942 not_good = N_("call is unlikely and code size would grow");
943 if (!flag_inline_functions
944 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
945 not_good = N_("function not declared inline and code size would grow");
946 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
947 not_good = N_("optimizing for size and code size would grow");
948 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
949 {
950 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
951 &edge->inline_failed))
952 {
953 edge->inline_failed = not_good;
954 if (dump_file)
955 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
956 }
957 continue;
958 }
959 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
960 {
961 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
962 &edge->inline_failed))
963 {
964 if (dump_file)
965 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
966 }
967 continue;
968 }
969 if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
970 {
971 gimple_call_set_cannot_inline (edge->call_stmt, true);
972 edge->inline_failed = N_("target specific option mismatch");
973 if (dump_file)
974 fprintf (dump_file, " inline_failed:%s.\n", edge->inline_failed);
975 continue;
976 }
977 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
978 &edge->inline_failed))
979 {
980 where = edge->caller;
981 if (where->global.inlined_to)
982 where = where->global.inlined_to;
983 if (!cgraph_decide_recursive_inlining (where, new_indirect_edges))
984 continue;
985 if (flag_indirect_inlining)
986 add_new_edges_to_heap (heap, new_indirect_edges);
987 update_callee_keys (heap, where, updated_nodes);
988 }
989 else
990 {
991 struct cgraph_node *callee;
992 if (gimple_call_cannot_inline_p (edge->call_stmt)
993 || !cgraph_check_inline_limits (edge->caller, edge->callee,
994 &edge->inline_failed, true))
995 {
996 if (dump_file)
997 fprintf (dump_file, " Not inlining into %s:%s.\n",
998 cgraph_node_name (edge->caller), edge->inline_failed);
999 continue;
1000 }
1001 callee = edge->callee;
1002 cgraph_mark_inline_edge (edge, true);
1003 if (flag_indirect_inlining)
1004 {
1005 ipa_propagate_indirect_call_infos (edge, new_indirect_edges);
1006 add_new_edges_to_heap (heap, new_indirect_edges);
1007 }
1008 update_callee_keys (heap, callee, updated_nodes);
1009 }
1010 where = edge->caller;
1011 if (where->global.inlined_to)
1012 where = where->global.inlined_to;
1013
1014 /* Our profitability metric can depend on local properties
1015 such as the number of inlinable calls and the size of the function body.
1016 After inlining, these properties might change for the function we
1017 inlined into (since its body size changed) and for the functions
1018 called by the function we inlined (since the number of their inlinable
1019 callers might change). */
1020 update_caller_keys (heap, where, updated_nodes);
1021 bitmap_clear (updated_nodes);
1022
1023 if (dump_file)
1024 {
1025 fprintf (dump_file,
1026 " Inlined into %s which now has %i insns,"
1027 "net change of %+i insns.\n",
1028 cgraph_node_name (edge->caller),
1029 edge->caller->global.insns,
1030 overall_insns - old_insns);
1031 }
1032 if (min_insns > overall_insns)
1033 {
1034 min_insns = overall_insns;
1035 max_insns = compute_max_insns (min_insns);
1036
1037 if (dump_file)
1038 fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
1039 }
1040 }
1041 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1042 {
1043 gcc_assert (edge->aux);
1044 edge->aux = NULL;
1045 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1046 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1047 &edge->inline_failed))
1048 edge->inline_failed = N_("--param inline-unit-growth limit reached");
1049 }
1050
1051 if (new_indirect_edges)
1052 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1053 fibheap_delete (heap);
1054 BITMAP_FREE (updated_nodes);
1055 }
1056
1057 /* Decide on the inlining. We do so in topological order to avoid the
1058 expense of updating data structures. */
1059
1060 static unsigned int
1061 cgraph_decide_inlining (void)
1062 {
1063 struct cgraph_node *node;
1064 int nnodes;
1065 struct cgraph_node **order =
1066 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1067 int old_insns = 0;
1068 int i;
1069 int initial_insns = 0;
1070
1071 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1072
1073 max_count = 0;
1074 for (node = cgraph_nodes; node; node = node->next)
1075 if (node->analyzed && (node->needed || node->reachable))
1076 {
1077 struct cgraph_edge *e;
1078
1079 initial_insns += inline_summary (node)->self_insns;
1080 gcc_assert (inline_summary (node)->self_insns == node->global.insns);
1081 for (e = node->callees; e; e = e->next_callee)
1082 if (max_count < e->count)
1083 max_count = e->count;
1084 }
1085 overall_insns = initial_insns;
1086 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1087
1088 nnodes = cgraph_postorder (order);
1089
1090 if (dump_file)
1091 fprintf (dump_file,
1092 "\nDeciding on inlining. Starting with %i insns.\n",
1093 initial_insns);
1094
1095 for (node = cgraph_nodes; node; node = node->next)
1096 node->aux = 0;
1097
1098 if (dump_file)
1099 fprintf (dump_file, "\nInlining always_inline functions:\n");
1100
1101 /* In the first pass mark all always_inline edges. Do this with priority
1102 so that none of our later choices will make this impossible. */
1103 for (i = nnodes - 1; i >= 0; i--)
1104 {
1105 struct cgraph_edge *e, *next;
1106
1107 node = order[i];
1108
1109 /* Handle nodes to be flattened, but don't update overall unit size. */
1110 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1111 {
1112 if (dump_file)
1113 fprintf (dump_file,
1114 "Flattening %s\n", cgraph_node_name (node));
1115 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1116 }
1117
1118 if (!node->local.disregard_inline_limits)
1119 continue;
1120 if (dump_file)
1121 fprintf (dump_file,
1122 "\nConsidering %s %i insns (always inline)\n",
1123 cgraph_node_name (node), node->global.insns);
1124 old_insns = overall_insns;
1125 for (e = node->callers; e; e = next)
1126 {
1127 next = e->next_caller;
1128 if (!e->inline_failed || gimple_call_cannot_inline_p (e->call_stmt))
1129 continue;
1130 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1131 &e->inline_failed))
1132 continue;
1133 if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
1134 {
1135 gimple_call_set_cannot_inline (e->call_stmt, true);
1136 continue;
1137 }
1138 cgraph_mark_inline_edge (e, true);
1139 if (flag_indirect_inlining)
1140 ipa_propagate_indirect_call_infos (e, NULL);
1141 if (dump_file)
1142 fprintf (dump_file,
1143 " Inlined into %s which now has %i insns.\n",
1144 cgraph_node_name (e->caller),
1145 e->caller->global.insns);
1146 }
1147 /* Inlining a self-recursive function might introduce new calls to
1148 itself that we didn't see in the loop above. Fill in the proper reason
1149 why inlining failed. */
1150 for (e = node->callers; e; e = e->next_caller)
1151 if (e->inline_failed)
1152 e->inline_failed = N_("recursive inlining");
1153 if (dump_file)
1154 fprintf (dump_file,
1155 " Inlined for a net change of %+i insns.\n",
1156 overall_insns - old_insns);
1157 }
1158
1159 cgraph_decide_inlining_of_small_functions ();
1160
1161 /* After this point, any edge discovery performed by indirect inlining is no
1162 good so let's give up. */
1163 if (flag_indirect_inlining)
1164 free_all_ipa_structures_after_iinln ();
1165
1166 if (flag_inline_functions_called_once)
1167 {
1168 if (dump_file)
1169 fprintf (dump_file, "\nDeciding on functions called once:\n");
1170
1171 /* And finally decide what functions are called once. */
1172 for (i = nnodes - 1; i >= 0; i--)
1173 {
1174 node = order[i];
1175
1176 if (node->callers
1177 && !node->callers->next_caller
1178 && !node->needed
1179 && node->local.inlinable
1180 && node->callers->inline_failed
1181 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1182 && !DECL_EXTERNAL (node->decl)
1183 && !DECL_COMDAT (node->decl))
1184 {
1185 if (dump_file)
1186 {
1187 fprintf (dump_file,
1188 "\nConsidering %s %i insns.\n",
1189 cgraph_node_name (node), node->global.insns);
1190 fprintf (dump_file,
1191 " Called once from %s %i insns.\n",
1192 cgraph_node_name (node->callers->caller),
1193 node->callers->caller->global.insns);
1194 }
1195
1196 old_insns = overall_insns;
1197
1198 if (cgraph_check_inline_limits (node->callers->caller, node,
1199 NULL, false))
1200 {
1201 cgraph_mark_inline (node->callers);
1202 if (dump_file)
1203 fprintf (dump_file,
1204 " Inlined into %s which now has %i insns"
1205 " for a net change of %+i insns.\n",
1206 cgraph_node_name (node->callers->caller),
1207 node->callers->caller->global.insns,
1208 overall_insns - old_insns);
1209 }
1210 else
1211 {
1212 if (dump_file)
1213 fprintf (dump_file,
1214 " Inline limit reached, not inlined.\n");
1215 }
1216 }
1217 }
1218 }
1219
1220 if (dump_file)
1221 fprintf (dump_file,
1222 "\nInlined %i calls, eliminated %i functions, "
1223 "%i insns turned to %i insns.\n\n",
1224 ncalls_inlined, nfunctions_inlined, initial_insns,
1225 overall_insns);
1226 free (order);
1227 return 0;
1228 }
1229
1230 /* Try to inline edge E from the incremental inliner. MODE specifies the
1231 mode of the inliner.
1232 
1233 We detect cycles by storing the mode of the inliner into the cgraph_node
1234 the last time we visited it in the recursion. In general, when the mode
1235 is set, we have recursive inlining, but as a special case we want to try
1236 harder to inline ALWAYS_INLINE functions: consider the callgraph
1237 a->b->c->b, with a marked flatten and b marked always_inline. Flattening
1238 'a' will collapse a->b->c before hitting the cycle. To accommodate
1239 always_inline, we however need to inline a->b->c->b.
1240 
1241 So after hitting the cycle for the first time, we switch into ALWAYS_INLINE
1242 mode and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE mode. */
1243 static bool
1244 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1245 {
1246 struct cgraph_node *callee = e->callee;
1247 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1248 bool always_inline = e->callee->local.disregard_inline_limits;
1249
1250 /* Have we hit a cycle? */
1251 if (callee_mode)
1252 {
1253 /* This is the first time we have hit the cycle: we are not in
1254 ALWAYS_INLINE-only mode yet and the function in question is always_inline. */
1255 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1256 {
1257 if (dump_file)
1258 {
1259 indent_to (dump_file, depth);
1260 fprintf (dump_file,
1261 "Hit cycle in %s, switching to always inline only.\n",
1262 cgraph_node_name (callee));
1263 }
1264 mode = INLINE_ALWAYS_INLINE;
1265 }
1266 /* Otherwise it is time to give up. */
1267 else
1268 {
1269 if (dump_file)
1270 {
1271 indent_to (dump_file, depth);
1272 fprintf (dump_file,
1273 "Not inlining %s into %s to avoid cycle.\n",
1274 cgraph_node_name (callee),
1275 cgraph_node_name (e->caller));
1276 }
1277 e->inline_failed = (e->callee->local.disregard_inline_limits
1278 ? N_("recursive inlining") : "");
1279 return false;
1280 }
1281 }
1282
1283 callee->aux = (void *)(size_t) mode;
1284 if (dump_file)
1285 {
1286 indent_to (dump_file, depth);
1287 fprintf (dump_file, " Inlining %s into %s.\n",
1288 cgraph_node_name (e->callee),
1289 cgraph_node_name (e->caller));
1290 }
1291 if (e->inline_failed)
1292 cgraph_mark_inline (e);
1293
1294 /* In order to fully inline always_inline functions, we need to
1295 recurse here, since the inlined functions might not be processed by
1296 incremental inlining at all yet.
1297
1298 Also flattening needs to be done recursively. */
1299
1300 if (mode == INLINE_ALL || always_inline)
1301 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1302 callee->aux = (void *)(size_t) callee_mode;
1303 return true;
1304 }
1305
1306 /* Decide on the inlining. We do so in topological order to avoid the
1307 expense of updating data structures.
1308 DEPTH is the depth of recursion, used only for debug output. */
1309
1310 static bool
1311 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1312 enum inlining_mode mode,
1313 int depth)
1314 {
1315 struct cgraph_edge *e;
1316 bool inlined = false;
1317 const char *failed_reason;
1318 enum inlining_mode old_mode;
1319
1320 #ifdef ENABLE_CHECKING
1321 verify_cgraph_node (node);
1322 #endif
1323
1324 old_mode = (enum inlining_mode) (size_t)node->aux;
1325
1326 if (mode != INLINE_ALWAYS_INLINE
1327 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1328 {
1329 if (dump_file)
1330 {
1331 indent_to (dump_file, depth);
1332 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1333 }
1334 mode = INLINE_ALL;
1335 }
1336
1337 node->aux = (void *)(size_t) mode;
1338
1339 /* First of all look for always inline functions. */
1340 for (e = node->callees; e; e = e->next_callee)
1341 {
1342 if (!e->callee->local.disregard_inline_limits
1343 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1344 continue;
1345 if (gimple_call_cannot_inline_p (e->call_stmt))
1346 continue;
1347 /* When the edge is already inlined, we just need to recurse into
1348 it in order to fully flatten the leaves. */
1349 if (!e->inline_failed && mode == INLINE_ALL)
1350 {
1351 inlined |= try_inline (e, mode, depth);
1352 continue;
1353 }
1354 if (dump_file)
1355 {
1356 indent_to (dump_file, depth);
1357 fprintf (dump_file,
1358 "Considering to always inline inline candidate %s.\n",
1359 cgraph_node_name (e->callee));
1360 }
1361 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1362 {
1363 if (dump_file)
1364 {
1365 indent_to (dump_file, depth);
1366 fprintf (dump_file, "Not inlining: recursive call.\n");
1367 }
1368 continue;
1369 }
1370 if (!tree_can_inline_p (node->decl, e->callee->decl))
1371 {
1372 gimple_call_set_cannot_inline (e->call_stmt, true);
1373 if (dump_file)
1374 {
1375 indent_to (dump_file, depth);
1376 fprintf (dump_file,
1377 "Not inlining: Target specific option mismatch.\n");
1378 }
1379 continue;
1380 }
1381 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1382 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1383 {
1384 if (dump_file)
1385 {
1386 indent_to (dump_file, depth);
1387 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1388 }
1389 continue;
1390 }
1391 if (!e->callee->analyzed && !e->callee->inline_decl)
1392 {
1393 if (dump_file)
1394 {
1395 indent_to (dump_file, depth);
1396 fprintf (dump_file,
1397 "Not inlining: Function body no longer available.\n");
1398 }
1399 continue;
1400 }
1401 inlined |= try_inline (e, mode, depth);
1402 }
1403
1404 /* Now do the automatic inlining. */
1405 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1406 for (e = node->callees; e; e = e->next_callee)
1407 {
1408 if (!e->callee->local.inlinable
1409 || !e->inline_failed
1410 || e->callee->local.disregard_inline_limits)
1411 continue;
1412 if (dump_file)
1413 fprintf (dump_file, "Considering inline candidate %s.\n",
1414 cgraph_node_name (e->callee));
1415 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1416 {
1417 if (dump_file)
1418 {
1419 indent_to (dump_file, depth);
1420 fprintf (dump_file, "Not inlining: recursive call.\n");
1421 }
1422 continue;
1423 }
1424 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1425 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1426 {
1427 if (dump_file)
1428 {
1429 indent_to (dump_file, depth);
1430 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1431 }
1432 continue;
1433 }
1434 /* When the function body would grow and inlining the function won't
1435 eliminate the need for an offline copy of the function, don't inline. */
1437 if ((mode == INLINE_SIZE
1438 || (!flag_inline_functions
1439 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1440 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1441 > e->caller->global.insns)
1442 && cgraph_estimate_growth (e->callee) > 0)
1443 {
1444 if (dump_file)
1445 {
1446 indent_to (dump_file, depth);
1447 fprintf (dump_file,
1448 "Not inlining: code size would grow by %i insns.\n",
1449 cgraph_estimate_size_after_inlining (1, e->caller,
1450 e->callee)
1451 - e->caller->global.insns);
1452 }
1453 continue;
1454 }
1455 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1456 false)
1457 || gimple_call_cannot_inline_p (e->call_stmt))
1458 {
1459 if (dump_file)
1460 {
1461 indent_to (dump_file, depth);
1462 fprintf (dump_file, "Not inlining: %s.\n", e->inline_failed);
1463 }
1464 continue;
1465 }
1466 if (!e->callee->analyzed && !e->callee->inline_decl)
1467 {
1468 if (dump_file)
1469 {
1470 indent_to (dump_file, depth);
1471 fprintf (dump_file,
1472 "Not inlining: Function body no longer available.\n");
1473 }
1474 continue;
1475 }
1476 if (!tree_can_inline_p (node->decl, e->callee->decl))
1477 {
1478 gimple_call_set_cannot_inline (e->call_stmt, true);
1479 if (dump_file)
1480 {
1481 indent_to (dump_file, depth);
1482 fprintf (dump_file,
1483 "Not inlining: Target specific option mismatch.\n");
1484 }
1485 continue;
1486 }
1487 if (cgraph_default_inline_p (e->callee, &failed_reason))
1488 inlined |= try_inline (e, mode, depth);
1489 }
1490 node->aux = (void *)(size_t) old_mode;
1491 return inlined;
1492 }
1493
1494 /* Because inlining might remove no-longer-reachable nodes, we need to
1495 keep the array visible to the garbage collector to avoid reading
1496 collected-out nodes. */
1497 static int nnodes;
1498 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1499
1500 /* Do inlining of small functions. Doing so early helps profiling and other
1501 passes to be somewhat more effective and avoids some code duplication in
1502 later real inlining pass for testcases with very many function calls. */
1503 static unsigned int
1504 cgraph_early_inlining (void)
1505 {
1506 struct cgraph_node *node = cgraph_node (current_function_decl);
1507 unsigned int todo = 0;
1508
1509 if (sorrycount || errorcount)
1510 return 0;
1511 if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
1512 {
1513 timevar_push (TV_INTEGRATION);
1514 todo = optimize_inline_calls (current_function_decl);
1515 timevar_pop (TV_INTEGRATION);
1516 }
1517 return todo;
1518 }
1519
1520 /* When inlining shall be performed. */
1521 static bool
1522 cgraph_gate_early_inlining (void)
1523 {
1524 return flag_early_inlining;
1525 }
1526
1527 struct gimple_opt_pass pass_early_inline =
1528 {
1529 {
1530 GIMPLE_PASS,
1531 "einline", /* name */
1532 cgraph_gate_early_inlining, /* gate */
1533 cgraph_early_inlining, /* execute */
1534 NULL, /* sub */
1535 NULL, /* next */
1536 0, /* static_pass_number */
1537 TV_INLINE_HEURISTICS, /* tv_id */
1538 0, /* properties_required */
1539 PROP_cfg, /* properties_provided */
1540 0, /* properties_destroyed */
1541 0, /* todo_flags_start */
1542 TODO_dump_func /* todo_flags_finish */
1543 }
1544 };
1545
1546 /* When inlining shall be performed. */
1547 static bool
1548 cgraph_gate_ipa_early_inlining (void)
1549 {
1550 return (flag_early_inlining
1551 && (flag_branch_probabilities || flag_test_coverage
1552 || profile_arc_flag));
1553 }
1554
1555 /* IPA pass wrapper for the early inlining pass. We need to run early
1556 inlining before tree profiling, so we have a stand-alone IPA pass for doing so. */
1557 struct simple_ipa_opt_pass pass_ipa_early_inline =
1558 {
1559 {
1560 SIMPLE_IPA_PASS,
1561 "einline_ipa", /* name */
1562 cgraph_gate_ipa_early_inlining, /* gate */
1563 NULL, /* execute */
1564 NULL, /* sub */
1565 NULL, /* next */
1566 0, /* static_pass_number */
1567 TV_INLINE_HEURISTICS, /* tv_id */
1568 0, /* properties_required */
1569 PROP_cfg, /* properties_provided */
1570 0, /* properties_destroyed */
1571 0, /* todo_flags_start */
1572 TODO_dump_cgraph /* todo_flags_finish */
1573 }
1574 };
1575
1576 /* Compute parameters of functions used by inliner. */
1577 unsigned int
1578 compute_inline_parameters (struct cgraph_node *node)
1579 {
1580 gcc_assert (!node->global.inlined_to);
1581 inline_summary (node)->estimated_self_stack_size
1582 = estimated_stack_frame_size ();
1583 node->global.estimated_stack_size
1584 = inline_summary (node)->estimated_self_stack_size;
1585 node->global.stack_frame_offset = 0;
1586 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1587 inline_summary (node)->self_insns
1588 = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
1589 if (node->local.inlinable && !node->local.disregard_inline_limits)
1590 node->local.disregard_inline_limits
1591 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1592 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
1593 node->global.insns = inline_summary (node)->self_insns;
1594 return 0;
1595 }
1596
1597
1598 /* Compute parameters of functions used by inliner using
1599 current_function_decl. */
1600 static unsigned int
1601 compute_inline_parameters_for_current (void)
1602 {
1603 compute_inline_parameters (cgraph_node (current_function_decl));
1604 return 0;
1605 }
1606
1607 struct gimple_opt_pass pass_inline_parameters =
1608 {
1609 {
1610 GIMPLE_PASS,
1611 NULL, /* name */
1612 NULL, /* gate */
1613 compute_inline_parameters_for_current,/* execute */
1614 NULL, /* sub */
1615 NULL, /* next */
1616 0, /* static_pass_number */
1617 TV_INLINE_HEURISTICS, /* tv_id */
1618 0, /* properties_required */
1619 PROP_cfg, /* properties_provided */
1620 0, /* properties_destroyed */
1621 0, /* todo_flags_start */
1622 0 /* todo_flags_finish */
1623 }
1624 };
1625
1626 /* This function performs the intraprocedural analysis in NODE that is
1627 required to inline indirect calls. */
1628 static void
1629 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1630 {
1631 struct cgraph_edge *cs;
1632
1633 if (!flag_ipa_cp)
1634 {
1635 ipa_count_formal_params (node);
1636 ipa_create_param_decls_array (node);
1637 ipa_detect_param_modifications (node);
1638 }
1639 ipa_analyze_params_uses (node);
1640
1641 if (!flag_ipa_cp)
1642 for (cs = node->callees; cs; cs = cs->next_callee)
1643 {
1644 ipa_count_arguments (cs);
1645 ipa_compute_jump_functions (cs);
1646 }
1647
1648 if (dump_file)
1649 {
1650 ipa_print_node_params (dump_file, node);
1651 ipa_print_node_jump_functions (dump_file, node);
1652 }
1653 }
1654
1655 /* Note function body size. */
1656 static void
1657 analyze_function (struct cgraph_node *node)
1658 {
1659 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1660 current_function_decl = node->decl;
1661
1662 compute_inline_parameters (node);
1663 if (flag_indirect_inlining)
1664 inline_indirect_intraprocedural_analysis (node);
1665
1666 current_function_decl = NULL;
1667 pop_cfun ();
1668 }
1669
1670 /* Called when a new function is inserted into the callgraph late. */
1671 static void
1672 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1673 {
1674 analyze_function (node);
1675 }
1676
1677 /* Compute the inline summaries (function body sizes etc.) for all analyzed functions. */
1678 static void
1679 inline_generate_summary (void)
1680 {
1681 struct cgraph_node *node;
1682
1683 function_insertion_hook_holder =
1684 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1685
1686 if (flag_indirect_inlining)
1687 {
1688 ipa_register_cgraph_hooks ();
1689 ipa_check_create_node_params ();
1690 ipa_check_create_edge_args ();
1691 }
1692
1693 for (node = cgraph_nodes; node; node = node->next)
1694 if (node->analyzed)
1695 analyze_function (node);
1696
1697 return;
1698 }
1699
1700 /* Apply inline plan to function. */
1701 static unsigned int
1702 inline_transform (struct cgraph_node *node)
1703 {
1704 unsigned int todo = 0;
1705 struct cgraph_edge *e;
1706
1707 /* We might need the body of this function so that we can expand
1708 it inline somewhere else. */
1709 if (cgraph_preserve_function_body_p (node->decl))
1710 save_inline_function_body (node);
1711
1712 for (e = node->callees; e; e = e->next_callee)
1713 if (!e->inline_failed || warn_inline)
1714 break;
1715
1716 if (e)
1717 {
1718 timevar_push (TV_INTEGRATION);
1719 todo = optimize_inline_calls (current_function_decl);
1720 timevar_pop (TV_INTEGRATION);
1721 }
1722 return todo | execute_fixup_cfg ();
1723 }
1724
1725 struct ipa_opt_pass pass_ipa_inline =
1726 {
1727 {
1728 IPA_PASS,
1729 "inline", /* name */
1730 NULL, /* gate */
1731 cgraph_decide_inlining, /* execute */
1732 NULL, /* sub */
1733 NULL, /* next */
1734 0, /* static_pass_number */
1735 TV_INLINE_HEURISTICS, /* tv_id */
1736 0, /* properties_required */
1737 PROP_cfg, /* properties_provided */
1738 0, /* properties_destroyed */
1739 TODO_remove_functions, /* todo_flags_start */
1740 TODO_dump_cgraph | TODO_dump_func
1741 | TODO_remove_functions /* todo_flags_finish */
1742 },
1743 inline_generate_summary, /* generate_summary */
1744 NULL, /* write_summary */
1745 NULL, /* read_summary */
1746 NULL, /* function_read_summary */
1747 0, /* TODOs */
1748 inline_transform, /* function_transform */
1749 NULL, /* variable_transform */
1750 };
1751
1752
1753 #include "gt-ipa-inline.h"