1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23    We separate inlining decisions from the inliner itself and store them
24    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
25    documentation for the particular representation of inline plans in the
26    callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32      This function marks a given call inline and performs the necessary
33      modifications of the cgraph (producing clones and updating overall
34      statistics).
35
36 inlining heuristics limits
37
38      These functions check that a particular inlining is allowed by the
39      limits specified by the user (allowed function growth, overall unit
40      growth and so on).
41
42 inlining heuristics
43
44      This is the implementation of the IPA pass aiming to get as much
45      benefit from inlining as possible while obeying the limits checked above.
46
47    The implementation of the particular heuristics is separated from
48    the rest of the code to make it easier to replace it with a more
49    complicated implementation in the future.  The rest of the inlining code
50    acts as a library aimed at modifying the callgraph and verifying that
51    the limits on code size growth are respected.
52
53    To mark a given call inline, use the cgraph_mark_inline function; the
54    verification is performed by cgraph_default_inline_p and
55    cgraph_check_inline_limits.
56
57    The heuristics implement a simple knapsack-style algorithm ordering
58    all functions by their "profitability" (estimated by code size growth)
59    and inlining them in priority order; a sketch follows this comment.
60
61    cgraph_decide_inlining implements the heuristics taking the whole
62    callgraph into account, while cgraph_decide_inlining_incrementally
63    considers only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69    This pass computes the local properties of functions that are used by the
70    inliner: estimated function body size, whether the function is inlinable
71    at all, and stack frame consumption.
72
73    Before executing any of the inliner passes, this local pass has to be
74    applied to each function in the callgraph (ie run as a subpass of some
75    earlier IPA pass).  The results are made out of date by any optimization
76    applied to the function body.
77
78 pass_early_inlining
79
80    A simple local inlining pass that inlines callees into the current
81    function.  This pass makes no global whole-compilation-unit analysis, so
82    when it is allowed to do inlining that expands code size it might result
83    in unbounded growth of the whole unit.
84
85    The pass is run during conversion into SSA form.  Only functions already
86    converted into SSA form are inlined, so the conversion must happen in
87    topological order on the callgraph (which is maintained by the pass
88    manager).  After inlining, functions are early optimized, so the early
89    inliner sees the unoptimized function itself, but all considered callees
90    are already optimized, allowing it to remove the C++ abstraction penalty
91    effectively and cheaply.
92
93 pass_ipa_early_inlining
94
95    With profiling, early inlining is also necessary to reduce
96    instrumentation costs on programs with a high abstraction penalty (doing
97    many redundant calls).  This can't happen in parallel with early
98    optimization and profile instrumentation, because we would end up
99    re-instrumenting already instrumented function bodies we brought in via
100    inlining.
101
102    To avoid this, this pass is executed as an IPA pass before profiling.
103    It is a simple wrapper around pass_early_inlining that runs inlining first.
104
105 pass_ipa_inline
106
107    This is the main pass, implementing a simple greedy algorithm that
108    inlines small functions (which may result in overall growth of the
109    compilation unit) and functions called once.  The pass computes just the
110    so-called inline plan (a representation of the inlining to be done in the
111    callgraph) and, unlike early inlining, does not perform the inlining itself.
112
113  pass_apply_inline
114
115    This pass performs the actual inlining on a given function according to
116    pass_ipa_inline.  Possibly the function body before inlining is saved
117    when it is needed for further inlining later.
118 */
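/* For orientation, a condensed, illustrative sketch (not part of the pass
   machinery itself) of the greedy small-function loop implemented below by
   cgraph_decide_inlining_of_small_functions: candidate edges are keyed by
   badness in a fibonacci heap and popped while the unit stays within the
   growth limit.  The snippet deliberately omits the bookkeeping (key updates,
   recursive inlining, indirect edges) that the real implementation performs.

     heap = fibheap_new ();
     /* ... for every inlinable node ... */
     for (edge = node->callers; edge; edge = edge->next_caller)
       if (edge->inline_failed)
	 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
     while (overall_size <= max_size
	    && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
       cgraph_mark_inline_edge (edge, true, NULL);
     fibheap_delete (heap);
*/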
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates in:
146
147    In ALWAYS_INLINE mode, only functions marked
148    always_inline are inlined.  This mode is used after detecting a cycle
149    during flattening.
150
151    In SIZE mode, only functions that reduce function body size after inlining
152    are inlined; this is used during early inlining.
153
154    In ALL mode, everything is inlined.  This is used during flattening.  */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
198
199 /* Estimate self size of the function after inlining WHAT into TO.  */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
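/* A hypothetical worked example of the two estimates above (all numbers are
   made up): if WHAT has global.time 100 and time_inlining_benefit 20, and the
   call site has frequency CGRAPH_FREQ_BASE / 2 (executed half as often as the
   caller entry), then inlining adds roughly (100 - 20) / 2 = 40 to TO's time.
   Likewise, if WHAT has global.size 30 and size_inlining_benefit 10, inlining
   it once adds 30 - 10 = 20 to TO's size.  */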
209
210 /* E is expected to be an edge being inlined. Clone destination node of
211 the edge and redirect it to the new clone.
212 DUPLICATE is used for bookkeeping on whether we are actually creating new
213    clones or re-using the node that originally represented the out-of-line call.
214 */
215 void
216 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
217 bool update_original)
218 {
219 HOST_WIDE_INT peak;
220
221 if (duplicate)
222 {
223 /* We may eliminate the need for out-of-line copy to be output.
224 In that case just go ahead and re-use it. */
225 if (!e->callee->callers->next_caller
226 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
227 && !cgraph_new_nodes)
228 {
229 gcc_assert (!e->callee->global.inlined_to);
230 if (e->callee->analyzed)
231 {
232 overall_size -= e->callee->global.size;
233 nfunctions_inlined++;
234 }
235 duplicate = false;
236 e->callee->local.externally_visible = false;
237 }
238 else
239 {
240 struct cgraph_node *n;
241 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
242 update_original, NULL);
243 cgraph_redirect_edge_callee (e, n);
244 }
245 }
246
247 if (e->caller->global.inlined_to)
248 e->callee->global.inlined_to = e->caller->global.inlined_to;
249 else
250 e->callee->global.inlined_to = e->caller;
251 e->callee->global.stack_frame_offset
252 = e->caller->global.stack_frame_offset
253 + inline_summary (e->caller)->estimated_self_stack_size;
254 peak = e->callee->global.stack_frame_offset
255 + inline_summary (e->callee)->estimated_self_stack_size;
256 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
257 e->callee->global.inlined_to->global.estimated_stack_size = peak;
258
259 /* Recursively clone all bodies. */
260 for (e = e->callee->callees; e; e = e->next_callee)
261 if (!e->inline_failed)
262 cgraph_clone_inlined_nodes (e, duplicate, update_original);
263 }
264
265 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
266    specifies whether the original function's profile should be updated.  If any new
267 indirect edges are discovered in the process, add them to NEW_EDGES, unless
268 it is NULL. Return true iff any new callgraph edges were discovered as a
269 result of inlining. */
270
271 static bool
272 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
273 VEC (cgraph_edge_p, heap) **new_edges)
274 {
275 int old_size = 0, new_size = 0;
276 struct cgraph_node *to = NULL, *what;
277 struct cgraph_edge *curr = e;
278 int freq;
279 bool duplicate = false;
280 int orig_size = e->callee->global.size;
281
282 gcc_assert (e->inline_failed);
283 e->inline_failed = CIF_OK;
284
285 if (!e->callee->global.inlined)
286 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
287 e->callee->global.inlined = true;
288
289 if (e->callee->callers->next_caller
290 || !cgraph_can_remove_if_no_direct_calls_p (e->callee))
291 duplicate = true;
292 cgraph_clone_inlined_nodes (e, true, update_original);
293
294 what = e->callee;
295
296 freq = e->frequency;
297 /* Now update size of caller and all functions caller is inlined into. */
298 for (;e && !e->inline_failed; e = e->caller->callers)
299 {
300 to = e->caller;
301 old_size = e->caller->global.size;
302 new_size = cgraph_estimate_size_after_inlining (1, to, what);
303 to->global.size = new_size;
304 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
305 }
306 gcc_assert (what->global.inlined_to == to);
307 if (new_size > old_size)
308 overall_size += new_size - old_size;
309 if (!duplicate)
310 overall_size -= orig_size;
311 ncalls_inlined++;
312
313 if (flag_indirect_inlining)
314 return ipa_propagate_indirect_call_infos (curr, new_edges);
315 else
316 return false;
317 }
318
319 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
320    Return the following unredirected edge in the list of callers
321    of EDGE->CALLEE.  */
322
323 static struct cgraph_edge *
324 cgraph_mark_inline (struct cgraph_edge *edge)
325 {
326 struct cgraph_node *to = edge->caller;
327 struct cgraph_node *what = edge->callee;
328 struct cgraph_edge *e, *next;
329
330 gcc_assert (!edge->call_stmt_cannot_inline_p);
331 /* Look for all calls, mark them inline and clone recursively
332 all inlined functions. */
333 for (e = what->callers; e; e = next)
334 {
335 next = e->next_caller;
336 if (e->caller == to && e->inline_failed)
337 {
338 cgraph_mark_inline_edge (e, true, NULL);
339 if (e == edge)
340 edge = next;
341 }
342 }
343
344 return edge;
345 }
346
347 /* Estimate the growth caused by inlining NODE into all of its callers.  */
348
349 static int
350 cgraph_estimate_growth (struct cgraph_node *node)
351 {
352 int growth = 0;
353 struct cgraph_edge *e;
354 bool self_recursive = false;
355
356 if (node->global.estimated_growth != INT_MIN)
357 return node->global.estimated_growth;
358
359 for (e = node->callers; e; e = e->next_caller)
360 {
361 if (e->caller == node)
362 self_recursive = true;
363 if (e->inline_failed)
364 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
365 - e->caller->global.size);
366 }
367
368    /* ??? Wrong for non-trivially self-recursive functions or cases where
369       we decide not to inline for different reasons, but it is not a big deal
370 as in that case we will keep the body around, but we will also avoid
371 some inlining. */
372 if (cgraph_only_called_directly_p (node)
373 && !DECL_EXTERNAL (node->decl) && !self_recursive)
374 growth -= node->global.size;
375
376 node->global.estimated_growth = growth;
377 return growth;
378 }
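/* A hypothetical illustration of the growth estimate above (numbers made up):
   if NODE has size 30 and three callers, each of which would grow by 20 when
   NODE is inlined into it, the summed growth is 60; if NODE is only called
   directly and the offline copy could then be removed, the estimate drops by
   its size to 60 - 30 = 30.  */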
379
380 /* Return false when inlining WHAT into TO is not a good idea
381    as it would cause too large a growth of function bodies.
382    When ONE_ONLY is true, assume that only one call site is going
383    to be inlined, otherwise figure out how many call sites in
384    TO call WHAT and verify that all can be inlined.
385    */
386
387 static bool
388 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
389 cgraph_inline_failed_t *reason, bool one_only)
390 {
391 int times = 0;
392 struct cgraph_edge *e;
393 int newsize;
394 int limit;
395 HOST_WIDE_INT stack_size_limit, inlined_stack;
396
397 if (one_only)
398 times = 1;
399 else
400 for (e = to->callees; e; e = e->next_callee)
401 if (e->callee == what)
402 times++;
403
404 if (to->global.inlined_to)
405 to = to->global.inlined_to;
406
407 /* When inlining large function body called once into small function,
408 take the inlined function as base for limiting the growth. */
409 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
410 limit = inline_summary (to)->self_size;
411 else
412 limit = inline_summary (what)->self_size;
413
414 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
415
416 /* Check the size after inlining against the function limits. But allow
417 the function to shrink if it went over the limits by forced inlining. */
418 newsize = cgraph_estimate_size_after_inlining (times, to, what);
419 if (newsize >= to->global.size
420 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
421 && newsize > limit)
422 {
423 if (reason)
424 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
425 return false;
426 }
427
428 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
429
430 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
431
432 inlined_stack = (to->global.stack_frame_offset
433 + inline_summary (to)->estimated_self_stack_size
434 + what->global.estimated_stack_size);
435 if (inlined_stack > stack_size_limit
436 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
437 {
438 if (reason)
439 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
440 return false;
441 }
442 return true;
443 }
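/* A hypothetical illustration of the growth limit above (numbers made up):
   with --param large-function-growth=100, inlining a callee of self_size 300
   into a caller of self_size 50 uses the larger body (300) as the base, so
   the combined body may reach 300 + 300 * 100 / 100 = 600 before
   CIF_LARGE_FUNCTION_GROWTH_LIMIT is reported, and the limit only triggers
   once the new size also exceeds --param large-function-insns.  */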
444
445 /* Return true when function N is small enough to be inlined. */
446
447 static bool
448 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
449 {
450 tree decl = n->decl;
451
452 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
453 {
454 if (reason)
455 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
456 return false;
457 }
458
459 if (!n->analyzed)
460 {
461 if (reason)
462 *reason = CIF_BODY_NOT_AVAILABLE;
463 return false;
464 }
465
466 if (DECL_DECLARED_INLINE_P (decl))
467 {
468 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
469 {
470 if (reason)
471 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
472 return false;
473 }
474 }
475 else
476 {
477 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
478 {
479 if (reason)
480 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
481 return false;
482 }
483 }
484
485 return true;
486 }
487
488 /* Return true when inlining WHAT would create recursive inlining.
489    We call recursive inlining all cases where the same function appears more
490    than once in a single recursion nest path in the inline graph.  */
491
492 static bool
493 cgraph_recursive_inlining_p (struct cgraph_node *to,
494 struct cgraph_node *what,
495 cgraph_inline_failed_t *reason)
496 {
497 bool recursive;
498 if (to->global.inlined_to)
499 recursive = what->decl == to->global.inlined_to->decl;
500 else
501 recursive = what->decl == to->decl;
502   /* Marking a recursive function inline has sane semantics and thus we should
503      not warn about it.  */
504 if (recursive && reason)
505 *reason = (what->local.disregard_inline_limits
506 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
507 return recursive;
508 }
509
510 /* A cost model driving the inlining heuristics in a way such that edges with
511    the smallest badness are inlined first.  After each inlining is performed,
512    the costs of all caller edges of the nodes affected are recomputed so the
513    metrics may accurately depend on values such as the number of inlinable
514    callers of the function or the function body size. */
515
516 static int
517 cgraph_edge_badness (struct cgraph_edge *edge)
518 {
519 gcov_type badness;
520 int growth =
521 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
522
523 growth -= edge->caller->global.size;
524
525 /* Always prefer inlining saving code size. */
526 if (growth <= 0)
527 badness = INT_MIN - growth;
528
529   /* When profiling is available, base priorities on -(#calls / growth).
530      So we optimize for the overall number of "executed" inlined calls.  */
531 else if (max_count)
532 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
533 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
534
535   /* When a function-local profile is available, base priorities on
536      growth / frequency, so we optimize for the overall frequency of inlined
537      calls.  This is not too accurate since while the call might be frequent
538      within the function, the function itself may be infrequent.
539
540      Another objective to optimize for is the number of different calls inlined.
541      We add the estimated growth after inlining all functions to bias the
542      priorities slightly in this direction (so functions of the same size
543      that are called fewer times get priority).  */
544 else if (flag_guess_branch_prob)
545 {
546 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
547 badness = growth * 10000;
548 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
549 / (edge->callee->global.time + 1) + 1, 100);
550
551
552 /* Decrease badness if call is nested. */
553 /* Compress the range so we don't overflow. */
554 if (div > 10000)
555 div = 10000 + ceil_log2 (div) - 8;
556 if (div < 1)
557 div = 1;
558 if (badness > 0)
559 badness /= div;
560 badness += cgraph_estimate_growth (edge->callee);
561 if (badness > INT_MAX)
562 badness = INT_MAX;
563 }
564   /* When a function-local profile is not available or it does not give
565      useful information (ie the frequency is zero), base the cost on
566      the loop nest and overall size growth, so we optimize for the overall
567      number of functions fully inlined in the program.  */
568 else
569 {
570 int nest = MIN (edge->loop_nest, 8);
571 badness = cgraph_estimate_growth (edge->callee) * 256;
572
573 /* Decrease badness if call is nested. */
574 if (badness > 0)
575 badness >>= nest;
576 else
577 {
578 badness <<= nest;
579 }
580 }
581 /* Make recursive inlining happen always after other inlining is done. */
582 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
583 return badness + 1;
584 else
585 return badness;
586 }
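/* A hypothetical illustration of the guessed-profile branch above (numbers
   made up): an edge with growth 30 starts from badness 30 * 10000 = 300000.
   With frequency equal to CGRAPH_FREQ_BASE the divisor starts at 101 before
   the time-benefit scaling, while a call site ten times colder gets a divisor
   roughly ten times smaller and hence a badness roughly ten times larger, so
   it is extracted from the heap (and inlined) later.  */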
587
588 /* Recompute heap nodes for each caller edge of NODE.  */
589
590 static void
591 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
592 bitmap updated_nodes)
593 {
594 struct cgraph_edge *edge;
595 cgraph_inline_failed_t failed_reason;
596
597 if (!node->local.inlinable || node->local.disregard_inline_limits
598 || node->global.inlined_to)
599 return;
600 if (bitmap_bit_p (updated_nodes, node->uid))
601 return;
602 bitmap_set_bit (updated_nodes, node->uid);
603 node->global.estimated_growth = INT_MIN;
604
605 if (!node->local.inlinable)
606 return;
607 /* Prune out edges we won't inline into anymore. */
608 if (!cgraph_default_inline_p (node, &failed_reason))
609 {
610 for (edge = node->callers; edge; edge = edge->next_caller)
611 if (edge->aux)
612 {
613 fibheap_delete_node (heap, (fibnode_t) edge->aux);
614 edge->aux = NULL;
615 if (edge->inline_failed)
616 edge->inline_failed = failed_reason;
617 }
618 return;
619 }
620
621 for (edge = node->callers; edge; edge = edge->next_caller)
622 if (edge->inline_failed)
623 {
624 int badness = cgraph_edge_badness (edge);
625 if (edge->aux)
626 {
627 fibnode_t n = (fibnode_t) edge->aux;
628 gcc_assert (n->data == edge);
629 if (n->key == badness)
630 continue;
631
632 	    /* fibheap_replace_key can only decrease the keys.  */
633 if (fibheap_replace_key (heap, n, badness))
634 continue;
635 fibheap_delete_node (heap, (fibnode_t) edge->aux);
636 }
637 edge->aux = fibheap_insert (heap, badness, edge);
638 }
639 }
640
641 /* Recompute heap nodes for the caller edges of each of NODE's callees.  */
642
643 static void
644 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
645 bitmap updated_nodes)
646 {
647 struct cgraph_edge *e;
648 node->global.estimated_growth = INT_MIN;
649
650 for (e = node->callees; e; e = e->next_callee)
651 if (e->inline_failed)
652 update_caller_keys (heap, e->callee, updated_nodes);
653 else if (!e->inline_failed)
654 update_callee_keys (heap, e->callee, updated_nodes);
655 }
656
657 /* Enqueue all recursive calls from NODE into the priority queue HEAP,
658    ordered by how much we want to recursively inline the call.  */
659
660 static void
661 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
662 fibheap_t heap)
663 {
664 static int priority;
665 struct cgraph_edge *e;
666 for (e = where->callees; e; e = e->next_callee)
667 if (e->callee == node)
668 {
669 /* When profile feedback is available, prioritize by expected number
670 of calls. Without profile feedback we maintain simple queue
671 to order candidates via recursive depths. */
672 fibheap_insert (heap,
673 !max_count ? priority++
674 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
675 e);
676 }
677 for (e = where->callees; e; e = e->next_callee)
678 if (!e->inline_failed)
679 lookup_recursive_calls (node, e->callee, heap);
680 }
681
682 /* Decide on recursive inlining: in case the function has recursive calls,
683    inline until the body size reaches the allowed limit.  If any new indirect
684    edges are discovered in the process, add them to *NEW_EDGES, unless
685    NEW_EDGES is NULL.  */
686
687 static bool
688 cgraph_decide_recursive_inlining (struct cgraph_node *node,
689 VEC (cgraph_edge_p, heap) **new_edges)
690 {
691 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
692 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
693 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
694 fibheap_t heap;
695 struct cgraph_edge *e;
696 struct cgraph_node *master_clone, *next;
697 int depth = 0;
698 int n = 0;
699
700 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
701 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
702 return false;
703
704 if (DECL_DECLARED_INLINE_P (node->decl))
705 {
706 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
707 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
708 }
709
710 /* Make sure that function is small enough to be considered for inlining. */
711 if (!max_depth
712 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
713 return false;
714 heap = fibheap_new ();
715 lookup_recursive_calls (node, node, heap);
716 if (fibheap_empty (heap))
717 {
718 fibheap_delete (heap);
719 return false;
720 }
721
722 if (dump_file)
723 fprintf (dump_file,
724 " Performing recursive inlining on %s\n",
725 cgraph_node_name (node));
726
727 /* We need original clone to copy around. */
728 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
729 false, NULL);
730 master_clone->needed = true;
731 for (e = master_clone->callees; e; e = e->next_callee)
732 if (!e->inline_failed)
733 cgraph_clone_inlined_nodes (e, true, false);
734
735   /* Do the inlining and update the list of recursive calls in the process.  */
736 while (!fibheap_empty (heap)
737 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
738 <= limit))
739 {
740 struct cgraph_edge *curr
741 = (struct cgraph_edge *) fibheap_extract_min (heap);
742 struct cgraph_node *cnode;
743
744 depth = 1;
745 for (cnode = curr->caller;
746 cnode->global.inlined_to; cnode = cnode->callers->caller)
747 if (node->decl == curr->callee->decl)
748 depth++;
749 if (depth > max_depth)
750 {
751 if (dump_file)
752 fprintf (dump_file,
753 " maximal depth reached\n");
754 continue;
755 }
756
757 if (max_count)
758 {
759 if (!cgraph_maybe_hot_edge_p (curr))
760 {
761 if (dump_file)
762 fprintf (dump_file, " Not inlining cold call\n");
763 continue;
764 }
765 if (curr->count * 100 / node->count < probability)
766 {
767 if (dump_file)
768 fprintf (dump_file,
769 " Probability of edge is too small\n");
770 continue;
771 }
772 }
773
774 if (dump_file)
775 {
776 fprintf (dump_file,
777 " Inlining call of depth %i", depth);
778 if (node->count)
779 {
780 fprintf (dump_file, " called approx. %.2f times per call",
781 (double)curr->count / node->count);
782 }
783 fprintf (dump_file, "\n");
784 }
785 cgraph_redirect_edge_callee (curr, master_clone);
786 cgraph_mark_inline_edge (curr, false, new_edges);
787 lookup_recursive_calls (node, curr->callee, heap);
788 n++;
789 }
790 if (!fibheap_empty (heap) && dump_file)
791 fprintf (dump_file, " Recursive inlining growth limit met.\n");
792
793 fibheap_delete (heap);
794 if (dump_file)
795 fprintf (dump_file,
796 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
797 master_clone->global.size, node->global.size,
798 master_clone->global.time, node->global.time);
799
800   /* Remove the master clone we used for inlining.  We rely on the fact that
801      clones inlined into the master clone get queued just before the master
802      clone, so we don't need recursion.  */
803 for (node = cgraph_nodes; node != master_clone;
804 node = next)
805 {
806 next = node->next;
807 if (node->global.inlined_to == master_clone)
808 cgraph_remove_node (node);
809 }
810 cgraph_remove_node (master_clone);
811 /* FIXME: Recursive inlining actually reduces number of calls of the
812 function. At this place we should probably walk the function and
813 inline clones and compensate the counts accordingly. This probably
814 doesn't matter much in practice. */
815 return n > 0;
816 }
817
818 /* Set inline_failed for all callers of given function to REASON. */
819
820 static void
821 cgraph_set_inline_failed (struct cgraph_node *node,
822 cgraph_inline_failed_t reason)
823 {
824 struct cgraph_edge *e;
825
826 if (dump_file)
827 fprintf (dump_file, "Inlining failed: %s\n",
828 cgraph_inline_failed_string (reason));
829 for (e = node->callers; e; e = e->next_caller)
830 if (e->inline_failed)
831 e->inline_failed = reason;
832 }
833
834 /* Given the whole-compilation-unit size estimate INSNS, compute how large
835    we can allow the unit to grow.  */
836 static int
837 compute_max_insns (int insns)
838 {
839 int max_insns = insns;
840 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
841 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
842
843 return ((HOST_WIDEST_INT) max_insns
844 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
845 }
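/* A hypothetical illustration (numbers made up): with
   --param inline-unit-growth=30 and a unit of 20000 instructions that already
   exceeds --param large-unit-insns, the allowed size is
   20000 * (100 + 30) / 100 = 26000; for tiny units large-unit-insns is used
   as the base instead, so small programs can still grow.  */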
846
847 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
848 static void
849 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
850 {
851 while (VEC_length (cgraph_edge_p, new_edges) > 0)
852 {
853 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
854
855 gcc_assert (!edge->aux);
856 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
857 }
858 }
859
860
861 /* We use a greedy algorithm for inlining of small functions:
862    all inline candidates are put into a prioritized heap ordered by
863    edge badness, and we keep inlining the best candidate while the unit
864    growth limits allow it.  After each inlining, the keys of the affected
865    caller and callee edges are recomputed, since the estimates depend on
866    properties that inlining changes.  */
867
868 static void
869 cgraph_decide_inlining_of_small_functions (void)
870 {
871 struct cgraph_node *node;
872 struct cgraph_edge *edge;
873 cgraph_inline_failed_t failed_reason;
874 fibheap_t heap = fibheap_new ();
875 bitmap updated_nodes = BITMAP_ALLOC (NULL);
876 int min_size, max_size;
877 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
878
879 if (flag_indirect_inlining)
880 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
881
882 if (dump_file)
883 fprintf (dump_file, "\nDeciding on smaller functions:\n");
884
885 /* Put all inline candidates into the heap. */
886
887 for (node = cgraph_nodes; node; node = node->next)
888 {
889 if (!node->local.inlinable || !node->callers
890 || node->local.disregard_inline_limits)
891 continue;
892 if (dump_file)
893 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
894
895 node->global.estimated_growth = INT_MIN;
896 if (!cgraph_default_inline_p (node, &failed_reason))
897 {
898 cgraph_set_inline_failed (node, failed_reason);
899 continue;
900 }
901
902 for (edge = node->callers; edge; edge = edge->next_caller)
903 if (edge->inline_failed)
904 {
905 gcc_assert (!edge->aux);
906 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
907 }
908 }
909
910 max_size = compute_max_insns (overall_size);
911 min_size = overall_size;
912
913 while (overall_size <= max_size
914 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
915 {
916 int old_size = overall_size;
917 struct cgraph_node *where;
918 int growth =
919 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
920 cgraph_inline_failed_t not_good = CIF_OK;
921
922 growth -= edge->caller->global.size;
923
924 if (dump_file)
925 {
926 fprintf (dump_file,
927 "\nConsidering %s with %i size\n",
928 cgraph_node_name (edge->callee),
929 edge->callee->global.size);
930 fprintf (dump_file,
931 " to be inlined into %s in %s:%i\n"
932 " Estimated growth after inlined into all callees is %+i insns.\n"
933 " Estimated badness is %i, frequency %.2f.\n",
934 cgraph_node_name (edge->caller),
935 gimple_filename ((const_gimple) edge->call_stmt),
936 gimple_lineno ((const_gimple) edge->call_stmt),
937 cgraph_estimate_growth (edge->callee),
938 cgraph_edge_badness (edge),
939 edge->frequency / (double)CGRAPH_FREQ_BASE);
940 if (edge->count)
941 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
942 }
943 gcc_assert (edge->aux);
944 edge->aux = NULL;
945 if (!edge->inline_failed)
946 continue;
947
948	 /* When we do not have profile info ready, we do not weight in any way
949	    the position of the call in the procedure itself.  This means that if
950	    a call of function A from function B seems profitable to inline, the
951	    recursive call of function A in the inline copy of A in B will look
952	    profitable too, and we end up inlining until reaching the maximal
953	    function growth.  This is not a good idea, so prohibit the recursive
954	    inlining.
955
956	    ??? When the frequencies are taken into account we might not need this
957	    restriction.
958
959	    We need to be careful here; in some testcases, e.g. directives.c in
960	    libcpp, we can estimate a self-recursive function to have negative
961	    growth for inlining completely.  */
962 if (!edge->count)
963 {
964 where = edge->caller;
965 while (where->global.inlined_to)
966 {
967 if (where->decl == edge->callee->decl)
968 break;
969 where = where->callers->caller;
970 }
971 if (where->global.inlined_to)
972 {
973 edge->inline_failed
974 = (edge->callee->local.disregard_inline_limits
975 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
976 if (dump_file)
977 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
978 continue;
979 }
980 }
981
982 if (!cgraph_maybe_hot_edge_p (edge))
983 not_good = CIF_UNLIKELY_CALL;
984 if (!flag_inline_functions
985 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
986 not_good = CIF_NOT_DECLARED_INLINED;
987 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
988 not_good = CIF_OPTIMIZING_FOR_SIZE;
989 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
990 {
991 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
992 &edge->inline_failed))
993 {
994 edge->inline_failed = not_good;
995 if (dump_file)
996 fprintf (dump_file, " inline_failed:%s.\n",
997 cgraph_inline_failed_string (edge->inline_failed));
998 }
999 continue;
1000 }
1001 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1002 {
1003 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1004 &edge->inline_failed))
1005 {
1006 if (dump_file)
1007 fprintf (dump_file, " inline_failed:%s.\n",
1008 cgraph_inline_failed_string (edge->inline_failed));
1009 }
1010 continue;
1011 }
1012 if (!tree_can_inline_p (edge))
1013 {
1014 if (dump_file)
1015 fprintf (dump_file, " inline_failed:%s.\n",
1016 cgraph_inline_failed_string (edge->inline_failed));
1017 continue;
1018 }
1019 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1020 &edge->inline_failed))
1021 {
1022 where = edge->caller;
1023 if (where->global.inlined_to)
1024 where = where->global.inlined_to;
1025 if (!cgraph_decide_recursive_inlining (where,
1026 flag_indirect_inlining
1027 ? &new_indirect_edges : NULL))
1028 continue;
1029 if (flag_indirect_inlining)
1030 add_new_edges_to_heap (heap, new_indirect_edges);
1031 update_callee_keys (heap, where, updated_nodes);
1032 }
1033 else
1034 {
1035 struct cgraph_node *callee;
1036 if (edge->call_stmt_cannot_inline_p
1037 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1038 &edge->inline_failed, true))
1039 {
1040 if (dump_file)
1041 fprintf (dump_file, " Not inlining into %s:%s.\n",
1042 cgraph_node_name (edge->caller),
1043 cgraph_inline_failed_string (edge->inline_failed));
1044 continue;
1045 }
1046 callee = edge->callee;
1047 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1048 if (flag_indirect_inlining)
1049 add_new_edges_to_heap (heap, new_indirect_edges);
1050
1051 update_callee_keys (heap, callee, updated_nodes);
1052 }
1053 where = edge->caller;
1054 if (where->global.inlined_to)
1055 where = where->global.inlined_to;
1056
1057	  /* Our profitability metric can depend on local properties
1058	     such as the number of inlinable calls and the size of the function
1059	     body.  After inlining these properties might change for the function
1060	     we inlined into (since its body size changed) and for the functions
1061	     called by the function we inlined (since the number of their
1062	     inlinable callers might change).  */
1063 update_caller_keys (heap, where, updated_nodes);
1064 bitmap_clear (updated_nodes);
1065
1066 if (dump_file)
1067 {
1068 fprintf (dump_file,
1069		   " Inlined into %s which now has time %i and size %i,"
1070		   " net change of %+i.\n",
1071 cgraph_node_name (edge->caller),
1072 edge->caller->global.time,
1073 edge->caller->global.size,
1074 overall_size - old_size);
1075 }
1076 if (min_size > overall_size)
1077 {
1078 min_size = overall_size;
1079 max_size = compute_max_insns (min_size);
1080
1081 if (dump_file)
1082 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1083 }
1084 }
1085 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1086 {
1087 gcc_assert (edge->aux);
1088 edge->aux = NULL;
1089 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1090 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1091 &edge->inline_failed))
1092 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1093 }
1094
1095 if (new_indirect_edges)
1096 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1097 fibheap_delete (heap);
1098 BITMAP_FREE (updated_nodes);
1099 }
1100
1101 /* Decide on the inlining.  We do so in topological order to avoid the
1102    expense of updating the data structures.  */
1103
1104 static unsigned int
1105 cgraph_decide_inlining (void)
1106 {
1107 struct cgraph_node *node;
1108 int nnodes;
1109 struct cgraph_node **order =
1110 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1111 int old_size = 0;
1112 int i;
1113 bool redo_always_inline = true;
1114 int initial_size = 0;
1115
1116 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1117 if (in_lto_p && flag_indirect_inlining)
1118 ipa_update_after_lto_read ();
1119
1120 max_count = 0;
1121 max_benefit = 0;
1122 for (node = cgraph_nodes; node; node = node->next)
1123 if (node->analyzed)
1124 {
1125 struct cgraph_edge *e;
1126
1127 gcc_assert (inline_summary (node)->self_size == node->global.size);
1128 initial_size += node->global.size;
1129 for (e = node->callees; e; e = e->next_callee)
1130 if (max_count < e->count)
1131 max_count = e->count;
1132 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1133 max_benefit = inline_summary (node)->time_inlining_benefit;
1134 }
1135 gcc_assert (in_lto_p
1136 || !max_count
1137 || (profile_info && flag_branch_probabilities));
1138 overall_size = initial_size;
1139
1140 nnodes = cgraph_postorder (order);
1141
1142 if (dump_file)
1143 fprintf (dump_file,
1144 "\nDeciding on inlining. Starting with size %i.\n",
1145 initial_size);
1146
1147 for (node = cgraph_nodes; node; node = node->next)
1148 node->aux = 0;
1149
1150 if (dump_file)
1151 fprintf (dump_file, "\nInlining always_inline functions:\n");
1152
1153 /* In the first pass mark all always_inline edges. Do this with a priority
1154 so none of our later choices will make this impossible. */
1155 while (redo_always_inline)
1156 {
1157 redo_always_inline = false;
1158 for (i = nnodes - 1; i >= 0; i--)
1159 {
1160 struct cgraph_edge *e, *next;
1161
1162 node = order[i];
1163
1164 /* Handle nodes to be flattened, but don't update overall unit
1165 size. */
1166 if (lookup_attribute ("flatten",
1167 DECL_ATTRIBUTES (node->decl)) != NULL)
1168 {
1169 if (dump_file)
1170 fprintf (dump_file,
1171 "Flattening %s\n", cgraph_node_name (node));
1172 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1173 }
1174
1175 if (!node->local.disregard_inline_limits)
1176 continue;
1177 if (dump_file)
1178 fprintf (dump_file,
1179 "\nConsidering %s size:%i (always inline)\n",
1180 cgraph_node_name (node), node->global.size);
1181 old_size = overall_size;
1182 for (e = node->callers; e; e = next)
1183 {
1184 next = e->next_caller;
1185 if (!e->inline_failed || e->call_stmt_cannot_inline_p)
1186 continue;
1187 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1188 &e->inline_failed))
1189 continue;
1190 if (!tree_can_inline_p (e))
1191 continue;
1192 if (cgraph_mark_inline_edge (e, true, NULL))
1193 redo_always_inline = true;
1194 if (dump_file)
1195 fprintf (dump_file,
1196 " Inlined into %s which now has size %i.\n",
1197 cgraph_node_name (e->caller),
1198 e->caller->global.size);
1199 }
1200	  /* Inlining a self-recursive function might introduce new calls to
1201	     itself that we didn't see in the loop above.  Fill in the proper
1202	     reason why inlining failed.  */
1203 for (e = node->callers; e; e = e->next_caller)
1204 if (e->inline_failed)
1205 e->inline_failed = CIF_RECURSIVE_INLINING;
1206 if (dump_file)
1207 fprintf (dump_file,
1208 " Inlined for a net change of %+i size.\n",
1209 overall_size - old_size);
1210 }
1211 }
1212
1213 cgraph_decide_inlining_of_small_functions ();
1214
1215 if (flag_inline_functions_called_once)
1216 {
1217 if (dump_file)
1218 fprintf (dump_file, "\nDeciding on functions called once:\n");
1219
1220 /* And finally decide what functions are called once. */
1221 for (i = nnodes - 1; i >= 0; i--)
1222 {
1223 node = order[i];
1224
1225 if (node->callers
1226 && !node->callers->next_caller
1227 && cgraph_only_called_directly_p (node)
1228 && node->local.inlinable
1229 && node->callers->inline_failed
1230 && node->callers->caller != node
1231 && node->callers->caller->global.inlined_to != node
1232 && !node->callers->call_stmt_cannot_inline_p
1233 && !DECL_EXTERNAL (node->decl)
1234 && !DECL_COMDAT (node->decl))
1235 {
1236 old_size = overall_size;
1237 if (dump_file)
1238 {
1239 fprintf (dump_file,
1240 "\nConsidering %s size %i.\n",
1241 cgraph_node_name (node), node->global.size);
1242 fprintf (dump_file,
1243 " Called once from %s %i insns.\n",
1244 cgraph_node_name (node->callers->caller),
1245 node->callers->caller->global.size);
1246 }
1247
1248 if (cgraph_check_inline_limits (node->callers->caller, node,
1249 NULL, false))
1250 {
1251 cgraph_mark_inline (node->callers);
1252 if (dump_file)
1253 fprintf (dump_file,
1254 " Inlined into %s which now has %i size"
1255 " for a net change of %+i size.\n",
1256 cgraph_node_name (node->callers->caller),
1257 node->callers->caller->global.size,
1258 overall_size - old_size);
1259 }
1260 else
1261 {
1262 if (dump_file)
1263 fprintf (dump_file,
1264 " Inline limit reached, not inlined.\n");
1265 }
1266 }
1267 }
1268 }
1269
1270 /* Free ipa-prop structures if they are no longer needed. */
1271 if (flag_indirect_inlining)
1272 free_all_ipa_structures_after_iinln ();
1273
1274 if (dump_file)
1275 fprintf (dump_file,
1276 "\nInlined %i calls, eliminated %i functions, "
1277 "size %i turned to %i size.\n\n",
1278 ncalls_inlined, nfunctions_inlined, initial_size,
1279 overall_size);
1280 free (order);
1281 return 0;
1282 }
1283
1284 /* Try to inline edge E from the incremental inliner.  MODE specifies the
1285    mode of the inliner.
1286
1287    We detect cycles by storing the mode of the inliner into the cgraph_node
1288    the last time we visited it in the recursion.  In general, when the mode is
1289    set, we have recursive inlining, but as a special case we want to try harder
1290    to inline ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a
1291    being flatten, b being always inline.  Flattening 'a' will collapse
1292    a->b->c before hitting the cycle.  To accommodate always inline, we however
1293    need to inline a->b->c->b.
1294
1295    So after hitting the cycle the first time, we switch into ALWAYS_INLINE mode
1296    and stop inlining only after hitting a cycle again in ALWAYS_INLINE mode.  */
1297 static bool
1298 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1299 {
1300 struct cgraph_node *callee = e->callee;
1301 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1302 bool always_inline = e->callee->local.disregard_inline_limits;
1303 bool inlined = false;
1304
1305   /* Have we hit a cycle?  */
1306 if (callee_mode)
1307 {
1308      /* This is the first time we hit the cycle: we are not in ALWAYS_INLINE
1309	 only mode yet and the function in question is always_inline.  */
1310 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1311 {
1312 if (dump_file)
1313 {
1314 indent_to (dump_file, depth);
1315 fprintf (dump_file,
1316 "Hit cycle in %s, switching to always inline only.\n",
1317 cgraph_node_name (callee));
1318 }
1319 mode = INLINE_ALWAYS_INLINE;
1320 }
1321 /* Otherwise it is time to give up. */
1322 else
1323 {
1324 if (dump_file)
1325 {
1326 indent_to (dump_file, depth);
1327 fprintf (dump_file,
1328 "Not inlining %s into %s to avoid cycle.\n",
1329 cgraph_node_name (callee),
1330 cgraph_node_name (e->caller));
1331 }
1332 e->inline_failed = (e->callee->local.disregard_inline_limits
1333 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1334 return false;
1335 }
1336 }
1337
1338 callee->aux = (void *)(size_t) mode;
1339 if (dump_file)
1340 {
1341 indent_to (dump_file, depth);
1342 fprintf (dump_file, " Inlining %s into %s.\n",
1343 cgraph_node_name (e->callee),
1344 cgraph_node_name (e->caller));
1345 }
1346 if (e->inline_failed)
1347 {
1348 cgraph_mark_inline (e);
1349
1350 /* In order to fully inline always_inline functions, we need to
1351 recurse here, since the inlined functions might not be processed by
1352 incremental inlining at all yet.
1353
1354 Also flattening needs to be done recursively. */
1355
1356 if (mode == INLINE_ALL || always_inline)
1357 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1358 inlined = true;
1359 }
1360 callee->aux = (void *)(size_t) callee_mode;
1361 return inlined;
1362 }
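/* A hypothetical source-level illustration of the a->b->c->b scenario
   described before try_inline (names and bodies are made up):

     static inline int b (int);

     static int
     c (int x)
     {
       return b (x - 1);
     }

     static inline int __attribute__ ((always_inline))
     b (int x)
     {
       return x > 0 ? c (x) : 0;
     }

     static int __attribute__ ((flatten))
     a (int x)
     {
       return b (x);
     }

   Flattening 'a' first collapses a->b->c; when b is reached again we have hit
   the cycle, so we switch to ALWAYS_INLINE mode and still inline this second
   occurrence of b instead of giving up.  */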
1363
1364 /* Return true when N is a leaf function.  Accept cheap (pure & const)
1365    builtins in leaf functions.  */
1366 static bool
1367 leaf_node_p (struct cgraph_node *n)
1368 {
1369 struct cgraph_edge *e;
1370 for (e = n->callees; e; e = e->next_callee)
1371 if (!DECL_BUILT_IN (e->callee->decl)
1372 || (!TREE_READONLY (e->callee->decl)
1373 || DECL_PURE_P (e->callee->decl)))
1374 return false;
1375 return true;
1376 }
1377
1378 /* Decide on the inlining incrementally.  We do so in topological order to
1379    avoid the expense of updating the data structures.
1380    DEPTH is the depth of recursion, used only for debug output.  */
1381
1382 static bool
1383 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1384 enum inlining_mode mode,
1385 int depth)
1386 {
1387 struct cgraph_edge *e;
1388 bool inlined = false;
1389 cgraph_inline_failed_t failed_reason;
1390 enum inlining_mode old_mode;
1391
1392 #ifdef ENABLE_CHECKING
1393 verify_cgraph_node (node);
1394 #endif
1395
1396 old_mode = (enum inlining_mode) (size_t)node->aux;
1397
1398 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1399 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1400 {
1401 if (dump_file)
1402 {
1403 indent_to (dump_file, depth);
1404 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1405 }
1406 mode = INLINE_ALL;
1407 }
1408
1409 node->aux = (void *)(size_t) mode;
1410
1411 /* First of all look for always inline functions. */
1412 if (mode != INLINE_SIZE_NORECURSIVE)
1413 for (e = node->callees; e; e = e->next_callee)
1414 {
1415 if (!e->callee->local.disregard_inline_limits
1416 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1417 continue;
1418 if (e->call_stmt_cannot_inline_p)
1419 continue;
1420 /* When the edge is already inlined, we just need to recurse into
1421 it in order to fully flatten the leaves. */
1422 if (!e->inline_failed && mode == INLINE_ALL)
1423 {
1424 inlined |= try_inline (e, mode, depth);
1425 continue;
1426 }
1427 if (dump_file)
1428 {
1429 indent_to (dump_file, depth);
1430 fprintf (dump_file,
1431 "Considering to always inline inline candidate %s.\n",
1432 cgraph_node_name (e->callee));
1433 }
1434 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1435 {
1436 if (dump_file)
1437 {
1438 indent_to (dump_file, depth);
1439 fprintf (dump_file, "Not inlining: recursive call.\n");
1440 }
1441 continue;
1442 }
1443 if (!tree_can_inline_p (e))
1444 {
1445 if (dump_file)
1446 {
1447 indent_to (dump_file, depth);
1448 fprintf (dump_file,
1449			   "Not inlining: %s.\n",
1450 cgraph_inline_failed_string (e->inline_failed));
1451 }
1452 continue;
1453 }
1454 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1455 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1456 {
1457 if (dump_file)
1458 {
1459 indent_to (dump_file, depth);
1460 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1461 }
1462 continue;
1463 }
1464 if (!e->callee->analyzed)
1465 {
1466 if (dump_file)
1467 {
1468 indent_to (dump_file, depth);
1469 fprintf (dump_file,
1470 "Not inlining: Function body no longer available.\n");
1471 }
1472 continue;
1473 }
1474 inlined |= try_inline (e, mode, depth);
1475 }
1476
1477 /* Now do the automatic inlining. */
1478 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1479 for (e = node->callees; e; e = e->next_callee)
1480 {
1481 int allowed_growth = 0;
1482 if (!e->callee->local.inlinable
1483 || !e->inline_failed
1484 || e->callee->local.disregard_inline_limits)
1485 continue;
1486 if (dump_file)
1487 fprintf (dump_file, "Considering inline candidate %s.\n",
1488 cgraph_node_name (e->callee));
1489 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1490 {
1491 if (dump_file)
1492 {
1493 indent_to (dump_file, depth);
1494 fprintf (dump_file, "Not inlining: recursive call.\n");
1495 }
1496 continue;
1497 }
1498 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1499 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1500 {
1501 if (dump_file)
1502 {
1503 indent_to (dump_file, depth);
1504 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1505 }
1506 continue;
1507 }
1508
1509 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1510 && optimize_function_for_speed_p (cfun))
1511 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1512
1513 /* When the function body would grow and inlining the function won't
1514 eliminate the need for offline copy of the function, don't inline.
1515 */
1516 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1517 || (!flag_inline_functions
1518 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1519 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1520 > e->caller->global.size + allowed_growth)
1521 && cgraph_estimate_growth (e->callee) > allowed_growth)
1522 {
1523 if (dump_file)
1524 {
1525 indent_to (dump_file, depth);
1526 fprintf (dump_file,
1527 "Not inlining: code size would grow by %i.\n",
1528 cgraph_estimate_size_after_inlining (1, e->caller,
1529 e->callee)
1530 - e->caller->global.size);
1531 }
1532 continue;
1533 }
1534 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1535 false)
1536 || e->call_stmt_cannot_inline_p)
1537 {
1538 if (dump_file)
1539 {
1540 indent_to (dump_file, depth);
1541 fprintf (dump_file, "Not inlining: %s.\n",
1542 cgraph_inline_failed_string (e->inline_failed));
1543 }
1544 continue;
1545 }
1546 if (!e->callee->analyzed)
1547 {
1548 if (dump_file)
1549 {
1550 indent_to (dump_file, depth);
1551 fprintf (dump_file,
1552 "Not inlining: Function body no longer available.\n");
1553 }
1554 continue;
1555 }
1556 if (!tree_can_inline_p (e))
1557 {
1558 if (dump_file)
1559 {
1560 indent_to (dump_file, depth);
1561 fprintf (dump_file,
1562			   "Not inlining: %s.\n",
1563 cgraph_inline_failed_string (e->inline_failed));
1564 }
1565 continue;
1566 }
1567 if (cgraph_default_inline_p (e->callee, &failed_reason))
1568 inlined |= try_inline (e, mode, depth);
1569 }
1570 node->aux = (void *)(size_t) old_mode;
1571 return inlined;
1572 }
1573
1574 /* Because inlining might remove no-longer-reachable nodes, we need to
1575    keep the array visible to the garbage collector to avoid reading
1576    collected-out nodes.  */
1577 static int nnodes;
1578 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1579
1580 /* Do inlining of small functions. Doing so early helps profiling and other
1581 passes to be somewhat more effective and avoids some code duplication in
1582 later real inlining pass for testcases with very many function calls. */
1583 static unsigned int
1584 cgraph_early_inlining (void)
1585 {
1586 struct cgraph_node *node = cgraph_node (current_function_decl);
1587 unsigned int todo = 0;
1588 int iterations = 0;
1589
1590 if (sorrycount || errorcount)
1591 return 0;
1592 while (cgraph_decide_inlining_incrementally (node,
1593 iterations
1594 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
1595 && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
1596 {
1597 timevar_push (TV_INTEGRATION);
1598 todo |= optimize_inline_calls (current_function_decl);
1599 iterations++;
1600 timevar_pop (TV_INTEGRATION);
1601 }
1602 if (dump_file)
1603 fprintf (dump_file, "Iterations: %i\n", iterations);
1604 cfun->always_inline_functions_inlined = true;
1605 return todo;
1606 }
1607
1608 /* When inlining shall be performed. */
1609 static bool
1610 cgraph_gate_early_inlining (void)
1611 {
1612 return flag_early_inlining;
1613 }
1614
1615 struct gimple_opt_pass pass_early_inline =
1616 {
1617 {
1618 GIMPLE_PASS,
1619 "einline", /* name */
1620 cgraph_gate_early_inlining, /* gate */
1621 cgraph_early_inlining, /* execute */
1622 NULL, /* sub */
1623 NULL, /* next */
1624 0, /* static_pass_number */
1625 TV_INLINE_HEURISTICS, /* tv_id */
1626 0, /* properties_required */
1627 0, /* properties_provided */
1628 0, /* properties_destroyed */
1629 0, /* todo_flags_start */
1630 TODO_dump_func /* todo_flags_finish */
1631 }
1632 };
1633
1634 /* When inlining shall be performed. */
1635 static bool
1636 cgraph_gate_ipa_early_inlining (void)
1637 {
1638 return (flag_early_inlining
1639 && !in_lto_p
1640 && (flag_branch_probabilities || flag_test_coverage
1641 || profile_arc_flag));
1642 }
1643
1644 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1645 before tree profiling so we have stand alone IPA pass for doing so. */
1646 struct simple_ipa_opt_pass pass_ipa_early_inline =
1647 {
1648 {
1649 SIMPLE_IPA_PASS,
1650 "einline_ipa", /* name */
1651 cgraph_gate_ipa_early_inlining, /* gate */
1652 NULL, /* execute */
1653 NULL, /* sub */
1654 NULL, /* next */
1655 0, /* static_pass_number */
1656 TV_INLINE_HEURISTICS, /* tv_id */
1657 0, /* properties_required */
1658 0, /* properties_provided */
1659 0, /* properties_destroyed */
1660 0, /* todo_flags_start */
1661 TODO_dump_cgraph /* todo_flags_finish */
1662 }
1663 };
1664
1665 /* See if the statement might disappear after inlining.  We are not terribly
1666    sophisticated, basically looking for simple abstraction penalty wrappers.  */
1667
1668 static bool
1669 likely_eliminated_by_inlining_p (gimple stmt)
1670 {
1671 enum gimple_code code = gimple_code (stmt);
1672 switch (code)
1673 {
1674 case GIMPLE_RETURN:
1675 return true;
1676 case GIMPLE_ASSIGN:
1677 if (gimple_num_ops (stmt) != 2)
1678 return false;
1679
1680 /* Casts of parameters, loads from parameters passed by reference
1681 and stores to return value or parameters are probably free after
1682 inlining. */
1683 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1684 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1685 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1686 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1687 {
1688 tree rhs = gimple_assign_rhs1 (stmt);
1689 tree lhs = gimple_assign_lhs (stmt);
1690 tree inner_rhs = rhs;
1691 tree inner_lhs = lhs;
1692 bool rhs_free = false;
1693 bool lhs_free = false;
1694
1695 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1696 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1697 while (handled_component_p (inner_rhs)
1698 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1699 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1700
1701
1702 if (TREE_CODE (inner_rhs) == PARM_DECL
1703 || (TREE_CODE (inner_rhs) == SSA_NAME
1704 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1705 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1706 rhs_free = true;
1707 if (rhs_free && is_gimple_reg (lhs))
1708 lhs_free = true;
1709 if (((TREE_CODE (inner_lhs) == PARM_DECL
1710 || (TREE_CODE (inner_lhs) == SSA_NAME
1711 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1712 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1713 && inner_lhs != lhs)
1714 || TREE_CODE (inner_lhs) == RESULT_DECL
1715 || (TREE_CODE (inner_lhs) == SSA_NAME
1716 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1717 lhs_free = true;
1718 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1719 rhs_free = true;
1720 if (lhs_free && rhs_free)
1721 return true;
1722 }
1723 return false;
1724 default:
1725 return false;
1726 }
1727 }
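/* A hypothetical example of the kind of abstraction penalty wrapper the
   predicate above is meant to catch (names are made up):

     struct point { int x; int y; };

     static inline int
     point_get_x (const struct point *p)
     {
       return p->x;
     }

   The body consists of a load from a parameter passed by reference followed
   by a GIMPLE_RETURN, both of which the predicate counts as likely eliminated
   once the accessor is inlined into a caller that already has the object.  */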
1728
1729 /* Compute function body size parameters for NODE. */
1730
1731 static void
1732 estimate_function_body_sizes (struct cgraph_node *node)
1733 {
1734 gcov_type time = 0;
1735 gcov_type time_inlining_benefit = 0;
1736 int size = 0;
1737 int size_inlining_benefit = 0;
1738 basic_block bb;
1739 gimple_stmt_iterator bsi;
1740 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1741 tree arg;
1742 int freq;
1743 tree funtype = TREE_TYPE (node->decl);
1744
1745 if (dump_file)
1746 fprintf (dump_file, "Analyzing function body size: %s\n",
1747 cgraph_node_name (node));
1748
1749 gcc_assert (my_function && my_function->cfg);
1750 FOR_EACH_BB_FN (bb, my_function)
1751 {
1752 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1753 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1754 {
1755 gimple stmt = gsi_stmt (bsi);
1756 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1757 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1758
1759 if (dump_file && (dump_flags & TDF_DETAILS))
1760 {
1761 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1762 freq, this_size, this_time);
1763 print_gimple_stmt (dump_file, stmt, 0, 0);
1764 }
1765 this_time *= freq;
1766 time += this_time;
1767 size += this_size;
1768 if (likely_eliminated_by_inlining_p (stmt))
1769 {
1770 size_inlining_benefit += this_size;
1771 time_inlining_benefit += this_time;
1772 if (dump_file && (dump_flags & TDF_DETAILS))
1773 fprintf (dump_file, " Likely eliminated\n");
1774 }
1775 gcc_assert (time >= 0);
1776 gcc_assert (size >= 0);
1777 }
1778 }
1779 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1780 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1781 / CGRAPH_FREQ_BASE);
1782 if (dump_file)
1783 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1784 (int)time, (int)time_inlining_benefit,
1785 size, size_inlining_benefit);
1786 time_inlining_benefit += eni_time_weights.call_cost;
1787 size_inlining_benefit += eni_size_weights.call_cost;
1788 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1789 {
1790 int cost = estimate_move_cost (TREE_TYPE (funtype));
1791 time_inlining_benefit += cost;
1792 size_inlining_benefit += cost;
1793 }
1794 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1795 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1796 {
1797 int cost = estimate_move_cost (TREE_TYPE (arg));
1798 time_inlining_benefit += cost;
1799 size_inlining_benefit += cost;
1800 }
1801 if (time_inlining_benefit > MAX_TIME)
1802 time_inlining_benefit = MAX_TIME;
1803 if (time > MAX_TIME)
1804 time = MAX_TIME;
1805 inline_summary (node)->self_time = time;
1806 inline_summary (node)->self_size = size;
1807 if (dump_file)
1808 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1809 (int)time, (int)time_inlining_benefit,
1810 size, size_inlining_benefit);
1811 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1812 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1813 }
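/* Worked example of the accumulation above (numbers are made up,
   assuming CGRAPH_FREQ_BASE is 1000): a statement with time estimate 2
   in a block of frequency 500 (executed about half as often as the
   entry block) contributes 2 * 500 = 1000 to TIME, and a statement with
   time estimate 3 in a block of frequency 1000 contributes 3000.  The
   final

     time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE

   rounds the total of 4000 back to 4, i.e. to entry-block-relative
   units; TIME_INLINING_BENEFIT is scaled the same way.  SIZE is
   frequency independent and is accumulated unscaled.  */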
1814
1815 /* Compute parameters of functions used by inliner. */
1816 unsigned int
1817 compute_inline_parameters (struct cgraph_node *node)
1818 {
1819 HOST_WIDE_INT self_stack_size;
1820
1821 gcc_assert (!node->global.inlined_to);
1822
1823 /* Estimate the stack size for the function. But not at -O0,
1824 because computing estimated_stack_frame_size is quadratic. */
1825 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1826 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1827 node->global.estimated_stack_size = self_stack_size;
1828 node->global.stack_frame_offset = 0;
1829
1830 /* Can this function be inlined at all? */
1831 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1832 if (node->local.inlinable && !node->local.disregard_inline_limits)
1833 node->local.disregard_inline_limits
1834 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1835 estimate_function_body_sizes (node);
1836 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1837 node->global.time = inline_summary (node)->self_time;
1838 node->global.size = inline_summary (node)->self_size;
1839 return 0;
1840 }
1841
1842
1843 /* Compute parameters of functions used by inliner using
1844 current_function_decl. */
1845 static unsigned int
1846 compute_inline_parameters_for_current (void)
1847 {
1848 compute_inline_parameters (cgraph_node (current_function_decl));
1849 return 0;
1850 }
1851
1852 struct gimple_opt_pass pass_inline_parameters =
1853 {
1854 {
1855 GIMPLE_PASS,
1856 "inline_param", /* name */
1857 NULL, /* gate */
1858 compute_inline_parameters_for_current,/* execute */
1859 NULL, /* sub */
1860 NULL, /* next */
1861 0, /* static_pass_number */
1862 TV_INLINE_HEURISTICS, /* tv_id */
1863 0, /* properties_required */
1864 0, /* properties_provided */
1865 0, /* properties_destroyed */
1866 0, /* todo_flags_start */
1867 0 /* todo_flags_finish */
1868 }
1869 };
1870
1871 /* Perform the intraprocedural analysis in NODE that is required in order
1872 to inline indirect calls. */
1873 static void
1874 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1875 {
1876 struct cgraph_edge *cs;
1877
1878 if (!flag_ipa_cp)
1879 {
1880 ipa_initialize_node_params (node);
1881 ipa_detect_param_modifications (node);
1882 }
1883 ipa_analyze_params_uses (node);
1884
1885 if (!flag_ipa_cp)
1886 for (cs = node->callees; cs; cs = cs->next_callee)
1887 {
1888 ipa_count_arguments (cs);
1889 ipa_compute_jump_functions (cs);
1890 }
1891
1892 if (dump_file)
1893 {
1894 ipa_print_node_params (dump_file, node);
1895 ipa_print_node_jump_functions (dump_file, node);
1896 }
1897 }
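/* Illustration only (not part of the pass): the situation this analysis
   prepares for.  In a hypothetical caller passing a known function
   through a pointer parameter,

     static int add_one (int x) { return x + 1; }

     static int
     apply (int (*fn) (int), int v)
     {
       return fn (v);
     }

     int
     use (int v)
     {
       return apply (add_one, v);
     }

   the parameter descriptors and jump functions recorded here let the
   indirect inlining code discover that FN inside APPLY is ADD_ONE once
   APPLY is inlined into USE, turning the indirect call into a direct
   one the inliner can then consider.  */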
1898
1899 /* Analyze the function body of NODE and compute the parameters used by the inliner. */
1900 static void
1901 analyze_function (struct cgraph_node *node)
1902 {
1903 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1904 current_function_decl = node->decl;
1905
1906 compute_inline_parameters (node);
1907 if (flag_indirect_inlining)
1908 inline_indirect_intraprocedural_analysis (node);
1909
1910 current_function_decl = NULL;
1911 pop_cfun ();
1912 }
1913
1914 /* Called when a new function is inserted into the callgraph late. */
1915 static void
1916 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1917 {
1918 analyze_function (node);
1919 }
1920
1921 /* Generate inline summaries for all analyzed functions in the callgraph. */
1922 static void
1923 inline_generate_summary (void)
1924 {
1925 struct cgraph_node *node;
1926
1927 function_insertion_hook_holder =
1928 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1929
1930 if (flag_indirect_inlining)
1931 {
1932 ipa_register_cgraph_hooks ();
1933 ipa_check_create_node_params ();
1934 ipa_check_create_edge_args ();
1935 }
1936
1937 for (node = cgraph_nodes; node; node = node->next)
1938 if (node->analyzed)
1939 analyze_function (node);
1940
1942 }
1943
1944 /* Apply inline plan to function. */
1945 static unsigned int
1946 inline_transform (struct cgraph_node *node)
1947 {
1948 unsigned int todo = 0;
1949 struct cgraph_edge *e;
1950
1951 /* We might need the body of this function so that we can expand
1952 it inline somewhere else. */
1953 if (cgraph_preserve_function_body_p (node->decl))
1954 save_inline_function_body (node);
1955
1956 for (e = node->callees; e; e = e->next_callee)
1957 if (!e->inline_failed || warn_inline)
1958 break;
1959
1960 if (e)
1961 {
1962 timevar_push (TV_INTEGRATION);
1963 todo = optimize_inline_calls (current_function_decl);
1964 timevar_pop (TV_INTEGRATION);
1965 }
1966 cfun->always_inline_functions_inlined = true;
1967 cfun->after_inlining = true;
1968 return todo | execute_fixup_cfg ();
1969 }
1970
1971 /* Read the inline summary. Jump functions are shared between ipa-cp
1972 and the inliner, so when ipa-cp is active we do not need to read them
1973 again here. */
1974
1975 static void
1976 inline_read_summary (void)
1977 {
1978 if (flag_indirect_inlining)
1979 {
1980 ipa_register_cgraph_hooks ();
1981 if (!flag_ipa_cp)
1982 ipa_prop_read_jump_functions ();
1983 }
1984 function_insertion_hook_holder =
1985 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1986 }
1987
1988 /* Write the inline summary for the nodes in SET.
1989 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp is
1990 active, we don't need to write them twice. */
1991
1992 static void
1993 inline_write_summary (cgraph_node_set set)
1994 {
1995 if (flag_indirect_inlining && !flag_ipa_cp)
1996 ipa_prop_write_jump_functions (set);
1997 }
1998
1999 struct ipa_opt_pass_d pass_ipa_inline =
2000 {
2001 {
2002 IPA_PASS,
2003 "inline", /* name */
2004 NULL, /* gate */
2005 cgraph_decide_inlining, /* execute */
2006 NULL, /* sub */
2007 NULL, /* next */
2008 0, /* static_pass_number */
2009 TV_INLINE_HEURISTICS, /* tv_id */
2010 0, /* properties_required */
2011 0, /* properties_provided */
2012 0, /* properties_destroyed */
2013 TODO_remove_functions, /* todo_flags_start */
2014 TODO_dump_cgraph | TODO_dump_func
2015 | TODO_remove_functions /* todo_flags_finish */
2016 },
2017 inline_generate_summary, /* generate_summary */
2018 inline_write_summary, /* write_summary */
2019 inline_read_summary, /* read_summary */
2020 NULL, /* function_read_summary */
2021 0, /* TODOs */
2022 inline_transform, /* function_transform */
2023 NULL, /* variable_transform */
2024 };
2025
2026
2027 #include "gt-ipa-inline.h"