1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23    We separate inlining decisions from the inliner itself and store them
24    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
25    documentation for details of how inline plans are represented in the
26    callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32      This function marks a given call inline and performs the necessary
33      modifications of the cgraph (producing clones and updating the overall
34      statistics).
35
36 inlining heuristics limits
37
38      These functions check that a particular inlining is allowed by the
39      limits specified by the user (allowed function growth, overall unit
40      growth and so on).
41
42 inlining heuristics
43
44      This is the implementation of the IPA pass that aims to get as much
45      benefit from inlining as possible while obeying the limits checked above.
46
47      The implementation of the particular heuristics is separated from
48      the rest of the code to make it easier to replace it with a more
49      complicated implementation in the future.  The rest of the inlining code
50      acts as a library aimed at modifying the callgraph and verifying that
51      the limits on code size growth are respected.
52
53      To mark a given call inline, use the cgraph_mark_inline function; the
54      verification is performed by cgraph_default_inline_p and
55      cgraph_check_inline_limits.
56
57      The heuristics implement a simple knapsack-style algorithm ordering
58      all functions by their "profitability" (estimated by code size growth)
59      and inlining them in priority order.
60
61      cgraph_decide_inlining implements heuristics taking the whole callgraph
62      into account, while cgraph_decide_inlining_incrementally considers
63      only one function at a time and is used by the early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69      This pass computes the local properties of functions used by the inliner:
70      estimated function body size, whether the function is inlinable at all,
71      and stack frame consumption.
72
73      Before executing any of the inliner passes, this local pass has to be
74      applied to each function in the callgraph (i.e. run as a subpass of some
75      earlier IPA pass).  The results are made out of date by any optimization
76      applied to the function body.
77
78 pass_early_inlining
79
80      A simple local inlining pass that inlines callees into the current
81      function.  This pass does no global whole-compilation-unit analysis, so
82      when it is allowed to do inlining that expands code size it might result
83      in unbounded growth of the whole unit.
84
85      The pass is run during conversion into SSA form.  Only functions already
86      converted into SSA form are inlined, so the conversion must happen in
87      topological order on the callgraph (that order is maintained by the pass
88      manager).  Functions are early-optimized after inlining, so the early
89      inliner sees the unoptimized function itself, but all considered callees
90      are already optimized, allowing it to unfold the C++ abstraction penalty
91      effectively and cheaply.
92
93 pass_ipa_early_inlining
94
95      With profiling, early inlining is also necessary to reduce
96      instrumentation costs on programs with a high abstraction penalty (doing
97      many redundant calls).  This can't happen in parallel with early
98      optimization and profile instrumentation, because we would end up
99      re-instrumenting already instrumented function bodies we brought in via
100     inlining.
101
102     To avoid this, this pass is executed as an IPA pass before profiling.  It is
103     a simple wrapper around pass_early_inlining and ensures inlining runs first.
104
105 pass_ipa_inline
106
107     This is the main pass, implementing a simple greedy algorithm that inlines
108     small functions (even when that results in overall growth of the compilation
109     unit) and functions called once.  The pass computes just the so-called inline
110     plan (a representation of the inlining to be done in the callgraph) and,
111     unlike early inlining, does not perform the inlining itself.
112
113 pass_apply_inline
114
115     This pass performs the actual inlining according to pass_ipa_inline on a
116     given function.  The function body before inlining is possibly saved when
117     it is needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates in:
146
147    In ALWAYS_INLINE mode, only functions marked
148    always_inline are inlined.  This mode is used after detecting a cycle during
149    flattening.
150
151    In SIZE mode, only functions that reduce function body size after inlining
152    are inlined; this is used during early inlining.
153
154    In ALL mode, everything is inlined.  This is used during flattening.  */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE,
159 INLINE_ALL
160 };
161 static bool
162 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
163 int);
164
165
166 /* Statistics we collect about inlining algorithm. */
167 static int ncalls_inlined;
168 static int nfunctions_inlined;
169 static int overall_size;
170 static gcov_type max_count, max_benefit;
171
172 /* Holders of ipa cgraph hooks: */
173 static struct cgraph_node_hook_list *function_insertion_hook_holder;
174
175 static inline struct inline_summary *
176 inline_summary (struct cgraph_node *node)
177 {
178 return &node->local.inline_summary;
179 }
180
181 /* Estimate self time of the function after inlining WHAT into TO. */
182
183 static int
184 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
185 struct cgraph_node *what)
186 {
187 gcov_type time = (((gcov_type)what->global.time - inline_summary
188 (what)->time_inlining_benefit)
189 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
190 + to->global.time;
191 if (time < 0)
192 time = 0;
193 if (time > MAX_TIME)
194 time = MAX_TIME;
195 return time;
196 }
197
198 /* Estimate the self size of the function after inlining WHAT into TO.  */
199
200 static int
201 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
202 struct cgraph_node *what)
203 {
204 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
205 gcc_assert (size >= 0);
206 return size;
207 }
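/* An illustrative (made-up) example of the two estimates above: with
   CGRAPH_FREQ_BASE of 1000, inlining WHAT with global.time 30 and
   time_inlining_benefit 10 into TO with global.time 50 at a call of
   frequency 500 gives
     ((30 - 10) * 500 + 500) / 1000 + 50 == 60,
   and inlining WHAT with global.size 20 and size_inlining_benefit 8 once
   into TO with global.size 40 gives (20 - 8) * 1 + 40 == 52.  */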
208
209 /* E is expected to be an edge being inlined.  Clone the destination node of
210    the edge and redirect the edge to the new clone.
211    DUPLICATE is used for bookkeeping on whether we are actually creating new
212    clones or re-using the node originally representing the out-of-line
213    function call.  */
214 void
215 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
216 bool update_original)
217 {
218 HOST_WIDE_INT peak;
219
220 if (duplicate)
221 {
222 /* We may eliminate the need for out-of-line copy to be output.
223 In that case just go ahead and re-use it. */
224 if (!e->callee->callers->next_caller
225 && !e->callee->needed
226 && !cgraph_new_nodes)
227 {
228 gcc_assert (!e->callee->global.inlined_to);
229 if (e->callee->analyzed)
230 {
231 overall_size -= e->callee->global.size;
232 nfunctions_inlined++;
233 }
234 duplicate = false;
235 }
236 else
237 {
238 struct cgraph_node *n;
239 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
240 update_original);
241 cgraph_redirect_edge_callee (e, n);
242 }
243 }
244
245 if (e->caller->global.inlined_to)
246 e->callee->global.inlined_to = e->caller->global.inlined_to;
247 else
248 e->callee->global.inlined_to = e->caller;
249 e->callee->global.stack_frame_offset
250 = e->caller->global.stack_frame_offset
251 + inline_summary (e->caller)->estimated_self_stack_size;
252 peak = e->callee->global.stack_frame_offset
253 + inline_summary (e->callee)->estimated_self_stack_size;
254 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
255 e->callee->global.inlined_to->global.estimated_stack_size = peak;
256
257 /* Recursively clone all bodies. */
258 for (e = e->callee->callees; e; e = e->next_callee)
259 if (!e->inline_failed)
260 cgraph_clone_inlined_nodes (e, duplicate, update_original);
261 }
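/* A made-up example of the stack bookkeeping above: if the caller is the
   root of the inline plan (stack_frame_offset 0) with an estimated self
   stack size of 32 bytes and the callee needs 16 bytes, the callee is
   placed at offset 32 and the peak becomes 48, so the root's
   estimated_stack_size is raised to 48 unless it was already larger.  */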
262
263 /* Mark edge E as inlined and update the callgraph accordingly.  UPDATE_ORIGINAL
264    specifies whether the profile of the original function should be updated.  If
265    any new indirect edges are discovered in the process, add them to NEW_EDGES,
266    unless it is NULL.  Return true iff any new callgraph edges were discovered
267    as a result of inlining.  */
268
269 static bool
270 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
271 VEC (cgraph_edge_p, heap) **new_edges)
272 {
273 int old_size = 0, new_size = 0;
274 struct cgraph_node *to = NULL, *what;
275 struct cgraph_edge *curr = e;
276
277 if (e->callee->inline_decl)
278 cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));
279
280 gcc_assert (e->inline_failed);
281 e->inline_failed = CIF_OK;
282
283 if (!e->callee->global.inlined)
284 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
285 e->callee->global.inlined = true;
286
287 cgraph_clone_inlined_nodes (e, true, update_original);
288
289 what = e->callee;
290
291 /* Now update size of caller and all functions caller is inlined into. */
292 for (;e && !e->inline_failed; e = e->caller->callers)
293 {
294 to = e->caller;
295 old_size = e->caller->global.size;
296 new_size = cgraph_estimate_size_after_inlining (1, to, what);
297 to->global.size = new_size;
298 to->global.time = cgraph_estimate_time_after_inlining (e->frequency, to, what);
299 }
300 gcc_assert (what->global.inlined_to == to);
301 if (new_size > old_size)
302 overall_size += new_size - old_size;
303 ncalls_inlined++;
304
305 if (flag_indirect_inlining)
306 return ipa_propagate_indirect_call_infos (curr, new_edges);
307 else
308 return false;
309 }
310
311 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
312    Return the following unredirected edge in the list of callers
313    of EDGE->CALLEE.  */
314
315 static struct cgraph_edge *
316 cgraph_mark_inline (struct cgraph_edge *edge)
317 {
318 struct cgraph_node *to = edge->caller;
319 struct cgraph_node *what = edge->callee;
320 struct cgraph_edge *e, *next;
321
322 gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
323 /* Look for all calls, mark them inline and clone recursively
324 all inlined functions. */
325 for (e = what->callers; e; e = next)
326 {
327 next = e->next_caller;
328 if (e->caller == to && e->inline_failed)
329 {
330 cgraph_mark_inline_edge (e, true, NULL);
331 if (e == edge)
332 edge = next;
333 }
334 }
335
336 return edge;
337 }
338
339 /* Estimate the growth caused by inlining NODE into all of its callers.  */
340
341 static int
342 cgraph_estimate_growth (struct cgraph_node *node)
343 {
344 int growth = 0;
345 struct cgraph_edge *e;
346 bool self_recursive = false;
347
348 if (node->global.estimated_growth != INT_MIN)
349 return node->global.estimated_growth;
350
351 for (e = node->callers; e; e = e->next_caller)
352 {
353 if (e->caller == node)
354 self_recursive = true;
355 if (e->inline_failed)
356 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
357 - e->caller->global.size);
358 }
359
360   /* ??? Wrong for non-trivially self-recursive functions or cases where
361      we decide not to inline for different reasons, but it is not a big deal:
362      in that case we will keep the body around, but we will also avoid
363      some inlining.  */
364 if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
365 growth -= node->global.size;
366
367 node->global.estimated_growth = growth;
368 return growth;
369 }
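/* A made-up example of the estimate above: for NODE with global.size 20 and
   size_inlining_benefit 5 that has three not-yet-inlined callers, the loop
   accumulates 3 * (20 - 5) == 45; if the offline body can be eliminated
   (NODE is not needed, not DECL_EXTERNAL and not self-recursive) the final
   estimate is 45 - 20 == 25.  */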
370
371 /* Return false when inlining WHAT into TO is not a good idea
372    because it would cause too large a growth of function bodies.
373    When ONE_ONLY is true, assume that only one call site is going
374    to be inlined; otherwise figure out how many call sites in
375    TO call WHAT and verify that all of them can be inlined.
376 */
377
378 static bool
379 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
380 cgraph_inline_failed_t *reason, bool one_only)
381 {
382 int times = 0;
383 struct cgraph_edge *e;
384 int newsize;
385 int limit;
386 HOST_WIDE_INT stack_size_limit, inlined_stack;
387
388 if (one_only)
389 times = 1;
390 else
391 for (e = to->callees; e; e = e->next_callee)
392 if (e->callee == what)
393 times++;
394
395 if (to->global.inlined_to)
396 to = to->global.inlined_to;
397
398   /* When inlining a large function body called once into a small function,
399      take the inlined function as the base for limiting the growth.  */
400 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
401 limit = inline_summary (to)->self_size;
402 else
403 limit = inline_summary (what)->self_size;
404
405 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
406
407 /* Check the size after inlining against the function limits. But allow
408 the function to shrink if it went over the limits by forced inlining. */
409 newsize = cgraph_estimate_size_after_inlining (times, to, what);
410 if (newsize >= to->global.size
411 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
412 && newsize > limit)
413 {
414 if (reason)
415 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
416 return false;
417 }
418
419 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
420
421 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
422
423 inlined_stack = (to->global.stack_frame_offset
424 + inline_summary (to)->estimated_self_stack_size
425 + what->global.estimated_stack_size);
426 if (inlined_stack > stack_size_limit
427 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
428 {
429 if (reason)
430 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
431 return false;
432 }
433 return true;
434 }
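/* A made-up example of the size check above: with caller self_size 400,
   callee self_size 50 and (hypothetically) --param large-function-growth=100,
   the limit is 400 + 400 * 100 / 100 == 800; the inlining is rejected with
   CIF_LARGE_FUNCTION_GROWTH_LIMIT only if the estimated new size does not
   shrink the caller and exceeds both --param large-function-insns and 800.
   The stack check works analogously with --param stack-frame-growth and
   --param large-stack-frame.  */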
435
436 /* Return true when function N is small enough to be inlined. */
437
438 static bool
439 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
440 {
441 tree decl = n->decl;
442
443 if (n->inline_decl)
444 decl = n->inline_decl;
445 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
446 {
447 if (reason)
448 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
449 return false;
450 }
451
452 if (!n->analyzed)
453 {
454 if (reason)
455 *reason = CIF_BODY_NOT_AVAILABLE;
456 return false;
457 }
458
459 if (DECL_DECLARED_INLINE_P (decl))
460 {
461 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
462 {
463 if (reason)
464 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
465 return false;
466 }
467 }
468 else
469 {
470 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
471 {
472 if (reason)
473 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
474 return false;
475 }
476 }
477
478 return true;
479 }
480
481 /* Return true when inlining WHAT into TO would create recursive inlining.
482    We call recursive inlining all cases where the same function appears more
483    than once on a single recursion nest path in the inline graph.  */
484
485 static bool
486 cgraph_recursive_inlining_p (struct cgraph_node *to,
487 struct cgraph_node *what,
488 cgraph_inline_failed_t *reason)
489 {
490 bool recursive;
491 if (to->global.inlined_to)
492 recursive = what->decl == to->global.inlined_to->decl;
493 else
494 recursive = what->decl == to->decl;
495   /* Marking a recursive function inline has sane semantics and thus we should
496      not warn about it.  */
497 if (recursive && reason)
498 *reason = (what->local.disregard_inline_limits
499 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
500 return recursive;
501 }
502
503 /* A cost model driving the inlining heuristics in a way such that the edges
504    with the smallest badness are inlined first.  After each inlining is
505    performed, the costs of all caller edges of the affected nodes are recomputed
506    so that the metrics may accurately depend on values such as the number of
507    inlinable callers of the function or the function body size.  */
508
509 static int
510 cgraph_edge_badness (struct cgraph_edge *edge)
511 {
512 int badness;
513 int growth =
514 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
515
516 growth -= edge->caller->global.size;
517
518 /* Always prefer inlining saving code size. */
519 if (growth <= 0)
520 badness = INT_MIN - growth;
521
522   /* When profiling is available, base priorities on -(#calls / growth),
523      so we optimize for the overall number of "executed" inlined calls.  */
524 else if (max_count)
525 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
526 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
527
528   /* When a function-local profile is available, base priorities on
529      growth / frequency, so we optimize for the overall frequency of inlined
530      calls.  This is not too accurate since, while the call might be frequent
531      within the function, the function itself may be infrequent.
532
533      Another objective to optimize for is the number of different calls inlined.
534      We add the estimated growth of inlining the callee into all of its callers
535      to bias the priorities slightly in this direction (so less frequently
536      called functions of the same size get priority).  */
537 else if (flag_guess_branch_prob)
538 {
539 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
540 badness = growth * 256;
541 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
542 / (edge->callee->global.time + 1) + 1, 100);
543
544
545       /* Compress the range so we don't overflow.  */
547 if (div > 256)
548 div = 256 + ceil_log2 (div) - 8;
549 if (div < 1)
550 div = 1;
551 if (badness > 0)
552 badness /= div;
553 badness += cgraph_estimate_growth (edge->callee);
554 }
555   /* When a function-local profile is not available or does not give
556      useful information (i.e. the frequency is zero), base the cost on
557      loop nest and overall size growth, so we optimize for the overall number
558      of functions fully inlined in the program.  */
559 else
560 {
561 int nest = MIN (edge->loop_nest, 8);
562 badness = cgraph_estimate_growth (edge->callee) * 256;
563
564 /* Decrease badness if call is nested. */
565 if (badness > 0)
566 badness >>= nest;
567 else
568 {
569 badness <<= nest;
570 }
571 }
572 /* Make recursive inlining happen always after other inlining is done. */
573 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
574 return badness + 1;
575 else
576 return badness;
577 }
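/* A made-up example for the -fguess-branch-probability case above: for an
   edge with frequency CGRAPH_FREQ_BASE (executed once per caller invocation),
   growth 10, callee time 99 and time_inlining_benefit 1, we get
   div = (100 + 1) * MIN (100 * 1 / 100 + 1, 100) == 101 * 2 == 202 and
   badness = 10 * 256 / 202 == 12, plus the overall growth estimate of the
   callee.  Smaller badness means the edge is popped from the heap earlier;
   edges whose growth is not positive get a badness close to INT_MIN and are
   therefore always considered first.  */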
578
579 /* Recompute heap nodes for each caller edge of NODE.  */
580
581 static void
582 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
583 bitmap updated_nodes)
584 {
585 struct cgraph_edge *edge;
586 cgraph_inline_failed_t failed_reason;
587
588 if (!node->local.inlinable || node->local.disregard_inline_limits
589 || node->global.inlined_to)
590 return;
591 if (bitmap_bit_p (updated_nodes, node->uid))
592 return;
593 bitmap_set_bit (updated_nodes, node->uid);
594 node->global.estimated_growth = INT_MIN;
595
596 if (!node->local.inlinable)
597 return;
598 /* Prune out edges we won't inline into anymore. */
599 if (!cgraph_default_inline_p (node, &failed_reason))
600 {
601 for (edge = node->callers; edge; edge = edge->next_caller)
602 if (edge->aux)
603 {
604 fibheap_delete_node (heap, (fibnode_t) edge->aux);
605 edge->aux = NULL;
606 if (edge->inline_failed)
607 edge->inline_failed = failed_reason;
608 }
609 return;
610 }
611
612 for (edge = node->callers; edge; edge = edge->next_caller)
613 if (edge->inline_failed)
614 {
615 int badness = cgraph_edge_badness (edge);
616 if (edge->aux)
617 {
618 fibnode_t n = (fibnode_t) edge->aux;
619 gcc_assert (n->data == edge);
620 if (n->key == badness)
621 continue;
622
623           /* fibheap_replace_key only increases the keys.  */
624 if (fibheap_replace_key (heap, n, badness))
625 continue;
626 fibheap_delete_node (heap, (fibnode_t) edge->aux);
627 }
628 edge->aux = fibheap_insert (heap, badness, edge);
629 }
630 }
631
632 /* Recompute heap nodes for the caller edges of each of NODE's callees.  */
633
634 static void
635 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
636 bitmap updated_nodes)
637 {
638 struct cgraph_edge *e;
639 node->global.estimated_growth = INT_MIN;
640
641 for (e = node->callees; e; e = e->next_callee)
642 if (e->inline_failed)
643 update_caller_keys (heap, e->callee, updated_nodes);
644     else
645 update_callee_keys (heap, e->callee, updated_nodes);
646 }
647
648 /* Enqueue all recursive calls from NODE into the priority queue HEAP,
649    ordered by how likely we are to want to recursively inline the call.  */
650
651 static void
652 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
653 fibheap_t heap)
654 {
655 static int priority;
656 struct cgraph_edge *e;
657 for (e = where->callees; e; e = e->next_callee)
658 if (e->callee == node)
659 {
660         /* When profile feedback is available, prioritize by the expected number
661            of calls.  Without profile feedback we maintain a simple queue
662            to order candidates by recursion depth.  */
663 fibheap_insert (heap,
664 !max_count ? priority++
665 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
666 e);
667 }
668 for (e = where->callees; e; e = e->next_callee)
669 if (!e->inline_failed)
670 lookup_recursive_calls (node, e->callee, heap);
671 }
672
673 /* Decide on recursive inlining: in case the function has recursive calls,
674    inline until the body size reaches the limit given by the parameters.  If any
675    new indirect edges are discovered in the process, add them to *NEW_EDGES,
676    unless NEW_EDGES is NULL.  */
677
678 static bool
679 cgraph_decide_recursive_inlining (struct cgraph_node *node,
680 VEC (cgraph_edge_p, heap) **new_edges)
681 {
682 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
683 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
684 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
685 fibheap_t heap;
686 struct cgraph_edge *e;
687 struct cgraph_node *master_clone, *next;
688 int depth = 0;
689 int n = 0;
690
691 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
692 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
693 return false;
694
695 if (DECL_DECLARED_INLINE_P (node->decl))
696 {
697 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
698 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
699 }
700
701 /* Make sure that function is small enough to be considered for inlining. */
702 if (!max_depth
703 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
704 return false;
705 heap = fibheap_new ();
706 lookup_recursive_calls (node, node, heap);
707 if (fibheap_empty (heap))
708 {
709 fibheap_delete (heap);
710 return false;
711 }
712
713 if (dump_file)
714 fprintf (dump_file,
715 " Performing recursive inlining on %s\n",
716 cgraph_node_name (node));
717
718 /* We need original clone to copy around. */
719 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
720 master_clone->needed = true;
721 for (e = master_clone->callees; e; e = e->next_callee)
722 if (!e->inline_failed)
723 cgraph_clone_inlined_nodes (e, true, false);
724
725   /* Do the inlining and update the list of recursive calls during the process.  */
726 while (!fibheap_empty (heap)
727 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
728 <= limit))
729 {
730 struct cgraph_edge *curr
731 = (struct cgraph_edge *) fibheap_extract_min (heap);
732 struct cgraph_node *cnode;
733
734 depth = 1;
735 for (cnode = curr->caller;
736 cnode->global.inlined_to; cnode = cnode->callers->caller)
737 if (node->decl == curr->callee->decl)
738 depth++;
739 if (depth > max_depth)
740 {
741 if (dump_file)
742 fprintf (dump_file,
743 " maximal depth reached\n");
744 continue;
745 }
746
747 if (max_count)
748 {
749 if (!cgraph_maybe_hot_edge_p (curr))
750 {
751 if (dump_file)
752 fprintf (dump_file, " Not inlining cold call\n");
753 continue;
754 }
755 if (curr->count * 100 / node->count < probability)
756 {
757 if (dump_file)
758 fprintf (dump_file,
759 " Probability of edge is too small\n");
760 continue;
761 }
762 }
763
764 if (dump_file)
765 {
766 fprintf (dump_file,
767 " Inlining call of depth %i", depth);
768 if (node->count)
769 {
770 fprintf (dump_file, " called approx. %.2f times per call",
771 (double)curr->count / node->count);
772 }
773 fprintf (dump_file, "\n");
774 }
775 cgraph_redirect_edge_callee (curr, master_clone);
776 cgraph_mark_inline_edge (curr, false, new_edges);
777 lookup_recursive_calls (node, curr->callee, heap);
778 n++;
779 }
780 if (!fibheap_empty (heap) && dump_file)
781 fprintf (dump_file, " Recursive inlining growth limit met.\n");
782
783 fibheap_delete (heap);
784 if (dump_file)
785 fprintf (dump_file,
786 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
787 master_clone->global.size, node->global.size,
788 master_clone->global.time, node->global.time);
789
790   /* Remove the master clone we used for inlining.  We rely on the fact that
791      clones inlined into the master clone get queued just before the master
792      clone, so we don't need recursion.  */
793 for (node = cgraph_nodes; node != master_clone;
794 node = next)
795 {
796 next = node->next;
797 if (node->global.inlined_to == master_clone)
798 cgraph_remove_node (node);
799 }
800 cgraph_remove_node (master_clone);
801 /* FIXME: Recursive inlining actually reduces number of calls of the
802 function. At this place we should probably walk the function and
803 inline clones and compensate the counts accordingly. This probably
804 doesn't matter much in practice. */
805 return n > 0;
806 }
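/* A made-up example of the probability cut-off above: with profile feedback,
   if NODE is entered 1000 times and a particular recursive edge is executed
   40 times, then 40 * 100 / 1000 == 4, so with (hypothetically)
   --param min-inline-recursive-probability=10 that edge is skipped as too
   improbable to be worth unrolling the recursion for.  */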
807
808 /* Set inline_failed for all callers of given function to REASON. */
809
810 static void
811 cgraph_set_inline_failed (struct cgraph_node *node,
812 cgraph_inline_failed_t reason)
813 {
814 struct cgraph_edge *e;
815
816 if (dump_file)
817 fprintf (dump_file, "Inlining failed: %s\n",
818 cgraph_inline_failed_string (reason));
819 for (e = node->callers; e; e = e->next_caller)
820 if (e->inline_failed)
821 e->inline_failed = reason;
822 }
823
824 /* Given a whole-compilation-unit size estimate of INSNS, compute how large
825    we can allow the unit to grow.  */
826 static int
827 compute_max_insns (int insns)
828 {
829 int max_insns = insns;
830 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
831 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
832
833 return ((HOST_WIDEST_INT) max_insns
834 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
835 }
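/* A made-up example: for a unit currently estimated at 50000 instructions
   and (hypothetically) --param large-unit-insns=10000 and
   --param inline-unit-growth=30, the cap is 50000 * (100 + 30) / 100
   == 65000; a unit smaller than large-unit-insns is first rounded up to
   that value before the percentage is applied.  */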
836
837 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
838 static void
839 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
840 {
841 while (VEC_length (cgraph_edge_p, new_edges) > 0)
842 {
843 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
844
845 gcc_assert (!edge->aux);
846 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
847 }
848 }
849
850
851 /* We use a greedy algorithm for inlining small functions:
852    all inline candidates are put into a prioritized heap ordered by the
853    estimated growth of the overall number of instructions, and the estimates
854    are updated after each inlining is performed.  */
857
858 static void
859 cgraph_decide_inlining_of_small_functions (void)
860 {
861 struct cgraph_node *node;
862 struct cgraph_edge *edge;
863 cgraph_inline_failed_t failed_reason;
864 fibheap_t heap = fibheap_new ();
865 bitmap updated_nodes = BITMAP_ALLOC (NULL);
866 int min_size, max_size;
867 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
868
869 if (flag_indirect_inlining)
870 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
871
872 if (dump_file)
873 fprintf (dump_file, "\nDeciding on smaller functions:\n");
874
875 /* Put all inline candidates into the heap. */
876
877 for (node = cgraph_nodes; node; node = node->next)
878 {
879 if (!node->local.inlinable || !node->callers
880 || node->local.disregard_inline_limits)
881 continue;
882 if (dump_file)
883 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
884
885 node->global.estimated_growth = INT_MIN;
886 if (!cgraph_default_inline_p (node, &failed_reason))
887 {
888 cgraph_set_inline_failed (node, failed_reason);
889 continue;
890 }
891
892 for (edge = node->callers; edge; edge = edge->next_caller)
893 if (edge->inline_failed)
894 {
895 gcc_assert (!edge->aux);
896 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
897 }
898 }
899
900 max_size = compute_max_insns (overall_size);
901 min_size = overall_size;
902
903 while (overall_size <= max_size
904 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
905 {
906 int old_size = overall_size;
907 struct cgraph_node *where;
908 int growth =
909 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
910 cgraph_inline_failed_t not_good = CIF_OK;
911
912 growth -= edge->caller->global.size;
913
914 if (dump_file)
915 {
916 fprintf (dump_file,
917 "\nConsidering %s with %i size\n",
918 cgraph_node_name (edge->callee),
919 edge->callee->global.size);
920 fprintf (dump_file,
921 " to be inlined into %s in %s:%i\n"
922 " Estimated growth after inlined into all callees is %+i insns.\n"
923 " Estimated badness is %i, frequency %.2f.\n",
924 cgraph_node_name (edge->caller),
925 gimple_filename ((const_gimple) edge->call_stmt),
926 gimple_lineno ((const_gimple) edge->call_stmt),
927 cgraph_estimate_growth (edge->callee),
928 cgraph_edge_badness (edge),
929 edge->frequency / (double)CGRAPH_FREQ_BASE);
930 if (edge->count)
931 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
932 }
933 gcc_assert (edge->aux);
934 edge->aux = NULL;
935 if (!edge->inline_failed)
936 continue;
937
938       /* When we do not have profile info ready, we do not weight the position
939          of the call within the procedure in any way.  This means that if a call
940          of function A from function B seems profitable to inline, the recursive
941          call of function A in the inlined copy of A in B will look profitable
942          too, and we end up inlining until reaching the maximal function growth.
943          This is not a good idea, so prohibit the recursive inlining.
944
945          ??? When the frequencies are taken into account we might not need this
946          restriction.
947
948          We need to be careful here; in some testcases, e.g. directives.c in
949          libcpp, we can estimate a self-recursive function to have negative
950          growth when inlined completely.
951       */
952 if (!edge->count)
953 {
954 where = edge->caller;
955 while (where->global.inlined_to)
956 {
957 if (where->decl == edge->callee->decl)
958 break;
959 where = where->callers->caller;
960 }
961 if (where->global.inlined_to)
962 {
963 edge->inline_failed
964 = (edge->callee->local.disregard_inline_limits
965 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
966 if (dump_file)
967 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
968 continue;
969 }
970 }
971
972 if (!cgraph_maybe_hot_edge_p (edge))
973 not_good = CIF_UNLIKELY_CALL;
974 if (!flag_inline_functions
975 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
976 not_good = CIF_NOT_DECLARED_INLINED;
977 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
978 not_good = CIF_OPTIMIZING_FOR_SIZE;
979 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
980 {
981 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
982 &edge->inline_failed))
983 {
984 edge->inline_failed = not_good;
985 if (dump_file)
986 fprintf (dump_file, " inline_failed:%s.\n",
987 cgraph_inline_failed_string (edge->inline_failed));
988 }
989 continue;
990 }
991 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
992 {
993 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
994 &edge->inline_failed))
995 {
996 if (dump_file)
997 fprintf (dump_file, " inline_failed:%s.\n",
998 cgraph_inline_failed_string (edge->inline_failed));
999 }
1000 continue;
1001 }
1002 if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
1003 {
1004 gimple_call_set_cannot_inline (edge->call_stmt, true);
1005 edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
1006 if (dump_file)
1007 fprintf (dump_file, " inline_failed:%s.\n",
1008 cgraph_inline_failed_string (edge->inline_failed));
1009 continue;
1010 }
1011 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1012 &edge->inline_failed))
1013 {
1014 where = edge->caller;
1015 if (where->global.inlined_to)
1016 where = where->global.inlined_to;
1017 if (!cgraph_decide_recursive_inlining (where,
1018 flag_indirect_inlining
1019 ? &new_indirect_edges : NULL))
1020 continue;
1021 if (flag_indirect_inlining)
1022 add_new_edges_to_heap (heap, new_indirect_edges);
1023 update_callee_keys (heap, where, updated_nodes);
1024 }
1025 else
1026 {
1027 struct cgraph_node *callee;
1028 if (gimple_call_cannot_inline_p (edge->call_stmt)
1029 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1030 &edge->inline_failed, true))
1031 {
1032 if (dump_file)
1033 fprintf (dump_file, " Not inlining into %s:%s.\n",
1034 cgraph_node_name (edge->caller),
1035 cgraph_inline_failed_string (edge->inline_failed));
1036 continue;
1037 }
1038 callee = edge->callee;
1039 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1040 if (flag_indirect_inlining)
1041 add_new_edges_to_heap (heap, new_indirect_edges);
1042
1043 update_callee_keys (heap, callee, updated_nodes);
1044 }
1045 where = edge->caller;
1046 if (where->global.inlined_to)
1047 where = where->global.inlined_to;
1048
1049 /* Our profitability metric can depend on local properties
1050 such as number of inlinable calls and size of the function body.
1051 After inlining these properties might change for the function we
1052 inlined into (since it's body size changed) and for the functions
1053 called by function we inlined (since number of it inlinable callers
1054 might change). */
1055 update_caller_keys (heap, where, updated_nodes);
1056 bitmap_clear (updated_nodes);
1057
1058 if (dump_file)
1059 {
1060 fprintf (dump_file,
1061                      " Inlined into %s which now has time %i and size %i,"
1062                      " net change of %+i.\n",
1063 cgraph_node_name (edge->caller),
1064 edge->caller->global.time,
1065 edge->caller->global.size,
1066 overall_size - old_size);
1067 }
1068 if (min_size > overall_size)
1069 {
1070 min_size = overall_size;
1071 max_size = compute_max_insns (min_size);
1072
1073 if (dump_file)
1074 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1075 }
1076 }
1077 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1078 {
1079 gcc_assert (edge->aux);
1080 edge->aux = NULL;
1081 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1082 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1083 &edge->inline_failed))
1084 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1085 }
1086
1087 if (new_indirect_edges)
1088 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1089 fibheap_delete (heap);
1090 BITMAP_FREE (updated_nodes);
1091 }
1092
1093 /* Decide on the inlining.  We do so in topological order to avoid
1094    the expense of updating data structures.  */
1095
1096 static unsigned int
1097 cgraph_decide_inlining (void)
1098 {
1099 struct cgraph_node *node;
1100 int nnodes;
1101 struct cgraph_node **order =
1102 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1103 int old_size = 0;
1104 int i;
1105 bool redo_always_inline = true;
1106 int initial_size = 0;
1107
1108 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1109
1110 max_count = 0;
1111 max_benefit = 0;
1112 for (node = cgraph_nodes; node; node = node->next)
1113 if (node->analyzed)
1114 {
1115 struct cgraph_edge *e;
1116
1117 gcc_assert (inline_summary (node)->self_size == node->global.size);
1118 gcc_assert (node->needed || node->reachable);
1119 initial_size += node->global.size;
1120 for (e = node->callees; e; e = e->next_callee)
1121 if (max_count < e->count)
1122 max_count = e->count;
1123 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1124 max_benefit = inline_summary (node)->time_inlining_benefit;
1125 }
1126 gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
1127 overall_size = initial_size;
1128
1129 nnodes = cgraph_postorder (order);
1130
1131 if (dump_file)
1132 fprintf (dump_file,
1133 "\nDeciding on inlining. Starting with size %i.\n",
1134 initial_size);
1135
1136 for (node = cgraph_nodes; node; node = node->next)
1137 node->aux = 0;
1138
1139 if (dump_file)
1140 fprintf (dump_file, "\nInlining always_inline functions:\n");
1141
1142 /* In the first pass mark all always_inline edges. Do this with a priority
1143 so none of our later choices will make this impossible. */
1144 while (redo_always_inline)
1145 {
1146 redo_always_inline = false;
1147 for (i = nnodes - 1; i >= 0; i--)
1148 {
1149 struct cgraph_edge *e, *next;
1150
1151 node = order[i];
1152
1153 /* Handle nodes to be flattened, but don't update overall unit
1154 size. */
1155 if (lookup_attribute ("flatten",
1156 DECL_ATTRIBUTES (node->decl)) != NULL)
1157 {
1158 if (dump_file)
1159 fprintf (dump_file,
1160 "Flattening %s\n", cgraph_node_name (node));
1161 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1162 }
1163
1164 if (!node->local.disregard_inline_limits)
1165 continue;
1166 if (dump_file)
1167 fprintf (dump_file,
1168 "\nConsidering %s size:%i (always inline)\n",
1169 cgraph_node_name (node), node->global.size);
1170 old_size = overall_size;
1171 for (e = node->callers; e; e = next)
1172 {
1173 next = e->next_caller;
1174 if (!e->inline_failed
1175 || gimple_call_cannot_inline_p (e->call_stmt))
1176 continue;
1177 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1178 &e->inline_failed))
1179 continue;
1180 if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
1181 {
1182 gimple_call_set_cannot_inline (e->call_stmt, true);
1183 continue;
1184 }
1185 if (cgraph_mark_inline_edge (e, true, NULL))
1186 redo_always_inline = true;
1187 if (dump_file)
1188 fprintf (dump_file,
1189 " Inlined into %s which now has size %i.\n",
1190 cgraph_node_name (e->caller),
1191 e->caller->global.size);
1192 }
1193           /* Inlining a self-recursive function might introduce new calls to
1194              itself that we didn't see in the loop above.  Fill in the proper
1195              reason why the inlining failed.  */
1196 for (e = node->callers; e; e = e->next_caller)
1197 if (e->inline_failed)
1198 e->inline_failed = CIF_RECURSIVE_INLINING;
1199 if (dump_file)
1200 fprintf (dump_file,
1201 " Inlined for a net change of %+i size.\n",
1202 overall_size - old_size);
1203 }
1204 }
1205
1206 cgraph_decide_inlining_of_small_functions ();
1207
1208 if (flag_inline_functions_called_once)
1209 {
1210 if (dump_file)
1211 fprintf (dump_file, "\nDeciding on functions called once:\n");
1212
1213 /* And finally decide what functions are called once. */
1214 for (i = nnodes - 1; i >= 0; i--)
1215 {
1216 node = order[i];
1217
1218 if (node->callers
1219 && !node->callers->next_caller
1220 && !node->needed
1221 && node->local.inlinable
1222 && node->callers->inline_failed
1223 && !gimple_call_cannot_inline_p (node->callers->call_stmt)
1224 && !DECL_EXTERNAL (node->decl)
1225 && !DECL_COMDAT (node->decl))
1226 {
1227 if (dump_file)
1228 {
1229 fprintf (dump_file,
1230 "\nConsidering %s size %i.\n",
1231 cgraph_node_name (node), node->global.size);
1232 fprintf (dump_file,
1233 " Called once from %s %i insns.\n",
1234 cgraph_node_name (node->callers->caller),
1235 node->callers->caller->global.size);
1236 }
1237
1238 if (cgraph_check_inline_limits (node->callers->caller, node,
1239 NULL, false))
1240 {
1241 cgraph_mark_inline (node->callers);
1242 if (dump_file)
1243 fprintf (dump_file,
1244 " Inlined into %s which now has %i size"
1245 " for a net change of %+i size.\n",
1246 cgraph_node_name (node->callers->caller),
1247 node->callers->caller->global.size,
1248 overall_size - old_size);
1249 }
1250 else
1251 {
1252 if (dump_file)
1253 fprintf (dump_file,
1254 " Inline limit reached, not inlined.\n");
1255 }
1256 }
1257 }
1258 }
1259
1260 /* Free ipa-prop structures if they are no longer needed. */
1261 if (flag_indirect_inlining)
1262 free_all_ipa_structures_after_iinln ();
1263
1264 if (dump_file)
1265 fprintf (dump_file,
1266 "\nInlined %i calls, eliminated %i functions, "
1267 "size %i turned to %i size.\n\n",
1268 ncalls_inlined, nfunctions_inlined, initial_size,
1269 overall_size);
1270 free (order);
1271 return 0;
1272 }
1273
1274 /* Try to inline edge E from incremental inliner. MODE specifies mode
1275 of inliner.
1276
1277    We detect cycles by storing the mode of the inliner into the cgraph_node the
1278    last time we visited it in the recursion.  In general, when the mode is set,
1279    we have recursive inlining, but as a special case we want to try harder to
1280    inline ALWAYS_INLINE functions: consider the callgraph a->b->c->b, with a
1281    being flatten and b being always inline.  Flattening 'a' will collapse
1282    a->b->c before hitting the cycle.  To accommodate always inline, we however
1283    need to inline a->b->c->b.
1284
1285    So after hitting the cycle the first time, we switch into ALWAYS_INLINE mode
1286    and stop inlining only after hitting always_inline in ALWAYS_INLINE mode.  */
1287 static bool
1288 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1289 {
1290 struct cgraph_node *callee = e->callee;
1291 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1292 bool always_inline = e->callee->local.disregard_inline_limits;
1293
1294   /* Have we hit a cycle?  */
1295 if (callee_mode)
1296 {
1297       /* This is the first time we have hit the cycle; we are not in
1298          ALWAYS_INLINE-only mode yet and the function in question is always_inline.  */
1299 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1300 {
1301 if (dump_file)
1302 {
1303 indent_to (dump_file, depth);
1304 fprintf (dump_file,
1305 "Hit cycle in %s, switching to always inline only.\n",
1306 cgraph_node_name (callee));
1307 }
1308 mode = INLINE_ALWAYS_INLINE;
1309 }
1310 /* Otherwise it is time to give up. */
1311 else
1312 {
1313 if (dump_file)
1314 {
1315 indent_to (dump_file, depth);
1316 fprintf (dump_file,
1317 "Not inlining %s into %s to avoid cycle.\n",
1318 cgraph_node_name (callee),
1319 cgraph_node_name (e->caller));
1320 }
1321 e->inline_failed = (e->callee->local.disregard_inline_limits
1322 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1323 return false;
1324 }
1325 }
1326
1327 callee->aux = (void *)(size_t) mode;
1328 if (dump_file)
1329 {
1330 indent_to (dump_file, depth);
1331 fprintf (dump_file, " Inlining %s into %s.\n",
1332 cgraph_node_name (e->callee),
1333 cgraph_node_name (e->caller));
1334 }
1335 if (e->inline_failed)
1336 {
1337 cgraph_mark_inline (e);
1338
1339 /* In order to fully inline always_inline functions, we need to
1340 recurse here, since the inlined functions might not be processed by
1341 incremental inlining at all yet.
1342
1343 Also flattening needs to be done recursively. */
1344
1345 if (mode == INLINE_ALL || always_inline)
1346 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1347 }
1348 callee->aux = (void *)(size_t) callee_mode;
1349 return true;
1350 }
1351
1352 /* Decide on the inlining.  We do so in topological order to avoid
1353    the expense of updating data structures.
1354    DEPTH is the depth of recursion, used only for debug output.  */
1355
1356 static bool
1357 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1358 enum inlining_mode mode,
1359 int depth)
1360 {
1361 struct cgraph_edge *e;
1362 bool inlined = false;
1363 cgraph_inline_failed_t failed_reason;
1364 enum inlining_mode old_mode;
1365
1366 #ifdef ENABLE_CHECKING
1367 verify_cgraph_node (node);
1368 #endif
1369
1370 old_mode = (enum inlining_mode) (size_t)node->aux;
1371
1372 if (mode != INLINE_ALWAYS_INLINE
1373 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1374 {
1375 if (dump_file)
1376 {
1377 indent_to (dump_file, depth);
1378 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1379 }
1380 mode = INLINE_ALL;
1381 }
1382
1383 node->aux = (void *)(size_t) mode;
1384
1385 /* First of all look for always inline functions. */
1386 for (e = node->callees; e; e = e->next_callee)
1387 {
1388 if (!e->callee->local.disregard_inline_limits
1389 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1390 continue;
1391 if (gimple_call_cannot_inline_p (e->call_stmt))
1392 continue;
1393 /* When the edge is already inlined, we just need to recurse into
1394 it in order to fully flatten the leaves. */
1395 if (!e->inline_failed && mode == INLINE_ALL)
1396 {
1397 inlined |= try_inline (e, mode, depth);
1398 continue;
1399 }
1400 if (dump_file)
1401 {
1402 indent_to (dump_file, depth);
1403 fprintf (dump_file,
1404 "Considering to always inline inline candidate %s.\n",
1405 cgraph_node_name (e->callee));
1406 }
1407 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1408 {
1409 if (dump_file)
1410 {
1411 indent_to (dump_file, depth);
1412 fprintf (dump_file, "Not inlining: recursive call.\n");
1413 }
1414 continue;
1415 }
1416 if (!tree_can_inline_p (node->decl, e->callee->decl))
1417 {
1418 gimple_call_set_cannot_inline (e->call_stmt, true);
1419 if (dump_file)
1420 {
1421 indent_to (dump_file, depth);
1422 fprintf (dump_file,
1423 "Not inlining: Target specific option mismatch.\n");
1424 }
1425 continue;
1426 }
1427 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1428 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1429 {
1430 if (dump_file)
1431 {
1432 indent_to (dump_file, depth);
1433 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1434 }
1435 continue;
1436 }
1437 if (!e->callee->analyzed && !e->callee->inline_decl)
1438 {
1439 if (dump_file)
1440 {
1441 indent_to (dump_file, depth);
1442 fprintf (dump_file,
1443 "Not inlining: Function body no longer available.\n");
1444 }
1445 continue;
1446 }
1447 inlined |= try_inline (e, mode, depth);
1448 }
1449
1450 /* Now do the automatic inlining. */
1451 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
1452 for (e = node->callees; e; e = e->next_callee)
1453 {
1454 int allowed_growth = 0;
1455 if (!e->callee->local.inlinable
1456 || !e->inline_failed
1457 || e->callee->local.disregard_inline_limits)
1458 continue;
1459 if (dump_file)
1460 fprintf (dump_file, "Considering inline candidate %s.\n",
1461 cgraph_node_name (e->callee));
1462 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1463 {
1464 if (dump_file)
1465 {
1466 indent_to (dump_file, depth);
1467 fprintf (dump_file, "Not inlining: recursive call.\n");
1468 }
1469 continue;
1470 }
1471 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1472 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1473 {
1474 if (dump_file)
1475 {
1476 indent_to (dump_file, depth);
1477 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1478 }
1479 continue;
1480 }
1481
1482 if (cgraph_maybe_hot_edge_p (e))
1483 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1484
1485       /* When the function body would grow and inlining the function won't
1486          eliminate the need for an offline copy of the function, don't
1487          inline.  */
1488 if ((mode == INLINE_SIZE
1489 || (!flag_inline_functions
1490 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1491 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1492 >= e->caller->global.size + allowed_growth)
1493 && cgraph_estimate_growth (e->callee) >= allowed_growth)
1494 {
1495 if (dump_file)
1496 {
1497 indent_to (dump_file, depth);
1498 fprintf (dump_file,
1499 "Not inlining: code size would grow by %i.\n",
1500 cgraph_estimate_size_after_inlining (1, e->caller,
1501 e->callee)
1502 - e->caller->global.size);
1503 }
1504 continue;
1505 }
1506 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1507 false)
1508 || gimple_call_cannot_inline_p (e->call_stmt))
1509 {
1510 if (dump_file)
1511 {
1512 indent_to (dump_file, depth);
1513 fprintf (dump_file, "Not inlining: %s.\n",
1514 cgraph_inline_failed_string (e->inline_failed));
1515 }
1516 continue;
1517 }
1518 if (!e->callee->analyzed && !e->callee->inline_decl)
1519 {
1520 if (dump_file)
1521 {
1522 indent_to (dump_file, depth);
1523 fprintf (dump_file,
1524 "Not inlining: Function body no longer available.\n");
1525 }
1526 continue;
1527 }
1528 if (!tree_can_inline_p (node->decl, e->callee->decl))
1529 {
1530 gimple_call_set_cannot_inline (e->call_stmt, true);
1531 if (dump_file)
1532 {
1533 indent_to (dump_file, depth);
1534 fprintf (dump_file,
1535 "Not inlining: Target specific option mismatch.\n");
1536 }
1537 continue;
1538 }
1539 if (cgraph_default_inline_p (e->callee, &failed_reason))
1540 inlined |= try_inline (e, mode, depth);
1541 }
1542 node->aux = (void *)(size_t) old_mode;
1543 return inlined;
1544 }
1545
1546 /* Because inlining might remove no-longer-reachable nodes, we need to
1547    keep the array visible to the garbage collector to avoid reading
1548    collected-out nodes.  */
1549 static int nnodes;
1550 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1551
1552 /* Do inlining of small functions. Doing so early helps profiling and other
1553 passes to be somewhat more effective and avoids some code duplication in
1554 later real inlining pass for testcases with very many function calls. */
1555 static unsigned int
1556 cgraph_early_inlining (void)
1557 {
1558 struct cgraph_node *node = cgraph_node (current_function_decl);
1559 unsigned int todo = 0;
1560
1561 if (sorrycount || errorcount)
1562 return 0;
1563 if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
1564 {
1565 timevar_push (TV_INTEGRATION);
1566 todo = optimize_inline_calls (current_function_decl);
1567 timevar_pop (TV_INTEGRATION);
1568 }
1569 cfun->always_inline_functions_inlined = true;
1570 return todo;
1571 }
1572
1573 /* When inlining shall be performed. */
1574 static bool
1575 cgraph_gate_early_inlining (void)
1576 {
1577 return flag_early_inlining;
1578 }
1579
1580 struct gimple_opt_pass pass_early_inline =
1581 {
1582 {
1583 GIMPLE_PASS,
1584 "einline", /* name */
1585 cgraph_gate_early_inlining, /* gate */
1586 cgraph_early_inlining, /* execute */
1587 NULL, /* sub */
1588 NULL, /* next */
1589 0, /* static_pass_number */
1590 TV_INLINE_HEURISTICS, /* tv_id */
1591 0, /* properties_required */
1592 0, /* properties_provided */
1593 0, /* properties_destroyed */
1594 0, /* todo_flags_start */
1595 TODO_dump_func /* todo_flags_finish */
1596 }
1597 };
1598
1599 /* When inlining shall be performed. */
1600 static bool
1601 cgraph_gate_ipa_early_inlining (void)
1602 {
1603 return (flag_early_inlining
1604 && (flag_branch_probabilities || flag_test_coverage
1605 || profile_arc_flag));
1606 }
1607
1608 /* IPA pass wrapper for the early inlining pass.  We need to run early inlining
1609    before tree profiling, so we have a stand-alone IPA pass for doing so.  */
1610 struct simple_ipa_opt_pass pass_ipa_early_inline =
1611 {
1612 {
1613 SIMPLE_IPA_PASS,
1614 "einline_ipa", /* name */
1615 cgraph_gate_ipa_early_inlining, /* gate */
1616 NULL, /* execute */
1617 NULL, /* sub */
1618 NULL, /* next */
1619 0, /* static_pass_number */
1620 TV_INLINE_HEURISTICS, /* tv_id */
1621 0, /* properties_required */
1622 0, /* properties_provided */
1623 0, /* properties_destroyed */
1624 0, /* todo_flags_start */
1625 TODO_dump_cgraph /* todo_flags_finish */
1626 }
1627 };
1628
1629 /* See if the statement might disappear after inlining.  We are not terribly
1630    sophisticated, basically looking for simple abstraction penalty wrappers.  */
1631
1632 static bool
1633 likely_eliminated_by_inlining_p (gimple stmt)
1634 {
1635 enum gimple_code code = gimple_code (stmt);
1636 switch (code)
1637 {
1638 case GIMPLE_RETURN:
1639 return true;
1640 case GIMPLE_ASSIGN:
1641 if (gimple_num_ops (stmt) != 2)
1642 return false;
1643
1644 /* Casts of parameters, loads from parameters passed by reference
1645 and stores to return value or parameters are probably free after
1646 inlining. */
1647 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1648 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1649 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1650 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1651 {
1652 tree rhs = gimple_assign_rhs1 (stmt);
1653 tree lhs = gimple_assign_lhs (stmt);
1654 tree inner_rhs = rhs;
1655 tree inner_lhs = lhs;
1656 bool rhs_free = false;
1657 bool lhs_free = false;
1658
1659 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1660 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1661 while (handled_component_p (inner_rhs)
1662 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1663 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1664
1665
1666 if (TREE_CODE (inner_rhs) == PARM_DECL
1667 || (TREE_CODE (inner_rhs) == SSA_NAME
1668 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1669 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1670 rhs_free = true;
1671 if (rhs_free && is_gimple_reg (lhs))
1672 lhs_free = true;
1673 if (((TREE_CODE (inner_lhs) == PARM_DECL
1674 || (TREE_CODE (inner_lhs) == SSA_NAME
1675 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1676 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1677 && inner_lhs != lhs)
1678 || TREE_CODE (inner_lhs) == RESULT_DECL
1679 || (TREE_CODE (inner_lhs) == SSA_NAME
1680 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1681 lhs_free = true;
1682 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1683 rhs_free = true;
1684 if (lhs_free && rhs_free)
1685 return true;
1686 }
1687 return false;
1688 default:
1689 return false;
1690 }
1691 }
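/* For illustration (made-up code, not from any testcase), in an accessor like

     static int get_x (const struct point *p) { return p->x; }

   both the load from the parameter passed by reference and the return
   statement match the patterns above, so the whole body is expected to
   disappear once the call is inlined, and its cost is accounted as an
   inlining benefit by estimate_function_body_sizes below.  */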
1692
1693 /* Compute function body size parameters for NODE. */
1694
1695 static void
1696 estimate_function_body_sizes (struct cgraph_node *node)
1697 {
1698 gcov_type time = 0;
1699 gcov_type time_inlining_benefit = 0;
1700 int size = 0;
1701 int size_inlining_benefit = 0;
1702 basic_block bb;
1703 gimple_stmt_iterator bsi;
1704 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1705 tree arg;
1706 int freq;
1707 tree funtype = TREE_TYPE (node->decl);
1708 bitmap must_not_throw = must_not_throw_labels ();
1709
1710 if (dump_file)
1711 {
1712 fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));
1713 }
1714
1715 gcc_assert (my_function && my_function->cfg);
1716 FOR_EACH_BB_FN (bb, my_function)
1717 {
1718 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1719 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1720 {
1721 int this_size = estimate_num_insns (gsi_stmt (bsi), &eni_size_weights);
1722 int this_time = estimate_num_insns (gsi_stmt (bsi), &eni_time_weights);
1723
1724           /* MUST_NOT_THROW is usually handled by the runtime calling terminate
1725              and stopping stack unwinding.  However, when there is a local cleanup
1726              that can resume to MUST_NOT_THROW, we generate an explicit handler
1727              containing a std::terminate () call.
1728
1729              Because inlining of a function can introduce new cleanup regions,
1730              prior to inlining we keep std::terminate () calls for every
1731              MUST_NOT_THROW containing a function call.  The vast majority of
1732              these will be eliminated after inlining and crossjumping will unify
1733              possible duplicated calls.  So ignore the handlers for the function
                 body estimates.  */
1734 if (gimple_code (gsi_stmt (bsi)) == GIMPLE_LABEL
1735 && bitmap_bit_p (must_not_throw,
1736 LABEL_DECL_UID (gimple_label_label (gsi_stmt (bsi)))))
1737 {
1738 if (dump_file)
1739 fprintf (dump_file, " MUST_NOT_THROW landing pad. Ignoring whole BB.\n");
1740 }
1741 if (dump_file)
1742 {
1743 fprintf (dump_file, " freq:%6i size:%3i time:%3i ", freq, this_size, this_time);
1744 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
1745 }
1746 this_time *= freq;
1747 time += this_time;
1748 size += this_size;
1749 if (likely_eliminated_by_inlining_p (gsi_stmt (bsi)))
1750 {
1751 size_inlining_benefit += this_size;
1752 time_inlining_benefit += this_time;
1753 if (dump_file)
1754 fprintf (dump_file, " Likely eliminated\n");
1755 }
1756 gcc_assert (time >= 0);
1757 gcc_assert (size >= 0);
1758 }
1759 }
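/* The times above were accumulated weighted by basic block frequency;
   divide by CGRAPH_FREQ_BASE, rounding to nearest, to return to the
   once-per-invocation scale.  */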
1760 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1761 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1762 / CGRAPH_FREQ_BASE);
1763 if (dump_file)
1764 {
1765 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1766 (int)time, (int)time_inlining_benefit,
1767 size, size_inlining_benefit);
1768 }
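/* Inlining also eliminates the call sequence itself and the moves of
   the return value and of each argument, so count those as additional
   benefit.  */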
1769 time_inlining_benefit += eni_time_weights.call_cost;
1770 size_inlining_benefit += eni_size_weights.call_cost;
1771 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1772 {
1773 int cost = estimate_move_cost (TREE_TYPE (funtype));
1774 time_inlining_benefit += cost;
1775 size_inlining_benefit += cost;
1776 }
1777 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1778 {
1779 int cost = estimate_move_cost (TREE_TYPE (arg));
1780 time_inlining_benefit += cost;
1781 size_inlining_benefit += cost;
1782 }
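/* For instance, a function taking two int arguments and returning an int
   gets call_cost plus three int move costs added to both the size and the
   time benefit.  */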
1783 if (time_inlining_benefit > MAX_TIME)
1784 time_inlining_benefit = MAX_TIME;
1785 if (time > MAX_TIME)
1786 time = MAX_TIME;
1787 inline_summary (node)->self_time = time;
1788 inline_summary (node)->self_size = size;
1789 if (dump_file)
1790 {
1791 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1792 (int)time, (int)time_inlining_benefit,
1793 size, size_inlining_benefit);
1794 }
1795 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1796 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1797 BITMAP_FREE (must_not_throw);
1798 }
1799
1800 /* Compute the parameters of NODE that are used by the inliner. */
1801 unsigned int
1802 compute_inline_parameters (struct cgraph_node *node)
1803 {
1804 HOST_WIDE_INT self_stack_size;
1805
1806 gcc_assert (!node->global.inlined_to);
1807
1808 /* Estimate the stack size for the function, but not at -O0
1809 because estimated_stack_frame_size has quadratic complexity. */
1810 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1811 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1812 node->global.estimated_stack_size = self_stack_size;
1813 node->global.stack_frame_offset = 0;
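/* The global estimates start out equal to the function's own frame and
   are updated as inlining decisions are applied to the callgraph.  */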
1814
1815 /* Can this function be inlined at all? */
1816 node->local.inlinable = tree_inlinable_function_p (current_function_decl);
1817 if (node->local.inlinable && !node->local.disregard_inline_limits)
1818 node->local.disregard_inline_limits
1819 = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
1820 estimate_function_body_sizes (node);
1821 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1822 node->global.time = inline_summary (node)->self_time;
1823 node->global.size = inline_summary (node)->self_size;
1824 return 0;
1825 }
1826
1827
1828 /* Compute the inline parameters of the function given by
1829 current_function_decl. */
1830 static unsigned int
1831 compute_inline_parameters_for_current (void)
1832 {
1833 compute_inline_parameters (cgraph_node (current_function_decl));
1834 return 0;
1835 }
1836
1837 struct gimple_opt_pass pass_inline_parameters =
1838 {
1839 {
1840 GIMPLE_PASS,
1841 "inline_param", /* name */
1842 NULL, /* gate */
1843 compute_inline_parameters_for_current,/* execute */
1844 NULL, /* sub */
1845 NULL, /* next */
1846 0, /* static_pass_number */
1847 TV_INLINE_HEURISTICS, /* tv_id */
1848 0, /* properties_required */
1849 0, /* properties_provided */
1850 0, /* properties_destroyed */
1851 0, /* todo_flags_start */
1852 0 /* todo_flags_finish */
1853 }
1854 };
1855
1856 /* Perform the intraprocedural analysis of NODE that is required to
1857 inline indirect calls. */
1858 static void
1859 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1860 {
1861 struct cgraph_edge *cs;
1862
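/* When IPA-CP is enabled, it takes care of initializing the parameter
   descriptors and computing the jump functions, so avoid doing the same
   work twice here.  */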
1863 if (!flag_ipa_cp)
1864 {
1865 ipa_initialize_node_params (node);
1866 ipa_detect_param_modifications (node);
1867 }
1868 ipa_analyze_params_uses (node);
1869
1870 if (!flag_ipa_cp)
1871 for (cs = node->callees; cs; cs = cs->next_callee)
1872 {
1873 ipa_count_arguments (cs);
1874 ipa_compute_jump_functions (cs);
1875 }
1876
1877 if (dump_file)
1878 {
1879 ipa_print_node_params (dump_file, node);
1880 ipa_print_node_jump_functions (dump_file, node);
1881 }
1882 }
1883
1884 /* Compute the inline parameters of NODE and analyze it for indirect inlining. */
1885 static void
1886 analyze_function (struct cgraph_node *node)
1887 {
1888 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1889 current_function_decl = node->decl;
1890
1891 compute_inline_parameters (node);
1892 if (flag_indirect_inlining)
1893 inline_indirect_intraprocedural_analysis (node);
1894
1895 current_function_decl = NULL;
1896 pop_cfun ();
1897 }
1898
1899 /* Called when a new function is inserted into the callgraph late. */
1900 static void
1901 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1902 {
1903 analyze_function (node);
1904 }
1905
1906 /* Compute the inline summaries of all analyzed functions in the callgraph. */
1907 static void
1908 inline_generate_summary (void)
1909 {
1910 struct cgraph_node *node;
1911
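/* Functions inserted into the callgraph after this point (for example by
   other IPA passes) have their inline parameters computed through this
   hook.  */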
1912 function_insertion_hook_holder =
1913 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1914
1915 if (flag_indirect_inlining)
1916 {
1917 ipa_register_cgraph_hooks ();
1918 ipa_check_create_node_params ();
1919 ipa_check_create_edge_args ();
1920 }
1921
1922 for (node = cgraph_nodes; node; node = node->next)
1923 if (node->analyzed)
1924 analyze_function (node);
1925
1926 return;
1927 }
1928
1929 /* Apply inline plan to function. */
1930 static unsigned int
1931 inline_transform (struct cgraph_node *node)
1932 {
1933 unsigned int todo = 0;
1934 struct cgraph_edge *e;
1935
1936 /* We might need the body of this function so that we can expand
1937 it inline somewhere else. */
1938 if (cgraph_preserve_function_body_p (node->decl))
1939 save_inline_function_body (node);
1940
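/* Check whether some call in this function is going to be inlined (or
   whether -Winline diagnostics may be wanted); only then is it worth
   running optimize_inline_calls.  */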
1941 for (e = node->callees; e; e = e->next_callee)
1942 if (!e->inline_failed || warn_inline)
1943 break;
1944
1945 if (e)
1946 {
1947 timevar_push (TV_INTEGRATION);
1948 todo = optimize_inline_calls (current_function_decl);
1949 timevar_pop (TV_INTEGRATION);
1950 }
1951 cfun->always_inline_functions_inlined = true;
1952 cfun->after_inlining = true;
1953 return todo | execute_fixup_cfg ();
1954 }
1955
1956 struct ipa_opt_pass pass_ipa_inline =
1957 {
1958 {
1959 IPA_PASS,
1960 "inline", /* name */
1961 NULL, /* gate */
1962 cgraph_decide_inlining, /* execute */
1963 NULL, /* sub */
1964 NULL, /* next */
1965 0, /* static_pass_number */
1966 TV_INLINE_HEURISTICS, /* tv_id */
1967 0, /* properties_required */
1968 0, /* properties_provided */
1969 0, /* properties_destroyed */
1970 TODO_remove_functions, /* todo_flags_start */
1971 TODO_dump_cgraph | TODO_dump_func
1972 | TODO_remove_functions /* todo_flags_finish */
1973 },
1974 inline_generate_summary, /* generate_summary */
1975 NULL, /* write_summary */
1976 NULL, /* read_summary */
1977 NULL, /* function_read_summary */
1978 0, /* TODOs */
1979 inline_transform, /* function_transform */
1980 NULL, /* variable_transform */
1981 };
1982
1983
1984 #include "gt-ipa-inline.h"