[gcc.git] / gcc / ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 We separate inlining decisions from the inliner itself and store them
24 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
25 documentation about the particular representation of inline plans in the
26 callgraph.
27
28 There are three major parts of this file:
29
30 cgraph_mark_inline implementation
31
32 This function allows one to mark a given call inline and performs necessary
33 modifications of the cgraph (production of the clones and updating overall
34 statistics)
35
36 inlining heuristics limits
37
38 These functions allow one to check that a particular inlining is allowed
39 by the limits specified by the user (allowed function growth, overall unit
40 growth and so on).
41
42 inlining heuristics
43
44 This is the implementation of the IPA pass aiming to get as much benefit
45 from inlining as possible while obeying the limits checked above.
46
47 The implementation of the particular heuristics is separated from
48 the rest of the code to make it easier to replace it with a more complicated
49 implementation in the future. The rest of the inlining code acts as a
50 library aimed to modify the callgraph and verify that the parameters
51 on code size growth fit.
52
53 To mark a given call inline, use the cgraph_mark_inline function; the
54 verification is performed by cgraph_default_inline_p and
55 cgraph_check_inline_limits.
56
57 The heuristics implement a simple knapsack-style algorithm ordering
58 all functions by their "profitability" (estimated by code size growth)
59 and inlining them in priority order.
60
61 cgraph_decide_inlining implements heuristics taking whole callgraph
62 into account, while cgraph_decide_inlining_incrementally considers
63 only one function at a time and is used by early inliner.
64
65 The inliner itself is split into several passes:
66
67 pass_inline_parameters
68
69 This pass computes local properties of functions that are used by inliner:
70 estimated function body size, whether function is inlinable at all and
71 stack frame consumption.
72
73 Before executing any of inliner passes, this local pass has to be applied
74 to each function in the callgraph (ie run as subpass of some earlier
75 IPA pass). The results are made out of date by any optimization applied
76 on the function body.
77
78 pass_early_inlining
79
80 Simple local inlining pass inlining callees into the current function. This
81 pass makes no global whole-compilation-unit analysis, and thus, when allowed
82 to do inlining that expands code size, it might result in unbounded growth
83 of the whole unit.
84
85 The pass is run during conversion into SSA form. Only functions already
86 converted into SSA form are inlined, so the conversion must happen in
87 topological order on the callgraph (that is maintained by pass manager).
88 The functions after inlining are early optimized so the early inliner sees
89 unoptimized function itself, but all considered callees are already
90 optimized allowing it to unfold abstraction penalty on C++ effectively and
91 cheaply.
92
93 pass_ipa_early_inlining
94
95 With profiling, the early inlining is also necessary to reduce
96 instrumentation costs on programs with high abstraction penalty (doing
97 many redundant calls). This can't happen in parallel with early
98 optimization and profile instrumentation, because we would end up
99 re-instrumenting already instrumented function bodies we brought in via
100 inlining.
101
102 To avoid this, this pass is executed as an IPA pass before profiling. It is
103 a simple wrapper around pass_early_inlining and ensures inlining runs first.
104
105 pass_ipa_inline
106
107 This is the main pass implementing a simple greedy algorithm to do inlining
108 of small functions that results in overall growth of the compilation unit and
109 inlining of functions called once. The pass computes just the so-called inline
110 plan (a representation of the inlining to be done in the callgraph) and unlike
111 early inlining it does not perform the inlining itself.
112
113 pass_apply_inline
114
115 This pass performs the actual inlining according to pass_ipa_inline on a given
116 function. Possibly the function body before inlining is saved when it is
117 needed for further inlining later.
118 */
119
120 #include "config.h"
121 #include "system.h"
122 #include "coretypes.h"
123 #include "tm.h"
124 #include "tree.h"
125 #include "tree-inline.h"
126 #include "langhooks.h"
127 #include "flags.h"
128 #include "cgraph.h"
129 #include "diagnostic.h"
130 #include "timevar.h"
131 #include "params.h"
132 #include "fibheap.h"
133 #include "intl.h"
134 #include "tree-pass.h"
135 #include "hashtab.h"
136 #include "coverage.h"
137 #include "ggc.h"
138 #include "tree-flow.h"
139 #include "rtl.h"
140 #include "ipa-prop.h"
141 #include "except.h"
142
143 #define MAX_TIME 1000000000
144
145 /* Modes the incremental inliner operates in:
146
147 In ALWAYS_INLINE mode, only functions marked
148 always_inline are inlined. This mode is used after detecting a cycle during
149 flattening.
150
151 In SIZE mode, only functions that reduce the function body size after
152 inlining are inlined; this is used during early inlining.
153
154 In ALL mode, everything is inlined. This is used during flattening. */
155 enum inlining_mode {
156 INLINE_NONE = 0,
157 INLINE_ALWAYS_INLINE,
158 INLINE_SIZE_NORECURSIVE,
159 INLINE_SIZE,
160 INLINE_ALL
161 };
162 static bool
163 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
164 int);
165
166
167 /* Statistics we collect about inlining algorithm. */
168 static int ncalls_inlined;
169 static int nfunctions_inlined;
170 static int overall_size;
171 static gcov_type max_count, max_benefit;
172
173 /* Holders of ipa cgraph hooks: */
174 static struct cgraph_node_hook_list *function_insertion_hook_holder;
175
176 static inline struct inline_summary *
177 inline_summary (struct cgraph_node *node)
178 {
179 return &node->local.inline_summary;
180 }
181
182 /* Estimate self time of the function after inlining WHAT into TO. */
183
184 static int
185 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
186 struct cgraph_node *what)
187 {
188 gcov_type time = (((gcov_type)what->global.time
189 - inline_summary (what)->time_inlining_benefit)
190 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
191 + to->global.time;
192 if (time < 0)
193 time = 0;
194 if (time > MAX_TIME)
195 time = MAX_TIME;
196 return time;
197 }
198
199 /* Estimate self size of the function after inlining WHAT into TO. */
200
201 static int
202 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
203 struct cgraph_node *what)
204 {
205 int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
206 gcc_assert (size >= 0);
207 return size;
208 }
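/* Illustrative example for the two estimators above (numbers are made up,
   not taken from any real unit): if WHAT has global.size 30 and a
   size_inlining_benefit of 8, inlining one call of WHAT into a caller TO of
   global.size 50 is estimated as (30 - 8) * 1 + 50 = 72 size units.
   Likewise, with global.time 40, time_inlining_benefit 10 and a call
   frequency equal to CGRAPH_FREQ_BASE, the time estimate gives
   (40 - 10) + 50 = 80 time units when TO's global.time is 50.  */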
209
210 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
211 by NEST. */
212
213 static void
214 update_noncloned_frequencies (struct cgraph_node *node,
215 int freq_scale, int nest)
216 {
217 struct cgraph_edge *e;
218
219 /* We do not want to ignore high loop nest after freq drops to 0. */
220 if (!freq_scale)
221 freq_scale = 1;
222 for (e = node->callees; e; e = e->next_callee)
223 {
224 e->loop_nest += nest;
225 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
226 if (e->frequency > CGRAPH_FREQ_MAX)
227 e->frequency = CGRAPH_FREQ_MAX;
228 if (!e->inline_failed)
229 update_noncloned_frequencies (e->callee, freq_scale, nest);
230 }
231 }
232
233 /* E is expected to be an edge being inlined. Clone destination node of
234 the edge and redirect it to the new clone.
235 DUPLICATE is used for bookkeeping on whether we are actually creating new
236 clones or re-using node originally representing out-of-line function call.
237 */
238 void
239 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
240 bool update_original)
241 {
242 HOST_WIDE_INT peak;
243
244 if (duplicate)
245 {
246 /* We may eliminate the need for out-of-line copy to be output.
247 In that case just go ahead and re-use it. */
248 if (!e->callee->callers->next_caller
249 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
250 /* Don't reuse if more than one function shares a comdat group.
251 If the other function(s) are needed, we need to emit even
252 this function out of line. */
253 && !e->callee->same_comdat_group
254 && !cgraph_new_nodes)
255 {
256 gcc_assert (!e->callee->global.inlined_to);
257 if (e->callee->analyzed)
258 {
259 overall_size -= e->callee->global.size;
260 nfunctions_inlined++;
261 }
262 duplicate = false;
263 e->callee->local.externally_visible = false;
264 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
265 }
266 else
267 {
268 struct cgraph_node *n;
269 n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
270 update_original, NULL);
271 cgraph_redirect_edge_callee (e, n);
272 }
273 }
274
275 if (e->caller->global.inlined_to)
276 e->callee->global.inlined_to = e->caller->global.inlined_to;
277 else
278 e->callee->global.inlined_to = e->caller;
279 e->callee->global.stack_frame_offset
280 = e->caller->global.stack_frame_offset
281 + inline_summary (e->caller)->estimated_self_stack_size;
282 peak = e->callee->global.stack_frame_offset
283 + inline_summary (e->callee)->estimated_self_stack_size;
284 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
285 e->callee->global.inlined_to->global.estimated_stack_size = peak;
286
287 /* Recursively clone all bodies. */
288 for (e = e->callee->callees; e; e = e->next_callee)
289 if (!e->inline_failed)
290 cgraph_clone_inlined_nodes (e, duplicate, update_original);
291 }
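/* Stack accounting sketch for the code above (hypothetical sizes): if the
   caller sits at stack_frame_offset 0 with an estimated_self_stack_size of
   64 bytes and the inlined callee needs 32 bytes itself, the callee is
   placed at offset 64 and the peak becomes 96, which is then propagated to
   the estimated_stack_size of the function everything is inlined into.  */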
292
293 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
294 specifies whether profile of original function should be updated. If any new
295 indirect edges are discovered in the process, add them to NEW_EDGES, unless
296 it is NULL. Return true iff any new callgraph edges were discovered as a
297 result of inlining. */
298
299 static bool
300 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
301 VEC (cgraph_edge_p, heap) **new_edges)
302 {
303 int old_size = 0, new_size = 0;
304 struct cgraph_node *to = NULL, *what;
305 struct cgraph_edge *curr = e;
306 int freq;
307 bool duplicate = false;
308 int orig_size = e->callee->global.size;
309
310 gcc_assert (e->inline_failed);
311 e->inline_failed = CIF_OK;
312
313 if (!e->callee->global.inlined)
314 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
315 e->callee->global.inlined = true;
316
317 if (e->callee->callers->next_caller
318 || !cgraph_can_remove_if_no_direct_calls_p (e->callee)
319 || e->callee->same_comdat_group)
320 duplicate = true;
321 cgraph_clone_inlined_nodes (e, true, update_original);
322
323 what = e->callee;
324
325 freq = e->frequency;
326 /* Now update size of caller and all functions caller is inlined into. */
327 for (;e && !e->inline_failed; e = e->caller->callers)
328 {
329 to = e->caller;
330 old_size = e->caller->global.size;
331 new_size = cgraph_estimate_size_after_inlining (1, to, what);
332 to->global.size = new_size;
333 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
334 }
335 gcc_assert (what->global.inlined_to == to);
336 if (new_size > old_size)
337 overall_size += new_size - old_size;
338 if (!duplicate)
339 overall_size -= orig_size;
340 ncalls_inlined++;
341
342 if (flag_indirect_inlining)
343 return ipa_propagate_indirect_call_infos (curr, new_edges);
344 else
345 return false;
346 }
347
348 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
349 Return following unredirected edge in the list of callers
350 of EDGE->CALLEE */
351
352 static struct cgraph_edge *
353 cgraph_mark_inline (struct cgraph_edge *edge)
354 {
355 struct cgraph_node *to = edge->caller;
356 struct cgraph_node *what = edge->callee;
357 struct cgraph_edge *e, *next;
358
359 gcc_assert (!edge->call_stmt_cannot_inline_p);
360 /* Look for all calls, mark them inline and clone recursively
361 all inlined functions. */
362 for (e = what->callers; e; e = next)
363 {
364 next = e->next_caller;
365 if (e->caller == to && e->inline_failed)
366 {
367 cgraph_mark_inline_edge (e, true, NULL);
368 if (e == edge)
369 edge = next;
370 }
371 }
372
373 return edge;
374 }
375
376 /* Estimate the growth caused by inlining NODE into all callers. */
377
378 static int
379 cgraph_estimate_growth (struct cgraph_node *node)
380 {
381 int growth = 0;
382 struct cgraph_edge *e;
383 bool self_recursive = false;
384
385 if (node->global.estimated_growth != INT_MIN)
386 return node->global.estimated_growth;
387
388 for (e = node->callers; e; e = e->next_caller)
389 {
390 if (e->caller == node)
391 self_recursive = true;
392 if (e->inline_failed)
393 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
394 - e->caller->global.size);
395 }
396
397 /* ??? Wrong for non-trivially self recursive functions or cases where
398 we decide to not inline for different reasons, but it is not a big deal
399 as in that case we will keep the body around, but we will also avoid
400 some inlining. */
401 if (cgraph_only_called_directly_p (node)
402 && !DECL_EXTERNAL (node->decl) && !self_recursive)
403 growth -= node->global.size;
404
405 node->global.estimated_growth = growth;
406 return growth;
407 }
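/* Worked example (hypothetical numbers): a node of global.size 20 with
   size_inlining_benefit 5 and three not-yet-inlined callers gets
   growth = 3 * (20 - 5) = 45; if the node is only called directly and its
   offline copy would therefore disappear, the result is 45 - 20 = 25.  */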
408
409 /* Return false when inlining WHAT into TO is not a good idea
410 as it would cause too large growth of function bodies.
411 When ONE_ONLY is true, assume that only one call site is going
412 to be inlined, otherwise figure out how many call sites in
413 TO call WHAT and verify that all can be inlined.
414 */
415
416 static bool
417 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
418 cgraph_inline_failed_t *reason, bool one_only)
419 {
420 int times = 0;
421 struct cgraph_edge *e;
422 int newsize;
423 int limit;
424 HOST_WIDE_INT stack_size_limit, inlined_stack;
425
426 if (one_only)
427 times = 1;
428 else
429 for (e = to->callees; e; e = e->next_callee)
430 if (e->callee == what)
431 times++;
432
433 if (to->global.inlined_to)
434 to = to->global.inlined_to;
435
436 /* When inlining large function body called once into small function,
437 take the inlined function as base for limiting the growth. */
438 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
439 limit = inline_summary (to)->self_size;
440 else
441 limit = inline_summary (what)->self_size;
442
443 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
444
445 /* Check the size after inlining against the function limits. But allow
446 the function to shrink if it went over the limits by forced inlining. */
447 newsize = cgraph_estimate_size_after_inlining (times, to, what);
448 if (newsize >= to->global.size
449 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
450 && newsize > limit)
451 {
452 if (reason)
453 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
454 return false;
455 }
456
457 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
458
459 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
460
461 inlined_stack = (to->global.stack_frame_offset
462 + inline_summary (to)->estimated_self_stack_size
463 + what->global.estimated_stack_size);
464 if (inlined_stack > stack_size_limit
465 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
466 {
467 if (reason)
468 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
469 return false;
470 }
471 return true;
472 }
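/* Size-limit illustration (the values are hypothetical; the real limits
   come from the --param settings in params.def): with
   inline_summary (to)->self_size 1000, inline_summary (what)->self_size 300
   and a large-function-growth of 100%, the limit is 1000 + 1000 = 2000.
   An estimated new size of 1400 therefore passes, while a new size of 2100
   is rejected once it also exceeds large-function-insns and does not shrink
   the caller.  */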
473
474 /* Return true when function N is small enough to be inlined. */
475
476 static bool
477 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
478 {
479 tree decl = n->decl;
480
481 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
482 {
483 if (reason)
484 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
485 return false;
486 }
487
488 if (!n->analyzed)
489 {
490 if (reason)
491 *reason = CIF_BODY_NOT_AVAILABLE;
492 return false;
493 }
494
495 if (DECL_DECLARED_INLINE_P (decl))
496 {
497 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
498 {
499 if (reason)
500 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
501 return false;
502 }
503 }
504 else
505 {
506 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
507 {
508 if (reason)
509 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
510 return false;
511 }
512 }
513
514 return true;
515 }
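/* Example (parameter values are hypothetical; the real ones come from
   params.def): if max-inline-insns-auto were 90, a function not declared
   inline with global.size 120 would be rejected here with
   CIF_MAX_INLINE_INSNS_AUTO_LIMIT, while the same body declared inline is
   checked against max-inline-insns-single instead.  */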
516
517 /* Return true when inlining WHAT would create recursive inlining.
518 We call recursive inlining all cases where same function appears more than
519 once in the single recursion nest path in the inline graph. */
520
521 static bool
522 cgraph_recursive_inlining_p (struct cgraph_node *to,
523 struct cgraph_node *what,
524 cgraph_inline_failed_t *reason)
525 {
526 bool recursive;
527 if (to->global.inlined_to)
528 recursive = what->decl == to->global.inlined_to->decl;
529 else
530 recursive = what->decl == to->decl;
531 /* Marking recursive function inline has sane semantics and thus we should
532 not warn on it. */
533 if (recursive && reason)
534 *reason = (what->local.disregard_inline_limits
535 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
536 return recursive;
537 }
538
539 /* A cost model driving the inlining heuristics in a way so the edges with
540 smallest badness are inlined first. After each inlining is performed
541 the costs of all caller edges of nodes affected are recomputed so the
542 metrics may accurately depend on values such as number of inlinable callers
543 of the function or function body size. */
544
545 static int
546 cgraph_edge_badness (struct cgraph_edge *edge)
547 {
548 gcov_type badness;
549 int growth =
550 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
551
552 growth -= edge->caller->global.size;
553
554 /* Always prefer inlining saving code size. */
555 if (growth <= 0)
556 badness = INT_MIN - growth;
557
558 /* When profiling is available, base priorities on -(#calls / growth).
559 So we optimize for overall number of "executed" inlined calls. */
560 else if (max_count)
561 badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
562 * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
563
564 /* When function local profile is available, base priorities on
565 growth / frequency, so we optimize for overall frequency of inlined
566 calls. This is not too accurate since while the call might be frequent
567 within function, the function itself is infrequent.
568
569 Other objective to optimize for is number of different calls inlined.
570 We add the estimated growth after inlining all functions to bias the
571 priorities slightly in this direction (so that, of functions of the same
572 size, those called fewer times get priority). */
573 else if (flag_guess_branch_prob)
574 {
575 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
576 badness = growth * 10000;
577 div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
578 / (edge->callee->global.time + 1) + 1, 100);
579
580
581 /* Decrease badness if call is nested. */
582 /* Compress the range so we don't overflow. */
583 if (div > 10000)
584 div = 10000 + ceil_log2 (div) - 8;
585 if (div < 1)
586 div = 1;
587 if (badness > 0)
588 badness /= div;
589 badness += cgraph_estimate_growth (edge->callee);
590 if (badness > INT_MAX)
591 badness = INT_MAX;
592 }
593 /* When function local profile is not available or it does not give
594 useful information (ie frequency is zero), base the cost on
595 loop nest and overall size growth, so we optimize for overall number
596 of functions fully inlined in program. */
597 else
598 {
599 int nest = MIN (edge->loop_nest, 8);
600 badness = cgraph_estimate_growth (edge->callee) * 256;
601
602 /* Decrease badness if call is nested. */
603 if (badness > 0)
604 badness >>= nest;
605 else
606 {
607 badness <<= nest;
608 }
609 }
610 /* Make recursive inlining happen always after other inlining is done. */
611 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
612 return badness + 1;
613 else
614 return badness;
615 }
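/* Badness example for the local-profile branch above (made-up numbers):
   with a growth of 20, an edge frequency equal to CGRAPH_FREQ_BASE
   (div = 101), time_inlining_benefit 5 and callee global.time 9
   (div *= 100 * 5 / 10 + 1 = 51, giving 5151), badness starts at
   20 * 10000 = 200000, becomes 200000 / 5151 = 38 and, after adding an
   overall estimated growth of 25, ends up as 63.  Smaller badness means
   the edge is inlined earlier.  */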
616
617 /* Recompute heap nodes for each caller edge of NODE. */
618
619 static void
620 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
621 bitmap updated_nodes)
622 {
623 struct cgraph_edge *edge;
624 cgraph_inline_failed_t failed_reason;
625
626 if (!node->local.inlinable || node->local.disregard_inline_limits
627 || node->global.inlined_to)
628 return;
629 if (bitmap_bit_p (updated_nodes, node->uid))
630 return;
631 bitmap_set_bit (updated_nodes, node->uid);
632 node->global.estimated_growth = INT_MIN;
633
634 if (!node->local.inlinable)
635 return;
636 /* Prune out edges we won't inline into anymore. */
637 if (!cgraph_default_inline_p (node, &failed_reason))
638 {
639 for (edge = node->callers; edge; edge = edge->next_caller)
640 if (edge->aux)
641 {
642 fibheap_delete_node (heap, (fibnode_t) edge->aux);
643 edge->aux = NULL;
644 if (edge->inline_failed)
645 edge->inline_failed = failed_reason;
646 }
647 return;
648 }
649
650 for (edge = node->callers; edge; edge = edge->next_caller)
651 if (edge->inline_failed)
652 {
653 int badness = cgraph_edge_badness (edge);
654 if (edge->aux)
655 {
656 fibnode_t n = (fibnode_t) edge->aux;
657 gcc_assert (n->data == edge);
658 if (n->key == badness)
659 continue;
660
661 /* fibheap_replace_key only increase the keys. */
662 if (fibheap_replace_key (heap, n, badness))
663 continue;
664 fibheap_delete_node (heap, (fibnode_t) edge->aux);
665 }
666 edge->aux = fibheap_insert (heap, badness, edge);
667 }
668 }
669
670 /* Recompute heap nodes for the caller edges of each callee of NODE. */
671
672 static void
673 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
674 bitmap updated_nodes)
675 {
676 struct cgraph_edge *e;
677 node->global.estimated_growth = INT_MIN;
678
679 for (e = node->callees; e; e = e->next_callee)
680 if (e->inline_failed)
681 update_caller_keys (heap, e->callee, updated_nodes);
682 else if (!e->inline_failed)
683 update_callee_keys (heap, e->callee, updated_nodes);
684 }
685
686 /* Enqueue all recursive calls from NODE into priority queue depending on
687 how likely we want to recursively inline the call. */
688
689 static void
690 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
691 fibheap_t heap)
692 {
693 static int priority;
694 struct cgraph_edge *e;
695 for (e = where->callees; e; e = e->next_callee)
696 if (e->callee == node)
697 {
698 /* When profile feedback is available, prioritize by expected number
699 of calls. Without profile feedback we maintain simple queue
700 to order candidates via recursive depths. */
701 fibheap_insert (heap,
702 !max_count ? priority++
703 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
704 e);
705 }
706 for (e = where->callees; e; e = e->next_callee)
707 if (!e->inline_failed)
708 lookup_recursive_calls (node, e->callee, heap);
709 }
710
711 /* Decide on recursive inlining: in the case function has recursive calls,
712 inline until body size reaches given argument. If any new indirect edges
713 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
714 is NULL. */
715
716 static bool
717 cgraph_decide_recursive_inlining (struct cgraph_node *node,
718 VEC (cgraph_edge_p, heap) **new_edges)
719 {
720 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
721 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
722 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
723 fibheap_t heap;
724 struct cgraph_edge *e;
725 struct cgraph_node *master_clone, *next;
726 int depth = 0;
727 int n = 0;
728
729 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
730 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
731 return false;
732
733 if (DECL_DECLARED_INLINE_P (node->decl))
734 {
735 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
736 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
737 }
738
739 /* Make sure that function is small enough to be considered for inlining. */
740 if (!max_depth
741 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
742 return false;
743 heap = fibheap_new ();
744 lookup_recursive_calls (node, node, heap);
745 if (fibheap_empty (heap))
746 {
747 fibheap_delete (heap);
748 return false;
749 }
750
751 if (dump_file)
752 fprintf (dump_file,
753 " Performing recursive inlining on %s\n",
754 cgraph_node_name (node));
755
756 /* We need original clone to copy around. */
757 master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1,
758 false, NULL);
759 master_clone->needed = true;
760 for (e = master_clone->callees; e; e = e->next_callee)
761 if (!e->inline_failed)
762 cgraph_clone_inlined_nodes (e, true, false);
763
764 /* Do the inlining and update the list of recursive calls during the process. */
765 while (!fibheap_empty (heap)
766 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
767 <= limit))
768 {
769 struct cgraph_edge *curr
770 = (struct cgraph_edge *) fibheap_extract_min (heap);
771 struct cgraph_node *cnode;
772
773 depth = 1;
774 for (cnode = curr->caller;
775 cnode->global.inlined_to; cnode = cnode->callers->caller)
776 if (node->decl == curr->callee->decl)
777 depth++;
778 if (depth > max_depth)
779 {
780 if (dump_file)
781 fprintf (dump_file,
782 " maximal depth reached\n");
783 continue;
784 }
785
786 if (max_count)
787 {
788 if (!cgraph_maybe_hot_edge_p (curr))
789 {
790 if (dump_file)
791 fprintf (dump_file, " Not inlining cold call\n");
792 continue;
793 }
794 if (curr->count * 100 / node->count < probability)
795 {
796 if (dump_file)
797 fprintf (dump_file,
798 " Probability of edge is too small\n");
799 continue;
800 }
801 }
802
803 if (dump_file)
804 {
805 fprintf (dump_file,
806 " Inlining call of depth %i", depth);
807 if (node->count)
808 {
809 fprintf (dump_file, " called approx. %.2f times per call",
810 (double)curr->count / node->count);
811 }
812 fprintf (dump_file, "\n");
813 }
814 cgraph_redirect_edge_callee (curr, master_clone);
815 cgraph_mark_inline_edge (curr, false, new_edges);
816 lookup_recursive_calls (node, curr->callee, heap);
817 n++;
818 }
819 if (!fibheap_empty (heap) && dump_file)
820 fprintf (dump_file, " Recursive inlining growth limit met.\n");
821
822 fibheap_delete (heap);
823 if (dump_file)
824 fprintf (dump_file,
825 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
826 master_clone->global.size, node->global.size,
827 master_clone->global.time, node->global.time);
828
829 /* Remove the master clone we used for inlining. We rely on the fact that
830 clones inlined into the master clone get queued just before the master
831 clone, so we don't need recursion. */
832 for (node = cgraph_nodes; node != master_clone;
833 node = next)
834 {
835 next = node->next;
836 if (node->global.inlined_to == master_clone)
837 cgraph_remove_node (node);
838 }
839 cgraph_remove_node (master_clone);
840 /* FIXME: Recursive inlining actually reduces number of calls of the
841 function. At this place we should probably walk the function and
842 inline clones and compensate the counts accordingly. This probably
843 doesn't matter much in practice. */
844 return n > 0;
845 }
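/* Rough illustration of the profile-feedback path above (numbers are
   invented): with min-inline-recursive-probability set to 10, a recursive
   edge whose count is 5 while the function entry count is 100 gives
   5 * 100 / 100 = 5 < 10, so that call is skipped, whereas an edge with
   count 30 is considered as long as the estimated size stays within the
   max-inline-insns-recursive* limit and the depth within
   max-inline-recursive-depth*.  */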
846
847 /* Set inline_failed for all callers of given function to REASON. */
848
849 static void
850 cgraph_set_inline_failed (struct cgraph_node *node,
851 cgraph_inline_failed_t reason)
852 {
853 struct cgraph_edge *e;
854
855 if (dump_file)
856 fprintf (dump_file, "Inlining failed: %s\n",
857 cgraph_inline_failed_string (reason));
858 for (e = node->callers; e; e = e->next_caller)
859 if (e->inline_failed)
860 e->inline_failed = reason;
861 }
862
863 /* Given whole compilation unit estimate of INSNS, compute how large we can
864 allow the unit to grow. */
865 static int
866 compute_max_insns (int insns)
867 {
868 int max_insns = insns;
869 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
870 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
871
872 return ((HOST_WIDEST_INT) max_insns
873 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
874 }
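/* For example (parameter values are hypothetical): for a unit of overall
   size 50000 and an inline-unit-growth of 30%, the cap is
   50000 * 130 / 100 = 65000; units smaller than large-unit-insns are first
   rounded up to that floor before the percentage is applied.  */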
875
876 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
877 static void
878 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
879 {
880 while (VEC_length (cgraph_edge_p, new_edges) > 0)
881 {
882 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
883
884 gcc_assert (!edge->aux);
885 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
886 }
887 }
888
889
890 /* We use a greedy algorithm for inlining of small functions:
891 all inline candidates are put into a prioritized heap based on the estimated
892 growth of the overall number of instructions, updating the estimates as we go.
893
894 INLINED and INLINED_CALLEES are just pointers to arrays large enough
895 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
896
897 static void
898 cgraph_decide_inlining_of_small_functions (void)
899 {
900 struct cgraph_node *node;
901 struct cgraph_edge *edge;
902 cgraph_inline_failed_t failed_reason;
903 fibheap_t heap = fibheap_new ();
904 bitmap updated_nodes = BITMAP_ALLOC (NULL);
905 int min_size, max_size;
906 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
907
908 if (flag_indirect_inlining)
909 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
910
911 if (dump_file)
912 fprintf (dump_file, "\nDeciding on smaller functions:\n");
913
914 /* Put all inline candidates into the heap. */
915
916 for (node = cgraph_nodes; node; node = node->next)
917 {
918 if (!node->local.inlinable || !node->callers
919 || node->local.disregard_inline_limits)
920 continue;
921 if (dump_file)
922 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
923
924 node->global.estimated_growth = INT_MIN;
925 if (!cgraph_default_inline_p (node, &failed_reason))
926 {
927 cgraph_set_inline_failed (node, failed_reason);
928 continue;
929 }
930
931 for (edge = node->callers; edge; edge = edge->next_caller)
932 if (edge->inline_failed)
933 {
934 gcc_assert (!edge->aux);
935 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
936 }
937 }
938
939 max_size = compute_max_insns (overall_size);
940 min_size = overall_size;
941
942 while (overall_size <= max_size
943 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
944 {
945 int old_size = overall_size;
946 struct cgraph_node *where;
947 int growth =
948 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
949 cgraph_inline_failed_t not_good = CIF_OK;
950
951 growth -= edge->caller->global.size;
952
953 if (dump_file)
954 {
955 fprintf (dump_file,
956 "\nConsidering %s with %i size\n",
957 cgraph_node_name (edge->callee),
958 edge->callee->global.size);
959 fprintf (dump_file,
960 " to be inlined into %s in %s:%i\n"
961 " Estimated growth after inlined into all callees is %+i insns.\n"
962 " Estimated badness is %i, frequency %.2f.\n",
963 cgraph_node_name (edge->caller),
964 gimple_filename ((const_gimple) edge->call_stmt),
965 gimple_lineno ((const_gimple) edge->call_stmt),
966 cgraph_estimate_growth (edge->callee),
967 cgraph_edge_badness (edge),
968 edge->frequency / (double)CGRAPH_FREQ_BASE);
969 if (edge->count)
970 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
971 }
972 gcc_assert (edge->aux);
973 edge->aux = NULL;
974 if (!edge->inline_failed)
975 continue;
976
977 /* When we do not have profile info ready, we do not weight in any way the
978 position of the call in the procedure itself. This means that if a call of
979 function A from function B seems profitable to inline, the recursive
980 call of function A in the inline copy of A in B will look profitable too,
981 and we end up inlining until reaching the maximal function growth. This
982 is not a good idea, so prohibit the recursive inlining.
983
984 ??? When the frequencies are taken into account we might not need this
985 restriction.
986
987 We need to be careful here; in some testcases, e.g. directives.c in
988 libcpp, we can estimate a self-recursive function to have negative growth
989 for inlining completely.
990 */
991 if (!edge->count)
992 {
993 where = edge->caller;
994 while (where->global.inlined_to)
995 {
996 if (where->decl == edge->callee->decl)
997 break;
998 where = where->callers->caller;
999 }
1000 if (where->global.inlined_to)
1001 {
1002 edge->inline_failed
1003 = (edge->callee->local.disregard_inline_limits
1004 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1005 if (dump_file)
1006 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1007 continue;
1008 }
1009 }
1010
1011 if (!cgraph_maybe_hot_edge_p (edge))
1012 not_good = CIF_UNLIKELY_CALL;
1013 if (!flag_inline_functions
1014 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1015 not_good = CIF_NOT_DECLARED_INLINED;
1016 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1017 not_good = CIF_OPTIMIZING_FOR_SIZE;
1018 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1019 {
1020 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1021 &edge->inline_failed))
1022 {
1023 edge->inline_failed = not_good;
1024 if (dump_file)
1025 fprintf (dump_file, " inline_failed:%s.\n",
1026 cgraph_inline_failed_string (edge->inline_failed));
1027 }
1028 continue;
1029 }
1030 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1031 {
1032 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1033 &edge->inline_failed))
1034 {
1035 if (dump_file)
1036 fprintf (dump_file, " inline_failed:%s.\n",
1037 cgraph_inline_failed_string (edge->inline_failed));
1038 }
1039 continue;
1040 }
1041 if (!tree_can_inline_p (edge))
1042 {
1043 if (dump_file)
1044 fprintf (dump_file, " inline_failed:%s.\n",
1045 cgraph_inline_failed_string (edge->inline_failed));
1046 continue;
1047 }
1048 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1049 &edge->inline_failed))
1050 {
1051 where = edge->caller;
1052 if (where->global.inlined_to)
1053 where = where->global.inlined_to;
1054 if (!cgraph_decide_recursive_inlining (where,
1055 flag_indirect_inlining
1056 ? &new_indirect_edges : NULL))
1057 continue;
1058 if (flag_indirect_inlining)
1059 add_new_edges_to_heap (heap, new_indirect_edges);
1060 update_callee_keys (heap, where, updated_nodes);
1061 }
1062 else
1063 {
1064 struct cgraph_node *callee;
1065 if (edge->call_stmt_cannot_inline_p
1066 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1067 &edge->inline_failed, true))
1068 {
1069 if (dump_file)
1070 fprintf (dump_file, " Not inlining into %s:%s.\n",
1071 cgraph_node_name (edge->caller),
1072 cgraph_inline_failed_string (edge->inline_failed));
1073 continue;
1074 }
1075 callee = edge->callee;
1076 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1077 if (flag_indirect_inlining)
1078 add_new_edges_to_heap (heap, new_indirect_edges);
1079
1080 update_callee_keys (heap, callee, updated_nodes);
1081 }
1082 where = edge->caller;
1083 if (where->global.inlined_to)
1084 where = where->global.inlined_to;
1085
1086 /* Our profitability metric can depend on local properties
1087 such as number of inlinable calls and size of the function body.
1088 After inlining these properties might change for the function we
1089 inlined into (since its body size changed) and for the functions
1090 called by the function we inlined (since the number of its inlinable
1091 callers might change). */
1092 update_caller_keys (heap, where, updated_nodes);
1093 bitmap_clear (updated_nodes);
1094
1095 if (dump_file)
1096 {
1097 fprintf (dump_file,
1098 " Inlined into %s which now has size %i and self time %i,"
1099 "net change of %+i.\n",
1100 cgraph_node_name (edge->caller),
1101 edge->caller->global.time,
1102 edge->caller->global.size,
1103 overall_size - old_size);
1104 }
1105 if (min_size > overall_size)
1106 {
1107 min_size = overall_size;
1108 max_size = compute_max_insns (min_size);
1109
1110 if (dump_file)
1111 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1112 }
1113 }
1114 while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
1115 {
1116 gcc_assert (edge->aux);
1117 edge->aux = NULL;
1118 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1119 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1120 &edge->inline_failed))
1121 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1122 }
1123
1124 if (new_indirect_edges)
1125 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1126 fibheap_delete (heap);
1127 BITMAP_FREE (updated_nodes);
1128 }
1129
1130 /* Decide on the inlining. We do so in the topological order to avoid
1131 expenses on updating data structures. */
1132
1133 static unsigned int
1134 cgraph_decide_inlining (void)
1135 {
1136 struct cgraph_node *node;
1137 int nnodes;
1138 struct cgraph_node **order =
1139 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1140 int old_size = 0;
1141 int i;
1142 bool redo_always_inline = true;
1143 int initial_size = 0;
1144
1145 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1146 if (in_lto_p && flag_indirect_inlining)
1147 ipa_update_after_lto_read ();
1148
1149 max_count = 0;
1150 max_benefit = 0;
1151 for (node = cgraph_nodes; node; node = node->next)
1152 if (node->analyzed)
1153 {
1154 struct cgraph_edge *e;
1155
1156 gcc_assert (inline_summary (node)->self_size == node->global.size);
1157 initial_size += node->global.size;
1158 for (e = node->callees; e; e = e->next_callee)
1159 if (max_count < e->count)
1160 max_count = e->count;
1161 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1162 max_benefit = inline_summary (node)->time_inlining_benefit;
1163 }
1164 gcc_assert (in_lto_p
1165 || !max_count
1166 || (profile_info && flag_branch_probabilities));
1167 overall_size = initial_size;
1168
1169 nnodes = cgraph_postorder (order);
1170
1171 if (dump_file)
1172 fprintf (dump_file,
1173 "\nDeciding on inlining. Starting with size %i.\n",
1174 initial_size);
1175
1176 for (node = cgraph_nodes; node; node = node->next)
1177 node->aux = 0;
1178
1179 if (dump_file)
1180 fprintf (dump_file, "\nInlining always_inline functions:\n");
1181
1182 /* In the first pass mark all always_inline edges. Do this with a priority
1183 so none of our later choices will make this impossible. */
1184 while (redo_always_inline)
1185 {
1186 redo_always_inline = false;
1187 for (i = nnodes - 1; i >= 0; i--)
1188 {
1189 struct cgraph_edge *e, *next;
1190
1191 node = order[i];
1192
1193 /* Handle nodes to be flattened, but don't update overall unit
1194 size. */
1195 if (lookup_attribute ("flatten",
1196 DECL_ATTRIBUTES (node->decl)) != NULL)
1197 {
1198 if (dump_file)
1199 fprintf (dump_file,
1200 "Flattening %s\n", cgraph_node_name (node));
1201 cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
1202 }
1203
1204 if (!node->local.disregard_inline_limits)
1205 continue;
1206 if (dump_file)
1207 fprintf (dump_file,
1208 "\nConsidering %s size:%i (always inline)\n",
1209 cgraph_node_name (node), node->global.size);
1210 old_size = overall_size;
1211 for (e = node->callers; e; e = next)
1212 {
1213 next = e->next_caller;
1214 if (!e->inline_failed || e->call_stmt_cannot_inline_p)
1215 continue;
1216 if (cgraph_recursive_inlining_p (e->caller, e->callee,
1217 &e->inline_failed))
1218 continue;
1219 if (!tree_can_inline_p (e))
1220 continue;
1221 if (cgraph_mark_inline_edge (e, true, NULL))
1222 redo_always_inline = true;
1223 if (dump_file)
1224 fprintf (dump_file,
1225 " Inlined into %s which now has size %i.\n",
1226 cgraph_node_name (e->caller),
1227 e->caller->global.size);
1228 }
1229 /* Inlining a self-recursive function might introduce new calls to
1230 itself that we didn't see in the loop above. Fill in the proper
1231 reason why inline failed. */
1232 for (e = node->callers; e; e = e->next_caller)
1233 if (e->inline_failed)
1234 e->inline_failed = CIF_RECURSIVE_INLINING;
1235 if (dump_file)
1236 fprintf (dump_file,
1237 " Inlined for a net change of %+i size.\n",
1238 overall_size - old_size);
1239 }
1240 }
1241
1242 cgraph_decide_inlining_of_small_functions ();
1243
1244 if (flag_inline_functions_called_once)
1245 {
1246 if (dump_file)
1247 fprintf (dump_file, "\nDeciding on functions called once:\n");
1248
1249 /* And finally decide what functions are called once. */
1250 for (i = nnodes - 1; i >= 0; i--)
1251 {
1252 node = order[i];
1253
1254 if (node->callers
1255 && !node->callers->next_caller
1256 && cgraph_only_called_directly_p (node)
1257 && node->local.inlinable
1258 && node->callers->inline_failed
1259 && node->callers->caller != node
1260 && node->callers->caller->global.inlined_to != node
1261 && !node->callers->call_stmt_cannot_inline_p
1262 && !DECL_EXTERNAL (node->decl)
1263 && !DECL_COMDAT (node->decl))
1264 {
1265 cgraph_inline_failed_t reason;
1266 old_size = overall_size;
1267 if (dump_file)
1268 {
1269 fprintf (dump_file,
1270 "\nConsidering %s size %i.\n",
1271 cgraph_node_name (node), node->global.size);
1272 fprintf (dump_file,
1273 " Called once from %s %i insns.\n",
1274 cgraph_node_name (node->callers->caller),
1275 node->callers->caller->global.size);
1276 }
1277
1278 if (cgraph_check_inline_limits (node->callers->caller, node,
1279 &reason, false))
1280 {
1281 cgraph_mark_inline (node->callers);
1282 if (dump_file)
1283 fprintf (dump_file,
1284 " Inlined into %s which now has %i size"
1285 " for a net change of %+i size.\n",
1286 cgraph_node_name (node->callers->caller),
1287 node->callers->caller->global.size,
1288 overall_size - old_size);
1289 }
1290 else
1291 {
1292 if (dump_file)
1293 fprintf (dump_file,
1294 " Not inlining: %s.\n",
1295 cgraph_inline_failed_string (reason));
1296 }
1297 }
1298 }
1299 }
1300
1301 /* Free ipa-prop structures if they are no longer needed. */
1302 if (flag_indirect_inlining)
1303 free_all_ipa_structures_after_iinln ();
1304
1305 if (dump_file)
1306 fprintf (dump_file,
1307 "\nInlined %i calls, eliminated %i functions, "
1308 "size %i turned to %i size.\n\n",
1309 ncalls_inlined, nfunctions_inlined, initial_size,
1310 overall_size);
1311 free (order);
1312 return 0;
1313 }
1314
1315 /* Try to inline edge E from incremental inliner. MODE specifies mode
1316 of inliner.
1317
1318 We are detecting cycles by storing mode of inliner into cgraph_node last
1319 time we visited it in the recursion. In general when mode is set, we have
1320 recursive inlining, but as a special case, we want to try harder to inline
1321 ALWAYS_INLINE functions: consider callgraph a->b->c->b, with a being
1322 flatten, b being always inline. Flattening 'a' will collapse
1323 a->b->c before hitting cycle. To accommodate always inline, we however
1324 need to inline a->b->c->b.
1325
1326 So after hitting the cycle the first time, we switch into ALWAYS_INLINE mode
1327 and stop inlining only after hitting ALWAYS_INLINE in ALWAYS_INLINE mode. */
1328 static bool
1329 try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
1330 {
1331 struct cgraph_node *callee = e->callee;
1332 enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
1333 bool always_inline = e->callee->local.disregard_inline_limits;
1334 bool inlined = false;
1335
1336 /* We've hit cycle? */
1337 if (callee_mode)
1338 {
1339 /* It is the first time we see it and we are not in ALWAYS_INLINE-only
1340 mode yet, and the function in question is always_inline. */
1341 if (always_inline && mode != INLINE_ALWAYS_INLINE)
1342 {
1343 if (dump_file)
1344 {
1345 indent_to (dump_file, depth);
1346 fprintf (dump_file,
1347 "Hit cycle in %s, switching to always inline only.\n",
1348 cgraph_node_name (callee));
1349 }
1350 mode = INLINE_ALWAYS_INLINE;
1351 }
1352 /* Otherwise it is time to give up. */
1353 else
1354 {
1355 if (dump_file)
1356 {
1357 indent_to (dump_file, depth);
1358 fprintf (dump_file,
1359 "Not inlining %s into %s to avoid cycle.\n",
1360 cgraph_node_name (callee),
1361 cgraph_node_name (e->caller));
1362 }
1363 e->inline_failed = (e->callee->local.disregard_inline_limits
1364 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1365 return false;
1366 }
1367 }
1368
1369 callee->aux = (void *)(size_t) mode;
1370 if (dump_file)
1371 {
1372 indent_to (dump_file, depth);
1373 fprintf (dump_file, " Inlining %s into %s.\n",
1374 cgraph_node_name (e->callee),
1375 cgraph_node_name (e->caller));
1376 }
1377 if (e->inline_failed)
1378 {
1379 cgraph_mark_inline (e);
1380
1381 /* In order to fully inline always_inline functions, we need to
1382 recurse here, since the inlined functions might not be processed by
1383 incremental inlining at all yet.
1384
1385 Also flattening needs to be done recursively. */
1386
1387 if (mode == INLINE_ALL || always_inline)
1388 cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
1389 inlined = true;
1390 }
1391 callee->aux = (void *)(size_t) callee_mode;
1392 return inlined;
1393 }
1394
1395 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1396 in leaf functions. */
1397 static bool
1398 leaf_node_p (struct cgraph_node *n)
1399 {
1400 struct cgraph_edge *e;
1401 for (e = n->callees; e; e = e->next_callee)
1402 if (!DECL_BUILT_IN (e->callee->decl)
1403 || (!TREE_READONLY (e->callee->decl)
1404 || DECL_PURE_P (e->callee->decl)))
1405 return false;
1406 return true;
1407 }
1408
1409 /* Decide on the inlining. We do so in the topological order to avoid
1410 expenses on updating data structures.
1411 DEPTH is depth of recursion, used only for debug output. */
1412
1413 static bool
1414 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1415 enum inlining_mode mode,
1416 int depth)
1417 {
1418 struct cgraph_edge *e;
1419 bool inlined = false;
1420 cgraph_inline_failed_t failed_reason;
1421 enum inlining_mode old_mode;
1422
1423 #ifdef ENABLE_CHECKING
1424 verify_cgraph_node (node);
1425 #endif
1426
1427 old_mode = (enum inlining_mode) (size_t)node->aux;
1428
1429 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1430 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1431 {
1432 if (dump_file)
1433 {
1434 indent_to (dump_file, depth);
1435 fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
1436 }
1437 mode = INLINE_ALL;
1438 }
1439
1440 node->aux = (void *)(size_t) mode;
1441
1442 /* First of all look for always inline functions. */
1443 if (mode != INLINE_SIZE_NORECURSIVE)
1444 for (e = node->callees; e; e = e->next_callee)
1445 {
1446 if (!e->callee->local.disregard_inline_limits
1447 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1448 continue;
1449 if (e->call_stmt_cannot_inline_p)
1450 continue;
1451 /* When the edge is already inlined, we just need to recurse into
1452 it in order to fully flatten the leaves. */
1453 if (!e->inline_failed && mode == INLINE_ALL)
1454 {
1455 inlined |= try_inline (e, mode, depth);
1456 continue;
1457 }
1458 if (dump_file)
1459 {
1460 indent_to (dump_file, depth);
1461 fprintf (dump_file,
1462 "Considering to always inline inline candidate %s.\n",
1463 cgraph_node_name (e->callee));
1464 }
1465 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1466 {
1467 if (dump_file)
1468 {
1469 indent_to (dump_file, depth);
1470 fprintf (dump_file, "Not inlining: recursive call.\n");
1471 }
1472 continue;
1473 }
1474 if (!tree_can_inline_p (e))
1475 {
1476 if (dump_file)
1477 {
1478 indent_to (dump_file, depth);
1479 fprintf (dump_file,
1480 "Not inlining: %s",
1481 cgraph_inline_failed_string (e->inline_failed));
1482 }
1483 continue;
1484 }
1485 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1486 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1487 {
1488 if (dump_file)
1489 {
1490 indent_to (dump_file, depth);
1491 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1492 }
1493 continue;
1494 }
1495 if (!e->callee->analyzed)
1496 {
1497 if (dump_file)
1498 {
1499 indent_to (dump_file, depth);
1500 fprintf (dump_file,
1501 "Not inlining: Function body no longer available.\n");
1502 }
1503 continue;
1504 }
1505 inlined |= try_inline (e, mode, depth);
1506 }
1507
1508 /* Now do the automatic inlining. */
1509 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1510 /* Never inline regular functions into always-inline functions
1511 during incremental inlining. */
1512 && !node->local.disregard_inline_limits)
1513 {
1514 bitmap visited = BITMAP_ALLOC (NULL);
1515 for (e = node->callees; e; e = e->next_callee)
1516 {
1517 int allowed_growth = 0;
1518 if (!e->callee->local.inlinable
1519 || !e->inline_failed
1520 || e->callee->local.disregard_inline_limits)
1521 continue;
1522 /* We are inlining a function to all call-sites in node
1523 or to none. So visit each candidate only once. */
1524 if (!bitmap_set_bit (visited, e->callee->uid))
1525 continue;
1526 if (dump_file)
1527 fprintf (dump_file, "Considering inline candidate %s.\n",
1528 cgraph_node_name (e->callee));
1529 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1530 {
1531 if (dump_file)
1532 {
1533 indent_to (dump_file, depth);
1534 fprintf (dump_file, "Not inlining: recursive call.\n");
1535 }
1536 continue;
1537 }
1538 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1539 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1540 {
1541 if (dump_file)
1542 {
1543 indent_to (dump_file, depth);
1544 fprintf (dump_file,
1545 "Not inlining: SSA form does not match.\n");
1546 }
1547 continue;
1548 }
1549
1550 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1551 && optimize_function_for_speed_p (cfun))
1552 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1553
1554 /* When the function body would grow and inlining the function
1555 won't eliminate the need for offline copy of the function,
1556 don't inline. */
1557 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1558 || (!flag_inline_functions
1559 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1560 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1561 > e->caller->global.size + allowed_growth)
1562 && cgraph_estimate_growth (e->callee) > allowed_growth)
1563 {
1564 if (dump_file)
1565 {
1566 indent_to (dump_file, depth);
1567 fprintf (dump_file,
1568 "Not inlining: code size would grow by %i.\n",
1569 cgraph_estimate_size_after_inlining (1, e->caller,
1570 e->callee)
1571 - e->caller->global.size);
1572 }
1573 continue;
1574 }
1575 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1576 false)
1577 || e->call_stmt_cannot_inline_p)
1578 {
1579 if (dump_file)
1580 {
1581 indent_to (dump_file, depth);
1582 fprintf (dump_file, "Not inlining: %s.\n",
1583 cgraph_inline_failed_string (e->inline_failed));
1584 }
1585 continue;
1586 }
1587 if (!e->callee->analyzed)
1588 {
1589 if (dump_file)
1590 {
1591 indent_to (dump_file, depth);
1592 fprintf (dump_file,
1593 "Not inlining: Function body no longer available.\n");
1594 }
1595 continue;
1596 }
1597 if (!tree_can_inline_p (e))
1598 {
1599 if (dump_file)
1600 {
1601 indent_to (dump_file, depth);
1602 fprintf (dump_file,
1603 "Not inlining: %s.",
1604 cgraph_inline_failed_string (e->inline_failed));
1605 }
1606 continue;
1607 }
1608 if (cgraph_default_inline_p (e->callee, &failed_reason))
1609 inlined |= try_inline (e, mode, depth);
1610 }
1611 BITMAP_FREE (visited);
1612 }
1613 node->aux = (void *)(size_t) old_mode;
1614 return inlined;
1615 }
1616
1617 /* Because inlining might remove no-longer reachable nodes, we need to
1618 keep the array visible to garbage collector to avoid reading collected
1619 out nodes. */
1620 static int nnodes;
1621 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1622
1623 /* Do inlining of small functions. Doing so early helps profiling and other
1624 passes to be somewhat more effective and avoids some code duplication in
1625 later real inlining pass for testcases with very many function calls. */
1626 static unsigned int
1627 cgraph_early_inlining (void)
1628 {
1629 struct cgraph_node *node = cgraph_node (current_function_decl);
1630 unsigned int todo = 0;
1631 int iterations = 0;
1632
1633 if (sorrycount || errorcount)
1634 return 0;
1635 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1636 && cgraph_decide_inlining_incrementally (node,
1637 iterations
1638 ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0))
1639 {
1640 timevar_push (TV_INTEGRATION);
1641 todo |= optimize_inline_calls (current_function_decl);
1642 iterations++;
1643 timevar_pop (TV_INTEGRATION);
1644 }
1645 if (dump_file)
1646 fprintf (dump_file, "Iterations: %i\n", iterations);
1647 cfun->always_inline_functions_inlined = true;
1648 return todo;
1649 }
1650
1651 /* When inlining shall be performed. */
1652 static bool
1653 cgraph_gate_early_inlining (void)
1654 {
1655 return flag_early_inlining;
1656 }
1657
1658 struct gimple_opt_pass pass_early_inline =
1659 {
1660 {
1661 GIMPLE_PASS,
1662 "einline", /* name */
1663 cgraph_gate_early_inlining, /* gate */
1664 cgraph_early_inlining, /* execute */
1665 NULL, /* sub */
1666 NULL, /* next */
1667 0, /* static_pass_number */
1668 TV_INLINE_HEURISTICS, /* tv_id */
1669 0, /* properties_required */
1670 0, /* properties_provided */
1671 0, /* properties_destroyed */
1672 0, /* todo_flags_start */
1673 TODO_dump_func /* todo_flags_finish */
1674 }
1675 };
1676
1677 /* When inlining shall be performed. */
1678 static bool
1679 cgraph_gate_ipa_early_inlining (void)
1680 {
1681 return (flag_early_inlining
1682 && !in_lto_p
1683 && (flag_branch_probabilities || flag_test_coverage
1684 || profile_arc_flag));
1685 }
1686
1687 /* IPA pass wrapper for early inlining pass. We need to run early inlining
1688 before tree profiling so we have stand alone IPA pass for doing so. */
1689 struct simple_ipa_opt_pass pass_ipa_early_inline =
1690 {
1691 {
1692 SIMPLE_IPA_PASS,
1693 "einline_ipa", /* name */
1694 cgraph_gate_ipa_early_inlining, /* gate */
1695 NULL, /* execute */
1696 NULL, /* sub */
1697 NULL, /* next */
1698 0, /* static_pass_number */
1699 TV_INLINE_HEURISTICS, /* tv_id */
1700 0, /* properties_required */
1701 0, /* properties_provided */
1702 0, /* properties_destroyed */
1703 0, /* todo_flags_start */
1704 TODO_dump_cgraph /* todo_flags_finish */
1705 }
1706 };
1707
1708 /* See if statement might disappear after inlining. We are not terribly
1709 sophisticated, basically looking for simple abstraction penalty wrappers. */
1710
1711 static bool
1712 likely_eliminated_by_inlining_p (gimple stmt)
1713 {
1714 enum gimple_code code = gimple_code (stmt);
1715 switch (code)
1716 {
1717 case GIMPLE_RETURN:
1718 return true;
1719 case GIMPLE_ASSIGN:
1720 if (gimple_num_ops (stmt) != 2)
1721 return false;
1722
1723 /* Casts of parameters, loads from parameters passed by reference
1724 and stores to return value or parameters are probably free after
1725 inlining. */
1726 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1727 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1728 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1729 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1730 {
1731 tree rhs = gimple_assign_rhs1 (stmt);
1732 tree lhs = gimple_assign_lhs (stmt);
1733 tree inner_rhs = rhs;
1734 tree inner_lhs = lhs;
1735 bool rhs_free = false;
1736 bool lhs_free = false;
1737
1738 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1739 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1740 while (handled_component_p (inner_rhs)
1741 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1742 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1743
1744
1745 if (TREE_CODE (inner_rhs) == PARM_DECL
1746 || (TREE_CODE (inner_rhs) == SSA_NAME
1747 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1748 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1749 rhs_free = true;
1750 if (rhs_free && is_gimple_reg (lhs))
1751 lhs_free = true;
1752 if (((TREE_CODE (inner_lhs) == PARM_DECL
1753 || (TREE_CODE (inner_lhs) == SSA_NAME
1754 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1755 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1756 && inner_lhs != lhs)
1757 || TREE_CODE (inner_lhs) == RESULT_DECL
1758 || (TREE_CODE (inner_lhs) == SSA_NAME
1759 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1760 lhs_free = true;
1761 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1762 rhs_free = true;
1763 if (lhs_free && rhs_free)
1764 return true;
1765 }
1766 return false;
1767 default:
1768 return false;
1769 }
1770 }
1771
1772 /* Compute function body size parameters for NODE. */
1773
1774 static void
1775 estimate_function_body_sizes (struct cgraph_node *node)
1776 {
1777 gcov_type time = 0;
1778 gcov_type time_inlining_benefit = 0;
1779 int size = 0;
1780 int size_inlining_benefit = 0;
1781 basic_block bb;
1782 gimple_stmt_iterator bsi;
1783 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1784 tree arg;
1785 int freq;
1786 tree funtype = TREE_TYPE (node->decl);
1787
1788 if (dump_file)
1789 fprintf (dump_file, "Analyzing function body size: %s\n",
1790 cgraph_node_name (node));
1791
1792 gcc_assert (my_function && my_function->cfg);
1793 FOR_EACH_BB_FN (bb, my_function)
1794 {
1795 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1796 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1797 {
1798 gimple stmt = gsi_stmt (bsi);
1799 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1800 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1801
1802 if (dump_file && (dump_flags & TDF_DETAILS))
1803 {
1804 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1805 freq, this_size, this_time);
1806 print_gimple_stmt (dump_file, stmt, 0, 0);
1807 }
1808 this_time *= freq;
1809 time += this_time;
1810 size += this_size;
1811 if (likely_eliminated_by_inlining_p (stmt))
1812 {
1813 size_inlining_benefit += this_size;
1814 time_inlining_benefit += this_time;
1815 if (dump_file && (dump_flags & TDF_DETAILS))
1816 fprintf (dump_file, " Likely eliminated\n");
1817 }
1818 gcc_assert (time >= 0);
1819 gcc_assert (size >= 0);
1820 }
1821 }
1822 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1823 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1824 / CGRAPH_FREQ_BASE);
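  /* The per-statement times above are weighted by basic block frequency,
     which is expressed relative to CGRAPH_FREQ_BASE; the divisions above
     scale the totals back to statement units.  For example, a statement of
     cost 4 in a block executed at half the entry frequency contributes
     4 * (CGRAPH_FREQ_BASE / 2) to TIME, i.e. 2 after the rounding division.  */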
1825 if (dump_file)
1826 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1827 (int)time, (int)time_inlining_benefit,
1828 size, size_inlining_benefit);
1829 time_inlining_benefit += eni_time_weights.call_cost;
1830 size_inlining_benefit += eni_size_weights.call_cost;
1831 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1832 {
1833 int cost = estimate_move_cost (TREE_TYPE (funtype));
1834 time_inlining_benefit += cost;
1835 size_inlining_benefit += cost;
1836 }
1837 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1838 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1839 {
1840 int cost = estimate_move_cost (TREE_TYPE (arg));
1841 time_inlining_benefit += cost;
1842 size_inlining_benefit += cost;
1843 }
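  /* The inlining benefit now also accounts for what is saved at the call
     site when the call disappears: the call itself plus moving the arguments
     and the return value.  For instance, for int f (int a, long b) the size
     benefit gains call_cost plus the move costs of an int, a long and the
     int return value, on top of the statements of the body that are likely
     eliminated.  */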
1844 if (time_inlining_benefit > MAX_TIME)
1845 time_inlining_benefit = MAX_TIME;
1846 if (time > MAX_TIME)
1847 time = MAX_TIME;
1848 inline_summary (node)->self_time = time;
1849 inline_summary (node)->self_size = size;
1850 if (dump_file)
1851 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1852 (int)time, (int)time_inlining_benefit,
1853 size, size_inlining_benefit);
1854 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1855 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
1856 }
1857
1858 /* Compute parameters of functions used by inliner. */
1859 unsigned int
1860 compute_inline_parameters (struct cgraph_node *node)
1861 {
1862 HOST_WIDE_INT self_stack_size;
1863
1864 gcc_assert (!node->global.inlined_to);
1865
1866 /* Estimate the stack size for the function, but not at -O0, because
1867    estimated_stack_frame_size is a quadratic problem.  */
1868 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1869 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1870 node->global.estimated_stack_size = self_stack_size;
1871 node->global.stack_frame_offset = 0;
1872
1873 /* Can this function be inlined at all? */
1874 node->local.inlinable = tree_inlinable_function_p (node->decl);
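  /* DECL_DISREGARD_INLINE_LIMITS is typically set for functions declared
     with the always_inline attribute; such functions are inlined no matter
     what the size limits say.  */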
1875 if (node->local.inlinable && !node->local.disregard_inline_limits)
1876 node->local.disregard_inline_limits
1877 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
1878 estimate_function_body_sizes (node);
1879 /* Inlining characteristics are maintained by cgraph_mark_inline.  */
1880 node->global.time = inline_summary (node)->self_time;
1881 node->global.size = inline_summary (node)->self_size;
1882 return 0;
1883 }
1884
1885
1886 /* Compute parameters of functions used by inliner using
1887 current_function_decl. */
1888 static unsigned int
1889 compute_inline_parameters_for_current (void)
1890 {
1891 compute_inline_parameters (cgraph_node (current_function_decl));
1892 return 0;
1893 }
1894
1895 struct gimple_opt_pass pass_inline_parameters =
1896 {
1897 {
1898 GIMPLE_PASS,
1899 "inline_param", /* name */
1900 NULL, /* gate */
1901 compute_inline_parameters_for_current,/* execute */
1902 NULL, /* sub */
1903 NULL, /* next */
1904 0, /* static_pass_number */
1905 TV_INLINE_HEURISTICS, /* tv_id */
1906 0, /* properties_required */
1907 0, /* properties_provided */
1908 0, /* properties_destroyed */
1909 0, /* todo_flags_start */
1910 0 /* todo_flags_finish */
1911 }
1912 };
1913
1914 /* This function performs intraprocedural analysis in NODE that is required to
1915 inline indirect calls. */
1916 static void
1917 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
1918 {
1919 struct cgraph_edge *cs;
1920
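  /* When IPA-CP is enabled it builds the parameter descriptors and jump
     functions itself, so only do that work here when it is disabled.  */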
1921 if (!flag_ipa_cp)
1922 {
1923 ipa_initialize_node_params (node);
1924 ipa_detect_param_modifications (node);
1925 }
1926 ipa_analyze_params_uses (node);
1927
1928 if (!flag_ipa_cp)
1929 for (cs = node->callees; cs; cs = cs->next_callee)
1930 {
1931 ipa_count_arguments (cs);
1932 ipa_compute_jump_functions (cs);
1933 }
1934
1935 if (dump_file)
1936 {
1937 ipa_print_node_params (dump_file, node);
1938 ipa_print_node_jump_functions (dump_file, node);
1939 }
1940 }
1941
1942 /* Analyze the function body: compute inline parameters and, when indirect
     inlining is enabled, the data needed to inline indirect calls.  */
1943 static void
1944 analyze_function (struct cgraph_node *node)
1945 {
1946 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
1947 current_function_decl = node->decl;
1948
1949 compute_inline_parameters (node);
1950 if (flag_indirect_inlining)
1951 inline_indirect_intraprocedural_analysis (node);
1952
1953 current_function_decl = NULL;
1954 pop_cfun ();
1955 }
1956
1957 /* Called when new function is inserted to callgraph late. */
1958 static void
1959 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
1960 {
1961 analyze_function (node);
1962 }
1963
1964 /* Compute the inline summaries of all analyzed functions and register a hook
     to analyze functions inserted into the callgraph later.  */
1965 static void
1966 inline_generate_summary (void)
1967 {
1968 struct cgraph_node *node;
1969
1970 function_insertion_hook_holder =
1971 cgraph_add_function_insertion_hook (&add_new_function, NULL);
1972
1973 if (flag_indirect_inlining)
1974 {
1975 ipa_register_cgraph_hooks ();
1976 ipa_check_create_node_params ();
1977 ipa_check_create_edge_args ();
1978 }
1979
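  /* Analyze every function that already has a body; functions inserted into
     the callgraph later are handled by the insertion hook registered above.  */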
1980 for (node = cgraph_nodes; node; node = node->next)
1981 if (node->analyzed)
1982 analyze_function (node);
1983
1984 return;
1985 }
1986
1987 /* Apply inline plan to function. */
1988 static unsigned int
1989 inline_transform (struct cgraph_node *node)
1990 {
1991 unsigned int todo = 0;
1992 struct cgraph_edge *e;
1993
1994 /* FIXME: Currently the pass manager is adding the inline transform more than
1995    once to some clones.  This needs revisiting after WPA cleanups.  */
1996 if (cfun->after_inlining)
1997 return 0;
1998
1999 /* We might need the body of this function so that we can expand
2000 it inline somewhere else. */
2001 if (cgraph_preserve_function_body_p (node->decl))
2002 save_inline_function_body (node);
2003
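  /* Run the tree inliner only when at least one call in this function was
     marked inline in the plan, or when -Winline diagnostics are requested.  */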
2004 for (e = node->callees; e; e = e->next_callee)
2005 if (!e->inline_failed || warn_inline)
2006 break;
2007
2008 if (e)
2009 {
2010 timevar_push (TV_INTEGRATION);
2011 todo = optimize_inline_calls (current_function_decl);
2012 timevar_pop (TV_INTEGRATION);
2013 }
2014 cfun->always_inline_functions_inlined = true;
2015 cfun->after_inlining = true;
2016 return todo | execute_fixup_cfg ();
2017 }
2018
2019 /* Read inline summary. Jump functions are shared among ipa-cp
2020    and inliner, so when ipa-cp is active, we don't need to read them
2021 twice. */
2022
2023 static void
2024 inline_read_summary (void)
2025 {
2026 if (flag_indirect_inlining)
2027 {
2028 ipa_register_cgraph_hooks ();
2029 if (!flag_ipa_cp)
2030 ipa_prop_read_jump_functions ();
2031 }
2032 function_insertion_hook_holder =
2033 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2034 }
2035
2036 /* Write inline summaries of the nodes in SET.
2037 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
2038 active, we don't need to write them twice. */
2039
2040 static void
2041 inline_write_summary (cgraph_node_set set)
2042 {
2043 if (flag_indirect_inlining && !flag_ipa_cp)
2044 ipa_prop_write_jump_functions (set);
2045 }
2046
2047 struct ipa_opt_pass_d pass_ipa_inline =
2048 {
2049 {
2050 IPA_PASS,
2051 "inline", /* name */
2052 NULL, /* gate */
2053 cgraph_decide_inlining, /* execute */
2054 NULL, /* sub */
2055 NULL, /* next */
2056 0, /* static_pass_number */
2057 TV_INLINE_HEURISTICS, /* tv_id */
2058 0, /* properties_required */
2059 0, /* properties_provided */
2060 0, /* properties_destroyed */
2061 TODO_remove_functions, /* todo_flags_start */
2062 TODO_dump_cgraph | TODO_dump_func
2063 | TODO_remove_functions /* todo_flags_finish */
2064 },
2065 inline_generate_summary, /* generate_summary */
2066 inline_write_summary, /* write_summary */
2067 inline_read_summary, /* read_summary */
2068 NULL, /* function_read_summary */
2069 lto_ipa_fixup_call_notes, /* stmt_fixup */
2070 0, /* TODOs */
2071 inline_transform, /* function_transform */
2072 NULL, /* variable_transform */
2073 };
2074
2075
2076 #include "gt-ipa-inline.h"