gcc/ipa-inline-analysis.c: re PR ipa/64153 (r218205 miscompiles libgomp)
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Analysis used by the inliner and other passes limiting code size growth.
22
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
32
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
37
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
40
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. they are known to be optimized away when
46 the context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple test
50 of the form described above.
51
52 In order to make a predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make a clause (possibly) true, one of the conditions
54 it mentions must be (possibly) true. There are fixed bounds on the
55 number of clauses and conditions, and all the manipulation functions
56 are conservative in the positive direction, i.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
58
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
62
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. IPA
65 inliner performs analysis via its analyze_function method. */
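/* For illustration (a hypothetical condition vector): suppose cond #0 is
   "op0 == 0" and cond #1 is "op1 changed".  These occupy clause bits 2 and 3,
   because the first two bits are reserved for the "false" and "not inlined"
   conditions.  The predicate

       (not inlined || op0 == 0) && (op1 changed)

   is then stored as two clause bitmaps, {not inlined, op0 == 0} and
   {op1 changed}; it can only be true when every clause contains at least one
   possibly-true condition.  The true predicate is the empty clause list and
   the false predicate is the single clause {false}.  */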
66
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "tm.h"
71 #include "tree.h"
72 #include "stor-layout.h"
73 #include "stringpool.h"
74 #include "print-tree.h"
75 #include "tree-inline.h"
76 #include "langhooks.h"
77 #include "flags.h"
78 #include "diagnostic.h"
79 #include "gimple-pretty-print.h"
80 #include "params.h"
81 #include "tree-pass.h"
82 #include "coverage.h"
83 #include "predict.h"
84 #include "vec.h"
85 #include "hashtab.h"
86 #include "hash-set.h"
87 #include "machmode.h"
88 #include "hard-reg-set.h"
89 #include "input.h"
90 #include "function.h"
91 #include "dominance.h"
92 #include "cfg.h"
93 #include "cfganal.h"
94 #include "basic-block.h"
95 #include "tree-ssa-alias.h"
96 #include "internal-fn.h"
97 #include "gimple-expr.h"
98 #include "is-a.h"
99 #include "gimple.h"
100 #include "gimple-iterator.h"
101 #include "gimple-ssa.h"
102 #include "tree-cfg.h"
103 #include "tree-phinodes.h"
104 #include "ssa-iterators.h"
105 #include "tree-ssanames.h"
106 #include "tree-ssa-loop-niter.h"
107 #include "tree-ssa-loop.h"
108 #include "hash-map.h"
109 #include "plugin-api.h"
110 #include "ipa-ref.h"
111 #include "cgraph.h"
112 #include "alloc-pool.h"
113 #include "ipa-prop.h"
114 #include "lto-streamer.h"
115 #include "data-streamer.h"
116 #include "tree-streamer.h"
117 #include "ipa-inline.h"
118 #include "cfgloop.h"
119 #include "tree-scalar-evolution.h"
120 #include "ipa-utils.h"
121 #include "cilk.h"
122 #include "cfgexpand.h"
123
124 /* Estimated runtime of a function can easily run into huge numbers with many
125 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
126 integer. For anything larger we use gcov_type. */
127 #define MAX_TIME 500000
128
129 /* Number of bits in an integer, but we really want to be stable across different
130 hosts. */
131 #define NUM_CONDITIONS 32
132
133 enum predicate_conditions
134 {
135 predicate_false_condition = 0,
136 predicate_not_inlined_condition = 1,
137 predicate_first_dynamic_condition = 2
138 };
139
140 /* Special condition code we use to represent a test that the operand is not a
141 compile time constant. */
142 #define IS_NOT_CONSTANT ERROR_MARK
143 /* Special condition code we use to represent test that operand is not changed
144 across invocation of the function. When operand IS_NOT_CONSTANT it is always
145 CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a given
146 percentage of executions even when they are not compile time constants. */
147 #define CHANGED IDENTIFIER_NODE
148
149 /* Holders of ipa cgraph hooks: */
150 static struct cgraph_node_hook_list *function_insertion_hook_holder;
151 static struct cgraph_node_hook_list *node_removal_hook_holder;
152 static struct cgraph_2node_hook_list *node_duplication_hook_holder;
153 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
154 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
155 static void inline_node_removal_hook (struct cgraph_node *, void *);
156 static void inline_node_duplication_hook (struct cgraph_node *,
157 struct cgraph_node *, void *);
158 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
159 static void inline_edge_duplication_hook (struct cgraph_edge *,
160 struct cgraph_edge *, void *);
161
162 /* VECtor holding inline summaries.
163 In GGC memory because conditions might point to constant trees. */
164 vec<inline_summary_t, va_gc> *inline_summary_vec;
165 vec<inline_edge_summary_t> inline_edge_summary_vec;
166
167 /* Cached node/edge growths. */
168 vec<int> node_growth_cache;
169 vec<edge_growth_cache_entry> edge_growth_cache;
170
171 /* Edge predicates go here. */
172 static alloc_pool edge_predicate_pool;
173
174 /* Return true predicate (tautology).
175 We represent it by empty list of clauses. */
176
177 static inline struct predicate
178 true_predicate (void)
179 {
180 struct predicate p;
181 p.clause[0] = 0;
182 return p;
183 }
184
185
186 /* Return predicate testing single condition number COND. */
187
188 static inline struct predicate
189 single_cond_predicate (int cond)
190 {
191 struct predicate p;
192 p.clause[0] = 1 << cond;
193 p.clause[1] = 0;
194 return p;
195 }
196
197
198 /* Return the false predicate. Its first clause requires the false condition. */
199
200 static inline struct predicate
201 false_predicate (void)
202 {
203 return single_cond_predicate (predicate_false_condition);
204 }
205
206
207 /* Return true if P is (true). */
208
209 static inline bool
210 true_predicate_p (struct predicate *p)
211 {
212 return !p->clause[0];
213 }
214
215
216 /* Return true if P is (false). */
217
218 static inline bool
219 false_predicate_p (struct predicate *p)
220 {
221 if (p->clause[0] == (1 << predicate_false_condition))
222 {
223 gcc_checking_assert (!p->clause[1]
224 && p->clause[0] == 1 << predicate_false_condition);
225 return true;
226 }
227 return false;
228 }
229
230
231 /* Return predicate that is set true when function is not inlined. */
232
233 static inline struct predicate
234 not_inlined_predicate (void)
235 {
236 return single_cond_predicate (predicate_not_inlined_condition);
237 }
238
239 /* Simple description of whether a memory load or a condition refers to a load
240 from an aggregate and if so, how and where from in the aggregate.
241 Individual fields have the same meaning like fields with the same name in
242 struct condition. */
243
244 struct agg_position_info
245 {
246 HOST_WIDE_INT offset;
247 bool agg_contents;
248 bool by_ref;
249 };
250
251 /* Add condition to condition list CONDS. AGGPOS describes whether the used
252 operand is loaded from an aggregate and where in the aggregate it is. It can
253 be NULL, which means this is not a load from an aggregate. */
254
255 static struct predicate
256 add_condition (struct inline_summary *summary, int operand_num,
257 struct agg_position_info *aggpos,
258 enum tree_code code, tree val)
259 {
260 int i;
261 struct condition *c;
262 struct condition new_cond;
263 HOST_WIDE_INT offset;
264 bool agg_contents, by_ref;
265
266 if (aggpos)
267 {
268 offset = aggpos->offset;
269 agg_contents = aggpos->agg_contents;
270 by_ref = aggpos->by_ref;
271 }
272 else
273 {
274 offset = 0;
275 agg_contents = false;
276 by_ref = false;
277 }
278
279 gcc_checking_assert (operand_num >= 0);
280 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
281 {
282 if (c->operand_num == operand_num
283 && c->code == code
284 && c->val == val
285 && c->agg_contents == agg_contents
286 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
287 return single_cond_predicate (i + predicate_first_dynamic_condition);
288 }
289 /* Too many conditions. Give up and return constant true. */
290 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
291 return true_predicate ();
292
293 new_cond.operand_num = operand_num;
294 new_cond.code = code;
295 new_cond.val = val;
296 new_cond.agg_contents = agg_contents;
297 new_cond.by_ref = by_ref;
298 new_cond.offset = offset;
299 vec_safe_push (summary->conds, new_cond);
300 return single_cond_predicate (i + predicate_first_dynamic_condition);
301 }
302
303
304 /* Add clause CLAUSE into the predicate P. */
305
306 static inline void
307 add_clause (conditions conditions, struct predicate *p, clause_t clause)
308 {
309 int i;
310 int i2;
311 int insert_here = -1;
312 int c1, c2;
313
314 /* True clause. */
315 if (!clause)
316 return;
317
318 /* False clause makes the whole predicate false. Kill the other variants. */
319 if (clause == (1 << predicate_false_condition))
320 {
321 p->clause[0] = (1 << predicate_false_condition);
322 p->clause[1] = 0;
323 return;
324 }
325 if (false_predicate_p (p))
326 return;
327
328 /* No one should be silly enough to add false into nontrivial clauses. */
329 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
330
331 /* Look where to insert the clause. At the same time prune out
332 clauses of P that are implied by the new clause and thus
333 redundant. */
334 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
335 {
336 p->clause[i2] = p->clause[i];
337
338 if (!p->clause[i])
339 break;
340
341 /* If p->clause[i] implies clause, there is nothing to add. */
342 if ((p->clause[i] & clause) == p->clause[i])
343 {
344 /* We had nothing to add, none of clauses should've become
345 redundant. */
346 gcc_checking_assert (i == i2);
347 return;
348 }
349
350 if (p->clause[i] < clause && insert_here < 0)
351 insert_here = i2;
352
353 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
354 Otherwise the p->clause[i] has to stay. */
355 if ((p->clause[i] & clause) != clause)
356 i2++;
357 }
358
359 /* Look for clauses that are obviously true. I.e.
360 op0 == 5 || op0 != 5. */
361 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
362 {
363 condition *cc1;
364 if (!(clause & (1 << c1)))
365 continue;
366 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
367 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
368 and thus there is no point for looking for them. */
369 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
370 continue;
371 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
372 if (clause & (1 << c2))
373 {
374 condition *cc1 =
375 &(*conditions)[c1 - predicate_first_dynamic_condition];
376 condition *cc2 =
377 &(*conditions)[c2 - predicate_first_dynamic_condition];
378 if (cc1->operand_num == cc2->operand_num
379 && cc1->val == cc2->val
380 && cc2->code != IS_NOT_CONSTANT
381 && cc2->code != CHANGED
382 && cc1->code == invert_tree_comparison
383 (cc2->code,
384 HONOR_NANS (TYPE_MODE (TREE_TYPE (cc1->val)))))
385 return;
386 }
387 }
388
389
390 /* We have run out of variants. Be conservative in the positive direction. */
391 if (i2 == MAX_CLAUSES)
392 return;
393 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
394 p->clause[i2 + 1] = 0;
395 if (insert_here >= 0)
396 for (; i2 > insert_here; i2--)
397 p->clause[i2] = p->clause[i2 - 1];
398 else
399 insert_here = i2;
400 p->clause[insert_here] = clause;
401 }
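/* Example of the pruning above (hypothetical conditions): adding the clause
   {op0 == 0} to the predicate (op0 == 0 || op1 changed) && (op2 != 4) drops
   the weaker first clause, which is implied by the new one, giving
   (op0 == 0) && (op2 != 4).  Conversely, adding a clause that is already
   implied by an existing clause leaves the predicate unchanged.  */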
402
403
404 /* Return P & P2. */
405
406 static struct predicate
407 and_predicates (conditions conditions,
408 struct predicate *p, struct predicate *p2)
409 {
410 struct predicate out = *p;
411 int i;
412
413 /* Avoid busy work. */
414 if (false_predicate_p (p2) || true_predicate_p (p))
415 return *p2;
416 if (false_predicate_p (p) || true_predicate_p (p2))
417 return *p;
418
419 /* See how far predicates match. */
420 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
421 {
422 gcc_checking_assert (i < MAX_CLAUSES);
423 }
424
425 /* Combine the rest of the predicates. */
426 for (; p2->clause[i]; i++)
427 {
428 gcc_checking_assert (i < MAX_CLAUSES);
429 add_clause (conditions, &out, p2->clause[i]);
430 }
431 return out;
432 }
433
434
435 /* Return true if predicates are obviously equal. */
436
437 static inline bool
438 predicates_equal_p (struct predicate *p, struct predicate *p2)
439 {
440 int i;
441 for (i = 0; p->clause[i]; i++)
442 {
443 gcc_checking_assert (i < MAX_CLAUSES);
444 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
445 gcc_checking_assert (!p2->clause[i]
446 || p2->clause[i] > p2->clause[i + 1]);
447 if (p->clause[i] != p2->clause[i])
448 return false;
449 }
450 return !p2->clause[i];
451 }
452
453
454 /* Return P | P2. */
455
456 static struct predicate
457 or_predicates (conditions conditions,
458 struct predicate *p, struct predicate *p2)
459 {
460 struct predicate out = true_predicate ();
461 int i, j;
462
463 /* Avoid busy work. */
464 if (false_predicate_p (p2) || true_predicate_p (p))
465 return *p;
466 if (false_predicate_p (p) || true_predicate_p (p2))
467 return *p2;
468 if (predicates_equal_p (p, p2))
469 return *p;
470
471 /* OK, combine the predicates. */
472 for (i = 0; p->clause[i]; i++)
473 for (j = 0; p2->clause[j]; j++)
474 {
475 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
476 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
477 }
478 return out;
479 }
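/* Note on the two combinators above: and_predicates adds the clauses of P2
   into P through add_clause, while or_predicates distributes over the clause
   lists, e.g. (A) && (B) | (C) && (D) becomes
   (A || C) && (A || D) && (B || C) && (B || D), where each letter stands for
   one clause bitmap; add_clause then prunes unions that are redundant or
   obviously true.  */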
480
481
482 /* Given the partial truth assignment in POSSIBLE_TRUTHS, return false
483 if predicate P is known to be false. */
484
485 static bool
486 evaluate_predicate (struct predicate *p, clause_t possible_truths)
487 {
488 int i;
489
490 /* True remains true. */
491 if (true_predicate_p (p))
492 return true;
493
494 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
495
496 /* See if we can find clause we can disprove. */
497 for (i = 0; p->clause[i]; i++)
498 {
499 gcc_checking_assert (i < MAX_CLAUSES);
500 if (!(p->clause[i] & possible_truths))
501 return false;
502 }
503 return true;
504 }
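/* For example, with POSSIBLE_TRUTHS containing only "op0 == 0" and
   "not inlined", the predicate (op0 == 0 || op1 changed) && (op2 != 4) is
   known false, because no condition of its second clause can be true.  */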
505
506 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
507 instruction will be recomputed per invocation of the inlined call. */
508
509 static int
510 predicate_probability (conditions conds,
511 struct predicate *p, clause_t possible_truths,
512 vec<inline_param_summary> inline_param_summary)
513 {
514 int i;
515 int combined_prob = REG_BR_PROB_BASE;
516
517 /* True remains true. */
518 if (true_predicate_p (p))
519 return REG_BR_PROB_BASE;
520
521 if (false_predicate_p (p))
522 return 0;
523
524 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
525
526 /* See if we can find clause we can disprove. */
527 for (i = 0; p->clause[i]; i++)
528 {
529 gcc_checking_assert (i < MAX_CLAUSES);
530 if (!(p->clause[i] & possible_truths))
531 return 0;
532 else
533 {
534 int this_prob = 0;
535 int i2;
536 if (!inline_param_summary.exists ())
537 return REG_BR_PROB_BASE;
538 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
539 if ((p->clause[i] & possible_truths) & (1 << i2))
540 {
541 if (i2 >= predicate_first_dynamic_condition)
542 {
543 condition *c =
544 &(*conds)[i2 - predicate_first_dynamic_condition];
545 if (c->code == CHANGED
546 && (c->operand_num <
547 (int) inline_param_summary.length ()))
548 {
549 int iprob =
550 inline_param_summary[c->operand_num].change_prob;
551 this_prob = MAX (this_prob, iprob);
552 }
553 else
554 this_prob = REG_BR_PROB_BASE;
555 }
556 else
557 this_prob = REG_BR_PROB_BASE;
558 }
559 combined_prob = MIN (this_prob, combined_prob);
560 if (!combined_prob)
561 return 0;
562 }
563 }
564 return combined_prob;
565 }
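/* In effect the result is the minimum over the clauses of the maximum change
   probability among the conditions that keep each clause possibly true.
   E.g. for (op0 changed) && (op1 changed) with change probabilities of 30%
   and 60% of REG_BR_PROB_BASE, the combined estimate is 30%, assuming
   per-parameter data is present in INLINE_PARAM_SUMMARY.  */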
566
567
568 /* Dump conditional COND. */
569
570 static void
571 dump_condition (FILE *f, conditions conditions, int cond)
572 {
573 condition *c;
574 if (cond == predicate_false_condition)
575 fprintf (f, "false");
576 else if (cond == predicate_not_inlined_condition)
577 fprintf (f, "not inlined");
578 else
579 {
580 c = &(*conditions)[cond - predicate_first_dynamic_condition];
581 fprintf (f, "op%i", c->operand_num);
582 if (c->agg_contents)
583 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
584 c->by_ref ? "ref " : "", c->offset);
585 if (c->code == IS_NOT_CONSTANT)
586 {
587 fprintf (f, " not constant");
588 return;
589 }
590 if (c->code == CHANGED)
591 {
592 fprintf (f, " changed");
593 return;
594 }
595 fprintf (f, " %s ", op_symbol_code (c->code));
596 print_generic_expr (f, c->val, 1);
597 }
598 }
599
600
601 /* Dump clause CLAUSE. */
602
603 static void
604 dump_clause (FILE *f, conditions conds, clause_t clause)
605 {
606 int i;
607 bool found = false;
608 fprintf (f, "(");
609 if (!clause)
610 fprintf (f, "true");
611 for (i = 0; i < NUM_CONDITIONS; i++)
612 if (clause & (1 << i))
613 {
614 if (found)
615 fprintf (f, " || ");
616 found = true;
617 dump_condition (f, conds, i);
618 }
619 fprintf (f, ")");
620 }
621
622
623 /* Dump predicate PREDICATE. */
624
625 static void
626 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
627 {
628 int i;
629 if (true_predicate_p (pred))
630 dump_clause (f, conds, 0);
631 else
632 for (i = 0; pred->clause[i]; i++)
633 {
634 if (i)
635 fprintf (f, " && ");
636 dump_clause (f, conds, pred->clause[i]);
637 }
638 fprintf (f, "\n");
639 }
640
641
642 /* Dump inline hints. */
643 void
644 dump_inline_hints (FILE *f, inline_hints hints)
645 {
646 if (!hints)
647 return;
648 fprintf (f, "inline hints:");
649 if (hints & INLINE_HINT_indirect_call)
650 {
651 hints &= ~INLINE_HINT_indirect_call;
652 fprintf (f, " indirect_call");
653 }
654 if (hints & INLINE_HINT_loop_iterations)
655 {
656 hints &= ~INLINE_HINT_loop_iterations;
657 fprintf (f, " loop_iterations");
658 }
659 if (hints & INLINE_HINT_loop_stride)
660 {
661 hints &= ~INLINE_HINT_loop_stride;
662 fprintf (f, " loop_stride");
663 }
664 if (hints & INLINE_HINT_same_scc)
665 {
666 hints &= ~INLINE_HINT_same_scc;
667 fprintf (f, " same_scc");
668 }
669 if (hints & INLINE_HINT_in_scc)
670 {
671 hints &= ~INLINE_HINT_in_scc;
672 fprintf (f, " in_scc");
673 }
674 if (hints & INLINE_HINT_cross_module)
675 {
676 hints &= ~INLINE_HINT_cross_module;
677 fprintf (f, " cross_module");
678 }
679 if (hints & INLINE_HINT_declared_inline)
680 {
681 hints &= ~INLINE_HINT_declared_inline;
682 fprintf (f, " declared_inline");
683 }
684 if (hints & INLINE_HINT_array_index)
685 {
686 hints &= ~INLINE_HINT_array_index;
687 fprintf (f, " array_index");
688 }
689 if (hints & INLINE_HINT_known_hot)
690 {
691 hints &= ~INLINE_HINT_known_hot;
692 fprintf (f, " known_hot");
693 }
694 gcc_assert (!hints);
695 }
696
697
698 /* Record SIZE and TIME under condition PRED into the inline summary. */
699
700 static void
701 account_size_time (struct inline_summary *summary, int size, int time,
702 struct predicate *pred)
703 {
704 size_time_entry *e;
705 bool found = false;
706 int i;
707
708 if (false_predicate_p (pred))
709 return;
710
711 /* We need to create the initial empty unconditional clause, but otherwise
712 we don't need to account empty times and sizes. */
713 if (!size && !time && summary->entry)
714 return;
715
716 /* Watch overflow that might result from insane profiles. */
717 if (time > MAX_TIME * INLINE_TIME_SCALE)
718 time = MAX_TIME * INLINE_TIME_SCALE;
719 gcc_assert (time >= 0);
720
721 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
722 if (predicates_equal_p (&e->predicate, pred))
723 {
724 found = true;
725 break;
726 }
727 if (i == 256)
728 {
729 i = 0;
730 found = true;
731 e = &(*summary->entry)[0];
732 gcc_assert (!e->predicate.clause[0]);
733 if (dump_file && (dump_flags & TDF_DETAILS))
734 fprintf (dump_file,
735 "\t\tReached limit on number of entries, "
736 "ignoring the predicate.");
737 }
738 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
739 {
740 fprintf (dump_file,
741 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
742 ((double) size) / INLINE_SIZE_SCALE,
743 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
744 dump_predicate (dump_file, summary->conds, pred);
745 }
746 if (!found)
747 {
748 struct size_time_entry new_entry;
749 new_entry.size = size;
750 new_entry.time = time;
751 new_entry.predicate = *pred;
752 vec_safe_push (summary->entry, new_entry);
753 }
754 else
755 {
756 e->size += size;
757 e->time += time;
758 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
759 e->time = MAX_TIME * INLINE_TIME_SCALE;
760 }
761 }
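/* For example, statements that always execute and cost 20 units of size are
   accounted under the (true) predicate, while 50 more units guarded by
   "op0 not constant" go into a separate entry; later size/time estimates sum
   only the entries whose predicates may be true in a given context, so a
   call site passing a constant op0 would be estimated at 20 rather than 70
   (the sizes here are illustrative, in INLINE_SIZE_SCALE units).  */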
762
763 /* Set predicate for edge E. */
764
765 static void
766 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
767 {
768 struct inline_edge_summary *es = inline_edge_summary (e);
769
770 /* If the edge is determined to be never executed, redirect it
771 to BUILT_IN_UNREACHABLE to save the inliner from inlining into it. */
772 if (predicate && false_predicate_p (predicate) && e->callee)
773 {
774 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
775
776 e->redirect_callee (cgraph_node::get_create
777 (builtin_decl_implicit (BUILT_IN_UNREACHABLE)));
778 e->inline_failed = CIF_UNREACHABLE;
779 if (callee)
780 callee->remove_symbol_and_inline_clones ();
781 }
782 if (predicate && !true_predicate_p (predicate))
783 {
784 if (!es->predicate)
785 es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
786 *es->predicate = *predicate;
787 }
788 else
789 {
790 if (es->predicate)
791 pool_free (edge_predicate_pool, es->predicate);
792 es->predicate = NULL;
793 }
794 }
795
796 /* Set predicate for hint *P. */
797
798 static void
799 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
800 {
801 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
802 {
803 if (*p)
804 pool_free (edge_predicate_pool, *p);
805 *p = NULL;
806 }
807 else
808 {
809 if (!*p)
810 *p = (struct predicate *) pool_alloc (edge_predicate_pool);
811 **p = new_predicate;
812 }
813 }
814
815
816 /* KNOWN_VALS is partial mapping of parameters of NODE to constant values.
817 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
818 Return clause of possible truths. When INLINE_P is true, assume that we are
819 inlining.
820
821 ERROR_MARK means compile time invariant. */
822
823 static clause_t
824 evaluate_conditions_for_known_args (struct cgraph_node *node,
825 bool inline_p,
826 vec<tree> known_vals,
827 vec<ipa_agg_jump_function_p>
828 known_aggs)
829 {
830 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
831 struct inline_summary *info = inline_summary (node);
832 int i;
833 struct condition *c;
834
835 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
836 {
837 tree val;
838 tree res;
839
840 /* We allow call stmt to have fewer arguments than the callee function
841 (especially for K&R style programs). So bound check here (we assume
842 known_aggs vector, if non-NULL, has the same length as
843 known_vals). */
844 gcc_checking_assert (!known_aggs.exists ()
845 || (known_vals.length () == known_aggs.length ()));
846 if (c->operand_num >= (int) known_vals.length ())
847 {
848 clause |= 1 << (i + predicate_first_dynamic_condition);
849 continue;
850 }
851
852 if (c->agg_contents)
853 {
854 struct ipa_agg_jump_function *agg;
855
856 if (c->code == CHANGED
857 && !c->by_ref
858 && (known_vals[c->operand_num] == error_mark_node))
859 continue;
860
861 if (known_aggs.exists ())
862 {
863 agg = known_aggs[c->operand_num];
864 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
865 }
866 else
867 val = NULL_TREE;
868 }
869 else
870 {
871 val = known_vals[c->operand_num];
872 if (val == error_mark_node && c->code != CHANGED)
873 val = NULL_TREE;
874 }
875
876 if (!val)
877 {
878 clause |= 1 << (i + predicate_first_dynamic_condition);
879 continue;
880 }
881 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
882 continue;
883
884 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
885 TYPE_SIZE (TREE_TYPE (val)), 0))
886 {
887 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
888
889 res = val
890 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
891 : NULL;
892
893 if (res && integer_zerop (res))
894 continue;
895 }
896 clause |= 1 << (i + predicate_first_dynamic_condition);
897 }
898 return clause;
899 }
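/* A small worked example (hypothetical condition): if the summary contains
   the single condition "op0 != 0" and the caller passes the constant 0 for
   op0, the comparison folds to false and the returned clause leaves that
   condition's bit clear, so predicates requiring "op0 != 0" evaluate to
   false for this call.  If op0 is unknown, the bit stays set and such
   predicates remain possibly true.  */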
900
901
902 /* Work out what conditions might be true at invocation of E. */
903
904 static void
905 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
906 clause_t *clause_ptr,
907 vec<tree> *known_vals_ptr,
908 vec<ipa_polymorphic_call_context>
909 *known_contexts_ptr,
910 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
911 {
912 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
913 struct inline_summary *info = inline_summary (callee);
914 vec<tree> known_vals = vNULL;
915 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
916
917 if (clause_ptr)
918 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
919 if (known_vals_ptr)
920 known_vals_ptr->create (0);
921 if (known_contexts_ptr)
922 known_contexts_ptr->create (0);
923
924 if (ipa_node_params_vector.exists ()
925 && !e->call_stmt_cannot_inline_p
926 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
927 {
928 struct ipa_node_params *parms_info;
929 struct ipa_edge_args *args = IPA_EDGE_REF (e);
930 struct inline_edge_summary *es = inline_edge_summary (e);
931 int i, count = ipa_get_cs_argument_count (args);
932
933 if (e->caller->global.inlined_to)
934 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
935 else
936 parms_info = IPA_NODE_REF (e->caller);
937
938 if (count && (info->conds || known_vals_ptr))
939 known_vals.safe_grow_cleared (count);
940 if (count && (info->conds || known_aggs_ptr))
941 known_aggs.safe_grow_cleared (count);
942 if (count && known_contexts_ptr)
943 known_contexts_ptr->safe_grow_cleared (count);
944
945 for (i = 0; i < count; i++)
946 {
947 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
948 tree cst = ipa_value_from_jfunc (parms_info, jf);
949 if (cst)
950 {
951 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
952 if (known_vals.exists ())
953 known_vals[i] = cst;
954 }
955 else if (inline_p && !es->param[i].change_prob)
956 known_vals[i] = error_mark_node;
957
958 if (known_contexts_ptr)
959 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
960 i, jf);
961 /* TODO: When IPA-CP starts propagating and merging aggregate jump
962 functions, use its knowledge of the caller too, just like the
963 scalar case above. */
964 known_aggs[i] = &jf->agg;
965 }
966 }
967
968 if (clause_ptr)
969 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
970 known_vals, known_aggs);
971
972 if (known_vals_ptr)
973 *known_vals_ptr = known_vals;
974 else
975 known_vals.release ();
976
977 if (known_aggs_ptr)
978 *known_aggs_ptr = known_aggs;
979 else
980 known_aggs.release ();
981 }
982
983
984 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
985
986 static void
987 inline_summary_alloc (void)
988 {
989 if (!node_removal_hook_holder)
990 node_removal_hook_holder =
991 symtab->add_cgraph_removal_hook (&inline_node_removal_hook, NULL);
992 if (!edge_removal_hook_holder)
993 edge_removal_hook_holder =
994 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
995 if (!node_duplication_hook_holder)
996 node_duplication_hook_holder =
997 symtab->add_cgraph_duplication_hook (&inline_node_duplication_hook, NULL);
998 if (!edge_duplication_hook_holder)
999 edge_duplication_hook_holder =
1000 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1001
1002 if (vec_safe_length (inline_summary_vec) <= (unsigned) symtab->cgraph_max_uid)
1003 vec_safe_grow_cleared (inline_summary_vec, symtab->cgraph_max_uid + 1);
1004 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1005 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1006 if (!edge_predicate_pool)
1007 edge_predicate_pool = create_alloc_pool ("edge predicates",
1008 sizeof (struct predicate), 10);
1009 }
1010
1011 /* We are called multiple times for a given function; clear
1012 data from the previous run so they are not accumulated. */
1013
1014 static void
1015 reset_inline_edge_summary (struct cgraph_edge *e)
1016 {
1017 if (e->uid < (int) inline_edge_summary_vec.length ())
1018 {
1019 struct inline_edge_summary *es = inline_edge_summary (e);
1020
1021 es->call_stmt_size = es->call_stmt_time = 0;
1022 if (es->predicate)
1023 pool_free (edge_predicate_pool, es->predicate);
1024 es->predicate = NULL;
1025 es->param.release ();
1026 }
1027 }
1028
1029 /* We are called multiple times for a given function; clear
1030 data from the previous run so they are not accumulated. */
1031
1032 static void
1033 reset_inline_summary (struct cgraph_node *node)
1034 {
1035 struct inline_summary *info = inline_summary (node);
1036 struct cgraph_edge *e;
1037
1038 info->self_size = info->self_time = 0;
1039 info->estimated_stack_size = 0;
1040 info->estimated_self_stack_size = 0;
1041 info->stack_frame_offset = 0;
1042 info->size = 0;
1043 info->time = 0;
1044 info->growth = 0;
1045 info->scc_no = 0;
1046 if (info->loop_iterations)
1047 {
1048 pool_free (edge_predicate_pool, info->loop_iterations);
1049 info->loop_iterations = NULL;
1050 }
1051 if (info->loop_stride)
1052 {
1053 pool_free (edge_predicate_pool, info->loop_stride);
1054 info->loop_stride = NULL;
1055 }
1056 if (info->array_index)
1057 {
1058 pool_free (edge_predicate_pool, info->array_index);
1059 info->array_index = NULL;
1060 }
1061 vec_free (info->conds);
1062 vec_free (info->entry);
1063 for (e = node->callees; e; e = e->next_callee)
1064 reset_inline_edge_summary (e);
1065 for (e = node->indirect_calls; e; e = e->next_callee)
1066 reset_inline_edge_summary (e);
1067 }
1068
1069 /* Hook that is called by cgraph.c when a node is removed. */
1070
1071 static void
1072 inline_node_removal_hook (struct cgraph_node *node,
1073 void *data ATTRIBUTE_UNUSED)
1074 {
1075 struct inline_summary *info;
1076 if (vec_safe_length (inline_summary_vec) <= (unsigned) node->uid)
1077 return;
1078 info = inline_summary (node);
1079 reset_inline_summary (node);
1080 memset (info, 0, sizeof (inline_summary_t));
1081 }
1082
1083 /* Remap predicate P of former function to be predicate of duplicated function.
1084 POSSIBLE_TRUTHS is clause of possible truths in the duplicated node,
1085 INFO is inline summary of the duplicated node. */
1086
1087 static struct predicate
1088 remap_predicate_after_duplication (struct predicate *p,
1089 clause_t possible_truths,
1090 struct inline_summary *info)
1091 {
1092 struct predicate new_predicate = true_predicate ();
1093 int j;
1094 for (j = 0; p->clause[j]; j++)
1095 if (!(possible_truths & p->clause[j]))
1096 {
1097 new_predicate = false_predicate ();
1098 break;
1099 }
1100 else
1101 add_clause (info->conds, &new_predicate,
1102 possible_truths & p->clause[j]);
1103 return new_predicate;
1104 }
1105
1106 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1107 Additionally care about allocating new memory slot for updated predicate
1108 and set it to NULL when it becomes true or false (and thus uninteresting).
1109 */
1110
1111 static void
1112 remap_hint_predicate_after_duplication (struct predicate **p,
1113 clause_t possible_truths,
1114 struct inline_summary *info)
1115 {
1116 struct predicate new_predicate;
1117
1118 if (!*p)
1119 return;
1120
1121 new_predicate = remap_predicate_after_duplication (*p,
1122 possible_truths, info);
1123 /* We do not want to free previous predicate; it is used by node origin. */
1124 *p = NULL;
1125 set_hint_predicate (p, new_predicate);
1126 }
1127
1128
1129 /* Hook that is called by cgraph.c when a node is duplicated. */
1130
1131 static void
1132 inline_node_duplication_hook (struct cgraph_node *src,
1133 struct cgraph_node *dst,
1134 ATTRIBUTE_UNUSED void *data)
1135 {
1136 struct inline_summary *info;
1137 inline_summary_alloc ();
1138 info = inline_summary (dst);
1139 memcpy (info, inline_summary (src), sizeof (struct inline_summary));
1140 /* TODO: as an optimization, we may avoid copying conditions
1141 that are known to be false or true. */
1142 info->conds = vec_safe_copy (info->conds);
1143
1144 /* When there are any replacements in the function body, see if we can figure
1145 out that something was optimized out. */
1146 if (ipa_node_params_vector.exists () && dst->clone.tree_map)
1147 {
1148 vec<size_time_entry, va_gc> *entry = info->entry;
1149 /* Use SRC parm info since it may not be copied yet. */
1150 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1151 vec<tree> known_vals = vNULL;
1152 int count = ipa_get_param_count (parms_info);
1153 int i, j;
1154 clause_t possible_truths;
1155 struct predicate true_pred = true_predicate ();
1156 size_time_entry *e;
1157 int optimized_out_size = 0;
1158 bool inlined_to_p = false;
1159 struct cgraph_edge *edge;
1160
1161 info->entry = 0;
1162 known_vals.safe_grow_cleared (count);
1163 for (i = 0; i < count; i++)
1164 {
1165 struct ipa_replace_map *r;
1166
1167 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1168 {
1169 if (((!r->old_tree && r->parm_num == i)
1170 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1171 && r->replace_p && !r->ref_p)
1172 {
1173 known_vals[i] = r->new_tree;
1174 break;
1175 }
1176 }
1177 }
1178 possible_truths = evaluate_conditions_for_known_args (dst, false,
1179 known_vals,
1180 vNULL);
1181 known_vals.release ();
1182
1183 account_size_time (info, 0, 0, &true_pred);
1184
1185 /* Remap size_time vectors.
1186 Simplify the predicate by pruning out alternatives that are known
1187 to be false.
1188 TODO: as an optimization, we can also eliminate conditions known
1189 to be true. */
1190 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1191 {
1192 struct predicate new_predicate;
1193 new_predicate = remap_predicate_after_duplication (&e->predicate,
1194 possible_truths,
1195 info);
1196 if (false_predicate_p (&new_predicate))
1197 optimized_out_size += e->size;
1198 else
1199 account_size_time (info, e->size, e->time, &new_predicate);
1200 }
1201
1202 /* Remap edge predicates with the same simplification as above.
1203 Also copy constantness arrays. */
1204 for (edge = dst->callees; edge; edge = edge->next_callee)
1205 {
1206 struct predicate new_predicate;
1207 struct inline_edge_summary *es = inline_edge_summary (edge);
1208
1209 if (!edge->inline_failed)
1210 inlined_to_p = true;
1211 if (!es->predicate)
1212 continue;
1213 new_predicate = remap_predicate_after_duplication (es->predicate,
1214 possible_truths,
1215 info);
1216 if (false_predicate_p (&new_predicate)
1217 && !false_predicate_p (es->predicate))
1218 {
1219 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1220 edge->frequency = 0;
1221 }
1222 edge_set_predicate (edge, &new_predicate);
1223 }
1224
1225 /* Remap indirect edge predicates with the same simplification as above.
1226 Also copy constantness arrays. */
1227 for (edge = dst->indirect_calls; edge; edge = edge->next_callee)
1228 {
1229 struct predicate new_predicate;
1230 struct inline_edge_summary *es = inline_edge_summary (edge);
1231
1232 gcc_checking_assert (edge->inline_failed);
1233 if (!es->predicate)
1234 continue;
1235 new_predicate = remap_predicate_after_duplication (es->predicate,
1236 possible_truths,
1237 info);
1238 if (false_predicate_p (&new_predicate)
1239 && !false_predicate_p (es->predicate))
1240 {
1241 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1242 edge->frequency = 0;
1243 }
1244 edge_set_predicate (edge, &new_predicate);
1245 }
1246 remap_hint_predicate_after_duplication (&info->loop_iterations,
1247 possible_truths, info);
1248 remap_hint_predicate_after_duplication (&info->loop_stride,
1249 possible_truths, info);
1250 remap_hint_predicate_after_duplication (&info->array_index,
1251 possible_truths, info);
1252
1253 /* If the inliner or someone after the inliner ever starts producing
1254 non-trivial clones, we will get into trouble due to lack of information
1255 about updating self sizes, because the size vectors already contain
1256 the sizes of the callees. */
1257 gcc_assert (!inlined_to_p || !optimized_out_size);
1258 }
1259 else
1260 {
1261 info->entry = vec_safe_copy (info->entry);
1262 if (info->loop_iterations)
1263 {
1264 predicate p = *info->loop_iterations;
1265 info->loop_iterations = NULL;
1266 set_hint_predicate (&info->loop_iterations, p);
1267 }
1268 if (info->loop_stride)
1269 {
1270 predicate p = *info->loop_stride;
1271 info->loop_stride = NULL;
1272 set_hint_predicate (&info->loop_stride, p);
1273 }
1274 if (info->array_index)
1275 {
1276 predicate p = *info->array_index;
1277 info->array_index = NULL;
1278 set_hint_predicate (&info->array_index, p);
1279 }
1280 }
1281 inline_update_overall_summary (dst);
1282 }
1283
1284
1285 /* Hook that is called by cgraph.c when an edge is duplicated. */
1286
1287 static void
1288 inline_edge_duplication_hook (struct cgraph_edge *src,
1289 struct cgraph_edge *dst,
1290 ATTRIBUTE_UNUSED void *data)
1291 {
1292 struct inline_edge_summary *info;
1293 struct inline_edge_summary *srcinfo;
1294 inline_summary_alloc ();
1295 info = inline_edge_summary (dst);
1296 srcinfo = inline_edge_summary (src);
1297 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1298 info->predicate = NULL;
1299 edge_set_predicate (dst, srcinfo->predicate);
1300 info->param = srcinfo->param.copy ();
1301 }
1302
1303
1304 /* Keep edge cache consistent across edge removal. */
1305
1306 static void
1307 inline_edge_removal_hook (struct cgraph_edge *edge,
1308 void *data ATTRIBUTE_UNUSED)
1309 {
1310 if (edge_growth_cache.exists ())
1311 reset_edge_growth_cache (edge);
1312 reset_inline_edge_summary (edge);
1313 }
1314
1315
1316 /* Initialize growth caches. */
1317
1318 void
1319 initialize_growth_caches (void)
1320 {
1321 if (symtab->edges_max_uid)
1322 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1323 if (symtab->cgraph_max_uid)
1324 node_growth_cache.safe_grow_cleared (symtab->cgraph_max_uid);
1325 }
1326
1327
1328 /* Free growth caches. */
1329
1330 void
1331 free_growth_caches (void)
1332 {
1333 edge_growth_cache.release ();
1334 node_growth_cache.release ();
1335 }
1336
1337
1338 /* Dump edge summaries associated to NODE and recursively to all clones.
1339 Indent by INDENT. */
1340
1341 static void
1342 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1343 struct inline_summary *info)
1344 {
1345 struct cgraph_edge *edge;
1346 for (edge = node->callees; edge; edge = edge->next_callee)
1347 {
1348 struct inline_edge_summary *es = inline_edge_summary (edge);
1349 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1350 int i;
1351
1352 fprintf (f,
1353 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1354 " time: %2i callee size:%2i stack:%2i",
1355 indent, "", callee->name (), callee->order,
1356 !edge->inline_failed
1357 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1358 indent, "", es->loop_depth, edge->frequency,
1359 es->call_stmt_size, es->call_stmt_time,
1360 (int) inline_summary (callee)->size / INLINE_SIZE_SCALE,
1361 (int) inline_summary (callee)->estimated_stack_size);
1362
1363 if (es->predicate)
1364 {
1365 fprintf (f, " predicate: ");
1366 dump_predicate (f, info->conds, es->predicate);
1367 }
1368 else
1369 fprintf (f, "\n");
1370 if (es->param.exists ())
1371 for (i = 0; i < (int) es->param.length (); i++)
1372 {
1373 int prob = es->param[i].change_prob;
1374
1375 if (!prob)
1376 fprintf (f, "%*s op%i is compile time invariant\n",
1377 indent + 2, "", i);
1378 else if (prob != REG_BR_PROB_BASE)
1379 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1380 prob * 100.0 / REG_BR_PROB_BASE);
1381 }
1382 if (!edge->inline_failed)
1383 {
1384 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1385 " callee size %i\n",
1386 indent + 2, "",
1387 (int) inline_summary (callee)->stack_frame_offset,
1388 (int) inline_summary (callee)->estimated_self_stack_size,
1389 (int) inline_summary (callee)->estimated_stack_size);
1390 dump_inline_edge_summary (f, indent + 2, callee, info);
1391 }
1392 }
1393 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1394 {
1395 struct inline_edge_summary *es = inline_edge_summary (edge);
1396 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1397 " time: %2i",
1398 indent, "",
1399 es->loop_depth,
1400 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1401 if (es->predicate)
1402 {
1403 fprintf (f, "predicate: ");
1404 dump_predicate (f, info->conds, es->predicate);
1405 }
1406 else
1407 fprintf (f, "\n");
1408 }
1409 }
1410
1411
1412 void
1413 dump_inline_summary (FILE *f, struct cgraph_node *node)
1414 {
1415 if (node->definition)
1416 {
1417 struct inline_summary *s = inline_summary (node);
1418 size_time_entry *e;
1419 int i;
1420 fprintf (f, "Inline summary for %s/%i", node->name (),
1421 node->order);
1422 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1423 fprintf (f, " always_inline");
1424 if (s->inlinable)
1425 fprintf (f, " inlinable");
1426 fprintf (f, "\n self time: %i\n", s->self_time);
1427 fprintf (f, " global time: %i\n", s->time);
1428 fprintf (f, " self size: %i\n", s->self_size);
1429 fprintf (f, " global size: %i\n", s->size);
1430 fprintf (f, " min size: %i\n", s->min_size);
1431 fprintf (f, " self stack: %i\n",
1432 (int) s->estimated_self_stack_size);
1433 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1434 if (s->growth)
1435 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1436 if (s->scc_no)
1437 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1438 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1439 {
1440 fprintf (f, " size:%f, time:%f, predicate:",
1441 (double) e->size / INLINE_SIZE_SCALE,
1442 (double) e->time / INLINE_TIME_SCALE);
1443 dump_predicate (f, s->conds, &e->predicate);
1444 }
1445 if (s->loop_iterations)
1446 {
1447 fprintf (f, " loop iterations:");
1448 dump_predicate (f, s->conds, s->loop_iterations);
1449 }
1450 if (s->loop_stride)
1451 {
1452 fprintf (f, " loop stride:");
1453 dump_predicate (f, s->conds, s->loop_stride);
1454 }
1455 if (s->array_index)
1456 {
1457 fprintf (f, " array index:");
1458 dump_predicate (f, s->conds, s->array_index);
1459 }
1460 fprintf (f, " calls:\n");
1461 dump_inline_edge_summary (f, 4, node, s);
1462 fprintf (f, "\n");
1463 }
1464 }
1465
1466 DEBUG_FUNCTION void
1467 debug_inline_summary (struct cgraph_node *node)
1468 {
1469 dump_inline_summary (stderr, node);
1470 }
1471
1472 void
1473 dump_inline_summaries (FILE *f)
1474 {
1475 struct cgraph_node *node;
1476
1477 FOR_EACH_DEFINED_FUNCTION (node)
1478 if (!node->global.inlined_to)
1479 dump_inline_summary (f, node);
1480 }
1481
1482 /* Give initial reasons why inlining would fail on EDGE. This gets either
1483 nullified or usually overwritten by more precise reasons later. */
1484
1485 void
1486 initialize_inline_failed (struct cgraph_edge *e)
1487 {
1488 struct cgraph_node *callee = e->callee;
1489
1490 if (e->indirect_unknown_callee)
1491 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1492 else if (!callee->definition)
1493 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1494 else if (callee->local.redefined_extern_inline)
1495 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1496 else if (e->call_stmt_cannot_inline_p)
1497 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1498 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1499 /* We can't inline if the function is spawning a function. */
1500 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1501 else
1502 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1503 }
1504
1505 /* Callback of walk_aliased_vdefs. Records that it has been invoked in the
1506 boolean variable pointed to by DATA. */
1507
1508 static bool
1509 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1510 void *data)
1511 {
1512 bool *b = (bool *) data;
1513 *b = true;
1514 return true;
1515 }
1516
1517 /* If OP refers to value of function parameter, return the corresponding
1518 parameter. */
1519
1520 static tree
1521 unmodified_parm_1 (gimple stmt, tree op)
1522 {
1523 /* SSA_NAME referring to parm default def? */
1524 if (TREE_CODE (op) == SSA_NAME
1525 && SSA_NAME_IS_DEFAULT_DEF (op)
1526 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1527 return SSA_NAME_VAR (op);
1528 /* Non-SSA parm reference? */
1529 if (TREE_CODE (op) == PARM_DECL)
1530 {
1531 bool modified = false;
1532
1533 ao_ref refd;
1534 ao_ref_init (&refd, op);
1535 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1536 NULL);
1537 if (!modified)
1538 return op;
1539 }
1540 return NULL_TREE;
1541 }
1542
1543 /* If OP refers to value of function parameter, return the corresponding
1544 parameter. Also traverse chains of SSA register assignments. */
1545
1546 static tree
1547 unmodified_parm (gimple stmt, tree op)
1548 {
1549 tree res = unmodified_parm_1 (stmt, op);
1550 if (res)
1551 return res;
1552
1553 if (TREE_CODE (op) == SSA_NAME
1554 && !SSA_NAME_IS_DEFAULT_DEF (op)
1555 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1556 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1557 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1558 return NULL_TREE;
1559 }
1560
1561 /* If OP refers to a value of a function parameter or value loaded from an
1562 aggregate passed to a parameter (either by value or reference), return TRUE
1563 and store the number of the parameter to *INDEX_P and information whether
1564 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1565 the function parameters, STMT is the statement in which OP is used or
1566 loaded. */
1567
1568 static bool
1569 unmodified_parm_or_parm_agg_item (struct ipa_node_params *info,
1570 gimple stmt, tree op, int *index_p,
1571 struct agg_position_info *aggpos)
1572 {
1573 tree res = unmodified_parm_1 (stmt, op);
1574
1575 gcc_checking_assert (aggpos);
1576 if (res)
1577 {
1578 *index_p = ipa_get_param_decl_index (info, res);
1579 if (*index_p < 0)
1580 return false;
1581 aggpos->agg_contents = false;
1582 aggpos->by_ref = false;
1583 return true;
1584 }
1585
1586 if (TREE_CODE (op) == SSA_NAME)
1587 {
1588 if (SSA_NAME_IS_DEFAULT_DEF (op)
1589 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1590 return false;
1591 stmt = SSA_NAME_DEF_STMT (op);
1592 op = gimple_assign_rhs1 (stmt);
1593 if (!REFERENCE_CLASS_P (op))
1594 return unmodified_parm_or_parm_agg_item (info, stmt, op, index_p,
1595 aggpos);
1596 }
1597
1598 aggpos->agg_contents = true;
1599 return ipa_load_from_parm_agg (info, stmt, op, index_p, &aggpos->offset,
1600 &aggpos->by_ref);
1601 }
1602
1603 /* See if statement might disappear after inlining.
1604 0 - means not eliminated
1605 1 - half of the statements go away
1606 2 - for sure it is eliminated.
1607 We are not terribly sophisticated, basically looking for simple abstraction
1608 penalty wrappers. */
1609
1610 static int
1611 eliminated_by_inlining_prob (gimple stmt)
1612 {
1613 enum gimple_code code = gimple_code (stmt);
1614 enum tree_code rhs_code;
1615
1616 if (!optimize)
1617 return 0;
1618
1619 switch (code)
1620 {
1621 case GIMPLE_RETURN:
1622 return 2;
1623 case GIMPLE_ASSIGN:
1624 if (gimple_num_ops (stmt) != 2)
1625 return 0;
1626
1627 rhs_code = gimple_assign_rhs_code (stmt);
1628
1629 /* Casts of parameters, loads from parameters passed by reference
1630 and stores to return value or parameters are often free after
1631 inlining due to SRA and further combining.
1632 Assume that half of the statements go away. */
1633 if (CONVERT_EXPR_CODE_P (rhs_code)
1634 || rhs_code == VIEW_CONVERT_EXPR
1635 || rhs_code == ADDR_EXPR
1636 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1637 {
1638 tree rhs = gimple_assign_rhs1 (stmt);
1639 tree lhs = gimple_assign_lhs (stmt);
1640 tree inner_rhs = get_base_address (rhs);
1641 tree inner_lhs = get_base_address (lhs);
1642 bool rhs_free = false;
1643 bool lhs_free = false;
1644
1645 if (!inner_rhs)
1646 inner_rhs = rhs;
1647 if (!inner_lhs)
1648 inner_lhs = lhs;
1649
1650 /* Reads of parameter are expected to be free. */
1651 if (unmodified_parm (stmt, inner_rhs))
1652 rhs_free = true;
1653 /* Match expressions of form &this->field. Those will most likely
1654 combine with something upstream after inlining. */
1655 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1656 {
1657 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1658 if (TREE_CODE (op) == PARM_DECL)
1659 rhs_free = true;
1660 else if (TREE_CODE (op) == MEM_REF
1661 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1662 rhs_free = true;
1663 }
1664
1665 /* When parameter is not SSA register because its address is taken
1666 and it is just copied into one, the statement will be completely
1667 free after inlining (we will copy propagate backward). */
1668 if (rhs_free && is_gimple_reg (lhs))
1669 return 2;
1670
1671 /* Reads of parameters passed by reference
1672 are expected to be free (i.e. optimized out after inlining). */
1673 if (TREE_CODE (inner_rhs) == MEM_REF
1674 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1675 rhs_free = true;
1676
1677 /* Copying parameter passed by reference into gimple register is
1678 probably also going to copy propagate, but we can't be quite
1679 sure. */
1680 if (rhs_free && is_gimple_reg (lhs))
1681 lhs_free = true;
1682
1683 /* Writes to parameters, parameters passed by value and return value
1684 (either directly or passed via invisible reference) are free.
1685
1686 TODO: We ought to handle testcase like
1687 struct a {int a,b;};
1688 struct a
1689 retrurnsturct (void)
1690 {
1691 struct a a ={1,2};
1692 return a;
1693 }
1694
1695 This translates into:
1696
1697 retrurnsturct ()
1698 {
1699 int a$b;
1700 int a$a;
1701 struct a a;
1702 struct a D.2739;
1703
1704 <bb 2>:
1705 D.2739.a = 1;
1706 D.2739.b = 2;
1707 return D.2739;
1708
1709 }
1710 For that we would need to copy the ipa-split logic detecting writes
1711 to the return value. */
1712 if (TREE_CODE (inner_lhs) == PARM_DECL
1713 || TREE_CODE (inner_lhs) == RESULT_DECL
1714 || (TREE_CODE (inner_lhs) == MEM_REF
1715 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1716 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1717 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1718 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1719 (inner_lhs,
1720 0))) == RESULT_DECL))))
1721 lhs_free = true;
1722 if (lhs_free
1723 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1724 rhs_free = true;
1725 if (lhs_free && rhs_free)
1726 return 1;
1727 }
1728 return 0;
1729 default:
1730 return 0;
1731 }
1732 }
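/* For example, a copy of an unmodified parameter into an SSA register,
   "tmp_1 = param_2(D);", is considered certainly eliminated (2), while a
   store of a register into the return value, "<retval> = tmp_1;", is only
   assumed to disappear half of the time (1).  The statement names are
   illustrative GIMPLE.  */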
1733
1734
1735 /* If BB ends with a conditional that we can turn into predicates, attach the
1736 corresponding predicates to the CFG edges. */
1737
1738 static void
1739 set_cond_stmt_execution_predicate (struct ipa_node_params *info,
1740 struct inline_summary *summary,
1741 basic_block bb)
1742 {
1743 gimple last;
1744 tree op;
1745 int index;
1746 struct agg_position_info aggpos;
1747 enum tree_code code, inverted_code;
1748 edge e;
1749 edge_iterator ei;
1750 gimple set_stmt;
1751 tree op2;
1752
1753 last = last_stmt (bb);
1754 if (!last || gimple_code (last) != GIMPLE_COND)
1755 return;
1756 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1757 return;
1758 op = gimple_cond_lhs (last);
1759 /* TODO: handle conditionals like
1760 var = op0 < 4;
1761 if (var != 0). */
1762 if (unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1763 {
1764 code = gimple_cond_code (last);
1765 inverted_code
1766 = invert_tree_comparison (code,
1767 HONOR_NANS (TYPE_MODE (TREE_TYPE (op))));
1768
1769 FOR_EACH_EDGE (e, ei, bb->succs)
1770 {
1771 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1772 ? code : inverted_code);
1773 /* invert_tree_comparison will return ERROR_MARK on FP
1774 comparisons that are not EQ/NE instead of returning the proper
1775 unordered one. Be sure it is not confused with NON_CONSTANT. */
1776 if (this_code != ERROR_MARK)
1777 {
1778 struct predicate p = add_condition (summary, index, &aggpos,
1779 this_code,
1780 gimple_cond_rhs (last));
1781 e->aux = pool_alloc (edge_predicate_pool);
1782 *(struct predicate *) e->aux = p;
1783 }
1784 }
1785 }
1786
1787 if (TREE_CODE (op) != SSA_NAME)
1788 return;
1789 /* Special case
1790 if (builtin_constant_p (op))
1791 constant_code
1792 else
1793 nonconstant_code.
1794 Here we can predicate nonconstant_code. We can't
1795 really handle constant_code since we have no predicate
1796 for this and also the constant code is not known to be
1797 optimized away when the inliner doesn't see the operand is constant.
1798 Other optimizers might think otherwise. */
1799 if (gimple_cond_code (last) != NE_EXPR
1800 || !integer_zerop (gimple_cond_rhs (last)))
1801 return;
1802 set_stmt = SSA_NAME_DEF_STMT (op);
1803 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1804 || gimple_call_num_args (set_stmt) != 1)
1805 return;
1806 op2 = gimple_call_arg (set_stmt, 0);
1807 if (!unmodified_parm_or_parm_agg_item
1808 (info, set_stmt, op2, &index, &aggpos))
1809 return;
1810 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1811 {
1812 struct predicate p = add_condition (summary, index, &aggpos,
1813 IS_NOT_CONSTANT, NULL_TREE);
1814 e->aux = pool_alloc (edge_predicate_pool);
1815 *(struct predicate *) e->aux = p;
1816 }
1817 }
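/* For instance, for "if (param > 4)" where param is an unmodified parameter
   (say op0), the true outgoing edge receives the predicate "op0 > 4" and the
   false edge "op0 <= 4" via invert_tree_comparison; for the
   __builtin_constant_p idiom above, only the false edge is annotated, with
   "op0 not constant".  */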
1818
1819
1820 /* If BB ends with a switch that we can turn into predicates, attach the
1821 corresponding predicates to the CFG edges. */
1822
1823 static void
1824 set_switch_stmt_execution_predicate (struct ipa_node_params *info,
1825 struct inline_summary *summary,
1826 basic_block bb)
1827 {
1828 gimple lastg;
1829 tree op;
1830 int index;
1831 struct agg_position_info aggpos;
1832 edge e;
1833 edge_iterator ei;
1834 size_t n;
1835 size_t case_idx;
1836
1837 lastg = last_stmt (bb);
1838 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1839 return;
1840 gswitch *last = as_a <gswitch *> (lastg);
1841 op = gimple_switch_index (last);
1842 if (!unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1843 return;
1844
1845 FOR_EACH_EDGE (e, ei, bb->succs)
1846 {
1847 e->aux = pool_alloc (edge_predicate_pool);
1848 *(struct predicate *) e->aux = false_predicate ();
1849 }
1850 n = gimple_switch_num_labels (last);
1851 for (case_idx = 0; case_idx < n; ++case_idx)
1852 {
1853 tree cl = gimple_switch_label (last, case_idx);
1854 tree min, max;
1855 struct predicate p;
1856
1857 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1858 min = CASE_LOW (cl);
1859 max = CASE_HIGH (cl);
1860
1861 /* For the default case we might want to construct a predicate that none
1862 of the cases is met, but it is a bit hard to do without having
1863 negations of conditionals handy. */
1864 if (!min && !max)
1865 p = true_predicate ();
1866 else if (!max)
1867 p = add_condition (summary, index, &aggpos, EQ_EXPR, min);
1868 else
1869 {
1870 struct predicate p1, p2;
1871 p1 = add_condition (summary, index, &aggpos, GE_EXPR, min);
1872 p2 = add_condition (summary, index, &aggpos, LE_EXPR, max);
1873 p = and_predicates (summary->conds, &p1, &p2);
1874 }
1875 *(struct predicate *) e->aux
1876 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1877 }
1878 }
1879
1880
1881 /* For each BB in NODE attach to its AUX pointer the predicate under
1882    which it is executable.  */
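
/* Informally, the propagation below computes for each basic block

     predicate (BB) = OR over incoming edges E of
		      (predicate (source of E) AND predicate (E))

   starting from a true predicate on the entry block; this is only a
   sketch of the fixpoint loop that follows.  */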
1883
1884 static void
1885 compute_bb_predicates (struct cgraph_node *node,
1886 struct ipa_node_params *parms_info,
1887 struct inline_summary *summary)
1888 {
1889 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1890 bool done = false;
1891 basic_block bb;
1892
1893 FOR_EACH_BB_FN (bb, my_function)
1894 {
1895 set_cond_stmt_execution_predicate (parms_info, summary, bb);
1896 set_switch_stmt_execution_predicate (parms_info, summary, bb);
1897 }
1898
1899 /* Entry block is always executable. */
1900 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1901 = pool_alloc (edge_predicate_pool);
1902 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1903 = true_predicate ();
1904
1905 /* A simple dataflow propagation of predicates forward in the CFG.
1906 TODO: work in reverse postorder. */
1907 while (!done)
1908 {
1909 done = true;
1910 FOR_EACH_BB_FN (bb, my_function)
1911 {
1912 struct predicate p = false_predicate ();
1913 edge e;
1914 edge_iterator ei;
1915 FOR_EACH_EDGE (e, ei, bb->preds)
1916 {
1917 if (e->src->aux)
1918 {
1919 struct predicate this_bb_predicate
1920 = *(struct predicate *) e->src->aux;
1921 if (e->aux)
1922 this_bb_predicate
1923 = and_predicates (summary->conds, &this_bb_predicate,
1924 (struct predicate *) e->aux);
1925 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1926 if (true_predicate_p (&p))
1927 break;
1928 }
1929 }
1930 if (false_predicate_p (&p))
1931 gcc_assert (!bb->aux);
1932 else
1933 {
1934 if (!bb->aux)
1935 {
1936 done = false;
1937 bb->aux = pool_alloc (edge_predicate_pool);
1938 *((struct predicate *) bb->aux) = p;
1939 }
1940 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1941 {
1942		  /* This OR operation is needed to ensure monotone data flow
1943		     in case we hit the limit on the number of clauses and the
1944		     and/or operations above give approximate answers.  */
1945 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1946 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1947 {
1948 done = false;
1949 *((struct predicate *) bb->aux) = p;
1950 }
1951 }
1952 }
1953 }
1954 }
1955 }
1956
1957
1958 /* We keep info about constantness of SSA names. */
1959
1960 typedef struct predicate predicate_t;
1961 /* Return predicate specifying when the result of expression EXPR might
1962    not be a compile time constant.  */
1963
1964 static struct predicate
1965 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1966 struct inline_summary *summary,
1967 tree expr,
1968 vec<predicate_t> nonconstant_names)
1969 {
1970 tree parm;
1971 int index;
1972
1973 while (UNARY_CLASS_P (expr))
1974 expr = TREE_OPERAND (expr, 0);
1975
1976 parm = unmodified_parm (NULL, expr);
1977 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1978 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1979 if (is_gimple_min_invariant (expr))
1980 return false_predicate ();
1981 if (TREE_CODE (expr) == SSA_NAME)
1982 return nonconstant_names[SSA_NAME_VERSION (expr)];
1983 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
1984 {
1985 struct predicate p1 = will_be_nonconstant_expr_predicate
1986 (info, summary, TREE_OPERAND (expr, 0),
1987 nonconstant_names);
1988 struct predicate p2;
1989 if (true_predicate_p (&p1))
1990 return p1;
1991 p2 = will_be_nonconstant_expr_predicate (info, summary,
1992 TREE_OPERAND (expr, 1),
1993 nonconstant_names);
1994 return or_predicates (summary->conds, &p1, &p2);
1995 }
1996 else if (TREE_CODE (expr) == COND_EXPR)
1997 {
1998 struct predicate p1 = will_be_nonconstant_expr_predicate
1999 (info, summary, TREE_OPERAND (expr, 0),
2000 nonconstant_names);
2001 struct predicate p2;
2002 if (true_predicate_p (&p1))
2003 return p1;
2004 p2 = will_be_nonconstant_expr_predicate (info, summary,
2005 TREE_OPERAND (expr, 1),
2006 nonconstant_names);
2007 if (true_predicate_p (&p2))
2008 return p2;
2009 p1 = or_predicates (summary->conds, &p1, &p2);
2010 p2 = will_be_nonconstant_expr_predicate (info, summary,
2011 TREE_OPERAND (expr, 2),
2012 nonconstant_names);
2013 return or_predicates (summary->conds, &p1, &p2);
2014 }
2015 else
2016 {
2017 debug_tree (expr);
2018 gcc_unreachable ();
2019 }
2020 return false_predicate ();
2021 }
2022
2023
2024 /* Return a predicate specifying when the result of STMT might not be
2025    a compile time constant.  */
2026
2027 static struct predicate
2028 will_be_nonconstant_predicate (struct ipa_node_params *info,
2029 struct inline_summary *summary,
2030 gimple stmt,
2031 vec<predicate_t> nonconstant_names)
2032 {
2033 struct predicate p = true_predicate ();
2034 ssa_op_iter iter;
2035 tree use;
2036 struct predicate op_non_const;
2037 bool is_load;
2038 int base_index;
2039 struct agg_position_info aggpos;
2040
2041   /* What statements might be optimized away
2042      when their arguments are constant.
2043 TODO: also trivial builtins.
2044 builtin_constant_p is already handled later. */
2045 if (gimple_code (stmt) != GIMPLE_ASSIGN
2046 && gimple_code (stmt) != GIMPLE_COND
2047 && gimple_code (stmt) != GIMPLE_SWITCH)
2048 return p;
2049
2050 /* Stores will stay anyway. */
2051 if (gimple_store_p (stmt))
2052 return p;
2053
2054 is_load = gimple_assign_load_p (stmt);
2055
2056 /* Loads can be optimized when the value is known. */
2057 if (is_load)
2058 {
2059 tree op;
2060 gcc_assert (gimple_assign_single_p (stmt));
2061 op = gimple_assign_rhs1 (stmt);
2062 if (!unmodified_parm_or_parm_agg_item (info, stmt, op, &base_index,
2063 &aggpos))
2064 return p;
2065 }
2066 else
2067 base_index = -1;
2068
2069 /* See if we understand all operands before we start
2070 adding conditionals. */
2071 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2072 {
2073 tree parm = unmodified_parm (stmt, use);
2074 /* For arguments we can build a condition. */
2075 if (parm && ipa_get_param_decl_index (info, parm) >= 0)
2076 continue;
2077 if (TREE_CODE (use) != SSA_NAME)
2078 return p;
2079       /* If we know when the operand is constant,
2080	  we can still say something useful.  */
2081 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2082 continue;
2083 return p;
2084 }
2085
2086 if (is_load)
2087 op_non_const =
2088 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2089 else
2090 op_non_const = false_predicate ();
2091 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2092 {
2093 tree parm = unmodified_parm (stmt, use);
2094 int index;
2095
2096 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2097 {
2098 if (index != base_index)
2099 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2100 else
2101 continue;
2102 }
2103 else
2104 p = nonconstant_names[SSA_NAME_VERSION (use)];
2105 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2106 }
2107 if (gimple_code (stmt) == GIMPLE_ASSIGN
2108 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
2109 nonconstant_names[SSA_NAME_VERSION (gimple_assign_lhs (stmt))]
2110 = op_non_const;
2111 return op_non_const;
2112 }
2113
2114 struct record_modified_bb_info
2115 {
2116 bitmap bb_set;
2117 gimple stmt;
2118 };
2119
2120 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2121 set except for info->stmt. */
2122
2123 static bool
2124 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2125 {
2126 struct record_modified_bb_info *info =
2127 (struct record_modified_bb_info *) data;
2128 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2129 return false;
2130 bitmap_set_bit (info->bb_set,
2131 SSA_NAME_IS_DEFAULT_DEF (vdef)
2132 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2133 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2134 return false;
2135 }
2136
2137 /* Return the probability (based on REG_BR_PROB_BASE) that the I-th parameter
2138    of STMT will have changed since the last invocation of STMT.
2139
2140 Value 0 is reserved for compile time invariants.
2141 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2142 ought to be REG_BR_PROB_BASE / estimated_iters. */
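
/* As a rough, hypothetical example of the frequency based estimate used
   below:

     val = init ();		    <- defining BB, frequency 100
     for (i = 0; i < n; i++)
       foo (val);		    <- call BB, frequency 10000

   VAL is loop invariant here, so the returned probability is approximately
   GCOV_COMPUTE_SCALE (100, 10000), i.e. about REG_BR_PROB_BASE / 100.  */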
2143
2144 static int
2145 param_change_prob (gimple stmt, int i)
2146 {
2147 tree op = gimple_call_arg (stmt, i);
2148 basic_block bb = gimple_bb (stmt);
2149 tree base;
2150
2151   /* Global invariants never change.  */
2152 if (is_gimple_min_invariant (op))
2153 return 0;
2154   /* We would have to do non-trivial analysis to really work out the
2155      probability of the value changing (i.e. when the init statement
2156      is in a sibling loop of the call).
2157
2158      We make a conservative estimate: when the call is executed N times more
2159      often than the statement defining the value, we take the frequency to be 1/N.  */
2160 if (TREE_CODE (op) == SSA_NAME)
2161 {
2162 int init_freq;
2163
2164 if (!bb->frequency)
2165 return REG_BR_PROB_BASE;
2166
2167 if (SSA_NAME_IS_DEFAULT_DEF (op))
2168 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2169 else
2170 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2171
2172 if (!init_freq)
2173 init_freq = 1;
2174 if (init_freq < bb->frequency)
2175 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2176 else
2177 return REG_BR_PROB_BASE;
2178 }
2179
2180 base = get_base_address (op);
2181 if (base)
2182 {
2183 ao_ref refd;
2184 int max;
2185 struct record_modified_bb_info info;
2186 bitmap_iterator bi;
2187 unsigned index;
2188 tree init = ctor_for_folding (base);
2189
2190 if (init != error_mark_node)
2191 return 0;
2192 if (!bb->frequency)
2193 return REG_BR_PROB_BASE;
2194 ao_ref_init (&refd, op);
2195 info.stmt = stmt;
2196 info.bb_set = BITMAP_ALLOC (NULL);
2197 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2198 NULL);
2199 if (bitmap_bit_p (info.bb_set, bb->index))
2200 {
2201 BITMAP_FREE (info.bb_set);
2202 return REG_BR_PROB_BASE;
2203 }
2204
2205       /* Assume that all memory is initialized at entry.
2206	  TODO: Can we easily determine if the value is always defined
2207	  and thus skip the entry block?  */
2208 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2209 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2210 else
2211 max = 1;
2212
2213 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2214 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2215
2216 BITMAP_FREE (info.bb_set);
2217 if (max < bb->frequency)
2218 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2219 else
2220 return REG_BR_PROB_BASE;
2221 }
2222 return REG_BR_PROB_BASE;
2223 }
2224
2225 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2226    sub-graph and whether the predicate the condition depends on is known.  If so,
2227    return true and store the predicate in *P.  */
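
/* The recognized shape is, roughly, the (half) diamond

	  first_bb
	  /      \
      then_bb     |
	  \      /
	     BB   <- the PHI nodes live here

   where first_bb ends with a GIMPLE_COND whose right-hand side is an
   interprocedural invariant; the predicate stored in *P is the one under
   which that condition is not known to be constant.  */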
2228
2229 static bool
2230 phi_result_unknown_predicate (struct ipa_node_params *info,
2231 struct inline_summary *summary, basic_block bb,
2232 struct predicate *p,
2233 vec<predicate_t> nonconstant_names)
2234 {
2235 edge e;
2236 edge_iterator ei;
2237 basic_block first_bb = NULL;
2238 gimple stmt;
2239
2240 if (single_pred_p (bb))
2241 {
2242 *p = false_predicate ();
2243 return true;
2244 }
2245
2246 FOR_EACH_EDGE (e, ei, bb->preds)
2247 {
2248 if (single_succ_p (e->src))
2249 {
2250 if (!single_pred_p (e->src))
2251 return false;
2252 if (!first_bb)
2253 first_bb = single_pred (e->src);
2254 else if (single_pred (e->src) != first_bb)
2255 return false;
2256 }
2257 else
2258 {
2259 if (!first_bb)
2260 first_bb = e->src;
2261 else if (e->src != first_bb)
2262 return false;
2263 }
2264 }
2265
2266 if (!first_bb)
2267 return false;
2268
2269 stmt = last_stmt (first_bb);
2270 if (!stmt
2271 || gimple_code (stmt) != GIMPLE_COND
2272 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2273 return false;
2274
2275 *p = will_be_nonconstant_expr_predicate (info, summary,
2276 gimple_cond_lhs (stmt),
2277 nonconstant_names);
2278 if (true_predicate_p (p))
2279 return false;
2280 else
2281 return true;
2282 }
2283
2284 /* Given a PHI statement in a function described by inline properties SUMMARY
2285 and *P being the predicate describing whether the selected PHI argument is
2286 known, store a predicate for the result of the PHI statement into
2287 NONCONSTANT_NAMES, if possible. */
2288
2289 static void
2290 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2291 struct predicate *p,
2292 vec<predicate_t> nonconstant_names)
2293 {
2294 unsigned i;
2295
2296 for (i = 0; i < gimple_phi_num_args (phi); i++)
2297 {
2298 tree arg = gimple_phi_arg (phi, i)->def;
2299 if (!is_gimple_min_invariant (arg))
2300 {
2301 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2302 *p = or_predicates (summary->conds, p,
2303 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2304 if (true_predicate_p (p))
2305 return;
2306 }
2307 }
2308
2309 if (dump_file && (dump_flags & TDF_DETAILS))
2310 {
2311 fprintf (dump_file, "\t\tphi predicate: ");
2312 dump_predicate (dump_file, summary->conds, p);
2313 }
2314 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2315 }
2316
2317 /* Return a predicate specifying when an array index in access OP becomes non-constant.  */
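
/* For instance, for an access such as a[i][j] the result is roughly the
   OR of the nonconstant predicates recorded for the SSA names i and j;
   constant indices contribute nothing.  */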
2318
2319 static struct predicate
2320 array_index_predicate (struct inline_summary *info,
2321 vec< predicate_t> nonconstant_names, tree op)
2322 {
2323 struct predicate p = false_predicate ();
2324 while (handled_component_p (op))
2325 {
2326 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2327 {
2328 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2329 p = or_predicates (info->conds, &p,
2330 &nonconstant_names[SSA_NAME_VERSION
2331 (TREE_OPERAND (op, 1))]);
2332 }
2333 op = TREE_OPERAND (op, 0);
2334 }
2335 return p;
2336 }
2337
2338 /* For a typical usage of __builtin_expect (a <= b, 1), we
2339 may introduce an extra relation stmt:
2340 With the builtin, we have
2341 t1 = a <= b;
2342 t2 = (long int) t1;
2343 t3 = __builtin_expect (t2, 1);
2344 if (t3 != 0)
2345 goto ...
2346 Without the builtin, we have
2347 if (a<=b)
2348 goto...
2349    This affects the size/time estimation and may have
2350    an impact on early inlining.
2351    Find this pattern here and fix it up later.  */
2352
2353 static gimple
2354 find_foldable_builtin_expect (basic_block bb)
2355 {
2356 gimple_stmt_iterator bsi;
2357
2358 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2359 {
2360 gimple stmt = gsi_stmt (bsi);
2361 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2362 || (is_gimple_call (stmt)
2363 && gimple_call_internal_p (stmt)
2364 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2365 {
2366 tree var = gimple_call_lhs (stmt);
2367 tree arg = gimple_call_arg (stmt, 0);
2368 use_operand_p use_p;
2369 gimple use_stmt;
2370 bool match = false;
2371 bool done = false;
2372
2373 if (!var || !arg)
2374 continue;
2375 gcc_assert (TREE_CODE (var) == SSA_NAME);
2376
2377 while (TREE_CODE (arg) == SSA_NAME)
2378 {
2379 gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
2380 if (!is_gimple_assign (stmt_tmp))
2381 break;
2382 switch (gimple_assign_rhs_code (stmt_tmp))
2383 {
2384 case LT_EXPR:
2385 case LE_EXPR:
2386 case GT_EXPR:
2387 case GE_EXPR:
2388 case EQ_EXPR:
2389 case NE_EXPR:
2390 match = true;
2391 done = true;
2392 break;
2393 CASE_CONVERT:
2394 break;
2395 default:
2396 done = true;
2397 break;
2398 }
2399 if (done)
2400 break;
2401 arg = gimple_assign_rhs1 (stmt_tmp);
2402 }
2403
2404 if (match && single_imm_use (var, &use_p, &use_stmt)
2405 && gimple_code (use_stmt) == GIMPLE_COND)
2406 return use_stmt;
2407 }
2408 }
2409 return NULL;
2410 }
2411
2412 /* Return true when the basic block contains only clobbers followed by RESX.
2413    Such BBs are kept around to make removal of dead stores possible in the
2414    presence of EH and will be optimized out by optimize_clobbers later in the
2415    game.
2416
2417    NEED_EH is used to recurse in case the clobber has non-EH predecessors
2418    that can be clobber-only, too.  When it is false, the RESX is not necessary
2419    at the end of the basic block.  */
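
/* A typical block matched here looks roughly like

     <bb 42> (an EH landing pad):
       x ={v} {CLOBBER};
       y ={v} {CLOBBER};
       resx 1;

   possibly together with clobber-only predecessors reachable over
   non-EH edges.  */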
2420
2421 static bool
2422 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2423 {
2424 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2425 edge_iterator ei;
2426 edge e;
2427
2428 if (need_eh)
2429 {
2430 if (gsi_end_p (gsi))
2431 return false;
2432 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2433 return false;
2434 gsi_prev (&gsi);
2435 }
2436 else if (!single_succ_p (bb))
2437 return false;
2438
2439 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2440 {
2441 gimple stmt = gsi_stmt (gsi);
2442 if (is_gimple_debug (stmt))
2443 continue;
2444 if (gimple_clobber_p (stmt))
2445 continue;
2446 if (gimple_code (stmt) == GIMPLE_LABEL)
2447 break;
2448 return false;
2449 }
2450
2451   /* See if all predecessors are either throws or clobber-only BBs.  */
2452 FOR_EACH_EDGE (e, ei, bb->preds)
2453 if (!(e->flags & EDGE_EH)
2454 && !clobber_only_eh_bb_p (e->src, false))
2455 return false;
2456
2457 return true;
2458 }
2459
2460 /* Compute function body size parameters for NODE.
2461 When EARLY is true, we compute only simple summaries without
2462 non-trivial predicates to drive the early inliner. */
2463
2464 static void
2465 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2466 {
2467 gcov_type time = 0;
2468 /* Estimate static overhead for function prologue/epilogue and alignment. */
2469 int size = 2;
2470   /* Benefits are scaled by the probability of elimination, which is in the range
2471      <0,2>.  */
2472 basic_block bb;
2473 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2474 int freq;
2475 struct inline_summary *info = inline_summary (node);
2476 struct predicate bb_predicate;
2477 struct ipa_node_params *parms_info = NULL;
2478 vec<predicate_t> nonconstant_names = vNULL;
2479 int nblocks, n;
2480 int *order;
2481 predicate array_index = true_predicate ();
2482 gimple fix_builtin_expect_stmt;
2483
2484 info->conds = NULL;
2485 info->entry = NULL;
2486
2487 if (opt_for_fn (node->decl, optimize) && !early)
2488 {
2489 calculate_dominance_info (CDI_DOMINATORS);
2490 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2491
2492 if (ipa_node_params_vector.exists ())
2493 {
2494 parms_info = IPA_NODE_REF (node);
2495 nonconstant_names.safe_grow_cleared
2496 (SSANAMES (my_function)->length ());
2497 }
2498 }
2499
2500 if (dump_file)
2501 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2502 node->name ());
2503
2504   /* When we run into the maximal number of entries, we assign everything to the
2505      constant truth case.  Be sure to have it in the list.  */
2506 bb_predicate = true_predicate ();
2507 account_size_time (info, 0, 0, &bb_predicate);
2508
2509 bb_predicate = not_inlined_predicate ();
2510 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2511
2512 gcc_assert (my_function && my_function->cfg);
2513 if (parms_info)
2514 compute_bb_predicates (node, parms_info, info);
2515 gcc_assert (cfun == my_function);
2516 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2517 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2518 for (n = 0; n < nblocks; n++)
2519 {
2520 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2521 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2522 if (clobber_only_eh_bb_p (bb))
2523 {
2524 if (dump_file && (dump_flags & TDF_DETAILS))
2525 fprintf (dump_file, "\n Ignoring BB %i;"
2526 " it will be optimized away by cleanup_clobbers\n",
2527 bb->index);
2528 continue;
2529 }
2530
2531 /* TODO: Obviously predicates can be propagated down across CFG. */
2532 if (parms_info)
2533 {
2534 if (bb->aux)
2535 bb_predicate = *(struct predicate *) bb->aux;
2536 else
2537 bb_predicate = false_predicate ();
2538 }
2539 else
2540 bb_predicate = true_predicate ();
2541
2542 if (dump_file && (dump_flags & TDF_DETAILS))
2543 {
2544 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2545 dump_predicate (dump_file, info->conds, &bb_predicate);
2546 }
2547
2548 if (parms_info && nonconstant_names.exists ())
2549 {
2550 struct predicate phi_predicate;
2551 bool first_phi = true;
2552
2553 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2554 gsi_next (&bsi))
2555 {
2556 if (first_phi
2557 && !phi_result_unknown_predicate (parms_info, info, bb,
2558 &phi_predicate,
2559 nonconstant_names))
2560 break;
2561 first_phi = false;
2562 if (dump_file && (dump_flags & TDF_DETAILS))
2563 {
2564 fprintf (dump_file, " ");
2565 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2566 }
2567 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2568 nonconstant_names);
2569 }
2570 }
2571
2572 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2573
2574 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2575 gsi_next (&bsi))
2576 {
2577 gimple stmt = gsi_stmt (bsi);
2578 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2579 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2580 int prob;
2581 struct predicate will_be_nonconstant;
2582
2583 /* This relation stmt should be folded after we remove
2584	     the builtin_expect call.  Adjust the cost here.  */
2585 if (stmt == fix_builtin_expect_stmt)
2586 {
2587 this_size--;
2588 this_time--;
2589 }
2590
2591 if (dump_file && (dump_flags & TDF_DETAILS))
2592 {
2593 fprintf (dump_file, " ");
2594 print_gimple_stmt (dump_file, stmt, 0, 0);
2595 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2596 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2597 this_time);
2598 }
2599
2600 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2601 {
2602 struct predicate this_array_index;
2603 this_array_index =
2604 array_index_predicate (info, nonconstant_names,
2605 gimple_assign_rhs1 (stmt));
2606 if (!false_predicate_p (&this_array_index))
2607 array_index =
2608 and_predicates (info->conds, &array_index,
2609 &this_array_index);
2610 }
2611 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2612 {
2613 struct predicate this_array_index;
2614 this_array_index =
2615 array_index_predicate (info, nonconstant_names,
2616 gimple_get_lhs (stmt));
2617 if (!false_predicate_p (&this_array_index))
2618 array_index =
2619 and_predicates (info->conds, &array_index,
2620 &this_array_index);
2621 }
2622
2623
2624 if (is_gimple_call (stmt)
2625 && !gimple_call_internal_p (stmt))
2626 {
2627 struct cgraph_edge *edge = node->get_edge (stmt);
2628 struct inline_edge_summary *es = inline_edge_summary (edge);
2629
2630	      /* Special case: results of BUILT_IN_CONSTANT_P will always be
2631		 resolved as constant.  We, however, don't want to optimize
2632 out the cgraph edges. */
2633 if (nonconstant_names.exists ()
2634 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2635 && gimple_call_lhs (stmt)
2636 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2637 {
2638 struct predicate false_p = false_predicate ();
2639 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2640 = false_p;
2641 }
2642 if (ipa_node_params_vector.exists ())
2643 {
2644 int count = gimple_call_num_args (stmt);
2645 int i;
2646
2647 if (count)
2648 es->param.safe_grow_cleared (count);
2649 for (i = 0; i < count; i++)
2650 {
2651 int prob = param_change_prob (stmt, i);
2652 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2653 es->param[i].change_prob = prob;
2654 }
2655 }
2656
2657 es->call_stmt_size = this_size;
2658 es->call_stmt_time = this_time;
2659 es->loop_depth = bb_loop_depth (bb);
2660 edge_set_predicate (edge, &bb_predicate);
2661 }
2662
2663	  /* TODO: When a conditional jump or switch is known to be constant but
2664	     we did not translate it into the predicates, we really can account
2665	     for just the maximum of the possible paths.  */
2666 if (parms_info)
2667 will_be_nonconstant
2668 = will_be_nonconstant_predicate (parms_info, info,
2669 stmt, nonconstant_names);
2670 if (this_time || this_size)
2671 {
2672 struct predicate p;
2673
2674 this_time *= freq;
2675
2676 prob = eliminated_by_inlining_prob (stmt);
2677 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2678 fprintf (dump_file,
2679 "\t\t50%% will be eliminated by inlining\n");
2680 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2681 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2682
2683 if (parms_info)
2684 p = and_predicates (info->conds, &bb_predicate,
2685 &will_be_nonconstant);
2686 else
2687 p = true_predicate ();
2688
2689 if (!false_predicate_p (&p))
2690 {
2691 time += this_time;
2692 size += this_size;
2693 if (time > MAX_TIME * INLINE_TIME_SCALE)
2694 time = MAX_TIME * INLINE_TIME_SCALE;
2695 }
2696
2697 /* We account everything but the calls. Calls have their own
2698 size/time info attached to cgraph edges. This is necessary
2699 in order to make the cost disappear after inlining. */
2700 if (!is_gimple_call (stmt))
2701 {
2702 if (prob)
2703 {
2704 struct predicate ip = not_inlined_predicate ();
2705 ip = and_predicates (info->conds, &ip, &p);
2706 account_size_time (info, this_size * prob,
2707 this_time * prob, &ip);
2708 }
2709 if (prob != 2)
2710 account_size_time (info, this_size * (2 - prob),
2711 this_time * (2 - prob), &p);
2712 }
2713
2714 gcc_assert (time >= 0);
2715 gcc_assert (size >= 0);
2716 }
2717 }
2718 }
2719 set_hint_predicate (&inline_summary (node)->array_index, array_index);
2720 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2721 if (time > MAX_TIME)
2722 time = MAX_TIME;
2723 free (order);
2724
2725 if (!early && nonconstant_names.exists ())
2726 {
2727 struct loop *loop;
2728 predicate loop_iterations = true_predicate ();
2729 predicate loop_stride = true_predicate ();
2730
2731 if (dump_file && (dump_flags & TDF_DETAILS))
2732 flow_loops_dump (dump_file, NULL, 0);
2733 scev_initialize ();
2734 FOR_EACH_LOOP (loop, 0)
2735 {
2736 vec<edge> exits;
2737 edge ex;
2738 unsigned int j, i;
2739 struct tree_niter_desc niter_desc;
2740 basic_block *body = get_loop_body (loop);
2741 bb_predicate = *(struct predicate *) loop->header->aux;
2742
2743 exits = get_loop_exit_edges (loop);
2744 FOR_EACH_VEC_ELT (exits, j, ex)
2745 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2746 && !is_gimple_min_invariant (niter_desc.niter))
2747 {
2748 predicate will_be_nonconstant
2749 = will_be_nonconstant_expr_predicate (parms_info, info,
2750 niter_desc.niter,
2751 nonconstant_names);
2752 if (!true_predicate_p (&will_be_nonconstant))
2753 will_be_nonconstant = and_predicates (info->conds,
2754 &bb_predicate,
2755 &will_be_nonconstant);
2756 if (!true_predicate_p (&will_be_nonconstant)
2757 && !false_predicate_p (&will_be_nonconstant))
2758	      /* This is slightly imprecise.  We may want to represent each
2759		 loop with an independent predicate.  */
2760 loop_iterations =
2761 and_predicates (info->conds, &loop_iterations,
2762 &will_be_nonconstant);
2763 }
2764 exits.release ();
2765
2766 for (i = 0; i < loop->num_nodes; i++)
2767 {
2768 gimple_stmt_iterator gsi;
2769 bb_predicate = *(struct predicate *) body[i]->aux;
2770 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2771 gsi_next (&gsi))
2772 {
2773 gimple stmt = gsi_stmt (gsi);
2774 affine_iv iv;
2775 ssa_op_iter iter;
2776 tree use;
2777
2778 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2779 {
2780 predicate will_be_nonconstant;
2781
2782 if (!simple_iv
2783 (loop, loop_containing_stmt (stmt), use, &iv, true)
2784 || is_gimple_min_invariant (iv.step))
2785 continue;
2786 will_be_nonconstant
2787 = will_be_nonconstant_expr_predicate (parms_info, info,
2788 iv.step,
2789 nonconstant_names);
2790 if (!true_predicate_p (&will_be_nonconstant))
2791 will_be_nonconstant
2792 = and_predicates (info->conds,
2793 &bb_predicate,
2794 &will_be_nonconstant);
2795 if (!true_predicate_p (&will_be_nonconstant)
2796 && !false_predicate_p (&will_be_nonconstant))
2797		  /* This is slightly imprecise.  We may want to represent
2798		     each loop with an independent predicate.  */
2799 loop_stride =
2800 and_predicates (info->conds, &loop_stride,
2801 &will_be_nonconstant);
2802 }
2803 }
2804 }
2805 free (body);
2806 }
2807 set_hint_predicate (&inline_summary (node)->loop_iterations,
2808 loop_iterations);
2809 set_hint_predicate (&inline_summary (node)->loop_stride, loop_stride);
2810 scev_finalize ();
2811 }
2812 FOR_ALL_BB_FN (bb, my_function)
2813 {
2814 edge e;
2815 edge_iterator ei;
2816
2817 if (bb->aux)
2818 pool_free (edge_predicate_pool, bb->aux);
2819 bb->aux = NULL;
2820 FOR_EACH_EDGE (e, ei, bb->succs)
2821 {
2822 if (e->aux)
2823 pool_free (edge_predicate_pool, e->aux);
2824 e->aux = NULL;
2825 }
2826 }
2827 inline_summary (node)->self_time = time;
2828 inline_summary (node)->self_size = size;
2829 nonconstant_names.release ();
2830 if (opt_for_fn (node->decl, optimize) && !early)
2831 {
2832 loop_optimizer_finalize ();
2833 free_dominance_info (CDI_DOMINATORS);
2834 }
2835 if (dump_file)
2836 {
2837 fprintf (dump_file, "\n");
2838 dump_inline_summary (dump_file, node);
2839 }
2840 }
2841
2842
2843 /* Compute parameters of functions used by inliner.
2844 EARLY is true when we compute parameters for the early inliner */
2845
2846 void
2847 compute_inline_parameters (struct cgraph_node *node, bool early)
2848 {
2849 HOST_WIDE_INT self_stack_size;
2850 struct cgraph_edge *e;
2851 struct inline_summary *info;
2852
2853 gcc_assert (!node->global.inlined_to);
2854
2855 inline_summary_alloc ();
2856
2857 info = inline_summary (node);
2858 reset_inline_summary (node);
2859
2860   /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2861      Once this happens, we will need to predict the call statement size more
2862      carefully.  */
2863 if (node->thunk.thunk_p)
2864 {
2865 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2866 struct predicate t = true_predicate ();
2867
2868 info->inlinable = 0;
2869 node->callees->call_stmt_cannot_inline_p = true;
2870 node->local.can_change_signature = false;
2871 es->call_stmt_time = 1;
2872 es->call_stmt_size = 1;
2873 account_size_time (info, 0, 0, &t);
2874 return;
2875 }
2876
2877   /* Even is_gimple_min_invariant relies on current_function_decl.  */
2878 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2879
2880 /* Estimate the stack size for the function if we're optimizing. */
2881 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2882 info->estimated_self_stack_size = self_stack_size;
2883 info->estimated_stack_size = self_stack_size;
2884 info->stack_frame_offset = 0;
2885
2886 /* Can this function be inlined at all? */
2887 if (!opt_for_fn (node->decl, optimize)
2888 && !lookup_attribute ("always_inline",
2889 DECL_ATTRIBUTES (node->decl)))
2890 info->inlinable = false;
2891 else
2892 info->inlinable = tree_inlinable_function_p (node->decl);
2893
2894 /* Type attributes can use parameter indices to describe them. */
2895 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2896 node->local.can_change_signature = false;
2897 else
2898 {
2899       /* Otherwise, inlinable functions can always change their signature.  */
2900 if (info->inlinable)
2901 node->local.can_change_signature = true;
2902 else
2903 {
2904	   /* Functions calling builtin_apply cannot change their signature.  */
2905 for (e = node->callees; e; e = e->next_callee)
2906 {
2907 tree cdecl = e->callee->decl;
2908 if (DECL_BUILT_IN (cdecl)
2909 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2910 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2911 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2912 break;
2913 }
2914 node->local.can_change_signature = !e;
2915 }
2916 }
2917 estimate_function_body_sizes (node, early);
2918
2919 for (e = node->callees; e; e = e->next_callee)
2920 if (e->callee->comdat_local_p ())
2921 break;
2922 node->calls_comdat_local = (e != NULL);
2923
2924   /* Inlining characteristics are maintained by cgraph_mark_inline.  */
2925 info->time = info->self_time;
2926 info->size = info->self_size;
2927 info->stack_frame_offset = 0;
2928 info->estimated_stack_size = info->estimated_self_stack_size;
2929 #ifdef ENABLE_CHECKING
2930 inline_update_overall_summary (node);
2931 gcc_assert (info->time == info->self_time && info->size == info->self_size);
2932 #endif
2933
2934 pop_cfun ();
2935 }
2936
2937
2938 /* Compute parameters of functions used by inliner using
2939 current_function_decl. */
2940
2941 static unsigned int
2942 compute_inline_parameters_for_current (void)
2943 {
2944 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2945 return 0;
2946 }
2947
2948 namespace {
2949
2950 const pass_data pass_data_inline_parameters =
2951 {
2952 GIMPLE_PASS, /* type */
2953 "inline_param", /* name */
2954 OPTGROUP_INLINE, /* optinfo_flags */
2955 TV_INLINE_PARAMETERS, /* tv_id */
2956 0, /* properties_required */
2957 0, /* properties_provided */
2958 0, /* properties_destroyed */
2959 0, /* todo_flags_start */
2960 0, /* todo_flags_finish */
2961 };
2962
2963 class pass_inline_parameters : public gimple_opt_pass
2964 {
2965 public:
2966 pass_inline_parameters (gcc::context *ctxt)
2967 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
2968 {}
2969
2970 /* opt_pass methods: */
2971 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
2972 virtual unsigned int execute (function *)
2973 {
2974 return compute_inline_parameters_for_current ();
2975 }
2976
2977 }; // class pass_inline_parameters
2978
2979 } // anon namespace
2980
2981 gimple_opt_pass *
2982 make_pass_inline_parameters (gcc::context *ctxt)
2983 {
2984 return new pass_inline_parameters (ctxt);
2985 }
2986
2987
2988 /* Estimate the benefit of devirtualizing indirect edge IE, provided KNOWN_VALS,
2989    KNOWN_CONTEXTS and KNOWN_AGGS.  */
2990
2991 static bool
2992 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
2993 int *size, int *time,
2994 vec<tree> known_vals,
2995 vec<ipa_polymorphic_call_context> known_contexts,
2996 vec<ipa_agg_jump_function_p> known_aggs)
2997 {
2998 tree target;
2999 struct cgraph_node *callee;
3000 struct inline_summary *isummary;
3001 enum availability avail;
3002 bool speculative;
3003
3004 if (!known_vals.exists () && !known_contexts.exists ())
3005 return false;
3006 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3007 return false;
3008
3009 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3010 known_aggs, &speculative);
3011 if (!target || speculative)
3012 return false;
3013
3014 /* Account for difference in cost between indirect and direct calls. */
3015 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3016 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3017 gcc_checking_assert (*time >= 0);
3018 gcc_checking_assert (*size >= 0);
3019
3020 callee = cgraph_node::get (target);
3021 if (!callee || !callee->definition)
3022 return false;
3023 callee = callee->function_symbol (&avail);
3024 if (avail < AVAIL_AVAILABLE)
3025 return false;
3026 isummary = inline_summary (callee);
3027 return isummary->inlinable;
3028 }
3029
3030 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3031 handle edge E with probability PROB.
3032 Set HINTS if edge may be devirtualized.
3033 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3034 site. */
3035
3036 static inline void
3037 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3038 int *time,
3039 int prob,
3040 vec<tree> known_vals,
3041 vec<ipa_polymorphic_call_context> known_contexts,
3042 vec<ipa_agg_jump_function_p> known_aggs,
3043 inline_hints *hints)
3044 {
3045 struct inline_edge_summary *es = inline_edge_summary (e);
3046 int call_size = es->call_stmt_size;
3047 int call_time = es->call_stmt_time;
3048 int cur_size;
3049 if (!e->callee
3050 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3051 known_vals, known_contexts, known_aggs)
3052 && hints && e->maybe_hot_p ())
3053 *hints |= INLINE_HINT_indirect_call;
3054 cur_size = call_size * INLINE_SIZE_SCALE;
3055 *size += cur_size;
3056 if (min_size)
3057 *min_size += cur_size;
3058 *time += apply_probability ((gcov_type) call_time, prob)
3059 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3060 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3061 *time = MAX_TIME * INLINE_TIME_SCALE;
3062 }
3063
3064
3065
3066 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3067 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3068 describe context of the call site. */
3069
3070 static void
3071 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3072 int *min_size, int *time,
3073 inline_hints *hints,
3074 clause_t possible_truths,
3075 vec<tree> known_vals,
3076 vec<ipa_polymorphic_call_context> known_contexts,
3077 vec<ipa_agg_jump_function_p> known_aggs)
3078 {
3079 struct cgraph_edge *e;
3080 for (e = node->callees; e; e = e->next_callee)
3081 {
3082 struct inline_edge_summary *es = inline_edge_summary (e);
3083 if (!es->predicate
3084 || evaluate_predicate (es->predicate, possible_truths))
3085 {
3086 if (e->inline_failed)
3087 {
3088 /* Predicates of calls shall not use NOT_CHANGED codes,
3089	     so we do not need to compute probabilities.  */
3090 estimate_edge_size_and_time (e, size,
3091 es->predicate ? NULL : min_size,
3092 time, REG_BR_PROB_BASE,
3093 known_vals, known_contexts,
3094 known_aggs, hints);
3095 }
3096 else
3097 estimate_calls_size_and_time (e->callee, size, min_size, time,
3098 hints,
3099 possible_truths,
3100 known_vals, known_contexts,
3101 known_aggs);
3102 }
3103 }
3104 for (e = node->indirect_calls; e; e = e->next_callee)
3105 {
3106 struct inline_edge_summary *es = inline_edge_summary (e);
3107 if (!es->predicate
3108 || evaluate_predicate (es->predicate, possible_truths))
3109 estimate_edge_size_and_time (e, size,
3110 es->predicate ? NULL : min_size,
3111 time, REG_BR_PROB_BASE,
3112 known_vals, known_contexts, known_aggs,
3113 hints);
3114 }
3115 }
3116
3117
3118 /* Estimate size and time needed to execute NODE assuming
3119 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3120    information about NODE's arguments.  If non-NULL, also use the probability
3121    information present in the INLINE_PARAM_SUMMARY vector.
3122    Additionally determine the hints implied by the context.  Finally compute
3123    the minimal size needed for the call that is independent of the call context
3124    and can be used for fast estimates.  Return the values in RET_SIZE,
3125 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3126
3127 static void
3128 estimate_node_size_and_time (struct cgraph_node *node,
3129 clause_t possible_truths,
3130 vec<tree> known_vals,
3131 vec<ipa_polymorphic_call_context> known_contexts,
3132 vec<ipa_agg_jump_function_p> known_aggs,
3133 int *ret_size, int *ret_min_size, int *ret_time,
3134 inline_hints *ret_hints,
3135 vec<inline_param_summary>
3136 inline_param_summary)
3137 {
3138 struct inline_summary *info = inline_summary (node);
3139 size_time_entry *e;
3140 int size = 0;
3141 int time = 0;
3142 int min_size = 0;
3143 inline_hints hints = 0;
3144 int i;
3145
3146 if (dump_file && (dump_flags & TDF_DETAILS))
3147 {
3148 bool found = false;
3149 fprintf (dump_file, " Estimating body: %s/%i\n"
3150 " Known to be false: ", node->name (),
3151 node->order);
3152
3153 for (i = predicate_not_inlined_condition;
3154 i < (predicate_first_dynamic_condition
3155 + (int) vec_safe_length (info->conds)); i++)
3156 if (!(possible_truths & (1 << i)))
3157 {
3158 if (found)
3159 fprintf (dump_file, ", ");
3160 found = true;
3161 dump_condition (dump_file, info->conds, i);
3162 }
3163 }
3164
3165 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3166 if (evaluate_predicate (&e->predicate, possible_truths))
3167 {
3168 size += e->size;
3169 gcc_checking_assert (e->time >= 0);
3170 gcc_checking_assert (time >= 0);
3171 if (!inline_param_summary.exists ())
3172 time += e->time;
3173 else
3174 {
3175 int prob = predicate_probability (info->conds,
3176 &e->predicate,
3177 possible_truths,
3178 inline_param_summary);
3179 gcc_checking_assert (prob >= 0);
3180 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3181 time += apply_probability ((gcov_type) e->time, prob);
3182 }
3183 if (time > MAX_TIME * INLINE_TIME_SCALE)
3184 time = MAX_TIME * INLINE_TIME_SCALE;
3185 gcc_checking_assert (time >= 0);
3186
3187 }
3188 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3189 min_size = (*info->entry)[0].size;
3190 gcc_checking_assert (size >= 0);
3191 gcc_checking_assert (time >= 0);
3192
3193 if (info->loop_iterations
3194 && !evaluate_predicate (info->loop_iterations, possible_truths))
3195 hints |= INLINE_HINT_loop_iterations;
3196 if (info->loop_stride
3197 && !evaluate_predicate (info->loop_stride, possible_truths))
3198 hints |= INLINE_HINT_loop_stride;
3199 if (info->array_index
3200 && !evaluate_predicate (info->array_index, possible_truths))
3201 hints |= INLINE_HINT_array_index;
3202 if (info->scc_no)
3203 hints |= INLINE_HINT_in_scc;
3204 if (DECL_DECLARED_INLINE_P (node->decl))
3205 hints |= INLINE_HINT_declared_inline;
3206
3207 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3208 known_vals, known_contexts, known_aggs);
3209 gcc_checking_assert (size >= 0);
3210 gcc_checking_assert (time >= 0);
3211 time = RDIV (time, INLINE_TIME_SCALE);
3212 size = RDIV (size, INLINE_SIZE_SCALE);
3213 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3214
3215 if (dump_file && (dump_flags & TDF_DETAILS))
3216 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3217 if (ret_time)
3218 *ret_time = time;
3219 if (ret_size)
3220 *ret_size = size;
3221 if (ret_min_size)
3222 *ret_min_size = min_size;
3223 if (ret_hints)
3224 *ret_hints = hints;
3225 return;
3226 }
3227
3228
3229 /* Estimate size and time needed to execute callee of EDGE assuming that
3230 parameters known to be constant at caller of EDGE are propagated.
3231 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3232 and types for parameters. */
3233
3234 void
3235 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3236 vec<tree> known_vals,
3237 vec<ipa_polymorphic_call_context>
3238 known_contexts,
3239 vec<ipa_agg_jump_function_p> known_aggs,
3240 int *ret_size, int *ret_time,
3241 inline_hints *hints)
3242 {
3243 clause_t clause;
3244
3245 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3246 known_aggs);
3247 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3248 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3249 }
3250
3251 /* Translate all conditions from callee representation into caller
3252    representation and symbolically evaluate predicate P into a new predicate.
3253
3254    INFO is the inline_summary of the function we are adding the predicate to;
3255    CALLEE_INFO is the summary of the function predicate P comes from.
3256    OPERAND_MAP is an array mapping callee formal IDs to caller formal IDs.
3257    POSSIBLE_TRUTHS is a clause of all callee conditions that may be true in
3258    the caller context.  TOPLEV_PREDICATE is the predicate under which the
3259    callee is executed.  OFFSET_MAP is an array of offsets that need to be
3260    added to conditions; a negative offset means that conditions relying on
3261    values passed by reference have to be discarded because they might not be
3262    preserved (and should be considered offset zero for other purposes).  */
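
/* As a small, hypothetical example: if the callee's formal 0 is fed by the
   caller's formal 2 through a simple pass-through jump function, then
   OPERAND_MAP[0] == 2 and OFFSET_MAP[0] == 0, so a callee condition such
   as "param 0 changed" is rewritten into the caller condition
   "param 2 changed"; conditions that cannot be remapped are replaced by
   the true predicate.  */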
3263
3264 static struct predicate
3265 remap_predicate (struct inline_summary *info,
3266 struct inline_summary *callee_info,
3267 struct predicate *p,
3268 vec<int> operand_map,
3269 vec<int> offset_map,
3270 clause_t possible_truths, struct predicate *toplev_predicate)
3271 {
3272 int i;
3273 struct predicate out = true_predicate ();
3274
3275 /* True predicate is easy. */
3276 if (true_predicate_p (p))
3277 return *toplev_predicate;
3278 for (i = 0; p->clause[i]; i++)
3279 {
3280 clause_t clause = p->clause[i];
3281 int cond;
3282 struct predicate clause_predicate = false_predicate ();
3283
3284 gcc_assert (i < MAX_CLAUSES);
3285
3286 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3287 /* Do we have condition we can't disprove? */
3288 if (clause & possible_truths & (1 << cond))
3289 {
3290 struct predicate cond_predicate;
3291 /* Work out if the condition can translate to predicate in the
3292 inlined function. */
3293 if (cond >= predicate_first_dynamic_condition)
3294 {
3295 struct condition *c;
3296
3297 c = &(*callee_info->conds)[cond
3298 -
3299 predicate_first_dynamic_condition];
3300 /* See if we can remap condition operand to caller's operand.
3301 Otherwise give up. */
3302 if (!operand_map.exists ()
3303 || (int) operand_map.length () <= c->operand_num
3304 || operand_map[c->operand_num] == -1
3305 /* TODO: For non-aggregate conditions, adding an offset is
3306 basically an arithmetic jump function processing which
3307 we should support in future. */
3308 || ((!c->agg_contents || !c->by_ref)
3309 && offset_map[c->operand_num] > 0)
3310 || (c->agg_contents && c->by_ref
3311 && offset_map[c->operand_num] < 0))
3312 cond_predicate = true_predicate ();
3313 else
3314 {
3315 struct agg_position_info ap;
3316 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3317 if (offset_delta < 0)
3318 {
3319 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3320 offset_delta = 0;
3321 }
3322 gcc_assert (!c->agg_contents
3323 || c->by_ref || offset_delta == 0);
3324 ap.offset = c->offset + offset_delta;
3325 ap.agg_contents = c->agg_contents;
3326 ap.by_ref = c->by_ref;
3327 cond_predicate = add_condition (info,
3328 operand_map[c->operand_num],
3329 &ap, c->code, c->val);
3330 }
3331 }
3332	  /* Fixed conditions remain the same; construct a single
3333	     condition predicate.  */
3334 else
3335 {
3336 cond_predicate.clause[0] = 1 << cond;
3337 cond_predicate.clause[1] = 0;
3338 }
3339 clause_predicate = or_predicates (info->conds, &clause_predicate,
3340 &cond_predicate);
3341 }
3342 out = and_predicates (info->conds, &out, &clause_predicate);
3343 }
3344 return and_predicates (info->conds, &out, toplev_predicate);
3345 }
3346
3347
3348 /* Update summary information of inline clones after inlining.
3349 Compute peak stack usage. */
3350
3351 static void
3352 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3353 {
3354 struct cgraph_edge *e;
3355 struct inline_summary *callee_info = inline_summary (node);
3356 struct inline_summary *caller_info = inline_summary (node->callers->caller);
3357 HOST_WIDE_INT peak;
3358
3359 callee_info->stack_frame_offset
3360 = caller_info->stack_frame_offset
3361 + caller_info->estimated_self_stack_size;
3362 peak = callee_info->stack_frame_offset
3363 + callee_info->estimated_self_stack_size;
3364 if (inline_summary (node->global.inlined_to)->estimated_stack_size < peak)
3365 inline_summary (node->global.inlined_to)->estimated_stack_size = peak;
3366 ipa_propagate_frequency (node);
3367 for (e = node->callees; e; e = e->next_callee)
3368 {
3369 if (!e->inline_failed)
3370 inline_update_callee_summaries (e->callee, depth);
3371 inline_edge_summary (e)->loop_depth += depth;
3372 }
3373 for (e = node->indirect_calls; e; e = e->next_callee)
3374 inline_edge_summary (e)->loop_depth += depth;
3375 }
3376
3377 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3378    When function A is inlined in B and A calls C with a parameter that
3379    changes with probability PROB1, and that parameter is known to be a
3380    passthrough of an argument of B that changes with probability PROB2,
3381    the probability of change is now PROB1*PROB2.  */
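
/* For instance, with REG_BR_PROB_BASE of 10000, PROB1 == 5000 and
   PROB2 == 2000 combine into 1000, i.e. a 10% chance of change; when the
   product would round down to zero while both inputs are nonzero, we keep
   1 so the value is not mistaken for a compile time invariant.  */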
3382
3383 static void
3384 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3385 struct cgraph_edge *edge)
3386 {
3387 if (ipa_node_params_vector.exists ())
3388 {
3389 int i;
3390 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3391 struct inline_edge_summary *es = inline_edge_summary (edge);
3392 struct inline_edge_summary *inlined_es
3393 = inline_edge_summary (inlined_edge);
3394
3395 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3396 {
3397 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3398 if (jfunc->type == IPA_JF_PASS_THROUGH
3399 && (ipa_get_jf_pass_through_formal_id (jfunc)
3400 < (int) inlined_es->param.length ()))
3401 {
3402 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3403 int prob1 = es->param[i].change_prob;
3404 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3405 int prob = combine_probabilities (prob1, prob2);
3406
3407 if (prob1 && prob2 && !prob)
3408 prob = 1;
3409
3410 es->param[i].change_prob = prob;
3411 }
3412 }
3413 }
3414 }
3415
3416 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3417
3418 Remap predicates of callees of NODE. Rest of arguments match
3419 remap_predicate.
3420
3421 Also update change probabilities. */
3422
3423 static void
3424 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3425 struct cgraph_node *node,
3426 struct inline_summary *info,
3427 struct inline_summary *callee_info,
3428 vec<int> operand_map,
3429 vec<int> offset_map,
3430 clause_t possible_truths,
3431 struct predicate *toplev_predicate)
3432 {
3433 struct cgraph_edge *e;
3434 for (e = node->callees; e; e = e->next_callee)
3435 {
3436 struct inline_edge_summary *es = inline_edge_summary (e);
3437 struct predicate p;
3438
3439 if (e->inline_failed)
3440 {
3441 remap_edge_change_prob (inlined_edge, e);
3442
3443 if (es->predicate)
3444 {
3445 p = remap_predicate (info, callee_info,
3446 es->predicate, operand_map, offset_map,
3447 possible_truths, toplev_predicate);
3448 edge_set_predicate (e, &p);
3449 /* TODO: We should remove the edge for code that will be
3450 optimized out, but we need to keep verifiers and tree-inline
3451 happy. Make it cold for now. */
3452 if (false_predicate_p (&p))
3453 {
3454 e->count = 0;
3455 e->frequency = 0;
3456 }
3457 }
3458 else
3459 edge_set_predicate (e, toplev_predicate);
3460 }
3461 else
3462 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3463 operand_map, offset_map, possible_truths,
3464 toplev_predicate);
3465 }
3466 for (e = node->indirect_calls; e; e = e->next_callee)
3467 {
3468 struct inline_edge_summary *es = inline_edge_summary (e);
3469 struct predicate p;
3470
3471 remap_edge_change_prob (inlined_edge, e);
3472 if (es->predicate)
3473 {
3474 p = remap_predicate (info, callee_info,
3475 es->predicate, operand_map, offset_map,
3476 possible_truths, toplev_predicate);
3477 edge_set_predicate (e, &p);
3478 /* TODO: We should remove the edge for code that will be optimized
3479 out, but we need to keep verifiers and tree-inline happy.
3480 Make it cold for now. */
3481 if (false_predicate_p (&p))
3482 {
3483 e->count = 0;
3484 e->frequency = 0;
3485 }
3486 }
3487 else
3488 edge_set_predicate (e, toplev_predicate);
3489 }
3490 }
3491
3492 /* Same as remap_predicate, but set result into hint *HINT. */
3493
3494 static void
3495 remap_hint_predicate (struct inline_summary *info,
3496 struct inline_summary *callee_info,
3497 struct predicate **hint,
3498 vec<int> operand_map,
3499 vec<int> offset_map,
3500 clause_t possible_truths,
3501 struct predicate *toplev_predicate)
3502 {
3503 predicate p;
3504
3505 if (!*hint)
3506 return;
3507 p = remap_predicate (info, callee_info,
3508 *hint,
3509 operand_map, offset_map,
3510 possible_truths, toplev_predicate);
3511 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3512 {
3513 if (!*hint)
3514 set_hint_predicate (hint, p);
3515 else
3516 **hint = and_predicates (info->conds, *hint, &p);
3517 }
3518 }
3519
3520 /* We inlined EDGE. Update summary of the function we inlined into. */
3521
3522 void
3523 inline_merge_summary (struct cgraph_edge *edge)
3524 {
3525 struct inline_summary *callee_info = inline_summary (edge->callee);
3526 struct cgraph_node *to = (edge->caller->global.inlined_to
3527 ? edge->caller->global.inlined_to : edge->caller);
3528 struct inline_summary *info = inline_summary (to);
3529 clause_t clause = 0; /* not_inline is known to be false. */
3530 size_time_entry *e;
3531 vec<int> operand_map = vNULL;
3532 vec<int> offset_map = vNULL;
3533 int i;
3534 struct predicate toplev_predicate;
3535 struct predicate true_p = true_predicate ();
3536 struct inline_edge_summary *es = inline_edge_summary (edge);
3537
3538 if (es->predicate)
3539 toplev_predicate = *es->predicate;
3540 else
3541 toplev_predicate = true_predicate ();
3542
3543 if (ipa_node_params_vector.exists () && callee_info->conds)
3544 {
3545 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3546 int count = ipa_get_cs_argument_count (args);
3547 int i;
3548
3549 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3550 if (count)
3551 {
3552 operand_map.safe_grow_cleared (count);
3553 offset_map.safe_grow_cleared (count);
3554 }
3555 for (i = 0; i < count; i++)
3556 {
3557 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3558 int map = -1;
3559
3560 /* TODO: handle non-NOPs when merging. */
3561 if (jfunc->type == IPA_JF_PASS_THROUGH)
3562 {
3563 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3564 map = ipa_get_jf_pass_through_formal_id (jfunc);
3565 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3566 offset_map[i] = -1;
3567 }
3568 else if (jfunc->type == IPA_JF_ANCESTOR)
3569 {
3570 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3571 if (offset >= 0 && offset < INT_MAX)
3572 {
3573 map = ipa_get_jf_ancestor_formal_id (jfunc);
3574 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3575 offset = -1;
3576 offset_map[i] = offset;
3577 }
3578 }
3579 operand_map[i] = map;
3580 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3581 }
3582 }
3583 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3584 {
3585 struct predicate p = remap_predicate (info, callee_info,
3586 &e->predicate, operand_map,
3587 offset_map, clause,
3588 &toplev_predicate);
3589 if (!false_predicate_p (&p))
3590 {
3591 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3592 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3593 int prob = predicate_probability (callee_info->conds,
3594 &e->predicate,
3595 clause, es->param);
3596 add_time = apply_probability ((gcov_type) add_time, prob);
3597 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3598 add_time = MAX_TIME * INLINE_TIME_SCALE;
3599 if (prob != REG_BR_PROB_BASE
3600 && dump_file && (dump_flags & TDF_DETAILS))
3601 {
3602 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3603 (double) prob / REG_BR_PROB_BASE);
3604 }
3605 account_size_time (info, e->size, add_time, &p);
3606 }
3607 }
3608 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3609 offset_map, clause, &toplev_predicate);
3610 remap_hint_predicate (info, callee_info,
3611 &callee_info->loop_iterations,
3612 operand_map, offset_map, clause, &toplev_predicate);
3613 remap_hint_predicate (info, callee_info,
3614 &callee_info->loop_stride,
3615 operand_map, offset_map, clause, &toplev_predicate);
3616 remap_hint_predicate (info, callee_info,
3617 &callee_info->array_index,
3618 operand_map, offset_map, clause, &toplev_predicate);
3619
3620 inline_update_callee_summaries (edge->callee,
3621 inline_edge_summary (edge)->loop_depth);
3622
3623   /* We do not maintain predicates of inlined edges, so free it.  */
3624 edge_set_predicate (edge, &true_p);
3625 /* Similarly remove param summaries. */
3626 es->param.release ();
3627 operand_map.release ();
3628 offset_map.release ();
3629 }
3630
3631 /* For performance reasons inline_merge_summary does not update overall size
3632    and time.  Recompute them here.  */
3633
3634 void
3635 inline_update_overall_summary (struct cgraph_node *node)
3636 {
3637 struct inline_summary *info = inline_summary (node);
3638 size_time_entry *e;
3639 int i;
3640
3641 info->size = 0;
3642 info->time = 0;
3643 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3644 {
3645 info->size += e->size, info->time += e->time;
3646 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3647 info->time = MAX_TIME * INLINE_TIME_SCALE;
3648 }
3649 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3650 &info->time, NULL,
3651 ~(clause_t) (1 << predicate_false_condition),
3652 vNULL, vNULL, vNULL);
3653 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3654 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3655 }
3656
3657 /* Return hints derived from EDGE.  */
3658 int
3659 simple_edge_hints (struct cgraph_edge *edge)
3660 {
3661 int hints = 0;
3662 struct cgraph_node *to = (edge->caller->global.inlined_to
3663 ? edge->caller->global.inlined_to : edge->caller);
3664 if (inline_summary (to)->scc_no
3665 && inline_summary (to)->scc_no == inline_summary (edge->callee)->scc_no
3666 && !edge->recursive_p ())
3667 hints |= INLINE_HINT_same_scc;
3668
3669 if (to->lto_file_data && edge->callee->lto_file_data
3670 && to->lto_file_data != edge->callee->lto_file_data)
3671 hints |= INLINE_HINT_cross_module;
3672
3673 return hints;
3674 }
3675
3676 /* Estimate the time cost for the caller when inlining EDGE.
3677 Only to be called via estimate_edge_time, which handles the
3678 caching mechanism.
3679
3680 When caching, also update the cache entry. Compute both time and
3681 size, since we always need both metrics eventually. */
3682
3683 int
3684 do_estimate_edge_time (struct cgraph_edge *edge)
3685 {
3686 int time;
3687 int size;
3688 inline_hints hints;
3689 struct cgraph_node *callee;
3690 clause_t clause;
3691 vec<tree> known_vals;
3692 vec<ipa_polymorphic_call_context> known_contexts;
3693 vec<ipa_agg_jump_function_p> known_aggs;
3694 struct inline_edge_summary *es = inline_edge_summary (edge);
3695 int min_size;
3696
3697 callee = edge->callee->ultimate_alias_target ();
3698
3699 gcc_checking_assert (edge->inline_failed);
3700 evaluate_properties_for_edge (edge, true,
3701 &clause, &known_vals, &known_contexts,
3702 &known_aggs);
3703 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3704 known_aggs, &size, &min_size, &time, &hints, es->param);
3705
3706 /* When we have profile feedback, we can quite safely identify hot
3707 edges and for those we disable size limits. Don't do that when
3708 the probability that the caller will call the callee is low, however,
3709 since it may hurt optimization of the caller's hot path. */
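/* Concretely, require the edge to carry more than half of the caller's
   execution count before setting the hint.  */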
3710 if (edge->count && edge->maybe_hot_p ()
3711 && (edge->count * 2
3712 > (edge->caller->global.inlined_to
3713 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3714 hints |= INLINE_HINT_known_hot;
3715
3716 known_vals.release ();
3717 known_contexts.release ();
3718 known_aggs.release ();
3719 gcc_checking_assert (size >= 0);
3720 gcc_checking_assert (time >= 0);
3721
3722 /* When caching, update the cache entry. */
3723 if (edge_growth_cache.exists ())
3724 {
3725 inline_summary (edge->callee)->min_size = min_size;
3726 if ((int) edge_growth_cache.length () <= edge->uid)
3727 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
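/* Cached values are stored biased by one so that zero means "no entry";
   readers such as do_estimate_edge_size and do_estimate_edge_hints below
   subtract the bias again.  */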
3728 edge_growth_cache[edge->uid].time = time + (time >= 0);
3729
3730 edge_growth_cache[edge->uid].size = size + (size >= 0);
3731 hints |= simple_edge_hints (edge);
3732 edge_growth_cache[edge->uid].hints = hints + 1;
3733 }
3734 return time;
3735 }
3736
3737
3738 /* Return estimated callee growth after inlining EDGE.
3739 Only to be called via estimate_edge_size. */
3740
3741 int
3742 do_estimate_edge_size (struct cgraph_edge *edge)
3743 {
3744 int size;
3745 struct cgraph_node *callee;
3746 clause_t clause;
3747 vec<tree> known_vals;
3748 vec<ipa_polymorphic_call_context> known_contexts;
3749 vec<ipa_agg_jump_function_p> known_aggs;
3750
3751 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3752
3753 if (edge_growth_cache.exists ())
3754 {
3755 do_estimate_edge_time (edge);
3756 size = edge_growth_cache[edge->uid].size;
3757 gcc_checking_assert (size);
3758 return size - (size > 0);
3759 }
3760
3761 callee = edge->callee->ultimate_alias_target ();
3762
3763 /* Early inliner runs without caching, go ahead and do the dirty work. */
3764 gcc_checking_assert (edge->inline_failed);
3765 evaluate_properties_for_edge (edge, true,
3766 &clause, &known_vals, &known_contexts,
3767 &known_aggs);
3768 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3769 known_aggs, &size, NULL, NULL, NULL, vNULL);
3770 known_vals.release ();
3771 known_contexts.release ();
3772 known_aggs.release ();
3773 return size;
3774 }
3775
3776
3777 /* Estimate the inlining hints for EDGE.
3778 Only to be called via estimate_edge_hints. */
3779
3780 inline_hints
3781 do_estimate_edge_hints (struct cgraph_edge *edge)
3782 {
3783 inline_hints hints;
3784 struct cgraph_node *callee;
3785 clause_t clause;
3786 vec<tree> known_vals;
3787 vec<ipa_polymorphic_call_context> known_contexts;
3788 vec<ipa_agg_jump_function_p> known_aggs;
3789
3790 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3791
3792 if (edge_growth_cache.exists ())
3793 {
3794 do_estimate_edge_time (edge);
3795 hints = edge_growth_cache[edge->uid].hints;
3796 gcc_checking_assert (hints);
3797 return hints - 1;
3798 }
3799
3800 callee = edge->callee->ultimate_alias_target ();
3801
3802 /* Early inliner runs without caching, go ahead and do the dirty work. */
3803 gcc_checking_assert (edge->inline_failed);
3804 evaluate_properties_for_edge (edge, true,
3805 &clause, &known_vals, &known_contexts,
3806 &known_aggs);
3807 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3808 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3809 known_vals.release ();
3810 known_contexts.release ();
3811 known_aggs.release ();
3812 hints |= simple_edge_hints (edge);
3813 return hints;
3814 }
3815
3816
3817 /* Estimate self time of the function NODE after inlining EDGE. */
3818
3819 int
3820 estimate_time_after_inlining (struct cgraph_node *node,
3821 struct cgraph_edge *edge)
3822 {
3823 struct inline_edge_summary *es = inline_edge_summary (edge);
3824 if (!es->predicate || !false_predicate_p (es->predicate))
3825 {
3826 gcov_type time =
3827 inline_summary (node)->time + estimate_edge_time (edge);
3828 if (time < 0)
3829 time = 0;
3830 if (time > MAX_TIME)
3831 time = MAX_TIME;
3832 return time;
3833 }
3834 return inline_summary (node)->time;
3835 }
3836
3837
3838 /* Estimate the size of NODE after inlining EDGE which should be an
3839 edge to either NODE or a call inlined into NODE. */
3840
3841 int
3842 estimate_size_after_inlining (struct cgraph_node *node,
3843 struct cgraph_edge *edge)
3844 {
3845 struct inline_edge_summary *es = inline_edge_summary (edge);
3846 if (!es->predicate || !false_predicate_p (es->predicate))
3847 {
3848 int size = inline_summary (node)->size + estimate_edge_growth (edge);
3849 gcc_assert (size >= 0);
3850 return size;
3851 }
3852 return inline_summary (node)->size;
3853 }
3854
3855
3856 struct growth_data
3857 {
3858 struct cgraph_node *node;
3859 bool self_recursive;
3860 int growth;
3861 };
3862
3863
3864 /* Worker for do_estimate_growth. Collect growth for all callers. */
3865
3866 static bool
3867 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3868 {
3869 struct cgraph_edge *e;
3870 struct growth_data *d = (struct growth_data *) data;
3871
3872 for (e = node->callers; e; e = e->next_caller)
3873 {
3874 gcc_checking_assert (e->inline_failed);
3875
3876 if (e->caller == d->node
3877 || (e->caller->global.inlined_to
3878 && e->caller->global.inlined_to == d->node))
3879 d->self_recursive = true;
3880 d->growth += estimate_edge_growth (e);
3881 }
3882 return false;
3883 }
3884
3885
3886 /* Estimate the growth caused by inlining NODE into all callers. */
3887
3888 int
3889 do_estimate_growth (struct cgraph_node *node)
3890 {
3891 struct growth_data d = { node, false, 0 };
3892 struct inline_summary *info = inline_summary (node);
3893
3894 node->call_for_symbol_thunks_and_aliases (do_estimate_growth_1, &d, true);
3895
3896 /* For self-recursive functions the growth estimate really should be
3897 infinity. We don't want to return very large values because the growth
3898 plays various roles in badness computation fractions. Be sure not to
3899 return zero or negative growths. */
3900 if (d.self_recursive)
3901 d.growth = d.growth < info->size ? info->size : d.growth;
3902 else if (DECL_EXTERNAL (node->decl))
3903 ;
3904 else
3905 {
3906 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
3907 d.growth -= info->size;
3908 /* COMDAT functions are very often not shared across multiple units
3909 since they come from various template instantiations.
3910 Take this into account. */
3911 else if (DECL_COMDAT (node->decl)
3912 && node->can_remove_if_no_direct_calls_p ())
3913 d.growth -= (info->size
3914 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
3915 + 50) / 100;
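/* For instance, with a COMDAT sharing probability parameter of 20 this
   subtracts (size * 80 + 50) / 100, i.e. about 80% of the body size,
   rounded to the nearest unit.  */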
3916 }
3917
3918 if (node_growth_cache.exists ())
3919 {
3920 if ((int) node_growth_cache.length () <= node->uid)
3921 node_growth_cache.safe_grow_cleared (symtab->cgraph_max_uid);
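/* As with the edge growth cache, non-negative growths are stored biased
   by one so that zero means "no entry" and negative growths are stored
   as-is; growth_likely_positive below only tests the sign of the cached
   value.  */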
3922 node_growth_cache[node->uid] = d.growth + (d.growth >= 0);
3923 }
3924 return d.growth;
3925 }
3926
3927
3928 /* Make a cheap estimate of whether the growth of NODE is likely positive,
3929 knowing EDGE_GROWTH of one particular edge.
3930 We assume that most other edges will have similar growth
3931 and skip the computation if there are too many callers. */
3932
3933 bool
3934 growth_likely_positive (struct cgraph_node *node, int edge_growth)
3935 {
3936 int max_callers;
3937 int ret;
3938 struct cgraph_edge *e;
3939 gcc_checking_assert (edge_growth > 0);
3940
3941 /* Unlike for functions called once, we play unsafe with
3942 COMDATs. We can allow that since we know the functions
3943 under consideration are small (and thus the risk is small) and,
3944 moreover, the growth estimate already accounts for the fact that COMDAT
3945 functions may or may not disappear when eliminated from the
3946 current unit. With good probability, making the aggressive
3947 choice in all units is going to make the overall program
3948 smaller.
3949
3950 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
3951 instead of
3952 cgraph_will_be_removed_from_program_if_no_direct_calls. */
3953 if (DECL_EXTERNAL (node->decl)
3954 || !node->can_remove_if_no_direct_calls_p ())
3955 return true;
3956
3957 /* If there is a cached value, just use it. */
3958 if ((int)node_growth_cache.length () > node->uid
3959 && (ret = node_growth_cache[node->uid]))
3960 return ret > 0;
3961 if (!node->will_be_removed_from_program_if_no_direct_calls_p ()
3962 && (!DECL_COMDAT (node->decl)
3963 || !node->can_remove_if_no_direct_calls_p ()))
3964 return true;
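/* Heuristic cutoff: with at least size * 4 / edge_growth + 2 callers,
   and assuming other call sites grow about as much as this edge, the
   growth at the call sites almost certainly outweighs the body that could
   be removed, so the loop below gives up early and assumes positive
   growth.  */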
3965 max_callers = inline_summary (node)->size * 4 / edge_growth + 2;
3966
3967 for (e = node->callers; e; e = e->next_caller)
3968 {
3969 max_callers--;
3970 if (!max_callers)
3971 return true;
3972 }
3973 return estimate_growth (node) > 0;
3974 }
3975
3976
3977 /* This function performs the intraprocedural analysis on NODE that is required to
3978 inline indirect calls. */
3979
3980 static void
3981 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
3982 {
3983 ipa_analyze_node (node);
3984 if (dump_file && (dump_flags & TDF_DETAILS))
3985 {
3986 ipa_print_node_params (dump_file, node);
3987 ipa_print_node_jump_functions (dump_file, node);
3988 }
3989 }
3990
3991
3992 /* Compute the inline summary of NODE. */
3993
3994 void
3995 inline_analyze_function (struct cgraph_node *node)
3996 {
3997 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
3998
3999 if (dump_file)
4000 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
4001 node->name (), node->order);
4002 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4003 inline_indirect_intraprocedural_analysis (node);
4004 compute_inline_parameters (node, false);
4005 if (!optimize)
4006 {
4007 struct cgraph_edge *e;
4008 for (e = node->callees; e; e = e->next_callee)
4009 {
4010 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4011 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4012 e->call_stmt_cannot_inline_p = true;
4013 }
4014 for (e = node->indirect_calls; e; e = e->next_callee)
4015 {
4016 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4017 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4018 e->call_stmt_cannot_inline_p = true;
4019 }
4020 }
4021
4022 pop_cfun ();
4023 }
4024
4025
4026 /* Called when a new function is inserted into the callgraph late. */
4027
4028 static void
4029 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
4030 {
4031 inline_analyze_function (node);
4032 }
4033
4034
4035 /* Compute inline summaries for all functions in the unit. */
4036
4037 void
4038 inline_generate_summary (void)
4039 {
4040 struct cgraph_node *node;
4041
4042 /* When not optimizing, do not bother to analyze. Inlining is still done
4043 because edge redirection needs to happen there. */
4044 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4045 return;
4046
4047 function_insertion_hook_holder =
4048 symtab->add_cgraph_insertion_hook (&add_new_function, NULL);
4049
4050 ipa_register_cgraph_hooks ();
4051 inline_free_summary ();
4052
4053 FOR_EACH_DEFINED_FUNCTION (node)
4054 if (!node->alias)
4055 inline_analyze_function (node);
4056 }
4057
4058
4059 /* Read predicate from IB. */
4060
4061 static struct predicate
4062 read_predicate (struct lto_input_block *ib)
4063 {
4064 struct predicate out;
4065 clause_t clause;
4066 int k = 0;
4067
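/* The clauses were streamed as a run of non-zero uhwis terminated by a
   zero; see write_predicate below.  */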
4068 do
4069 {
4070 gcc_assert (k <= MAX_CLAUSES);
4071 clause = out.clause[k++] = streamer_read_uhwi (ib);
4072 }
4073 while (clause);
4074
4075 /* Zero-initialize the remaining clauses in OUT. */
4076 while (k <= MAX_CLAUSES)
4077 out.clause[k++] = 0;
4078
4079 return out;
4080 }
4081
4082
4083 /* Read the inline summary for edge E from IB. */
4084
4085 static void
4086 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4087 {
4088 struct inline_edge_summary *es = inline_edge_summary (e);
4089 struct predicate p;
4090 int length, i;
4091
4092 es->call_stmt_size = streamer_read_uhwi (ib);
4093 es->call_stmt_time = streamer_read_uhwi (ib);
4094 es->loop_depth = streamer_read_uhwi (ib);
4095 p = read_predicate (ib);
4096 edge_set_predicate (e, &p);
4097 length = streamer_read_uhwi (ib);
4098 if (length)
4099 {
4100 es->param.safe_grow_cleared (length);
4101 for (i = 0; i < length; i++)
4102 es->param[i].change_prob = streamer_read_uhwi (ib);
4103 }
4104 }
4105
4106
4107 /* Stream in inline summaries from the section. */
4108
4109 static void
4110 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4111 size_t len)
4112 {
4113 const struct lto_function_header *header =
4114 (const struct lto_function_header *) data;
4115 const int cfg_offset = sizeof (struct lto_function_header);
4116 const int main_offset = cfg_offset + header->cfg_size;
4117 const int string_offset = main_offset + header->main_size;
4118 struct data_in *data_in;
4119 unsigned int i, count2, j;
4120 unsigned int f_count;
4121
4122 lto_input_block ib ((const char *) data + main_offset, header->main_size);
4123
4124 data_in =
4125 lto_data_in_create (file_data, (const char *) data + string_offset,
4126 header->string_size, vNULL);
4127 f_count = streamer_read_uhwi (&ib);
4128 for (i = 0; i < f_count; i++)
4129 {
4130 unsigned int index;
4131 struct cgraph_node *node;
4132 struct inline_summary *info;
4133 lto_symtab_encoder_t encoder;
4134 struct bitpack_d bp;
4135 struct cgraph_edge *e;
4136 predicate p;
4137
4138 index = streamer_read_uhwi (&ib);
4139 encoder = file_data->symtab_node_encoder;
4140 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4141 index));
4142 info = inline_summary (node);
4143
4144 info->estimated_stack_size
4145 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4146 info->size = info->self_size = streamer_read_uhwi (&ib);
4147 info->time = info->self_time = streamer_read_uhwi (&ib);
4148
4149 bp = streamer_read_bitpack (&ib);
4150 info->inlinable = bp_unpack_value (&bp, 1);
4151
4152 count2 = streamer_read_uhwi (&ib);
4153 gcc_assert (!info->conds);
4154 for (j = 0; j < count2; j++)
4155 {
4156 struct condition c;
4157 c.operand_num = streamer_read_uhwi (&ib);
4158 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4159 c.val = stream_read_tree (&ib, data_in);
4160 bp = streamer_read_bitpack (&ib);
4161 c.agg_contents = bp_unpack_value (&bp, 1);
4162 c.by_ref = bp_unpack_value (&bp, 1);
4163 if (c.agg_contents)
4164 c.offset = streamer_read_uhwi (&ib);
4165 vec_safe_push (info->conds, c);
4166 }
4167 count2 = streamer_read_uhwi (&ib);
4168 gcc_assert (!info->entry);
4169 for (j = 0; j < count2; j++)
4170 {
4171 struct size_time_entry e;
4172
4173 e.size = streamer_read_uhwi (&ib);
4174 e.time = streamer_read_uhwi (&ib);
4175 e.predicate = read_predicate (&ib);
4176
4177 vec_safe_push (info->entry, e);
4178 }
4179
4180 p = read_predicate (&ib);
4181 set_hint_predicate (&info->loop_iterations, p);
4182 p = read_predicate (&ib);
4183 set_hint_predicate (&info->loop_stride, p);
4184 p = read_predicate (&ib);
4185 set_hint_predicate (&info->array_index, p);
4186 for (e = node->callees; e; e = e->next_callee)
4187 read_inline_edge_summary (&ib, e);
4188 for (e = node->indirect_calls; e; e = e->next_callee)
4189 read_inline_edge_summary (&ib, e);
4190 }
4191
4192 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4193 len);
4194 lto_data_in_delete (data_in);
4195 }
4196
4197
4198 /* Read the inline summary. Jump functions are shared among ipa-cp
4199 and the inliner, so when ipa-cp is active we don't need to stream them
4200 in twice. */
4201
4202 void
4203 inline_read_summary (void)
4204 {
4205 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4206 struct lto_file_decl_data *file_data;
4207 unsigned int j = 0;
4208
4209 inline_summary_alloc ();
4210
4211 while ((file_data = file_data_vec[j++]))
4212 {
4213 size_t len;
4214 const char *data = lto_get_section_data (file_data,
4215 LTO_section_inline_summary,
4216 NULL, &len);
4217 if (data)
4218 inline_read_section (file_data, data, len);
4219 else
4220 /* Fatal error here. We do not want to support compiling ltrans units
4221 with a different version of the compiler or different flags than the
4222 WPA unit, so this should never happen. */
4223 fatal_error ("ipa inline summary is missing in input file");
4224 }
4225 if (optimize)
4226 {
4227 ipa_register_cgraph_hooks ();
4228 if (!flag_ipa_cp)
4229 ipa_prop_read_jump_functions ();
4230 }
4231 function_insertion_hook_holder =
4232 symtab->add_cgraph_insertion_hook (&add_new_function, NULL);
4233 }
4234
4235
4236 /* Write predicate P to OB. */
4237
4238 static void
4239 write_predicate (struct output_block *ob, struct predicate *p)
4240 {
4241 int j;
4242 if (p)
4243 for (j = 0; p->clause[j]; j++)
4244 {
4245 gcc_assert (j < MAX_CLAUSES);
4246 streamer_write_uhwi (ob, p->clause[j]);
4247 }
4248 streamer_write_uhwi (ob, 0);
4249 }
4250
4251
4252 /* Write inline summary for edge E to OB. */
4253
4254 static void
4255 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4256 {
4257 struct inline_edge_summary *es = inline_edge_summary (e);
4258 int i;
4259
4260 streamer_write_uhwi (ob, es->call_stmt_size);
4261 streamer_write_uhwi (ob, es->call_stmt_time);
4262 streamer_write_uhwi (ob, es->loop_depth);
4263 write_predicate (ob, es->predicate);
4264 streamer_write_uhwi (ob, es->param.length ());
4265 for (i = 0; i < (int) es->param.length (); i++)
4266 streamer_write_uhwi (ob, es->param[i].change_prob);
4267 }
4268
4269
4270 /* Write inline summaries for the nodes in SET.
4271 Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
4272 active, we don't need to write them twice. */
4273
4274 void
4275 inline_write_summary (void)
4276 {
4277 struct cgraph_node *node;
4278 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4279 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4280 unsigned int count = 0;
4281 int i;
4282
4283 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4284 {
4285 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4286 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4287 if (cnode && cnode->definition && !cnode->alias)
4288 count++;
4289 }
4290 streamer_write_uhwi (ob, count);
4291
4292 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4293 {
4294 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4295 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4296 if (cnode && (node = cnode)->definition && !node->alias)
4297 {
4298 struct inline_summary *info = inline_summary (node);
4299 struct bitpack_d bp;
4300 struct cgraph_edge *edge;
4301 int i;
4302 size_time_entry *e;
4303 struct condition *c;
4304
4305 streamer_write_uhwi (ob,
4306 lto_symtab_encoder_encode (encoder, node));
4309 streamer_write_hwi (ob, info->estimated_self_stack_size);
4310 streamer_write_hwi (ob, info->self_size);
4311 streamer_write_hwi (ob, info->self_time);
4312 bp = bitpack_create (ob->main_stream);
4313 bp_pack_value (&bp, info->inlinable, 1);
4314 streamer_write_bitpack (&bp);
4315 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4316 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4317 {
4318 streamer_write_uhwi (ob, c->operand_num);
4319 streamer_write_uhwi (ob, c->code);
4320 stream_write_tree (ob, c->val, true);
4321 bp = bitpack_create (ob->main_stream);
4322 bp_pack_value (&bp, c->agg_contents, 1);
4323 bp_pack_value (&bp, c->by_ref, 1);
4324 streamer_write_bitpack (&bp);
4325 if (c->agg_contents)
4326 streamer_write_uhwi (ob, c->offset);
4327 }
4328 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4329 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4330 {
4331 streamer_write_uhwi (ob, e->size);
4332 streamer_write_uhwi (ob, e->time);
4333 write_predicate (ob, &e->predicate);
4334 }
4335 write_predicate (ob, info->loop_iterations);
4336 write_predicate (ob, info->loop_stride);
4337 write_predicate (ob, info->array_index);
4338 for (edge = node->callees; edge; edge = edge->next_callee)
4339 write_inline_edge_summary (ob, edge);
4340 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4341 write_inline_edge_summary (ob, edge);
4342 }
4343 }
4344 streamer_write_char_stream (ob->main_stream, 0);
4345 produce_asm (ob, NULL);
4346 destroy_output_block (ob);
4347
4348 if (optimize && !flag_ipa_cp)
4349 ipa_prop_write_jump_functions ();
4350 }
4351
4352
4353 /* Release inline summary. */
4354
4355 void
4356 inline_free_summary (void)
4357 {
4358 struct cgraph_node *node;
4359 if (function_insertion_hook_holder)
4360 symtab->remove_cgraph_insertion_hook (function_insertion_hook_holder);
4361 function_insertion_hook_holder = NULL;
4362 if (node_removal_hook_holder)
4363 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
4364 node_removal_hook_holder = NULL;
4365 if (edge_removal_hook_holder)
4366 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4367 edge_removal_hook_holder = NULL;
4368 if (node_duplication_hook_holder)
4369 symtab->remove_cgraph_duplication_hook (node_duplication_hook_holder);
4370 node_duplication_hook_holder = NULL;
4371 if (edge_duplication_hook_holder)
4372 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4373 edge_duplication_hook_holder = NULL;
4374 if (!inline_edge_summary_vec.exists ())
4375 return;
4376 FOR_EACH_DEFINED_FUNCTION (node)
4377 if (!node->alias)
4378 reset_inline_summary (node);
4379 vec_free (inline_summary_vec);
4380 inline_edge_summary_vec.release ();
4381 if (edge_predicate_pool)
4382 free_alloc_pool (edge_predicate_pool);
4383 edge_predicate_pool = 0;
4384 }