[gcc.git] / gcc / ipa-inline-analysis.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Analysis used by the inliner and other passes limiting code size growth.
22
23 We estimate for each function
24 - function body size
25 - average function execution time
26 - inlining size benefit (that is how much of function body size
27 and its call sequence is expected to disappear by inlining)
28 - inlining time benefit
29 - function frame size
30 For each call
31 - call statement size and time
32
33 inline_summary data structures store the above information locally (i.e.
34 parameters of the function itself) and globally (i.e. parameters of
35 the function created by applying all the inline decisions already
36 present in the callgraph).
37
38 We provide an accessor to the inline_summary data structure and
39 basic logic updating the parameters when inlining is performed.
40
41 The summaries are context sensitive. Context means
42 1) partial assignment of known constant values of operands
43 2) whether the function is inlined into the call or not.
44 It is easy to add more variants. To represent function size and time
45 that depend on context (i.e. that are known to be optimized away when
46 the context is known either by inlining or from IP-CP and cloning),
47 we use predicates. Predicates are logical formulas in
48 conjunctive-disjunctive form consisting of clauses. Clauses are bitmaps
49 specifying what conditions must be true. Conditions are simple tests
50 of the form described above.
51
52 In order to make a predicate (possibly) true, all of its clauses must
53 be (possibly) true. To make a clause (possibly) true, one of the conditions
54 it mentions must be (possibly) true. There are fixed bounds on the
55 number of clauses and conditions, and all the manipulation functions
56 are conservative in the positive direction. I.e. we may lose precision
57 by thinking that a predicate may be true even when it is not.
58
59 estimate_edge_size and estimate_edge_growth can be used to query
60 function size/time in the given context. inline_merge_summary merges
61 properties of caller and callee after inlining.
62
63 Finally pass_inline_parameters is exported. This is used to drive
64 computation of function parameters used by the early inliner. The IPA
65 inliner performs analysis via its analyze_function method. */
66
67 #include "config.h"
68 #include "system.h"
69 #include "coretypes.h"
70 #include "tm.h"
71 #include "tree.h"
72 #include "stor-layout.h"
73 #include "stringpool.h"
74 #include "print-tree.h"
75 #include "tree-inline.h"
76 #include "langhooks.h"
77 #include "flags.h"
78 #include "diagnostic.h"
79 #include "gimple-pretty-print.h"
80 #include "params.h"
81 #include "tree-pass.h"
82 #include "coverage.h"
83 #include "predict.h"
84 #include "vec.h"
85 #include "hashtab.h"
86 #include "hash-set.h"
87 #include "machmode.h"
88 #include "hard-reg-set.h"
89 #include "input.h"
90 #include "function.h"
91 #include "dominance.h"
92 #include "cfg.h"
93 #include "cfganal.h"
94 #include "basic-block.h"
95 #include "tree-ssa-alias.h"
96 #include "internal-fn.h"
97 #include "gimple-expr.h"
98 #include "is-a.h"
99 #include "gimple.h"
100 #include "gimple-iterator.h"
101 #include "gimple-ssa.h"
102 #include "tree-cfg.h"
103 #include "tree-phinodes.h"
104 #include "ssa-iterators.h"
105 #include "tree-ssanames.h"
106 #include "tree-ssa-loop-niter.h"
107 #include "tree-ssa-loop.h"
108 #include "hash-map.h"
109 #include "plugin-api.h"
110 #include "ipa-ref.h"
111 #include "cgraph.h"
112 #include "alloc-pool.h"
113 #include "ipa-prop.h"
114 #include "lto-streamer.h"
115 #include "data-streamer.h"
116 #include "tree-streamer.h"
117 #include "ipa-inline.h"
118 #include "cfgloop.h"
119 #include "tree-scalar-evolution.h"
120 #include "ipa-utils.h"
121 #include "cilk.h"
122 #include "cfgexpand.h"
123
124 /* The estimated runtime of a function can easily run into huge numbers with many
125 nested loops. Be sure we can compute time * INLINE_SIZE_SCALE * 2 in an
126 integer. For anything larger we use gcov_type. */
127 #define MAX_TIME 500000
128
129 /* Number of bits in an integer, but we really want to be stable across different
130 hosts. */
131 #define NUM_CONDITIONS 32
132
133 enum predicate_conditions
134 {
135 predicate_false_condition = 0,
136 predicate_not_inlined_condition = 1,
137 predicate_first_dynamic_condition = 2
138 };
139
140 /* Special condition code we use to represent a test that an operand is a
141 compile-time constant. */
142 #define IS_NOT_CONSTANT ERROR_MARK
143 /* Special condition code we use to represent a test that an operand is not
144 changed across an invocation of the function. When an operand IS_NOT_CONSTANT
145 it is always CHANGED; however, e.g. loop invariants can be NOT_CHANGED for a
146 given percentage of executions even when they are not compile-time constants. */
147 #define CHANGED IDENTIFIER_NODE
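/* Worked example of the encoding used below: a clause_t is a bitmap over
   condition numbers 0 .. NUM_CONDITIONS - 1.  Bit predicate_false_condition
   stands for the always-false condition, bit predicate_not_inlined_condition
   for "the function was not inlined", and bits from
   predicate_first_dynamic_condition upwards index summary->conds.  A predicate
   such as

       op0 is not constant && (not inlined || op1 changed)

   where "op0 is not constant" is conds[0] and "op1 changed" is conds[1]
   is therefore stored as the clause bitmaps

       clause[0] = (1 << predicate_not_inlined_condition) | (1 << 3)
       clause[1] = (1 << 2)
       clause[2] = 0   (terminator)

   with clauses kept in decreasing numerical order; see add_clause below.  */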
148
149 /* Holders of ipa cgraph hooks: */
150 static struct cgraph_node_hook_list *function_insertion_hook_holder;
151 static struct cgraph_node_hook_list *node_removal_hook_holder;
152 static struct cgraph_2node_hook_list *node_duplication_hook_holder;
153 static struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
154 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
155 static void inline_node_removal_hook (struct cgraph_node *, void *);
156 static void inline_node_duplication_hook (struct cgraph_node *,
157 struct cgraph_node *, void *);
158 static void inline_edge_removal_hook (struct cgraph_edge *, void *);
159 static void inline_edge_duplication_hook (struct cgraph_edge *,
160 struct cgraph_edge *, void *);
161
162 /* VECtor holding inline summaries.
163 In GGC memory because conditions might point to constant trees. */
164 vec<inline_summary_t, va_gc> *inline_summary_vec;
165 vec<inline_edge_summary_t> inline_edge_summary_vec;
166
167 /* Cached node/edge growths. */
168 vec<int> node_growth_cache;
169 vec<edge_growth_cache_entry> edge_growth_cache;
170
171 /* Edge predicates go here. */
172 static alloc_pool edge_predicate_pool;
173
174 /* Return the true predicate (tautology).
175 We represent it by an empty list of clauses. */
176
177 static inline struct predicate
178 true_predicate (void)
179 {
180 struct predicate p;
181 p.clause[0] = 0;
182 return p;
183 }
184
185
186 /* Return predicate testing single condition number COND. */
187
188 static inline struct predicate
189 single_cond_predicate (int cond)
190 {
191 struct predicate p;
192 p.clause[0] = 1 << cond;
193 p.clause[1] = 0;
194 return p;
195 }
196
197
198 /* Return the false predicate. Its first clause requires the false condition. */
199
200 static inline struct predicate
201 false_predicate (void)
202 {
203 return single_cond_predicate (predicate_false_condition);
204 }
205
206
207 /* Return true if P is (true). */
208
209 static inline bool
210 true_predicate_p (struct predicate *p)
211 {
212 return !p->clause[0];
213 }
214
215
216 /* Return true if P is (false). */
217
218 static inline bool
219 false_predicate_p (struct predicate *p)
220 {
221 if (p->clause[0] == (1 << predicate_false_condition))
222 {
223 gcc_checking_assert (!p->clause[1]
224 && p->clause[0] == 1 << predicate_false_condition);
225 return true;
226 }
227 return false;
228 }
229
230
231 /* Return predicate that is set true when function is not inlined. */
232
233 static inline struct predicate
234 not_inlined_predicate (void)
235 {
236 return single_cond_predicate (predicate_not_inlined_condition);
237 }
238
239 /* Simple description of whether a memory load or a condition refers to a load
240 from an aggregate and if so, how and where from in the aggregate.
241 Individual fields have the same meaning as fields with the same name in
242 struct condition. */
243
244 struct agg_position_info
245 {
246 HOST_WIDE_INT offset;
247 bool agg_contents;
248 bool by_ref;
249 };
250
251 /* Add a condition to the condition list CONDS. AGGPOS describes whether the
252 used operand is loaded from an aggregate and where in the aggregate it is.
253 It can be NULL, which means this is not a load from an aggregate. */
254
255 static struct predicate
256 add_condition (struct inline_summary *summary, int operand_num,
257 struct agg_position_info *aggpos,
258 enum tree_code code, tree val)
259 {
260 int i;
261 struct condition *c;
262 struct condition new_cond;
263 HOST_WIDE_INT offset;
264 bool agg_contents, by_ref;
265
266 if (aggpos)
267 {
268 offset = aggpos->offset;
269 agg_contents = aggpos->agg_contents;
270 by_ref = aggpos->by_ref;
271 }
272 else
273 {
274 offset = 0;
275 agg_contents = false;
276 by_ref = false;
277 }
278
279 gcc_checking_assert (operand_num >= 0);
280 for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
281 {
282 if (c->operand_num == operand_num
283 && c->code == code
284 && c->val == val
285 && c->agg_contents == agg_contents
286 && (!agg_contents || (c->offset == offset && c->by_ref == by_ref)))
287 return single_cond_predicate (i + predicate_first_dynamic_condition);
288 }
289 /* Too many conditions. Give up and return constant true. */
290 if (i == NUM_CONDITIONS - predicate_first_dynamic_condition)
291 return true_predicate ();
292
293 new_cond.operand_num = operand_num;
294 new_cond.code = code;
295 new_cond.val = val;
296 new_cond.agg_contents = agg_contents;
297 new_cond.by_ref = by_ref;
298 new_cond.offset = offset;
299 vec_safe_push (summary->conds, new_cond);
300 return single_cond_predicate (i + predicate_first_dynamic_condition);
301 }
302
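/* For example, the first call add_condition (summary, 0, NULL, EQ_EXPR, val)
   records "op0 == val" as summary->conds[0] and returns the predicate testing
   condition number predicate_first_dynamic_condition; an identical later call
   finds the existing entry and returns the same predicate instead of growing
   the table.  Once all NUM_CONDITIONS - predicate_first_dynamic_condition
   dynamic slots are used, the conservative answer is the true predicate.  */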
303
304 /* Add clause CLAUSE into the predicate P. */
305
306 static inline void
307 add_clause (conditions conditions, struct predicate *p, clause_t clause)
308 {
309 int i;
310 int i2;
311 int insert_here = -1;
312 int c1, c2;
313
314 /* True clause. */
315 if (!clause)
316 return;
317
318 /* False clause makes the whole predicate false. Kill the other variants. */
319 if (clause == (1 << predicate_false_condition))
320 {
321 p->clause[0] = (1 << predicate_false_condition);
322 p->clause[1] = 0;
323 return;
324 }
325 if (false_predicate_p (p))
326 return;
327
328 /* No one should be silly enough to add false into nontrivial clauses. */
329 gcc_checking_assert (!(clause & (1 << predicate_false_condition)));
330
331 /* Look where to insert the clause. At the same time prune out
332 clauses of P that are implied by the new clause and thus
333 redundant. */
334 for (i = 0, i2 = 0; i <= MAX_CLAUSES; i++)
335 {
336 p->clause[i2] = p->clause[i];
337
338 if (!p->clause[i])
339 break;
340
341 /* If p->clause[i] implies clause, there is nothing to add. */
342 if ((p->clause[i] & clause) == p->clause[i])
343 {
344 /* We had nothing to add, none of clauses should've become
345 redundant. */
346 gcc_checking_assert (i == i2);
347 return;
348 }
349
350 if (p->clause[i] < clause && insert_here < 0)
351 insert_here = i2;
352
353 /* If clause implies p->clause[i], then p->clause[i] becomes redundant.
354 Otherwise the p->clause[i] has to stay. */
355 if ((p->clause[i] & clause) != clause)
356 i2++;
357 }
358
359 /* Look for clauses that are obviously true. I.e.
360 op0 == 5 || op0 != 5. */
361 for (c1 = predicate_first_dynamic_condition; c1 < NUM_CONDITIONS; c1++)
362 {
363 condition *cc1;
364 if (!(clause & (1 << c1)))
365 continue;
366 cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
367 /* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
368 and thus there is no point in looking for them. */
369 if (cc1->code == CHANGED || cc1->code == IS_NOT_CONSTANT)
370 continue;
371 for (c2 = c1 + 1; c2 < NUM_CONDITIONS; c2++)
372 if (clause & (1 << c2))
373 {
374 condition *cc1 =
375 &(*conditions)[c1 - predicate_first_dynamic_condition];
376 condition *cc2 =
377 &(*conditions)[c2 - predicate_first_dynamic_condition];
378 if (cc1->operand_num == cc2->operand_num
379 && cc1->val == cc2->val
380 && cc2->code != IS_NOT_CONSTANT
381 && cc2->code != CHANGED
382 && cc1->code == invert_tree_comparison (cc2->code,
383 HONOR_NANS (cc1->val)))
384 return;
385 }
386 }
387
388
389 /* We ran out of variants. Be conservative in the positive direction. */
390 if (i2 == MAX_CLAUSES)
391 return;
392 /* Keep clauses in decreasing order. This makes equivalence testing easy. */
393 p->clause[i2 + 1] = 0;
394 if (insert_here >= 0)
395 for (; i2 > insert_here; i2--)
396 p->clause[i2] = p->clause[i2 - 1];
397 else
398 insert_here = i2;
399 p->clause[insert_here] = clause;
400 }
401
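/* For example, if P currently consists of the single clause
   (op0 changed || op1 changed) and the clause (op0 changed) is added, the old
   clause is implied by the new one and is dropped, leaving just (op0 changed).
   Conversely, adding (op0 changed || op1 changed) to a predicate that already
   contains (op0 changed) is a no-op.  */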
402
403 /* Return P & P2. */
404
405 static struct predicate
406 and_predicates (conditions conditions,
407 struct predicate *p, struct predicate *p2)
408 {
409 struct predicate out = *p;
410 int i;
411
412 /* Avoid busy work. */
413 if (false_predicate_p (p2) || true_predicate_p (p))
414 return *p2;
415 if (false_predicate_p (p) || true_predicate_p (p2))
416 return *p;
417
418 /* See how far predicates match. */
419 for (i = 0; p->clause[i] && p->clause[i] == p2->clause[i]; i++)
420 {
421 gcc_checking_assert (i < MAX_CLAUSES);
422 }
423
424 /* Combine the rest of the predicates. */
425 for (; p2->clause[i]; i++)
426 {
427 gcc_checking_assert (i < MAX_CLAUSES);
428 add_clause (conditions, &out, p2->clause[i]);
429 }
430 return out;
431 }
432
433
434 /* Return true if predicates are obviously equal. */
435
436 static inline bool
437 predicates_equal_p (struct predicate *p, struct predicate *p2)
438 {
439 int i;
440 for (i = 0; p->clause[i]; i++)
441 {
442 gcc_checking_assert (i < MAX_CLAUSES);
443 gcc_checking_assert (p->clause[i] > p->clause[i + 1]);
444 gcc_checking_assert (!p2->clause[i]
445 || p2->clause[i] > p2->clause[i + 1]);
446 if (p->clause[i] != p2->clause[i])
447 return false;
448 }
449 return !p2->clause[i];
450 }
451
452
453 /* Return P | P2. */
454
455 static struct predicate
456 or_predicates (conditions conditions,
457 struct predicate *p, struct predicate *p2)
458 {
459 struct predicate out = true_predicate ();
460 int i, j;
461
462 /* Avoid busy work. */
463 if (false_predicate_p (p2) || true_predicate_p (p))
464 return *p;
465 if (false_predicate_p (p) || true_predicate_p (p2))
466 return *p2;
467 if (predicates_equal_p (p, p2))
468 return *p;
469
470 /* OK, combine the predicates. */
471 for (i = 0; p->clause[i]; i++)
472 for (j = 0; p2->clause[j]; j++)
473 {
474 gcc_checking_assert (i < MAX_CLAUSES && j < MAX_CLAUSES);
475 add_clause (conditions, &out, p->clause[i] | p2->clause[j]);
476 }
477 return out;
478 }
479
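/* In this representation conjunction is cheap and disjunction distributes:
   and_predicates simply adds every clause of P2 into P, so (c1) & (c2) yields
   the two-clause predicate c1 && c2, while or_predicates forms the union of
   every pair of clauses, so (c1 && c2) | (c3) yields (c1 || c3) && (c2 || c3).
   Both go through add_clause and therefore stay within the MAX_CLAUSES bound,
   erring towards "true" when the bound is hit.  */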
480
481 /* Given a partial truth assignment in POSSIBLE_TRUTHS, return false
482 if predicate P is known to be false. */
483
484 static bool
485 evaluate_predicate (struct predicate *p, clause_t possible_truths)
486 {
487 int i;
488
489 /* True remains true. */
490 if (true_predicate_p (p))
491 return true;
492
493 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
494
495 /* See if we can find clause we can disprove. */
496 for (i = 0; p->clause[i]; i++)
497 {
498 gcc_checking_assert (i < MAX_CLAUSES);
499 if (!(p->clause[i] & possible_truths))
500 return false;
501 }
502 return true;
503 }
504
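/* For example, with POSSIBLE_TRUTHS = 1 << 2, i.e. only condition 2 may be
   true, the predicate (c2) && (c2 || c3) evaluates to true because every
   clause intersects POSSIBLE_TRUTHS, while the predicate (c3) evaluates to
   false because its only clause contains no possibly-true condition.  */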
505 /* Return the probability in range 0...REG_BR_PROB_BASE that the predicated
506 instruction will be recomputed per invocation of the inlined call. */
507
508 static int
509 predicate_probability (conditions conds,
510 struct predicate *p, clause_t possible_truths,
511 vec<inline_param_summary> inline_param_summary)
512 {
513 int i;
514 int combined_prob = REG_BR_PROB_BASE;
515
516 /* True remains true. */
517 if (true_predicate_p (p))
518 return REG_BR_PROB_BASE;
519
520 if (false_predicate_p (p))
521 return 0;
522
523 gcc_assert (!(possible_truths & (1 << predicate_false_condition)));
524
525 /* See if we can find clause we can disprove. */
526 for (i = 0; p->clause[i]; i++)
527 {
528 gcc_checking_assert (i < MAX_CLAUSES);
529 if (!(p->clause[i] & possible_truths))
530 return 0;
531 else
532 {
533 int this_prob = 0;
534 int i2;
535 if (!inline_param_summary.exists ())
536 return REG_BR_PROB_BASE;
537 for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
538 if ((p->clause[i] & possible_truths) & (1 << i2))
539 {
540 if (i2 >= predicate_first_dynamic_condition)
541 {
542 condition *c =
543 &(*conds)[i2 - predicate_first_dynamic_condition];
544 if (c->code == CHANGED
545 && (c->operand_num <
546 (int) inline_param_summary.length ()))
547 {
548 int iprob =
549 inline_param_summary[c->operand_num].change_prob;
550 this_prob = MAX (this_prob, iprob);
551 }
552 else
553 this_prob = REG_BR_PROB_BASE;
554 }
555 else
556 this_prob = REG_BR_PROB_BASE;
557 }
558 combined_prob = MIN (this_prob, combined_prob);
559 if (!combined_prob)
560 return 0;
561 }
562 }
563 return combined_prob;
564 }
565
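/* For example, for the predicate (op0 changed) && (op1 changed), where both
   conditions are possibly true and the parameters have change probabilities
   of 50% and 25% of REG_BR_PROB_BASE, each clause contributes the largest
   probability among its conditions and the clauses combine by MIN, so the
   predicated instruction is estimated to be recomputed in 25% of the
   invocations.  */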
566
567 /* Dump conditional COND. */
568
569 static void
570 dump_condition (FILE *f, conditions conditions, int cond)
571 {
572 condition *c;
573 if (cond == predicate_false_condition)
574 fprintf (f, "false");
575 else if (cond == predicate_not_inlined_condition)
576 fprintf (f, "not inlined");
577 else
578 {
579 c = &(*conditions)[cond - predicate_first_dynamic_condition];
580 fprintf (f, "op%i", c->operand_num);
581 if (c->agg_contents)
582 fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
583 c->by_ref ? "ref " : "", c->offset);
584 if (c->code == IS_NOT_CONSTANT)
585 {
586 fprintf (f, " not constant");
587 return;
588 }
589 if (c->code == CHANGED)
590 {
591 fprintf (f, " changed");
592 return;
593 }
594 fprintf (f, " %s ", op_symbol_code (c->code));
595 print_generic_expr (f, c->val, 1);
596 }
597 }
598
599
600 /* Dump clause CLAUSE. */
601
602 static void
603 dump_clause (FILE *f, conditions conds, clause_t clause)
604 {
605 int i;
606 bool found = false;
607 fprintf (f, "(");
608 if (!clause)
609 fprintf (f, "true");
610 for (i = 0; i < NUM_CONDITIONS; i++)
611 if (clause & (1 << i))
612 {
613 if (found)
614 fprintf (f, " || ");
615 found = true;
616 dump_condition (f, conds, i);
617 }
618 fprintf (f, ")");
619 }
620
621
622 /* Dump predicate PREDICATE. */
623
624 static void
625 dump_predicate (FILE *f, conditions conds, struct predicate *pred)
626 {
627 int i;
628 if (true_predicate_p (pred))
629 dump_clause (f, conds, 0);
630 else
631 for (i = 0; pred->clause[i]; i++)
632 {
633 if (i)
634 fprintf (f, " && ");
635 dump_clause (f, conds, pred->clause[i]);
636 }
637 fprintf (f, "\n");
638 }
639
640
641 /* Dump inline hints. */
642 void
643 dump_inline_hints (FILE *f, inline_hints hints)
644 {
645 if (!hints)
646 return;
647 fprintf (f, "inline hints:");
648 if (hints & INLINE_HINT_indirect_call)
649 {
650 hints &= ~INLINE_HINT_indirect_call;
651 fprintf (f, " indirect_call");
652 }
653 if (hints & INLINE_HINT_loop_iterations)
654 {
655 hints &= ~INLINE_HINT_loop_iterations;
656 fprintf (f, " loop_iterations");
657 }
658 if (hints & INLINE_HINT_loop_stride)
659 {
660 hints &= ~INLINE_HINT_loop_stride;
661 fprintf (f, " loop_stride");
662 }
663 if (hints & INLINE_HINT_same_scc)
664 {
665 hints &= ~INLINE_HINT_same_scc;
666 fprintf (f, " same_scc");
667 }
668 if (hints & INLINE_HINT_in_scc)
669 {
670 hints &= ~INLINE_HINT_in_scc;
671 fprintf (f, " in_scc");
672 }
673 if (hints & INLINE_HINT_cross_module)
674 {
675 hints &= ~INLINE_HINT_cross_module;
676 fprintf (f, " cross_module");
677 }
678 if (hints & INLINE_HINT_declared_inline)
679 {
680 hints &= ~INLINE_HINT_declared_inline;
681 fprintf (f, " declared_inline");
682 }
683 if (hints & INLINE_HINT_array_index)
684 {
685 hints &= ~INLINE_HINT_array_index;
686 fprintf (f, " array_index");
687 }
688 if (hints & INLINE_HINT_known_hot)
689 {
690 hints &= ~INLINE_HINT_known_hot;
691 fprintf (f, " known_hot");
692 }
693 gcc_assert (!hints);
694 }
695
696
697 /* Record SIZE and TIME under predicate PRED into the inline summary. */
698
699 static void
700 account_size_time (struct inline_summary *summary, int size, int time,
701 struct predicate *pred)
702 {
703 size_time_entry *e;
704 bool found = false;
705 int i;
706
707 if (false_predicate_p (pred))
708 return;
709
710 /* We need to create an initial empty unconditional entry, but otherwise
711 we don't need to account empty times and sizes. */
712 if (!size && !time && summary->entry)
713 return;
714
715 /* Watch overflow that might result from insane profiles. */
716 if (time > MAX_TIME * INLINE_TIME_SCALE)
717 time = MAX_TIME * INLINE_TIME_SCALE;
718 gcc_assert (time >= 0);
719
720 for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
721 if (predicates_equal_p (&e->predicate, pred))
722 {
723 found = true;
724 break;
725 }
726 if (i == 256)
727 {
728 i = 0;
729 found = true;
730 e = &(*summary->entry)[0];
731 gcc_assert (!e->predicate.clause[0]);
732 if (dump_file && (dump_flags & TDF_DETAILS))
733 fprintf (dump_file,
734 "\t\tReached limit on number of entries, "
735 "ignoring the predicate.");
736 }
737 if (dump_file && (dump_flags & TDF_DETAILS) && (time || size))
738 {
739 fprintf (dump_file,
740 "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate:",
741 ((double) size) / INLINE_SIZE_SCALE,
742 ((double) time) / INLINE_TIME_SCALE, found ? "" : "new ");
743 dump_predicate (dump_file, summary->conds, pred);
744 }
745 if (!found)
746 {
747 struct size_time_entry new_entry;
748 new_entry.size = size;
749 new_entry.time = time;
750 new_entry.predicate = *pred;
751 vec_safe_push (summary->entry, new_entry);
752 }
753 else
754 {
755 e->size += size;
756 e->time += time;
757 if (e->time > MAX_TIME * INLINE_TIME_SCALE)
758 e->time = MAX_TIME * INLINE_TIME_SCALE;
759 }
760 }
761
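/* For example, accounting size 4 and later size 6 under the same predicate
   results in a single size_time_entry with size 10; once 256 distinct
   predicates have been recorded, further contributions are folded into the
   unconditional entry at index 0.  */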
762 /* Set predicate for edge E. */
763
764 static void
765 edge_set_predicate (struct cgraph_edge *e, struct predicate *predicate)
766 {
767 struct inline_edge_summary *es = inline_edge_summary (e);
768
769 /* If the edge is determined to be never executed, redirect it
770 to BUILT_IN_UNREACHABLE to save the inliner from inlining into it. */
771 if (predicate && false_predicate_p (predicate) && e->callee)
772 {
773 struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
774
775 e->redirect_callee (cgraph_node::get_create
776 (builtin_decl_implicit (BUILT_IN_UNREACHABLE)));
777 e->inline_failed = CIF_UNREACHABLE;
778 if (callee)
779 callee->remove_symbol_and_inline_clones ();
780 }
781 if (predicate && !true_predicate_p (predicate))
782 {
783 if (!es->predicate)
784 es->predicate = (struct predicate *) pool_alloc (edge_predicate_pool);
785 *es->predicate = *predicate;
786 }
787 else
788 {
789 if (es->predicate)
790 pool_free (edge_predicate_pool, es->predicate);
791 es->predicate = NULL;
792 }
793 }
794
795 /* Set predicate for hint *P. */
796
797 static void
798 set_hint_predicate (struct predicate **p, struct predicate new_predicate)
799 {
800 if (false_predicate_p (&new_predicate) || true_predicate_p (&new_predicate))
801 {
802 if (*p)
803 pool_free (edge_predicate_pool, *p);
804 *p = NULL;
805 }
806 else
807 {
808 if (!*p)
809 *p = (struct predicate *) pool_alloc (edge_predicate_pool);
810 **p = new_predicate;
811 }
812 }
813
814
815 /* KNOWN_VALS is a partial mapping of parameters of NODE to constant values.
816 KNOWN_AGGS is a vector of aggregate jump functions for each parameter.
817 Return the clause of possible truths. When INLINE_P is true, assume that
818 we are inlining.
819
820 ERROR_MARK means compile time invariant. */
821
822 static clause_t
823 evaluate_conditions_for_known_args (struct cgraph_node *node,
824 bool inline_p,
825 vec<tree> known_vals,
826 vec<ipa_agg_jump_function_p>
827 known_aggs)
828 {
829 clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
830 struct inline_summary *info = inline_summary (node);
831 int i;
832 struct condition *c;
833
834 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
835 {
836 tree val;
837 tree res;
838
839 /* We allow call stmt to have fewer arguments than the callee function
840 (especially for K&R style programs). So bound check here (we assume
841 known_aggs vector, if non-NULL, has the same length as
842 known_vals). */
843 gcc_checking_assert (!known_aggs.exists ()
844 || (known_vals.length () == known_aggs.length ()));
845 if (c->operand_num >= (int) known_vals.length ())
846 {
847 clause |= 1 << (i + predicate_first_dynamic_condition);
848 continue;
849 }
850
851 if (c->agg_contents)
852 {
853 struct ipa_agg_jump_function *agg;
854
855 if (c->code == CHANGED
856 && !c->by_ref
857 && (known_vals[c->operand_num] == error_mark_node))
858 continue;
859
860 if (known_aggs.exists ())
861 {
862 agg = known_aggs[c->operand_num];
863 val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
864 }
865 else
866 val = NULL_TREE;
867 }
868 else
869 {
870 val = known_vals[c->operand_num];
871 if (val == error_mark_node && c->code != CHANGED)
872 val = NULL_TREE;
873 }
874
875 if (!val)
876 {
877 clause |= 1 << (i + predicate_first_dynamic_condition);
878 continue;
879 }
880 if (c->code == IS_NOT_CONSTANT || c->code == CHANGED)
881 continue;
882
883 if (operand_equal_p (TYPE_SIZE (TREE_TYPE (c->val)),
884 TYPE_SIZE (TREE_TYPE (val)), 0))
885 {
886 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
887
888 res = val
889 ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
890 : NULL;
891
892 if (res && integer_zerop (res))
893 continue;
894 }
895 clause |= 1 << (i + predicate_first_dynamic_condition);
896 }
897 return clause;
898 }
899
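/* For example, if condition 2 is "op0 == 4" and the caller passes the
   constant 5 for parameter 0, the comparison folds to zero and bit 2 stays
   clear: the condition is known to be false.  If the caller passes 4, or
   passes no known constant at all, bit 2 is set to record that the condition
   may be true.  When INLINE_P is false the not-inlined bit is set as well.  */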
900
901 /* Work out what conditions might be true at invocation of E. */
902
903 static void
904 evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
905 clause_t *clause_ptr,
906 vec<tree> *known_vals_ptr,
907 vec<ipa_polymorphic_call_context>
908 *known_contexts_ptr,
909 vec<ipa_agg_jump_function_p> *known_aggs_ptr)
910 {
911 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
912 struct inline_summary *info = inline_summary (callee);
913 vec<tree> known_vals = vNULL;
914 vec<ipa_agg_jump_function_p> known_aggs = vNULL;
915
916 if (clause_ptr)
917 *clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
918 if (known_vals_ptr)
919 known_vals_ptr->create (0);
920 if (known_contexts_ptr)
921 known_contexts_ptr->create (0);
922
923 if (ipa_node_params_vector.exists ()
924 && !e->call_stmt_cannot_inline_p
925 && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
926 {
927 struct ipa_node_params *parms_info;
928 struct ipa_edge_args *args = IPA_EDGE_REF (e);
929 struct inline_edge_summary *es = inline_edge_summary (e);
930 int i, count = ipa_get_cs_argument_count (args);
931
932 if (e->caller->global.inlined_to)
933 parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
934 else
935 parms_info = IPA_NODE_REF (e->caller);
936
937 if (count && (info->conds || known_vals_ptr))
938 known_vals.safe_grow_cleared (count);
939 if (count && (info->conds || known_aggs_ptr))
940 known_aggs.safe_grow_cleared (count);
941 if (count && known_contexts_ptr)
942 known_contexts_ptr->safe_grow_cleared (count);
943
944 for (i = 0; i < count; i++)
945 {
946 struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
947 tree cst = ipa_value_from_jfunc (parms_info, jf);
948 if (cst)
949 {
950 gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
951 if (known_vals.exists ())
952 known_vals[i] = cst;
953 }
954 else if (inline_p && !es->param[i].change_prob)
955 known_vals[i] = error_mark_node;
956
957 if (known_contexts_ptr)
958 (*known_contexts_ptr)[i] = ipa_context_from_jfunc (parms_info, e,
959 i, jf);
960 /* TODO: When IPA-CP starts propagating and merging aggregate jump
961 functions, use its knowledge of the caller too, just like the
962 scalar case above. */
963 known_aggs[i] = &jf->agg;
964 }
965 }
966
967 if (clause_ptr)
968 *clause_ptr = evaluate_conditions_for_known_args (callee, inline_p,
969 known_vals, known_aggs);
970
971 if (known_vals_ptr)
972 *known_vals_ptr = known_vals;
973 else
974 known_vals.release ();
975
976 if (known_aggs_ptr)
977 *known_aggs_ptr = known_aggs;
978 else
979 known_aggs.release ();
980 }
981
982
983 /* Allocate the inline summary vector or resize it to cover all cgraph nodes. */
984
985 static void
986 inline_summary_alloc (void)
987 {
988 if (!node_removal_hook_holder)
989 node_removal_hook_holder =
990 symtab->add_cgraph_removal_hook (&inline_node_removal_hook, NULL);
991 if (!edge_removal_hook_holder)
992 edge_removal_hook_holder =
993 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
994 if (!node_duplication_hook_holder)
995 node_duplication_hook_holder =
996 symtab->add_cgraph_duplication_hook (&inline_node_duplication_hook, NULL);
997 if (!edge_duplication_hook_holder)
998 edge_duplication_hook_holder =
999 symtab->add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
1000
1001 if (vec_safe_length (inline_summary_vec) <= (unsigned) symtab->cgraph_max_uid)
1002 vec_safe_grow_cleared (inline_summary_vec, symtab->cgraph_max_uid + 1);
1003 if (inline_edge_summary_vec.length () <= (unsigned) symtab->edges_max_uid)
1004 inline_edge_summary_vec.safe_grow_cleared (symtab->edges_max_uid + 1);
1005 if (!edge_predicate_pool)
1006 edge_predicate_pool = create_alloc_pool ("edge predicates",
1007 sizeof (struct predicate), 10);
1008 }
1009
1010 /* We are called multiple times for a given function; clear
1011 data from the previous run so they do not accumulate. */
1012
1013 static void
1014 reset_inline_edge_summary (struct cgraph_edge *e)
1015 {
1016 if (e->uid < (int) inline_edge_summary_vec.length ())
1017 {
1018 struct inline_edge_summary *es = inline_edge_summary (e);
1019
1020 es->call_stmt_size = es->call_stmt_time = 0;
1021 if (es->predicate)
1022 pool_free (edge_predicate_pool, es->predicate);
1023 es->predicate = NULL;
1024 es->param.release ();
1025 }
1026 }
1027
1028 /* We are called multiple times for a given function; clear
1029 data from the previous run so they do not accumulate. */
1030
1031 static void
1032 reset_inline_summary (struct cgraph_node *node)
1033 {
1034 struct inline_summary *info = inline_summary (node);
1035 struct cgraph_edge *e;
1036
1037 info->self_size = info->self_time = 0;
1038 info->estimated_stack_size = 0;
1039 info->estimated_self_stack_size = 0;
1040 info->stack_frame_offset = 0;
1041 info->size = 0;
1042 info->time = 0;
1043 info->growth = 0;
1044 info->scc_no = 0;
1045 if (info->loop_iterations)
1046 {
1047 pool_free (edge_predicate_pool, info->loop_iterations);
1048 info->loop_iterations = NULL;
1049 }
1050 if (info->loop_stride)
1051 {
1052 pool_free (edge_predicate_pool, info->loop_stride);
1053 info->loop_stride = NULL;
1054 }
1055 if (info->array_index)
1056 {
1057 pool_free (edge_predicate_pool, info->array_index);
1058 info->array_index = NULL;
1059 }
1060 vec_free (info->conds);
1061 vec_free (info->entry);
1062 for (e = node->callees; e; e = e->next_callee)
1063 reset_inline_edge_summary (e);
1064 for (e = node->indirect_calls; e; e = e->next_callee)
1065 reset_inline_edge_summary (e);
1066 }
1067
1068 /* Hook that is called by cgraph.c when a node is removed. */
1069
1070 static void
1071 inline_node_removal_hook (struct cgraph_node *node,
1072 void *data ATTRIBUTE_UNUSED)
1073 {
1074 struct inline_summary *info;
1075 if (vec_safe_length (inline_summary_vec) <= (unsigned) node->uid)
1076 return;
1077 info = inline_summary (node);
1078 reset_inline_summary (node);
1079 memset (info, 0, sizeof (inline_summary_t));
1080 }
1081
1082 /* Remap predicate P of the former function to be a predicate of the
1083 duplicated function. POSSIBLE_TRUTHS is the clause of possible truths in
1084 the duplicated node, INFO is the inline summary of the duplicated node. */
1085
1086 static struct predicate
1087 remap_predicate_after_duplication (struct predicate *p,
1088 clause_t possible_truths,
1089 struct inline_summary *info)
1090 {
1091 struct predicate new_predicate = true_predicate ();
1092 int j;
1093 for (j = 0; p->clause[j]; j++)
1094 if (!(possible_truths & p->clause[j]))
1095 {
1096 new_predicate = false_predicate ();
1097 break;
1098 }
1099 else
1100 add_clause (info->conds, &new_predicate,
1101 possible_truths & p->clause[j]);
1102 return new_predicate;
1103 }
1104
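/* For example, a summary entry guarded by (c2 || c3) && (c4) in the original
   node is remapped in a clone whose POSSIBLE_TRUTHS contains only bits 2 and
   4: the first clause is masked down to (c2) and the second stays (c4).  Had
   POSSIBLE_TRUTHS contained neither bit 2 nor bit 3, the whole predicate
   would collapse to false and the caller treats the guarded size as
   optimized out.  */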
1105 /* Same as remap_predicate_after_duplication but handle hint predicate *P.
1106 Additionally take care of allocating a new memory slot for the updated
1107 predicate and set it to NULL when it becomes true or false (and thus uninteresting).
1108 */
1109
1110 static void
1111 remap_hint_predicate_after_duplication (struct predicate **p,
1112 clause_t possible_truths,
1113 struct inline_summary *info)
1114 {
1115 struct predicate new_predicate;
1116
1117 if (!*p)
1118 return;
1119
1120 new_predicate = remap_predicate_after_duplication (*p,
1121 possible_truths, info);
1122 /* We do not want to free previous predicate; it is used by node origin. */
1123 *p = NULL;
1124 set_hint_predicate (p, new_predicate);
1125 }
1126
1127
1128 /* Hook that is called by cgraph.c when a node is duplicated. */
1129
1130 static void
1131 inline_node_duplication_hook (struct cgraph_node *src,
1132 struct cgraph_node *dst,
1133 ATTRIBUTE_UNUSED void *data)
1134 {
1135 struct inline_summary *info;
1136 inline_summary_alloc ();
1137 info = inline_summary (dst);
1138 memcpy (info, inline_summary (src), sizeof (struct inline_summary));
1139 /* TODO: as an optimization, we may avoid copying conditions
1140 that are known to be false or true. */
1141 info->conds = vec_safe_copy (info->conds);
1142
1143 /* When there are any replacements in the function body, see if we can figure
1144 out that something was optimized out. */
1145 if (ipa_node_params_vector.exists () && dst->clone.tree_map)
1146 {
1147 vec<size_time_entry, va_gc> *entry = info->entry;
1148 /* Use SRC parm info since it may not be copied yet. */
1149 struct ipa_node_params *parms_info = IPA_NODE_REF (src);
1150 vec<tree> known_vals = vNULL;
1151 int count = ipa_get_param_count (parms_info);
1152 int i, j;
1153 clause_t possible_truths;
1154 struct predicate true_pred = true_predicate ();
1155 size_time_entry *e;
1156 int optimized_out_size = 0;
1157 bool inlined_to_p = false;
1158 struct cgraph_edge *edge;
1159
1160 info->entry = 0;
1161 known_vals.safe_grow_cleared (count);
1162 for (i = 0; i < count; i++)
1163 {
1164 struct ipa_replace_map *r;
1165
1166 for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
1167 {
1168 if (((!r->old_tree && r->parm_num == i)
1169 || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
1170 && r->replace_p && !r->ref_p)
1171 {
1172 known_vals[i] = r->new_tree;
1173 break;
1174 }
1175 }
1176 }
1177 possible_truths = evaluate_conditions_for_known_args (dst, false,
1178 known_vals,
1179 vNULL);
1180 known_vals.release ();
1181
1182 account_size_time (info, 0, 0, &true_pred);
1183
1184 /* Remap size_time vectors.
1185 Simplify the predicate by pruning out alternatives that are known
1186 to be false.
1187 TODO: as an optimization, we can also eliminate conditions known
1188 to be true. */
1189 for (i = 0; vec_safe_iterate (entry, i, &e); i++)
1190 {
1191 struct predicate new_predicate;
1192 new_predicate = remap_predicate_after_duplication (&e->predicate,
1193 possible_truths,
1194 info);
1195 if (false_predicate_p (&new_predicate))
1196 optimized_out_size += e->size;
1197 else
1198 account_size_time (info, e->size, e->time, &new_predicate);
1199 }
1200
1201 /* Remap edge predicates with the same simplification as above.
1202 Also copy constantness arrays. */
1203 for (edge = dst->callees; edge; edge = edge->next_callee)
1204 {
1205 struct predicate new_predicate;
1206 struct inline_edge_summary *es = inline_edge_summary (edge);
1207
1208 if (!edge->inline_failed)
1209 inlined_to_p = true;
1210 if (!es->predicate)
1211 continue;
1212 new_predicate = remap_predicate_after_duplication (es->predicate,
1213 possible_truths,
1214 info);
1215 if (false_predicate_p (&new_predicate)
1216 && !false_predicate_p (es->predicate))
1217 {
1218 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1219 edge->frequency = 0;
1220 }
1221 edge_set_predicate (edge, &new_predicate);
1222 }
1223
1224 /* Remap indirect edge predicates with the same simplification as above.
1225 Also copy constantness arrays. */
1226 for (edge = dst->indirect_calls; edge; edge = edge->next_callee)
1227 {
1228 struct predicate new_predicate;
1229 struct inline_edge_summary *es = inline_edge_summary (edge);
1230
1231 gcc_checking_assert (edge->inline_failed);
1232 if (!es->predicate)
1233 continue;
1234 new_predicate = remap_predicate_after_duplication (es->predicate,
1235 possible_truths,
1236 info);
1237 if (false_predicate_p (&new_predicate)
1238 && !false_predicate_p (es->predicate))
1239 {
1240 optimized_out_size += es->call_stmt_size * INLINE_SIZE_SCALE;
1241 edge->frequency = 0;
1242 }
1243 edge_set_predicate (edge, &new_predicate);
1244 }
1245 remap_hint_predicate_after_duplication (&info->loop_iterations,
1246 possible_truths, info);
1247 remap_hint_predicate_after_duplication (&info->loop_stride,
1248 possible_truths, info);
1249 remap_hint_predicate_after_duplication (&info->array_index,
1250 possible_truths, info);
1251
1252 /* If the inliner or someone after the inliner ever starts producing
1253 non-trivial clones, we will get in trouble due to lack of information
1254 about updating self sizes, because the size vectors already contain
1255 sizes of the callees. */
1256 gcc_assert (!inlined_to_p || !optimized_out_size);
1257 }
1258 else
1259 {
1260 info->entry = vec_safe_copy (info->entry);
1261 if (info->loop_iterations)
1262 {
1263 predicate p = *info->loop_iterations;
1264 info->loop_iterations = NULL;
1265 set_hint_predicate (&info->loop_iterations, p);
1266 }
1267 if (info->loop_stride)
1268 {
1269 predicate p = *info->loop_stride;
1270 info->loop_stride = NULL;
1271 set_hint_predicate (&info->loop_stride, p);
1272 }
1273 if (info->array_index)
1274 {
1275 predicate p = *info->array_index;
1276 info->array_index = NULL;
1277 set_hint_predicate (&info->array_index, p);
1278 }
1279 }
1280 inline_update_overall_summary (dst);
1281 }
1282
1283
1284 /* Hook that is called by cgraph.c when an edge is duplicated. */
1285
1286 static void
1287 inline_edge_duplication_hook (struct cgraph_edge *src,
1288 struct cgraph_edge *dst,
1289 ATTRIBUTE_UNUSED void *data)
1290 {
1291 struct inline_edge_summary *info;
1292 struct inline_edge_summary *srcinfo;
1293 inline_summary_alloc ();
1294 info = inline_edge_summary (dst);
1295 srcinfo = inline_edge_summary (src);
1296 memcpy (info, srcinfo, sizeof (struct inline_edge_summary));
1297 info->predicate = NULL;
1298 edge_set_predicate (dst, srcinfo->predicate);
1299 info->param = srcinfo->param.copy ();
1300 }
1301
1302
1303 /* Keep edge cache consistent across edge removal. */
1304
1305 static void
1306 inline_edge_removal_hook (struct cgraph_edge *edge,
1307 void *data ATTRIBUTE_UNUSED)
1308 {
1309 if (edge_growth_cache.exists ())
1310 reset_edge_growth_cache (edge);
1311 reset_inline_edge_summary (edge);
1312 }
1313
1314
1315 /* Initialize growth caches. */
1316
1317 void
1318 initialize_growth_caches (void)
1319 {
1320 if (symtab->edges_max_uid)
1321 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
1322 if (symtab->cgraph_max_uid)
1323 node_growth_cache.safe_grow_cleared (symtab->cgraph_max_uid);
1324 }
1325
1326
1327 /* Free growth caches. */
1328
1329 void
1330 free_growth_caches (void)
1331 {
1332 edge_growth_cache.release ();
1333 node_growth_cache.release ();
1334 }
1335
1336
1337 /* Dump edge summaries associated with NODE and recursively to all clones.
1338 Indent by INDENT. */
1339
1340 static void
1341 dump_inline_edge_summary (FILE *f, int indent, struct cgraph_node *node,
1342 struct inline_summary *info)
1343 {
1344 struct cgraph_edge *edge;
1345 for (edge = node->callees; edge; edge = edge->next_callee)
1346 {
1347 struct inline_edge_summary *es = inline_edge_summary (edge);
1348 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1349 int i;
1350
1351 fprintf (f,
1352 "%*s%s/%i %s\n%*s loop depth:%2i freq:%4i size:%2i"
1353 " time: %2i callee size:%2i stack:%2i",
1354 indent, "", callee->name (), callee->order,
1355 !edge->inline_failed
1356 ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
1357 indent, "", es->loop_depth, edge->frequency,
1358 es->call_stmt_size, es->call_stmt_time,
1359 (int) inline_summary (callee)->size / INLINE_SIZE_SCALE,
1360 (int) inline_summary (callee)->estimated_stack_size);
1361
1362 if (es->predicate)
1363 {
1364 fprintf (f, " predicate: ");
1365 dump_predicate (f, info->conds, es->predicate);
1366 }
1367 else
1368 fprintf (f, "\n");
1369 if (es->param.exists ())
1370 for (i = 0; i < (int) es->param.length (); i++)
1371 {
1372 int prob = es->param[i].change_prob;
1373
1374 if (!prob)
1375 fprintf (f, "%*s op%i is compile time invariant\n",
1376 indent + 2, "", i);
1377 else if (prob != REG_BR_PROB_BASE)
1378 fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
1379 prob * 100.0 / REG_BR_PROB_BASE);
1380 }
1381 if (!edge->inline_failed)
1382 {
1383 fprintf (f, "%*sStack frame offset %i, callee self size %i,"
1384 " callee size %i\n",
1385 indent + 2, "",
1386 (int) inline_summary (callee)->stack_frame_offset,
1387 (int) inline_summary (callee)->estimated_self_stack_size,
1388 (int) inline_summary (callee)->estimated_stack_size);
1389 dump_inline_edge_summary (f, indent + 2, callee, info);
1390 }
1391 }
1392 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
1393 {
1394 struct inline_edge_summary *es = inline_edge_summary (edge);
1395 fprintf (f, "%*sindirect call loop depth:%2i freq:%4i size:%2i"
1396 " time: %2i",
1397 indent, "",
1398 es->loop_depth,
1399 edge->frequency, es->call_stmt_size, es->call_stmt_time);
1400 if (es->predicate)
1401 {
1402 fprintf (f, "predicate: ");
1403 dump_predicate (f, info->conds, es->predicate);
1404 }
1405 else
1406 fprintf (f, "\n");
1407 }
1408 }
1409
1410
1411 void
1412 dump_inline_summary (FILE *f, struct cgraph_node *node)
1413 {
1414 if (node->definition)
1415 {
1416 struct inline_summary *s = inline_summary (node);
1417 size_time_entry *e;
1418 int i;
1419 fprintf (f, "Inline summary for %s/%i", node->name (),
1420 node->order);
1421 if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
1422 fprintf (f, " always_inline");
1423 if (s->inlinable)
1424 fprintf (f, " inlinable");
1425 fprintf (f, "\n self time: %i\n", s->self_time);
1426 fprintf (f, " global time: %i\n", s->time);
1427 fprintf (f, " self size: %i\n", s->self_size);
1428 fprintf (f, " global size: %i\n", s->size);
1429 fprintf (f, " min size: %i\n", s->min_size);
1430 fprintf (f, " self stack: %i\n",
1431 (int) s->estimated_self_stack_size);
1432 fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
1433 if (s->growth)
1434 fprintf (f, " estimated growth:%i\n", (int) s->growth);
1435 if (s->scc_no)
1436 fprintf (f, " In SCC: %i\n", (int) s->scc_no);
1437 for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
1438 {
1439 fprintf (f, " size:%f, time:%f, predicate:",
1440 (double) e->size / INLINE_SIZE_SCALE,
1441 (double) e->time / INLINE_TIME_SCALE);
1442 dump_predicate (f, s->conds, &e->predicate);
1443 }
1444 if (s->loop_iterations)
1445 {
1446 fprintf (f, " loop iterations:");
1447 dump_predicate (f, s->conds, s->loop_iterations);
1448 }
1449 if (s->loop_stride)
1450 {
1451 fprintf (f, " loop stride:");
1452 dump_predicate (f, s->conds, s->loop_stride);
1453 }
1454 if (s->array_index)
1455 {
1456 fprintf (f, " array index:");
1457 dump_predicate (f, s->conds, s->array_index);
1458 }
1459 fprintf (f, " calls:\n");
1460 dump_inline_edge_summary (f, 4, node, s);
1461 fprintf (f, "\n");
1462 }
1463 }
1464
1465 DEBUG_FUNCTION void
1466 debug_inline_summary (struct cgraph_node *node)
1467 {
1468 dump_inline_summary (stderr, node);
1469 }
1470
1471 void
1472 dump_inline_summaries (FILE *f)
1473 {
1474 struct cgraph_node *node;
1475
1476 FOR_EACH_DEFINED_FUNCTION (node)
1477 if (!node->global.inlined_to)
1478 dump_inline_summary (f, node);
1479 }
1480
1481 /* Give initial reasons why inlining would fail on EDGE. This gets either
1482 nullified or usually overwritten by more precise reasons later. */
1483
1484 void
1485 initialize_inline_failed (struct cgraph_edge *e)
1486 {
1487 struct cgraph_node *callee = e->callee;
1488
1489 if (e->indirect_unknown_callee)
1490 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
1491 else if (!callee->definition)
1492 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
1493 else if (callee->local.redefined_extern_inline)
1494 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
1495 else if (e->call_stmt_cannot_inline_p)
1496 e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
1497 else if (cfun && fn_contains_cilk_spawn_p (cfun))
1498 /* We can't inline if the function is spawning a function. */
1499 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
1500 else
1501 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
1502 }
1503
1504 /* Callback of walk_aliased_vdefs. Records the fact that it has been invoked
1505 in the boolean variable pointed to by DATA. */
1506
1507 static bool
1508 mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
1509 void *data)
1510 {
1511 bool *b = (bool *) data;
1512 *b = true;
1513 return true;
1514 }
1515
1516 /* If OP refers to the value of a function parameter, return the corresponding
1517 parameter. */
1518
1519 static tree
1520 unmodified_parm_1 (gimple stmt, tree op)
1521 {
1522 /* SSA_NAME referring to parm default def? */
1523 if (TREE_CODE (op) == SSA_NAME
1524 && SSA_NAME_IS_DEFAULT_DEF (op)
1525 && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
1526 return SSA_NAME_VAR (op);
1527 /* Non-SSA parm reference? */
1528 if (TREE_CODE (op) == PARM_DECL)
1529 {
1530 bool modified = false;
1531
1532 ao_ref refd;
1533 ao_ref_init (&refd, op);
1534 walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
1535 NULL);
1536 if (!modified)
1537 return op;
1538 }
1539 return NULL_TREE;
1540 }
1541
1542 /* If OP refers to the value of a function parameter, return the corresponding
1543 parameter. Also traverse chains of SSA register assignments. */
1544
1545 static tree
1546 unmodified_parm (gimple stmt, tree op)
1547 {
1548 tree res = unmodified_parm_1 (stmt, op);
1549 if (res)
1550 return res;
1551
1552 if (TREE_CODE (op) == SSA_NAME
1553 && !SSA_NAME_IS_DEFAULT_DEF (op)
1554 && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1555 return unmodified_parm (SSA_NAME_DEF_STMT (op),
1556 gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)));
1557 return NULL_TREE;
1558 }
1559
1560 /* If OP refers to a value of a function parameter or value loaded from an
1561 aggregate passed to a parameter (either by value or reference), return TRUE
1562 and store the number of the parameter to *INDEX_P and information whether
1563 and how it has been loaded from an aggregate into *AGGPOS. INFO describes
1564 the function parameters, STMT is the statement in which OP is used or
1565 loaded. */
1566
1567 static bool
1568 unmodified_parm_or_parm_agg_item (struct ipa_node_params *info,
1569 gimple stmt, tree op, int *index_p,
1570 struct agg_position_info *aggpos)
1571 {
1572 tree res = unmodified_parm_1 (stmt, op);
1573
1574 gcc_checking_assert (aggpos);
1575 if (res)
1576 {
1577 *index_p = ipa_get_param_decl_index (info, res);
1578 if (*index_p < 0)
1579 return false;
1580 aggpos->agg_contents = false;
1581 aggpos->by_ref = false;
1582 return true;
1583 }
1584
1585 if (TREE_CODE (op) == SSA_NAME)
1586 {
1587 if (SSA_NAME_IS_DEFAULT_DEF (op)
1588 || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
1589 return false;
1590 stmt = SSA_NAME_DEF_STMT (op);
1591 op = gimple_assign_rhs1 (stmt);
1592 if (!REFERENCE_CLASS_P (op))
1593 return unmodified_parm_or_parm_agg_item (info, stmt, op, index_p,
1594 aggpos);
1595 }
1596
1597 aggpos->agg_contents = true;
1598 return ipa_load_from_parm_agg (info, stmt, op, index_p, &aggpos->offset,
1599 &aggpos->by_ref);
1600 }
1601
1602 /* See if the statement might disappear after inlining.
1603 0 - means not eliminated
1604 1 - half of the statements go away
1605 2 - for sure it is eliminated.
1606 We are not terribly sophisticated, basically looking for simple abstraction
1607 penalty wrappers. */
1608
1609 static int
1610 eliminated_by_inlining_prob (gimple stmt)
1611 {
1612 enum gimple_code code = gimple_code (stmt);
1613 enum tree_code rhs_code;
1614
1615 if (!optimize)
1616 return 0;
1617
1618 switch (code)
1619 {
1620 case GIMPLE_RETURN:
1621 return 2;
1622 case GIMPLE_ASSIGN:
1623 if (gimple_num_ops (stmt) != 2)
1624 return 0;
1625
1626 rhs_code = gimple_assign_rhs_code (stmt);
1627
1628 /* Casts of parameters, loads from parameters passed by reference
1629 and stores to return value or parameters are often free after
1630 inlining due to SRA and further combining.
1631 Assume that half of the statements go away. */
1632 if (CONVERT_EXPR_CODE_P (rhs_code)
1633 || rhs_code == VIEW_CONVERT_EXPR
1634 || rhs_code == ADDR_EXPR
1635 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1636 {
1637 tree rhs = gimple_assign_rhs1 (stmt);
1638 tree lhs = gimple_assign_lhs (stmt);
1639 tree inner_rhs = get_base_address (rhs);
1640 tree inner_lhs = get_base_address (lhs);
1641 bool rhs_free = false;
1642 bool lhs_free = false;
1643
1644 if (!inner_rhs)
1645 inner_rhs = rhs;
1646 if (!inner_lhs)
1647 inner_lhs = lhs;
1648
1649 /* Reads of parameter are expected to be free. */
1650 if (unmodified_parm (stmt, inner_rhs))
1651 rhs_free = true;
1652 /* Match expressions of form &this->field. Those will most likely
1653 combine with something upstream after inlining. */
1654 else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
1655 {
1656 tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
1657 if (TREE_CODE (op) == PARM_DECL)
1658 rhs_free = true;
1659 else if (TREE_CODE (op) == MEM_REF
1660 && unmodified_parm (stmt, TREE_OPERAND (op, 0)))
1661 rhs_free = true;
1662 }
1663
1664 /* When a parameter is not an SSA register because its address is taken
1665 and it is just copied into one, the statement will be completely
1666 free after inlining (we will copy propagate backward). */
1667 if (rhs_free && is_gimple_reg (lhs))
1668 return 2;
1669
1670 /* Reads of parameters passed by reference
1671 are expected to be free (i.e. optimized out after inlining). */
1672 if (TREE_CODE (inner_rhs) == MEM_REF
1673 && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0)))
1674 rhs_free = true;
1675
1676 /* Copying a parameter passed by reference into a gimple register is
1677 probably also going to copy propagate, but we can't be quite
1678 sure. */
1679 if (rhs_free && is_gimple_reg (lhs))
1680 lhs_free = true;
1681
1682 /* Writes to parameters, parameters passed by value and return value
1683 (either directly or passed via invisible reference) are free.
1684
1685 TODO: We ought to handle testcase like
1686 struct a {int a,b;};
1687 struct a
1688 retrurnsturct (void)
1689 {
1690 struct a a ={1,2};
1691 return a;
1692 }
1693
1694 This translates into:
1695
1696 retrurnsturct ()
1697 {
1698 int a$b;
1699 int a$a;
1700 struct a a;
1701 struct a D.2739;
1702
1703 <bb 2>:
1704 D.2739.a = 1;
1705 D.2739.b = 2;
1706 return D.2739;
1707
1708 }
1709 For that we would need to copy the ipa-split logic detecting writes
1710 to the return value. */
1711 if (TREE_CODE (inner_lhs) == PARM_DECL
1712 || TREE_CODE (inner_lhs) == RESULT_DECL
1713 || (TREE_CODE (inner_lhs) == MEM_REF
1714 && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0))
1715 || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
1716 && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
1717 && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
1718 (inner_lhs,
1719 0))) == RESULT_DECL))))
1720 lhs_free = true;
1721 if (lhs_free
1722 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1723 rhs_free = true;
1724 if (lhs_free && rhs_free)
1725 return 1;
1726 }
1727 return 0;
1728 default:
1729 return 0;
1730 }
1731 }
1732
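/* For example, a GIMPLE_RETURN is always expected to disappear (2), a copy of
   an unmodified parameter into an SSA register is also expected to disappear
   (2), a store of a constant into the RESULT_DECL gets even odds (1), and
   anything more complex is assumed to survive (0).  */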
1733
1734 /* If BB ends with a conditional we can turn into predicates, attach the corresponding
1735 predicates to the CFG edges. */
1736
1737 static void
1738 set_cond_stmt_execution_predicate (struct ipa_node_params *info,
1739 struct inline_summary *summary,
1740 basic_block bb)
1741 {
1742 gimple last;
1743 tree op;
1744 int index;
1745 struct agg_position_info aggpos;
1746 enum tree_code code, inverted_code;
1747 edge e;
1748 edge_iterator ei;
1749 gimple set_stmt;
1750 tree op2;
1751
1752 last = last_stmt (bb);
1753 if (!last || gimple_code (last) != GIMPLE_COND)
1754 return;
1755 if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
1756 return;
1757 op = gimple_cond_lhs (last);
1758 /* TODO: handle conditionals like
1759 var = op0 < 4;
1760 if (var != 0). */
1761 if (unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1762 {
1763 code = gimple_cond_code (last);
1764 inverted_code = invert_tree_comparison (code, HONOR_NANS (op));
1765
1766 FOR_EACH_EDGE (e, ei, bb->succs)
1767 {
1768 enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
1769 ? code : inverted_code);
1770 /* invert_tree_comparison will return ERROR_MARK on FP
1771 comparisons that are not EQ/NE instead of returning the proper
1772 unordered one. Be sure it is not confused with NON_CONSTANT. */
1773 if (this_code != ERROR_MARK)
1774 {
1775 struct predicate p = add_condition (summary, index, &aggpos,
1776 this_code,
1777 gimple_cond_rhs (last));
1778 e->aux = pool_alloc (edge_predicate_pool);
1779 *(struct predicate *) e->aux = p;
1780 }
1781 }
1782 }
1783
1784 if (TREE_CODE (op) != SSA_NAME)
1785 return;
1786 /* Special case
1787 if (builtin_constant_p (op))
1788 constant_code
1789 else
1790 nonconstant_code.
1791 Here we can predicate nonconstant_code. We can't
1792 really handle constant_code since we have no predicate
1793 for this and also the constant code is not known to be
1794 optimized away when the inliner doesn't see that the operand is constant.
1795 Other optimizers might think otherwise. */
1796 if (gimple_cond_code (last) != NE_EXPR
1797 || !integer_zerop (gimple_cond_rhs (last)))
1798 return;
1799 set_stmt = SSA_NAME_DEF_STMT (op);
1800 if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
1801 || gimple_call_num_args (set_stmt) != 1)
1802 return;
1803 op2 = gimple_call_arg (set_stmt, 0);
1804 if (!unmodified_parm_or_parm_agg_item
1805 (info, set_stmt, op2, &index, &aggpos))
1806 return;
1807 FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
1808 {
1809 struct predicate p = add_condition (summary, index, &aggpos,
1810 IS_NOT_CONSTANT, NULL_TREE);
1811 e->aux = pool_alloc (edge_predicate_pool);
1812 *(struct predicate *) e->aux = p;
1813 }
1814 }
1815
1816
1817 /* If BB ends with a switch we can turn into predicates, attach the corresponding
1818 predicates to the CFG edges. */
1819
1820 static void
1821 set_switch_stmt_execution_predicate (struct ipa_node_params *info,
1822 struct inline_summary *summary,
1823 basic_block bb)
1824 {
1825 gimple lastg;
1826 tree op;
1827 int index;
1828 struct agg_position_info aggpos;
1829 edge e;
1830 edge_iterator ei;
1831 size_t n;
1832 size_t case_idx;
1833
1834 lastg = last_stmt (bb);
1835 if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
1836 return;
1837 gswitch *last = as_a <gswitch *> (lastg);
1838 op = gimple_switch_index (last);
1839 if (!unmodified_parm_or_parm_agg_item (info, last, op, &index, &aggpos))
1840 return;
1841
1842 FOR_EACH_EDGE (e, ei, bb->succs)
1843 {
1844 e->aux = pool_alloc (edge_predicate_pool);
1845 *(struct predicate *) e->aux = false_predicate ();
1846 }
1847 n = gimple_switch_num_labels (last);
1848 for (case_idx = 0; case_idx < n; ++case_idx)
1849 {
1850 tree cl = gimple_switch_label (last, case_idx);
1851 tree min, max;
1852 struct predicate p;
1853
1854 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
1855 min = CASE_LOW (cl);
1856 max = CASE_HIGH (cl);
1857
1858 /* For the default case we might want to construct a predicate that none
1859 of the cases is met, but it is a bit hard to do without having negations
1860 of conditionals handy. */
1861 if (!min && !max)
1862 p = true_predicate ();
1863 else if (!max)
1864 p = add_condition (summary, index, &aggpos, EQ_EXPR, min);
1865 else
1866 {
1867 struct predicate p1, p2;
1868 p1 = add_condition (summary, index, &aggpos, GE_EXPR, min);
1869 p2 = add_condition (summary, index, &aggpos, LE_EXPR, max);
1870 p = and_predicates (summary->conds, &p1, &p2);
1871 }
1872 *(struct predicate *) e->aux
1873 = or_predicates (summary->conds, &p, (struct predicate *) e->aux);
1874 }
1875 }
1876
1877
1878 /* For each BB in NODE attach to its AUX pointer the predicate under
1879 which it is executable. */
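/* Informally, the fixpoint computed below is

     predicate (ENTRY) = true
     predicate (BB)    = OR over incoming edges E of
                           (predicate (E->src) AND predicate (E))

   where edges without a predicate attached are treated as always true.
   This is just a restatement of the propagation loop below, not a
   separate algorithm.  */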
1880
1881 static void
1882 compute_bb_predicates (struct cgraph_node *node,
1883 struct ipa_node_params *parms_info,
1884 struct inline_summary *summary)
1885 {
1886 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1887 bool done = false;
1888 basic_block bb;
1889
1890 FOR_EACH_BB_FN (bb, my_function)
1891 {
1892 set_cond_stmt_execution_predicate (parms_info, summary, bb);
1893 set_switch_stmt_execution_predicate (parms_info, summary, bb);
1894 }
1895
1896 /* Entry block is always executable. */
1897 ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1898 = pool_alloc (edge_predicate_pool);
1899 *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
1900 = true_predicate ();
1901
1902 /* A simple dataflow propagation of predicates forward in the CFG.
1903 TODO: work in reverse postorder. */
1904 while (!done)
1905 {
1906 done = true;
1907 FOR_EACH_BB_FN (bb, my_function)
1908 {
1909 struct predicate p = false_predicate ();
1910 edge e;
1911 edge_iterator ei;
1912 FOR_EACH_EDGE (e, ei, bb->preds)
1913 {
1914 if (e->src->aux)
1915 {
1916 struct predicate this_bb_predicate
1917 = *(struct predicate *) e->src->aux;
1918 if (e->aux)
1919 this_bb_predicate
1920 = and_predicates (summary->conds, &this_bb_predicate,
1921 (struct predicate *) e->aux);
1922 p = or_predicates (summary->conds, &p, &this_bb_predicate);
1923 if (true_predicate_p (&p))
1924 break;
1925 }
1926 }
1927 if (false_predicate_p (&p))
1928 gcc_assert (!bb->aux);
1929 else
1930 {
1931 if (!bb->aux)
1932 {
1933 done = false;
1934 bb->aux = pool_alloc (edge_predicate_pool);
1935 *((struct predicate *) bb->aux) = p;
1936 }
1937 else if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1938 {
1939 /* This OR operation is needed to ensure monotone data flow
1940 in case we hit the limit on the number of clauses and the
1941 and/or operations above give approximate answers. */
1942 p = or_predicates (summary->conds, &p, (struct predicate *)bb->aux);
1943 if (!predicates_equal_p (&p, (struct predicate *) bb->aux))
1944 {
1945 done = false;
1946 *((struct predicate *) bb->aux) = p;
1947 }
1948 }
1949 }
1950 }
1951 }
1952 }
1953
1954
1955 /* We keep info about constantness of SSA names. */
1956
1957 typedef struct predicate predicate_t;
1958 /* Return predicate specifying when the result of expression EXPR might not
1959 be a compile time constant. */
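/* For example (with hypothetical SSA names), for the expression a_1 + parm_3,
   where parm_3 is the default definition of a parameter, the result is the
   predicate "a_1 is non-constant OR parm_3 changed", obtained by OR-ing the
   sub-predicates of the two operands as done below.  */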
1960
1961 static struct predicate
1962 will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
1963 struct inline_summary *summary,
1964 tree expr,
1965 vec<predicate_t> nonconstant_names)
1966 {
1967 tree parm;
1968 int index;
1969
1970 while (UNARY_CLASS_P (expr))
1971 expr = TREE_OPERAND (expr, 0);
1972
1973 parm = unmodified_parm (NULL, expr);
1974 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
1975 return add_condition (summary, index, NULL, CHANGED, NULL_TREE);
1976 if (is_gimple_min_invariant (expr))
1977 return false_predicate ();
1978 if (TREE_CODE (expr) == SSA_NAME)
1979 return nonconstant_names[SSA_NAME_VERSION (expr)];
1980 if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
1981 {
1982 struct predicate p1 = will_be_nonconstant_expr_predicate
1983 (info, summary, TREE_OPERAND (expr, 0),
1984 nonconstant_names);
1985 struct predicate p2;
1986 if (true_predicate_p (&p1))
1987 return p1;
1988 p2 = will_be_nonconstant_expr_predicate (info, summary,
1989 TREE_OPERAND (expr, 1),
1990 nonconstant_names);
1991 return or_predicates (summary->conds, &p1, &p2);
1992 }
1993 else if (TREE_CODE (expr) == COND_EXPR)
1994 {
1995 struct predicate p1 = will_be_nonconstant_expr_predicate
1996 (info, summary, TREE_OPERAND (expr, 0),
1997 nonconstant_names);
1998 struct predicate p2;
1999 if (true_predicate_p (&p1))
2000 return p1;
2001 p2 = will_be_nonconstant_expr_predicate (info, summary,
2002 TREE_OPERAND (expr, 1),
2003 nonconstant_names);
2004 if (true_predicate_p (&p2))
2005 return p2;
2006 p1 = or_predicates (summary->conds, &p1, &p2);
2007 p2 = will_be_nonconstant_expr_predicate (info, summary,
2008 TREE_OPERAND (expr, 2),
2009 nonconstant_names);
2010 return or_predicates (summary->conds, &p1, &p2);
2011 }
2012 else
2013 {
2014 debug_tree (expr);
2015 gcc_unreachable ();
2016 }
2017 return false_predicate ();
2018 }
2019
2020
2021 /* Return predicate specifying when the STMT might have result that is not
2022 a compile time constant. */
2023
2024 static struct predicate
2025 will_be_nonconstant_predicate (struct ipa_node_params *info,
2026 struct inline_summary *summary,
2027 gimple stmt,
2028 vec<predicate_t> nonconstant_names)
2029 {
2030 struct predicate p = true_predicate ();
2031 ssa_op_iter iter;
2032 tree use;
2033 struct predicate op_non_const;
2034 bool is_load;
2035 int base_index;
2036 struct agg_position_info aggpos;
2037
2038 /* What statements might be optimized away
2039 when their arguments are constant.
2040 TODO: also trivial builtins.
2041 builtin_constant_p is already handled later. */
2042 if (gimple_code (stmt) != GIMPLE_ASSIGN
2043 && gimple_code (stmt) != GIMPLE_COND
2044 && gimple_code (stmt) != GIMPLE_SWITCH)
2045 return p;
2046
2047 /* Stores will stay anyway. */
2048 if (gimple_store_p (stmt))
2049 return p;
2050
2051 is_load = gimple_assign_load_p (stmt);
2052
2053 /* Loads can be optimized when the value is known. */
2054 if (is_load)
2055 {
2056 tree op;
2057 gcc_assert (gimple_assign_single_p (stmt));
2058 op = gimple_assign_rhs1 (stmt);
2059 if (!unmodified_parm_or_parm_agg_item (info, stmt, op, &base_index,
2060 &aggpos))
2061 return p;
2062 }
2063 else
2064 base_index = -1;
2065
2066 /* See if we understand all operands before we start
2067 adding conditionals. */
2068 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2069 {
2070 tree parm = unmodified_parm (stmt, use);
2071 /* For arguments we can build a condition. */
2072 if (parm && ipa_get_param_decl_index (info, parm) >= 0)
2073 continue;
2074 if (TREE_CODE (use) != SSA_NAME)
2075 return p;
2076 /* If we know when the operand is constant,
2077 we can still say something useful. */
2078 if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
2079 continue;
2080 return p;
2081 }
2082
2083 if (is_load)
2084 op_non_const =
2085 add_condition (summary, base_index, &aggpos, CHANGED, NULL);
2086 else
2087 op_non_const = false_predicate ();
2088 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2089 {
2090 tree parm = unmodified_parm (stmt, use);
2091 int index;
2092
2093 if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
2094 {
2095 if (index != base_index)
2096 p = add_condition (summary, index, NULL, CHANGED, NULL_TREE);
2097 else
2098 continue;
2099 }
2100 else
2101 p = nonconstant_names[SSA_NAME_VERSION (use)];
2102 op_non_const = or_predicates (summary->conds, &p, &op_non_const);
2103 }
2104 if (gimple_code (stmt) == GIMPLE_ASSIGN
2105 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
2106 nonconstant_names[SSA_NAME_VERSION (gimple_assign_lhs (stmt))]
2107 = op_non_const;
2108 return op_non_const;
2109 }
2110
2111 struct record_modified_bb_info
2112 {
2113 bitmap bb_set;
2114 gimple stmt;
2115 };
2116
2117 /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be
2118 set, except for the store in info->stmt. */
2119
2120 static bool
2121 record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
2122 {
2123 struct record_modified_bb_info *info =
2124 (struct record_modified_bb_info *) data;
2125 if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
2126 return false;
2127 bitmap_set_bit (info->bb_set,
2128 SSA_NAME_IS_DEFAULT_DEF (vdef)
2129 ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
2130 : gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
2131 return false;
2132 }
2133
2134 /* Return probability (based on REG_BR_PROB_BASE) that the I-th parameter of
2135 STMT will have changed since the last invocation of STMT.
2136
2137 Value 0 is reserved for compile time invariants.
2138 For common parameters it is REG_BR_PROB_BASE. For loop invariants it
2139 ought to be REG_BR_PROB_BASE / estimated_iters. */
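/* A worked example under assumed frequencies: if the statement defining the
   argument has frequency 100 and the call has frequency 1000 (the call runs
   ten times per definition), the value returned below is roughly
   GCOV_COMPUTE_SCALE (100, 1000) == REG_BR_PROB_BASE / 10, i.e. the argument
   is expected to change in about one out of ten invocations.  */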
2140
2141 static int
2142 param_change_prob (gimple stmt, int i)
2143 {
2144 tree op = gimple_call_arg (stmt, i);
2145 basic_block bb = gimple_bb (stmt);
2146 tree base;
2147
2148 /* Global invariants never change. */
2149 if (is_gimple_min_invariant (op))
2150 return 0;
2151 /* We would have to do non-trivial analysis to really work out
2152 the probability of the value changing (i.e. when the init statement
2153 is in a sibling loop of the call).
2154 
2155 We use a conservative estimate: when the call is executed N times more
2156 often than the statement defining the value, we take the frequency 1/N. */
2157 if (TREE_CODE (op) == SSA_NAME)
2158 {
2159 int init_freq;
2160
2161 if (!bb->frequency)
2162 return REG_BR_PROB_BASE;
2163
2164 if (SSA_NAME_IS_DEFAULT_DEF (op))
2165 init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2166 else
2167 init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
2168
2169 if (!init_freq)
2170 init_freq = 1;
2171 if (init_freq < bb->frequency)
2172 return MAX (GCOV_COMPUTE_SCALE (init_freq, bb->frequency), 1);
2173 else
2174 return REG_BR_PROB_BASE;
2175 }
2176
2177 base = get_base_address (op);
2178 if (base)
2179 {
2180 ao_ref refd;
2181 int max;
2182 struct record_modified_bb_info info;
2183 bitmap_iterator bi;
2184 unsigned index;
2185 tree init = ctor_for_folding (base);
2186
2187 if (init != error_mark_node)
2188 return 0;
2189 if (!bb->frequency)
2190 return REG_BR_PROB_BASE;
2191 ao_ref_init (&refd, op);
2192 info.stmt = stmt;
2193 info.bb_set = BITMAP_ALLOC (NULL);
2194 walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
2195 NULL);
2196 if (bitmap_bit_p (info.bb_set, bb->index))
2197 {
2198 BITMAP_FREE (info.bb_set);
2199 return REG_BR_PROB_BASE;
2200 }
2201
2202 /* Assume that all memory is initialized at entry.
2203 TODO: Can we easily determine if the value is always defined
2204 and thus skip the entry block? */
2205 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
2206 max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
2207 else
2208 max = 1;
2209
2210 EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
2211 max = MIN (max, BASIC_BLOCK_FOR_FN (cfun, index)->frequency);
2212
2213 BITMAP_FREE (info.bb_set);
2214 if (max < bb->frequency)
2215 return MAX (GCOV_COMPUTE_SCALE (max, bb->frequency), 1);
2216 else
2217 return REG_BR_PROB_BASE;
2218 }
2219 return REG_BR_PROB_BASE;
2220 }
2221
2222 /* Find whether a basic block BB is the final block of a (half) diamond CFG
2223 sub-graph and if the predicate the condition depends on is known. If so,
2224 return true and store the predicate in *P. */
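/* An illustrative (half) diamond, with hypothetical SSA names:

     bb1:  if (parm_1 > 4) goto bb2; else goto bb3;
     bb2:  ...
     bb3:  # x_2 = PHI <5 (bb1), y_3 (bb2)>

   The selected PHI argument in bb3 is decided by the condition in bb1, so the
   predicate stored in *P is derived from the non-constancy of parm_1.  */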
2225
2226 static bool
2227 phi_result_unknown_predicate (struct ipa_node_params *info,
2228 struct inline_summary *summary, basic_block bb,
2229 struct predicate *p,
2230 vec<predicate_t> nonconstant_names)
2231 {
2232 edge e;
2233 edge_iterator ei;
2234 basic_block first_bb = NULL;
2235 gimple stmt;
2236
2237 if (single_pred_p (bb))
2238 {
2239 *p = false_predicate ();
2240 return true;
2241 }
2242
2243 FOR_EACH_EDGE (e, ei, bb->preds)
2244 {
2245 if (single_succ_p (e->src))
2246 {
2247 if (!single_pred_p (e->src))
2248 return false;
2249 if (!first_bb)
2250 first_bb = single_pred (e->src);
2251 else if (single_pred (e->src) != first_bb)
2252 return false;
2253 }
2254 else
2255 {
2256 if (!first_bb)
2257 first_bb = e->src;
2258 else if (e->src != first_bb)
2259 return false;
2260 }
2261 }
2262
2263 if (!first_bb)
2264 return false;
2265
2266 stmt = last_stmt (first_bb);
2267 if (!stmt
2268 || gimple_code (stmt) != GIMPLE_COND
2269 || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
2270 return false;
2271
2272 *p = will_be_nonconstant_expr_predicate (info, summary,
2273 gimple_cond_lhs (stmt),
2274 nonconstant_names);
2275 if (true_predicate_p (p))
2276 return false;
2277 else
2278 return true;
2279 }
2280
2281 /* Given a PHI statement in a function described by inline properties SUMMARY
2282 and *P being the predicate describing whether the selected PHI argument is
2283 known, store a predicate for the result of the PHI statement into
2284 NONCONSTANT_NAMES, if possible. */
2285
2286 static void
2287 predicate_for_phi_result (struct inline_summary *summary, gphi *phi,
2288 struct predicate *p,
2289 vec<predicate_t> nonconstant_names)
2290 {
2291 unsigned i;
2292
2293 for (i = 0; i < gimple_phi_num_args (phi); i++)
2294 {
2295 tree arg = gimple_phi_arg (phi, i)->def;
2296 if (!is_gimple_min_invariant (arg))
2297 {
2298 gcc_assert (TREE_CODE (arg) == SSA_NAME);
2299 *p = or_predicates (summary->conds, p,
2300 &nonconstant_names[SSA_NAME_VERSION (arg)]);
2301 if (true_predicate_p (p))
2302 return;
2303 }
2304 }
2305
2306 if (dump_file && (dump_flags & TDF_DETAILS))
2307 {
2308 fprintf (dump_file, "\t\tphi predicate: ");
2309 dump_predicate (dump_file, summary->conds, p);
2310 }
2311 nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
2312 }
2313
2314 /* Return predicate specifying when an array index in access OP becomes non-constant. */
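/* E.g. (hypothetical names) for the access a[i_5].f the returned predicate is
   simply the non-constancy predicate of i_5; accesses with only constant
   indices yield the false predicate.  */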
2315
2316 static struct predicate
2317 array_index_predicate (struct inline_summary *info,
2318 vec< predicate_t> nonconstant_names, tree op)
2319 {
2320 struct predicate p = false_predicate ();
2321 while (handled_component_p (op))
2322 {
2323 if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
2324 {
2325 if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
2326 p = or_predicates (info->conds, &p,
2327 &nonconstant_names[SSA_NAME_VERSION
2328 (TREE_OPERAND (op, 1))]);
2329 }
2330 op = TREE_OPERAND (op, 0);
2331 }
2332 return p;
2333 }
2334
2335 /* For a typical usage of __builtin_expect (a<b, 1), we
2336 may introduce an extra relation stmt:
2337 With the builtin, we have
2338 t1 = a <= b;
2339 t2 = (long int) t1;
2340 t3 = __builtin_expect (t2, 1);
2341 if (t3 != 0)
2342 goto ...
2343 Without the builtin, we have
2344 if (a<=b)
2345 goto...
2346 This affects the size/time estimation and may have
2347 an impact on early inlining.
2348 Here we find this pattern and fix it up later. */
2349
2350 static gimple
2351 find_foldable_builtin_expect (basic_block bb)
2352 {
2353 gimple_stmt_iterator bsi;
2354
2355 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2356 {
2357 gimple stmt = gsi_stmt (bsi);
2358 if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
2359 || (is_gimple_call (stmt)
2360 && gimple_call_internal_p (stmt)
2361 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
2362 {
2363 tree var = gimple_call_lhs (stmt);
2364 tree arg = gimple_call_arg (stmt, 0);
2365 use_operand_p use_p;
2366 gimple use_stmt;
2367 bool match = false;
2368 bool done = false;
2369
2370 if (!var || !arg)
2371 continue;
2372 gcc_assert (TREE_CODE (var) == SSA_NAME);
2373
2374 while (TREE_CODE (arg) == SSA_NAME)
2375 {
2376 gimple stmt_tmp = SSA_NAME_DEF_STMT (arg);
2377 if (!is_gimple_assign (stmt_tmp))
2378 break;
2379 switch (gimple_assign_rhs_code (stmt_tmp))
2380 {
2381 case LT_EXPR:
2382 case LE_EXPR:
2383 case GT_EXPR:
2384 case GE_EXPR:
2385 case EQ_EXPR:
2386 case NE_EXPR:
2387 match = true;
2388 done = true;
2389 break;
2390 CASE_CONVERT:
2391 break;
2392 default:
2393 done = true;
2394 break;
2395 }
2396 if (done)
2397 break;
2398 arg = gimple_assign_rhs1 (stmt_tmp);
2399 }
2400
2401 if (match && single_imm_use (var, &use_p, &use_stmt)
2402 && gimple_code (use_stmt) == GIMPLE_COND)
2403 return use_stmt;
2404 }
2405 }
2406 return NULL;
2407 }
2408
2409 /* Return true when the basic block contains only clobbers followed by RESX.
2410 Such BBs are kept around to make removal of dead stores possible in the
2411 presence of EH and will be optimized out by optimize_clobbers later in the
2412 game.
2413 
2414 NEED_EH is used to recurse in case the clobber has non-EH predecessors
2415 that can be clobber only, too. When it is false, the RESX is not necessary
2416 at the end of the basic block. */
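/* A typical block recognized here looks like this sketch:

     <bb N>:
       some_object ={v} {CLOBBER};
       resx 1;

   possibly with further clobber-only, single-successor predecessors.  */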
2417
2418 static bool
2419 clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
2420 {
2421 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2422 edge_iterator ei;
2423 edge e;
2424
2425 if (need_eh)
2426 {
2427 if (gsi_end_p (gsi))
2428 return false;
2429 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
2430 return false;
2431 gsi_prev (&gsi);
2432 }
2433 else if (!single_succ_p (bb))
2434 return false;
2435
2436 for (; !gsi_end_p (gsi); gsi_prev (&gsi))
2437 {
2438 gimple stmt = gsi_stmt (gsi);
2439 if (is_gimple_debug (stmt))
2440 continue;
2441 if (gimple_clobber_p (stmt))
2442 continue;
2443 if (gimple_code (stmt) == GIMPLE_LABEL)
2444 break;
2445 return false;
2446 }
2447
2448 /* See if all predecessors are either throws or clobber-only BBs. */
2449 FOR_EACH_EDGE (e, ei, bb->preds)
2450 if (!(e->flags & EDGE_EH)
2451 && !clobber_only_eh_bb_p (e->src, false))
2452 return false;
2453
2454 return true;
2455 }
2456
2457 /* Compute function body size parameters for NODE.
2458 When EARLY is true, we compute only simple summaries without
2459 non-trivial predicates to drive the early inliner. */
2460
2461 static void
2462 estimate_function_body_sizes (struct cgraph_node *node, bool early)
2463 {
2464 gcov_type time = 0;
2465 /* Estimate static overhead for function prologue/epilogue and alignment. */
2466 int size = 2;
2467 /* Benefits are scaled by the probability of elimination, which is in the
2468 range <0,2>. */
2469 basic_block bb;
2470 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
2471 int freq;
2472 struct inline_summary *info = inline_summary (node);
2473 struct predicate bb_predicate;
2474 struct ipa_node_params *parms_info = NULL;
2475 vec<predicate_t> nonconstant_names = vNULL;
2476 int nblocks, n;
2477 int *order;
2478 predicate array_index = true_predicate ();
2479 gimple fix_builtin_expect_stmt;
2480
2481 info->conds = NULL;
2482 info->entry = NULL;
2483
2484 if (opt_for_fn (node->decl, optimize) && !early)
2485 {
2486 calculate_dominance_info (CDI_DOMINATORS);
2487 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2488
2489 if (ipa_node_params_vector.exists ())
2490 {
2491 parms_info = IPA_NODE_REF (node);
2492 nonconstant_names.safe_grow_cleared
2493 (SSANAMES (my_function)->length ());
2494 }
2495 }
2496
2497 if (dump_file)
2498 fprintf (dump_file, "\nAnalyzing function body size: %s\n",
2499 node->name ());
2500
2501 /* When we run into the maximal number of entries, we assign everything to
2502 the constant truth case. Be sure to have it in the list. */
2503 bb_predicate = true_predicate ();
2504 account_size_time (info, 0, 0, &bb_predicate);
2505
2506 bb_predicate = not_inlined_predicate ();
2507 account_size_time (info, 2 * INLINE_SIZE_SCALE, 0, &bb_predicate);
2508
2509 gcc_assert (my_function && my_function->cfg);
2510 if (parms_info)
2511 compute_bb_predicates (node, parms_info, info);
2512 gcc_assert (cfun == my_function);
2513 order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2514 nblocks = pre_and_rev_post_order_compute (NULL, order, false);
2515 for (n = 0; n < nblocks; n++)
2516 {
2517 bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
2518 freq = compute_call_stmt_bb_frequency (node->decl, bb);
2519 if (clobber_only_eh_bb_p (bb))
2520 {
2521 if (dump_file && (dump_flags & TDF_DETAILS))
2522 fprintf (dump_file, "\n Ignoring BB %i;"
2523 " it will be optimized away by cleanup_clobbers\n",
2524 bb->index);
2525 continue;
2526 }
2527
2528 /* TODO: Obviously predicates can be propagated down across CFG. */
2529 if (parms_info)
2530 {
2531 if (bb->aux)
2532 bb_predicate = *(struct predicate *) bb->aux;
2533 else
2534 bb_predicate = false_predicate ();
2535 }
2536 else
2537 bb_predicate = true_predicate ();
2538
2539 if (dump_file && (dump_flags & TDF_DETAILS))
2540 {
2541 fprintf (dump_file, "\n BB %i predicate:", bb->index);
2542 dump_predicate (dump_file, info->conds, &bb_predicate);
2543 }
2544
2545 if (parms_info && nonconstant_names.exists ())
2546 {
2547 struct predicate phi_predicate;
2548 bool first_phi = true;
2549
2550 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
2551 gsi_next (&bsi))
2552 {
2553 if (first_phi
2554 && !phi_result_unknown_predicate (parms_info, info, bb,
2555 &phi_predicate,
2556 nonconstant_names))
2557 break;
2558 first_phi = false;
2559 if (dump_file && (dump_flags & TDF_DETAILS))
2560 {
2561 fprintf (dump_file, " ");
2562 print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
2563 }
2564 predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
2565 nonconstant_names);
2566 }
2567 }
2568
2569 fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);
2570
2571 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
2572 gsi_next (&bsi))
2573 {
2574 gimple stmt = gsi_stmt (bsi);
2575 int this_size = estimate_num_insns (stmt, &eni_size_weights);
2576 int this_time = estimate_num_insns (stmt, &eni_time_weights);
2577 int prob;
2578 struct predicate will_be_nonconstant;
2579
2580 /* This relation stmt should be folded after we remove the
2581 builtin_expect call. Adjust the cost here. */
2582 if (stmt == fix_builtin_expect_stmt)
2583 {
2584 this_size--;
2585 this_time--;
2586 }
2587
2588 if (dump_file && (dump_flags & TDF_DETAILS))
2589 {
2590 fprintf (dump_file, " ");
2591 print_gimple_stmt (dump_file, stmt, 0, 0);
2592 fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
2593 ((double) freq) / CGRAPH_FREQ_BASE, this_size,
2594 this_time);
2595 }
2596
2597 if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
2598 {
2599 struct predicate this_array_index;
2600 this_array_index =
2601 array_index_predicate (info, nonconstant_names,
2602 gimple_assign_rhs1 (stmt));
2603 if (!false_predicate_p (&this_array_index))
2604 array_index =
2605 and_predicates (info->conds, &array_index,
2606 &this_array_index);
2607 }
2608 if (gimple_store_p (stmt) && nonconstant_names.exists ())
2609 {
2610 struct predicate this_array_index;
2611 this_array_index =
2612 array_index_predicate (info, nonconstant_names,
2613 gimple_get_lhs (stmt));
2614 if (!false_predicate_p (&this_array_index))
2615 array_index =
2616 and_predicates (info->conds, &array_index,
2617 &this_array_index);
2618 }
2619
2620
2621 if (is_gimple_call (stmt)
2622 && !gimple_call_internal_p (stmt))
2623 {
2624 struct cgraph_edge *edge = node->get_edge (stmt);
2625 struct inline_edge_summary *es = inline_edge_summary (edge);
2626
2627 /* Special case: results of BUILT_IN_CONSTANT_P will always be
2628 resolved as constant. We however don't want to optimize
2629 out the cgraph edges. */
2630 if (nonconstant_names.exists ()
2631 && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
2632 && gimple_call_lhs (stmt)
2633 && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
2634 {
2635 struct predicate false_p = false_predicate ();
2636 nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
2637 = false_p;
2638 }
2639 if (ipa_node_params_vector.exists ())
2640 {
2641 int count = gimple_call_num_args (stmt);
2642 int i;
2643
2644 if (count)
2645 es->param.safe_grow_cleared (count);
2646 for (i = 0; i < count; i++)
2647 {
2648 int prob = param_change_prob (stmt, i);
2649 gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
2650 es->param[i].change_prob = prob;
2651 }
2652 }
2653
2654 es->call_stmt_size = this_size;
2655 es->call_stmt_time = this_time;
2656 es->loop_depth = bb_loop_depth (bb);
2657 edge_set_predicate (edge, &bb_predicate);
2658 }
2659
2660 /* TODO: When a conditional jump or switch is known to be constant, but
2661 we did not translate it into the predicates, we really could account
2662 for just the maximum of the possible paths. */
2663 if (parms_info)
2664 will_be_nonconstant
2665 = will_be_nonconstant_predicate (parms_info, info,
2666 stmt, nonconstant_names);
2667 if (this_time || this_size)
2668 {
2669 struct predicate p;
2670
2671 this_time *= freq;
2672
2673 prob = eliminated_by_inlining_prob (stmt);
2674 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
2675 fprintf (dump_file,
2676 "\t\t50%% will be eliminated by inlining\n");
2677 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
2678 fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
2679
2680 if (parms_info)
2681 p = and_predicates (info->conds, &bb_predicate,
2682 &will_be_nonconstant);
2683 else
2684 p = true_predicate ();
2685
2686 if (!false_predicate_p (&p))
2687 {
2688 time += this_time;
2689 size += this_size;
2690 if (time > MAX_TIME * INLINE_TIME_SCALE)
2691 time = MAX_TIME * INLINE_TIME_SCALE;
2692 }
2693
2694 /* We account everything but the calls. Calls have their own
2695 size/time info attached to cgraph edges. This is necessary
2696 in order to make the cost disappear after inlining. */
2697 if (!is_gimple_call (stmt))
2698 {
2699 if (prob)
2700 {
2701 struct predicate ip = not_inlined_predicate ();
2702 ip = and_predicates (info->conds, &ip, &p);
2703 account_size_time (info, this_size * prob,
2704 this_time * prob, &ip);
2705 }
2706 if (prob != 2)
2707 account_size_time (info, this_size * (2 - prob),
2708 this_time * (2 - prob), &p);
2709 }
2710
2711 gcc_assert (time >= 0);
2712 gcc_assert (size >= 0);
2713 }
2714 }
2715 }
2716 set_hint_predicate (&inline_summary (node)->array_index, array_index);
2717 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
2718 if (time > MAX_TIME)
2719 time = MAX_TIME;
2720 free (order);
2721
2722 if (!early && nonconstant_names.exists ())
2723 {
2724 struct loop *loop;
2725 predicate loop_iterations = true_predicate ();
2726 predicate loop_stride = true_predicate ();
2727
2728 if (dump_file && (dump_flags & TDF_DETAILS))
2729 flow_loops_dump (dump_file, NULL, 0);
2730 scev_initialize ();
2731 FOR_EACH_LOOP (loop, 0)
2732 {
2733 vec<edge> exits;
2734 edge ex;
2735 unsigned int j, i;
2736 struct tree_niter_desc niter_desc;
2737 basic_block *body = get_loop_body (loop);
2738 bb_predicate = *(struct predicate *) loop->header->aux;
2739
2740 exits = get_loop_exit_edges (loop);
2741 FOR_EACH_VEC_ELT (exits, j, ex)
2742 if (number_of_iterations_exit (loop, ex, &niter_desc, false)
2743 && !is_gimple_min_invariant (niter_desc.niter))
2744 {
2745 predicate will_be_nonconstant
2746 = will_be_nonconstant_expr_predicate (parms_info, info,
2747 niter_desc.niter,
2748 nonconstant_names);
2749 if (!true_predicate_p (&will_be_nonconstant))
2750 will_be_nonconstant = and_predicates (info->conds,
2751 &bb_predicate,
2752 &will_be_nonconstant);
2753 if (!true_predicate_p (&will_be_nonconstant)
2754 && !false_predicate_p (&will_be_nonconstant))
2755 /* This is slightly imprecise. We may want to represent each
2756 loop with an independent predicate. */
2757 loop_iterations =
2758 and_predicates (info->conds, &loop_iterations,
2759 &will_be_nonconstant);
2760 }
2761 exits.release ();
2762
2763 for (i = 0; i < loop->num_nodes; i++)
2764 {
2765 gimple_stmt_iterator gsi;
2766 bb_predicate = *(struct predicate *) body[i]->aux;
2767 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
2768 gsi_next (&gsi))
2769 {
2770 gimple stmt = gsi_stmt (gsi);
2771 affine_iv iv;
2772 ssa_op_iter iter;
2773 tree use;
2774
2775 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
2776 {
2777 predicate will_be_nonconstant;
2778
2779 if (!simple_iv
2780 (loop, loop_containing_stmt (stmt), use, &iv, true)
2781 || is_gimple_min_invariant (iv.step))
2782 continue;
2783 will_be_nonconstant
2784 = will_be_nonconstant_expr_predicate (parms_info, info,
2785 iv.step,
2786 nonconstant_names);
2787 if (!true_predicate_p (&will_be_nonconstant))
2788 will_be_nonconstant
2789 = and_predicates (info->conds,
2790 &bb_predicate,
2791 &will_be_nonconstant);
2792 if (!true_predicate_p (&will_be_nonconstant)
2793 && !false_predicate_p (&will_be_nonconstant))
2794 /* This is slightly imprecise. We may want to represent
2795 each loop with an independent predicate. */
2796 loop_stride =
2797 and_predicates (info->conds, &loop_stride,
2798 &will_be_nonconstant);
2799 }
2800 }
2801 }
2802 free (body);
2803 }
2804 set_hint_predicate (&inline_summary (node)->loop_iterations,
2805 loop_iterations);
2806 set_hint_predicate (&inline_summary (node)->loop_stride, loop_stride);
2807 scev_finalize ();
2808 }
2809 FOR_ALL_BB_FN (bb, my_function)
2810 {
2811 edge e;
2812 edge_iterator ei;
2813
2814 if (bb->aux)
2815 pool_free (edge_predicate_pool, bb->aux);
2816 bb->aux = NULL;
2817 FOR_EACH_EDGE (e, ei, bb->succs)
2818 {
2819 if (e->aux)
2820 pool_free (edge_predicate_pool, e->aux);
2821 e->aux = NULL;
2822 }
2823 }
2824 inline_summary (node)->self_time = time;
2825 inline_summary (node)->self_size = size;
2826 nonconstant_names.release ();
2827 if (opt_for_fn (node->decl, optimize) && !early)
2828 {
2829 loop_optimizer_finalize ();
2830 free_dominance_info (CDI_DOMINATORS);
2831 }
2832 if (dump_file)
2833 {
2834 fprintf (dump_file, "\n");
2835 dump_inline_summary (dump_file, node);
2836 }
2837 }
2838
2839
2840 /* Compute parameters of functions used by inliner.
2841 EARLY is true when we compute parameters for the early inliner */
2842
2843 void
2844 compute_inline_parameters (struct cgraph_node *node, bool early)
2845 {
2846 HOST_WIDE_INT self_stack_size;
2847 struct cgraph_edge *e;
2848 struct inline_summary *info;
2849
2850 gcc_assert (!node->global.inlined_to);
2851
2852 inline_summary_alloc ();
2853
2854 info = inline_summary (node);
2855 reset_inline_summary (node);
2856
2857 /* FIXME: Thunks are inlinable, but tree-inline doesn't know how to do that.
2858 Once this happens, we will need to predict the call
2859 statement size more carefully. */
2860 if (node->thunk.thunk_p)
2861 {
2862 struct inline_edge_summary *es = inline_edge_summary (node->callees);
2863 struct predicate t = true_predicate ();
2864
2865 info->inlinable = 0;
2866 node->callees->call_stmt_cannot_inline_p = true;
2867 node->local.can_change_signature = false;
2868 es->call_stmt_time = 1;
2869 es->call_stmt_size = 1;
2870 account_size_time (info, 0, 0, &t);
2871 return;
2872 }
2873
2874 /* Even is_gimple_min_invariant relies on current_function_decl. */
2875 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2876
2877 /* Estimate the stack size for the function if we're optimizing. */
2878 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
2879 info->estimated_self_stack_size = self_stack_size;
2880 info->estimated_stack_size = self_stack_size;
2881 info->stack_frame_offset = 0;
2882
2883 /* Can this function be inlined at all? */
2884 if (!opt_for_fn (node->decl, optimize)
2885 && !lookup_attribute ("always_inline",
2886 DECL_ATTRIBUTES (node->decl)))
2887 info->inlinable = false;
2888 else
2889 info->inlinable = tree_inlinable_function_p (node->decl);
2890
2891 /* Type attributes can use parameter indices to describe them. */
2892 if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
2893 node->local.can_change_signature = false;
2894 else
2895 {
2896 /* Otherwise, inlinable functions always can change signature. */
2897 if (info->inlinable)
2898 node->local.can_change_signature = true;
2899 else
2900 {
2901 /* Functions calling builtin_apply cannot change their signature. */
2902 for (e = node->callees; e; e = e->next_callee)
2903 {
2904 tree cdecl = e->callee->decl;
2905 if (DECL_BUILT_IN (cdecl)
2906 && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
2907 && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
2908 || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
2909 break;
2910 }
2911 node->local.can_change_signature = !e;
2912 }
2913 }
2914 estimate_function_body_sizes (node, early);
2915
2916 for (e = node->callees; e; e = e->next_callee)
2917 if (e->callee->comdat_local_p ())
2918 break;
2919 node->calls_comdat_local = (e != NULL);
2920
2921 /* Inlining characteristics are maintained by the cgraph_mark_inline. */
2922 info->time = info->self_time;
2923 info->size = info->self_size;
2924 info->stack_frame_offset = 0;
2925 info->estimated_stack_size = info->estimated_self_stack_size;
2926 #ifdef ENABLE_CHECKING
2927 inline_update_overall_summary (node);
2928 gcc_assert (info->time == info->self_time && info->size == info->self_size);
2929 #endif
2930
2931 pop_cfun ();
2932 }
2933
2934
2935 /* Compute parameters of functions used by inliner using
2936 current_function_decl. */
2937
2938 static unsigned int
2939 compute_inline_parameters_for_current (void)
2940 {
2941 compute_inline_parameters (cgraph_node::get (current_function_decl), true);
2942 return 0;
2943 }
2944
2945 namespace {
2946
2947 const pass_data pass_data_inline_parameters =
2948 {
2949 GIMPLE_PASS, /* type */
2950 "inline_param", /* name */
2951 OPTGROUP_INLINE, /* optinfo_flags */
2952 TV_INLINE_PARAMETERS, /* tv_id */
2953 0, /* properties_required */
2954 0, /* properties_provided */
2955 0, /* properties_destroyed */
2956 0, /* todo_flags_start */
2957 0, /* todo_flags_finish */
2958 };
2959
2960 class pass_inline_parameters : public gimple_opt_pass
2961 {
2962 public:
2963 pass_inline_parameters (gcc::context *ctxt)
2964 : gimple_opt_pass (pass_data_inline_parameters, ctxt)
2965 {}
2966
2967 /* opt_pass methods: */
2968 opt_pass * clone () { return new pass_inline_parameters (m_ctxt); }
2969 virtual unsigned int execute (function *)
2970 {
2971 return compute_inline_parameters_for_current ();
2972 }
2973
2974 }; // class pass_inline_parameters
2975
2976 } // anon namespace
2977
2978 gimple_opt_pass *
2979 make_pass_inline_parameters (gcc::context *ctxt)
2980 {
2981 return new pass_inline_parameters (ctxt);
2982 }
2983
2984
2985 /* Estimate the benefit of devirtualizing indirect edge IE, given KNOWN_VALS,
2986 KNOWN_CONTEXTS and KNOWN_AGGS. */
2987
2988 static bool
2989 estimate_edge_devirt_benefit (struct cgraph_edge *ie,
2990 int *size, int *time,
2991 vec<tree> known_vals,
2992 vec<ipa_polymorphic_call_context> known_contexts,
2993 vec<ipa_agg_jump_function_p> known_aggs)
2994 {
2995 tree target;
2996 struct cgraph_node *callee;
2997 struct inline_summary *isummary;
2998 enum availability avail;
2999 bool speculative;
3000
3001 if (!known_vals.exists () && !known_contexts.exists ())
3002 return false;
3003 if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
3004 return false;
3005
3006 target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
3007 known_aggs, &speculative);
3008 if (!target || speculative)
3009 return false;
3010
3011 /* Account for difference in cost between indirect and direct calls. */
3012 *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
3013 *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
3014 gcc_checking_assert (*time >= 0);
3015 gcc_checking_assert (*size >= 0);
3016
3017 callee = cgraph_node::get (target);
3018 if (!callee || !callee->definition)
3019 return false;
3020 callee = callee->function_symbol (&avail);
3021 if (avail < AVAIL_AVAILABLE)
3022 return false;
3023 isummary = inline_summary (callee);
3024 return isummary->inlinable;
3025 }
3026
3027 /* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
3028 handle edge E with probability PROB.
3029 Set HINTS if edge may be devirtualized.
3030 KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
3031 site. */
3032
3033 static inline void
3034 estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
3035 int *time,
3036 int prob,
3037 vec<tree> known_vals,
3038 vec<ipa_polymorphic_call_context> known_contexts,
3039 vec<ipa_agg_jump_function_p> known_aggs,
3040 inline_hints *hints)
3041 {
3042 struct inline_edge_summary *es = inline_edge_summary (e);
3043 int call_size = es->call_stmt_size;
3044 int call_time = es->call_stmt_time;
3045 int cur_size;
3046 if (!e->callee
3047 && estimate_edge_devirt_benefit (e, &call_size, &call_time,
3048 known_vals, known_contexts, known_aggs)
3049 && hints && e->maybe_hot_p ())
3050 *hints |= INLINE_HINT_indirect_call;
3051 cur_size = call_size * INLINE_SIZE_SCALE;
3052 *size += cur_size;
3053 if (min_size)
3054 *min_size += cur_size;
3055 *time += apply_probability ((gcov_type) call_time, prob)
3056 * e->frequency * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE);
3057 if (*time > MAX_TIME * INLINE_TIME_SCALE)
3058 *time = MAX_TIME * INLINE_TIME_SCALE;
3059 }
3060
3061
3062
3063 /* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
3064 calls in NODE. POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3065 describe context of the call site. */
3066
3067 static void
3068 estimate_calls_size_and_time (struct cgraph_node *node, int *size,
3069 int *min_size, int *time,
3070 inline_hints *hints,
3071 clause_t possible_truths,
3072 vec<tree> known_vals,
3073 vec<ipa_polymorphic_call_context> known_contexts,
3074 vec<ipa_agg_jump_function_p> known_aggs)
3075 {
3076 struct cgraph_edge *e;
3077 for (e = node->callees; e; e = e->next_callee)
3078 {
3079 struct inline_edge_summary *es = inline_edge_summary (e);
3080 if (!es->predicate
3081 || evaluate_predicate (es->predicate, possible_truths))
3082 {
3083 if (e->inline_failed)
3084 {
3085 /* Predicates of calls shall not use NOT_CHANGED codes,
3086 so we do not need to compute probabilities. */
3087 estimate_edge_size_and_time (e, size,
3088 es->predicate ? NULL : min_size,
3089 time, REG_BR_PROB_BASE,
3090 known_vals, known_contexts,
3091 known_aggs, hints);
3092 }
3093 else
3094 estimate_calls_size_and_time (e->callee, size, min_size, time,
3095 hints,
3096 possible_truths,
3097 known_vals, known_contexts,
3098 known_aggs);
3099 }
3100 }
3101 for (e = node->indirect_calls; e; e = e->next_callee)
3102 {
3103 struct inline_edge_summary *es = inline_edge_summary (e);
3104 if (!es->predicate
3105 || evaluate_predicate (es->predicate, possible_truths))
3106 estimate_edge_size_and_time (e, size,
3107 es->predicate ? NULL : min_size,
3108 time, REG_BR_PROB_BASE,
3109 known_vals, known_contexts, known_aggs,
3110 hints);
3111 }
3112 }
3113
3114
3115 /* Estimate size and time needed to execute NODE assuming
3116 POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
3117 information about NODE's arguments. If non-NULL, also use the probability
3118 information present in the INLINE_PARAM_SUMMARY vector.
3119 Additionally determine hints implied by the context. Finally compute the
3120 minimal size needed for the call that is independent of the call context and
3121 can be used for fast estimates. Return the values in RET_SIZE,
3122 RET_MIN_SIZE, RET_TIME and RET_HINTS. */
3123
3124 static void
3125 estimate_node_size_and_time (struct cgraph_node *node,
3126 clause_t possible_truths,
3127 vec<tree> known_vals,
3128 vec<ipa_polymorphic_call_context> known_contexts,
3129 vec<ipa_agg_jump_function_p> known_aggs,
3130 int *ret_size, int *ret_min_size, int *ret_time,
3131 inline_hints *ret_hints,
3132 vec<inline_param_summary>
3133 inline_param_summary)
3134 {
3135 struct inline_summary *info = inline_summary (node);
3136 size_time_entry *e;
3137 int size = 0;
3138 int time = 0;
3139 int min_size = 0;
3140 inline_hints hints = 0;
3141 int i;
3142
3143 if (dump_file && (dump_flags & TDF_DETAILS))
3144 {
3145 bool found = false;
3146 fprintf (dump_file, " Estimating body: %s/%i\n"
3147 " Known to be false: ", node->name (),
3148 node->order);
3149
3150 for (i = predicate_not_inlined_condition;
3151 i < (predicate_first_dynamic_condition
3152 + (int) vec_safe_length (info->conds)); i++)
3153 if (!(possible_truths & (1 << i)))
3154 {
3155 if (found)
3156 fprintf (dump_file, ", ");
3157 found = true;
3158 dump_condition (dump_file, info->conds, i);
3159 }
3160 }
3161
3162 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3163 if (evaluate_predicate (&e->predicate, possible_truths))
3164 {
3165 size += e->size;
3166 gcc_checking_assert (e->time >= 0);
3167 gcc_checking_assert (time >= 0);
3168 if (!inline_param_summary.exists ())
3169 time += e->time;
3170 else
3171 {
3172 int prob = predicate_probability (info->conds,
3173 &e->predicate,
3174 possible_truths,
3175 inline_param_summary);
3176 gcc_checking_assert (prob >= 0);
3177 gcc_checking_assert (prob <= REG_BR_PROB_BASE);
3178 time += apply_probability ((gcov_type) e->time, prob);
3179 }
3180 if (time > MAX_TIME * INLINE_TIME_SCALE)
3181 time = MAX_TIME * INLINE_TIME_SCALE;
3182 gcc_checking_assert (time >= 0);
3183
3184 }
3185 gcc_checking_assert (true_predicate_p (&(*info->entry)[0].predicate));
3186 min_size = (*info->entry)[0].size;
3187 gcc_checking_assert (size >= 0);
3188 gcc_checking_assert (time >= 0);
3189
3190 if (info->loop_iterations
3191 && !evaluate_predicate (info->loop_iterations, possible_truths))
3192 hints |= INLINE_HINT_loop_iterations;
3193 if (info->loop_stride
3194 && !evaluate_predicate (info->loop_stride, possible_truths))
3195 hints |= INLINE_HINT_loop_stride;
3196 if (info->array_index
3197 && !evaluate_predicate (info->array_index, possible_truths))
3198 hints |= INLINE_HINT_array_index;
3199 if (info->scc_no)
3200 hints |= INLINE_HINT_in_scc;
3201 if (DECL_DECLARED_INLINE_P (node->decl))
3202 hints |= INLINE_HINT_declared_inline;
3203
3204 estimate_calls_size_and_time (node, &size, &min_size, &time, &hints, possible_truths,
3205 known_vals, known_contexts, known_aggs);
3206 gcc_checking_assert (size >= 0);
3207 gcc_checking_assert (time >= 0);
3208 time = RDIV (time, INLINE_TIME_SCALE);
3209 size = RDIV (size, INLINE_SIZE_SCALE);
3210 min_size = RDIV (min_size, INLINE_SIZE_SCALE);
3211
3212 if (dump_file && (dump_flags & TDF_DETAILS))
3213 fprintf (dump_file, "\n size:%i time:%i\n", (int) size, (int) time);
3214 if (ret_time)
3215 *ret_time = time;
3216 if (ret_size)
3217 *ret_size = size;
3218 if (ret_min_size)
3219 *ret_min_size = min_size;
3220 if (ret_hints)
3221 *ret_hints = hints;
3222 return;
3223 }
3224
3225
3226 /* Estimate size and time needed to execute callee of EDGE assuming that
3227 parameters known to be constant at caller of EDGE are propagated.
3228 KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
3229 and types for parameters. */
3230
3231 void
3232 estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
3233 vec<tree> known_vals,
3234 vec<ipa_polymorphic_call_context>
3235 known_contexts,
3236 vec<ipa_agg_jump_function_p> known_aggs,
3237 int *ret_size, int *ret_time,
3238 inline_hints *hints)
3239 {
3240 clause_t clause;
3241
3242 clause = evaluate_conditions_for_known_args (node, false, known_vals,
3243 known_aggs);
3244 estimate_node_size_and_time (node, clause, known_vals, known_contexts,
3245 known_aggs, ret_size, NULL, ret_time, hints, vNULL);
3246 }
3247
3248 /* Translate all conditions from callee representation into caller
3249 representation and symbolically evaluate predicate P into new predicate.
3250
3251 INFO is the inline_summary of the function we are adding the predicate to,
3252 CALLEE_INFO is the summary of the function predicate P is from. OPERAND_MAP
3253 is an array mapping callee formal IDs to caller formal IDs. POSSIBLE_TRUTHS
3254 is a clause of all callee conditions that may be true in the caller context.
3255 TOPLEV_PREDICATE is the predicate under which the callee is executed.
3256 OFFSET_MAP is an array of offsets that need to be added to the conditions;
3257 a negative offset means that conditions relying on values passed by
3258 reference have to be discarded because they might not be preserved (and
3259 should be considered offset zero for other purposes). */
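/* A small hypothetical example: if the callee condition is "param 2 changed"
   and OPERAND_MAP[2] == 0, the remapped condition reads "param 0 of the
   caller changed"; if OPERAND_MAP[2] == -1 the condition cannot be expressed
   in the caller and becomes the always-true predicate, as in the code
   below.  */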
3260
3261 static struct predicate
3262 remap_predicate (struct inline_summary *info,
3263 struct inline_summary *callee_info,
3264 struct predicate *p,
3265 vec<int> operand_map,
3266 vec<int> offset_map,
3267 clause_t possible_truths, struct predicate *toplev_predicate)
3268 {
3269 int i;
3270 struct predicate out = true_predicate ();
3271
3272 /* True predicate is easy. */
3273 if (true_predicate_p (p))
3274 return *toplev_predicate;
3275 for (i = 0; p->clause[i]; i++)
3276 {
3277 clause_t clause = p->clause[i];
3278 int cond;
3279 struct predicate clause_predicate = false_predicate ();
3280
3281 gcc_assert (i < MAX_CLAUSES);
3282
3283 for (cond = 0; cond < NUM_CONDITIONS; cond++)
3284 /* Do we have a condition we can't disprove? */
3285 if (clause & possible_truths & (1 << cond))
3286 {
3287 struct predicate cond_predicate;
3288 /* Work out if the condition can translate to predicate in the
3289 inlined function. */
3290 if (cond >= predicate_first_dynamic_condition)
3291 {
3292 struct condition *c;
3293
3294 c = &(*callee_info->conds)[cond
3295 -
3296 predicate_first_dynamic_condition];
3297 /* See if we can remap the condition operand to the caller's operand.
3298 Otherwise give up. */
3299 if (!operand_map.exists ()
3300 || (int) operand_map.length () <= c->operand_num
3301 || operand_map[c->operand_num] == -1
3302 /* TODO: For non-aggregate conditions, adding an offset is
3303 basically an arithmetic jump function processing which
3304 we should support in future. */
3305 || ((!c->agg_contents || !c->by_ref)
3306 && offset_map[c->operand_num] > 0)
3307 || (c->agg_contents && c->by_ref
3308 && offset_map[c->operand_num] < 0))
3309 cond_predicate = true_predicate ();
3310 else
3311 {
3312 struct agg_position_info ap;
3313 HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
3314 if (offset_delta < 0)
3315 {
3316 gcc_checking_assert (!c->agg_contents || !c->by_ref);
3317 offset_delta = 0;
3318 }
3319 gcc_assert (!c->agg_contents
3320 || c->by_ref || offset_delta == 0);
3321 ap.offset = c->offset + offset_delta;
3322 ap.agg_contents = c->agg_contents;
3323 ap.by_ref = c->by_ref;
3324 cond_predicate = add_condition (info,
3325 operand_map[c->operand_num],
3326 &ap, c->code, c->val);
3327 }
3328 }
3329 /* Fixed conditions remain the same; construct a single
3330 condition predicate. */
3331 else
3332 {
3333 cond_predicate.clause[0] = 1 << cond;
3334 cond_predicate.clause[1] = 0;
3335 }
3336 clause_predicate = or_predicates (info->conds, &clause_predicate,
3337 &cond_predicate);
3338 }
3339 out = and_predicates (info->conds, &out, &clause_predicate);
3340 }
3341 return and_predicates (info->conds, &out, toplev_predicate);
3342 }
3343
3344
3345 /* Update summary information of inline clones after inlining.
3346 Compute peak stack usage. */
3347
3348 static void
3349 inline_update_callee_summaries (struct cgraph_node *node, int depth)
3350 {
3351 struct cgraph_edge *e;
3352 struct inline_summary *callee_info = inline_summary (node);
3353 struct inline_summary *caller_info = inline_summary (node->callers->caller);
3354 HOST_WIDE_INT peak;
3355
3356 callee_info->stack_frame_offset
3357 = caller_info->stack_frame_offset
3358 + caller_info->estimated_self_stack_size;
3359 peak = callee_info->stack_frame_offset
3360 + callee_info->estimated_self_stack_size;
3361 if (inline_summary (node->global.inlined_to)->estimated_stack_size < peak)
3362 inline_summary (node->global.inlined_to)->estimated_stack_size = peak;
3363 ipa_propagate_frequency (node);
3364 for (e = node->callees; e; e = e->next_callee)
3365 {
3366 if (!e->inline_failed)
3367 inline_update_callee_summaries (e->callee, depth);
3368 inline_edge_summary (e)->loop_depth += depth;
3369 }
3370 for (e = node->indirect_calls; e; e = e->next_callee)
3371 inline_edge_summary (e)->loop_depth += depth;
3372 }
3373
3374 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
3375 When function A is inlined in B and A calls C with a parameter that
3376 changes with probability PROB1, and C is known to be a passthrough
3377 of an argument of B that changes with probability PROB2, the probability
3378 of change is now PROB1*PROB2. */
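/* For instance, with hypothetical values PROB1 == REG_BR_PROB_BASE / 2 and
   PROB2 == REG_BR_PROB_BASE / 4, the combined probability becomes
   REG_BR_PROB_BASE / 8; when both inputs are nonzero but the product rounds
   down to zero, we keep 1 so the parameter is not treated as invariant.  */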
3379
3380 static void
3381 remap_edge_change_prob (struct cgraph_edge *inlined_edge,
3382 struct cgraph_edge *edge)
3383 {
3384 if (ipa_node_params_vector.exists ())
3385 {
3386 int i;
3387 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3388 struct inline_edge_summary *es = inline_edge_summary (edge);
3389 struct inline_edge_summary *inlined_es
3390 = inline_edge_summary (inlined_edge);
3391
3392 for (i = 0; i < ipa_get_cs_argument_count (args); i++)
3393 {
3394 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3395 if (jfunc->type == IPA_JF_PASS_THROUGH
3396 && (ipa_get_jf_pass_through_formal_id (jfunc)
3397 < (int) inlined_es->param.length ()))
3398 {
3399 int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
3400 int prob1 = es->param[i].change_prob;
3401 int prob2 = inlined_es->param[jf_formal_id].change_prob;
3402 int prob = combine_probabilities (prob1, prob2);
3403
3404 if (prob1 && prob2 && !prob)
3405 prob = 1;
3406
3407 es->param[i].change_prob = prob;
3408 }
3409 }
3410 }
3411 }
3412
3413 /* Update edge summaries of NODE after INLINED_EDGE has been inlined.
3414
3415 Remap predicates of callees of NODE. Rest of arguments match
3416 remap_predicate.
3417
3418 Also update change probabilities. */
3419
3420 static void
3421 remap_edge_summaries (struct cgraph_edge *inlined_edge,
3422 struct cgraph_node *node,
3423 struct inline_summary *info,
3424 struct inline_summary *callee_info,
3425 vec<int> operand_map,
3426 vec<int> offset_map,
3427 clause_t possible_truths,
3428 struct predicate *toplev_predicate)
3429 {
3430 struct cgraph_edge *e;
3431 for (e = node->callees; e; e = e->next_callee)
3432 {
3433 struct inline_edge_summary *es = inline_edge_summary (e);
3434 struct predicate p;
3435
3436 if (e->inline_failed)
3437 {
3438 remap_edge_change_prob (inlined_edge, e);
3439
3440 if (es->predicate)
3441 {
3442 p = remap_predicate (info, callee_info,
3443 es->predicate, operand_map, offset_map,
3444 possible_truths, toplev_predicate);
3445 edge_set_predicate (e, &p);
3446 /* TODO: We should remove the edge for code that will be
3447 optimized out, but we need to keep verifiers and tree-inline
3448 happy. Make it cold for now. */
3449 if (false_predicate_p (&p))
3450 {
3451 e->count = 0;
3452 e->frequency = 0;
3453 }
3454 }
3455 else
3456 edge_set_predicate (e, toplev_predicate);
3457 }
3458 else
3459 remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
3460 operand_map, offset_map, possible_truths,
3461 toplev_predicate);
3462 }
3463 for (e = node->indirect_calls; e; e = e->next_callee)
3464 {
3465 struct inline_edge_summary *es = inline_edge_summary (e);
3466 struct predicate p;
3467
3468 remap_edge_change_prob (inlined_edge, e);
3469 if (es->predicate)
3470 {
3471 p = remap_predicate (info, callee_info,
3472 es->predicate, operand_map, offset_map,
3473 possible_truths, toplev_predicate);
3474 edge_set_predicate (e, &p);
3475 /* TODO: We should remove the edge for code that will be optimized
3476 out, but we need to keep verifiers and tree-inline happy.
3477 Make it cold for now. */
3478 if (false_predicate_p (&p))
3479 {
3480 e->count = 0;
3481 e->frequency = 0;
3482 }
3483 }
3484 else
3485 edge_set_predicate (e, toplev_predicate);
3486 }
3487 }
3488
3489 /* Same as remap_predicate, but set result into hint *HINT. */
3490
3491 static void
3492 remap_hint_predicate (struct inline_summary *info,
3493 struct inline_summary *callee_info,
3494 struct predicate **hint,
3495 vec<int> operand_map,
3496 vec<int> offset_map,
3497 clause_t possible_truths,
3498 struct predicate *toplev_predicate)
3499 {
3500 predicate p;
3501
3502 if (!*hint)
3503 return;
3504 p = remap_predicate (info, callee_info,
3505 *hint,
3506 operand_map, offset_map,
3507 possible_truths, toplev_predicate);
3508 if (!false_predicate_p (&p) && !true_predicate_p (&p))
3509 {
3510 if (!*hint)
3511 set_hint_predicate (hint, p);
3512 else
3513 **hint = and_predicates (info->conds, *hint, &p);
3514 }
3515 }
3516
3517 /* We inlined EDGE. Update summary of the function we inlined into. */
3518
3519 void
3520 inline_merge_summary (struct cgraph_edge *edge)
3521 {
3522 struct inline_summary *callee_info = inline_summary (edge->callee);
3523 struct cgraph_node *to = (edge->caller->global.inlined_to
3524 ? edge->caller->global.inlined_to : edge->caller);
3525 struct inline_summary *info = inline_summary (to);
3526 clause_t clause = 0; /* not_inline is known to be false. */
3527 size_time_entry *e;
3528 vec<int> operand_map = vNULL;
3529 vec<int> offset_map = vNULL;
3530 int i;
3531 struct predicate toplev_predicate;
3532 struct predicate true_p = true_predicate ();
3533 struct inline_edge_summary *es = inline_edge_summary (edge);
3534
3535 if (es->predicate)
3536 toplev_predicate = *es->predicate;
3537 else
3538 toplev_predicate = true_predicate ();
3539
3540 if (ipa_node_params_vector.exists () && callee_info->conds)
3541 {
3542 struct ipa_edge_args *args = IPA_EDGE_REF (edge);
3543 int count = ipa_get_cs_argument_count (args);
3544 int i;
3545
3546 evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
3547 if (count)
3548 {
3549 operand_map.safe_grow_cleared (count);
3550 offset_map.safe_grow_cleared (count);
3551 }
3552 for (i = 0; i < count; i++)
3553 {
3554 struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
3555 int map = -1;
3556
3557 /* TODO: handle non-NOPs when merging. */
3558 if (jfunc->type == IPA_JF_PASS_THROUGH)
3559 {
3560 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
3561 map = ipa_get_jf_pass_through_formal_id (jfunc);
3562 if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
3563 offset_map[i] = -1;
3564 }
3565 else if (jfunc->type == IPA_JF_ANCESTOR)
3566 {
3567 HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
3568 if (offset >= 0 && offset < INT_MAX)
3569 {
3570 map = ipa_get_jf_ancestor_formal_id (jfunc);
3571 if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
3572 offset = -1;
3573 offset_map[i] = offset;
3574 }
3575 }
3576 operand_map[i] = map;
3577 gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
3578 }
3579 }
3580 for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
3581 {
3582 struct predicate p = remap_predicate (info, callee_info,
3583 &e->predicate, operand_map,
3584 offset_map, clause,
3585 &toplev_predicate);
3586 if (!false_predicate_p (&p))
3587 {
3588 gcov_type add_time = ((gcov_type) e->time * edge->frequency
3589 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
3590 int prob = predicate_probability (callee_info->conds,
3591 &e->predicate,
3592 clause, es->param);
3593 add_time = apply_probability ((gcov_type) add_time, prob);
3594 if (add_time > MAX_TIME * INLINE_TIME_SCALE)
3595 add_time = MAX_TIME * INLINE_TIME_SCALE;
3596 if (prob != REG_BR_PROB_BASE
3597 && dump_file && (dump_flags & TDF_DETAILS))
3598 {
3599 fprintf (dump_file, "\t\tScaling time by probability:%f\n",
3600 (double) prob / REG_BR_PROB_BASE);
3601 }
3602 account_size_time (info, e->size, add_time, &p);
3603 }
3604 }
3605 remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
3606 offset_map, clause, &toplev_predicate);
3607 remap_hint_predicate (info, callee_info,
3608 &callee_info->loop_iterations,
3609 operand_map, offset_map, clause, &toplev_predicate);
3610 remap_hint_predicate (info, callee_info,
3611 &callee_info->loop_stride,
3612 operand_map, offset_map, clause, &toplev_predicate);
3613 remap_hint_predicate (info, callee_info,
3614 &callee_info->array_index,
3615 operand_map, offset_map, clause, &toplev_predicate);
3616
3617 inline_update_callee_summaries (edge->callee,
3618 inline_edge_summary (edge)->loop_depth);
3619
3620 /* We do not maintain predicates of inlined edges, so free it. */
3621 edge_set_predicate (edge, &true_p);
3622 /* Similarly remove param summaries. */
3623 es->param.release ();
3624 operand_map.release ();
3625 offset_map.release ();
3626 }
3627
3628 /* For performance reasons inline_merge_summary does not update the overall
3629 size and time. Recompute them here. */
3630
3631 void
3632 inline_update_overall_summary (struct cgraph_node *node)
3633 {
3634 struct inline_summary *info = inline_summary (node);
3635 size_time_entry *e;
3636 int i;
3637
3638 info->size = 0;
3639 info->time = 0;
3640 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
3641 {
3642 info->size += e->size, info->time += e->time;
3643 if (info->time > MAX_TIME * INLINE_TIME_SCALE)
3644 info->time = MAX_TIME * INLINE_TIME_SCALE;
3645 }
3646 estimate_calls_size_and_time (node, &info->size, &info->min_size,
3647 &info->time, NULL,
3648 ~(clause_t) (1 << predicate_false_condition),
3649 vNULL, vNULL, vNULL);
3650 info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
3651 info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
3652 }
3653
3654 /* Return hints derived from EDGE. */
3655 int
3656 simple_edge_hints (struct cgraph_edge *edge)
3657 {
3658 int hints = 0;
3659 struct cgraph_node *to = (edge->caller->global.inlined_to
3660 ? edge->caller->global.inlined_to : edge->caller);
3661 if (inline_summary (to)->scc_no
3662 && inline_summary (to)->scc_no == inline_summary (edge->callee)->scc_no
3663 && !edge->recursive_p ())
3664 hints |= INLINE_HINT_same_scc;
3665
3666 if (to->lto_file_data && edge->callee->lto_file_data
3667 && to->lto_file_data != edge->callee->lto_file_data)
3668 hints |= INLINE_HINT_cross_module;
3669
3670 return hints;
3671 }
3672
3673 /* Estimate the time cost for the caller when inlining EDGE.
3674 Only to be called via estimate_edge_time, which handles the
3675 caching mechanism.
3676
3677 When caching, also update the cache entry. Compute both time and
3678 size, since we always need both metrics eventually. */
3679
3680 int
3681 do_estimate_edge_time (struct cgraph_edge *edge)
3682 {
3683 int time;
3684 int size;
3685 inline_hints hints;
3686 struct cgraph_node *callee;
3687 clause_t clause;
3688 vec<tree> known_vals;
3689 vec<ipa_polymorphic_call_context> known_contexts;
3690 vec<ipa_agg_jump_function_p> known_aggs;
3691 struct inline_edge_summary *es = inline_edge_summary (edge);
3692 int min_size;
3693
3694 callee = edge->callee->ultimate_alias_target ();
3695
3696 gcc_checking_assert (edge->inline_failed);
3697 evaluate_properties_for_edge (edge, true,
3698 &clause, &known_vals, &known_contexts,
3699 &known_aggs);
3700 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3701 known_aggs, &size, &min_size, &time, &hints, es->param);
3702
3703 /* When we have profile feedback, we can quite safely identify hot
3704 edges and for those we disable size limits. Don't do that when the
3705 probability that the caller will call the callee is low, however, since it
3706 may hurt optimization of the caller's hot path. */
3707 if (edge->count && edge->maybe_hot_p ()
3708 && (edge->count * 2
3709 > (edge->caller->global.inlined_to
3710 ? edge->caller->global.inlined_to->count : edge->caller->count)))
3711 hints |= INLINE_HINT_known_hot;
3712
3713 known_vals.release ();
3714 known_contexts.release ();
3715 known_aggs.release ();
3716 gcc_checking_assert (size >= 0);
3717 gcc_checking_assert (time >= 0);
3718
3719 /* When caching, update the cache entry. */
3720 if (edge_growth_cache.exists ())
3721 {
3722 inline_summary (edge->callee)->min_size = min_size;
3723 if ((int) edge_growth_cache.length () <= edge->uid)
3724 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
3725 edge_growth_cache[edge->uid].time = time + (time >= 0);
3726
3727 edge_growth_cache[edge->uid].size = size + (size >= 0);
3728 hints |= simple_edge_hints (edge);
3729 edge_growth_cache[edge->uid].hints = hints + 1;
3730 }
3731 return time;
3732 }
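
/* Note on the cache encoding above (matching the decoding in
   do_estimate_edge_size and do_estimate_edge_hints below): a stored value
   of 0 means "not computed yet", so non-negative results are biased by
   one.  For example a computed size of 12 is cached as 13 and read back
   as 13 - (13 > 0) == 12; hints are always cached as hints + 1 and read
   back as hints - 1.  */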
3733
3734
3735 /* Return estimated callee growth after inlining EDGE.
3736 Only to be called via estimate_edge_size. */
3737
3738 int
3739 do_estimate_edge_size (struct cgraph_edge *edge)
3740 {
3741 int size;
3742 struct cgraph_node *callee;
3743 clause_t clause;
3744 vec<tree> known_vals;
3745 vec<ipa_polymorphic_call_context> known_contexts;
3746 vec<ipa_agg_jump_function_p> known_aggs;
3747
3748 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3749
3750 if (edge_growth_cache.exists ())
3751 {
3752 do_estimate_edge_time (edge);
3753 size = edge_growth_cache[edge->uid].size;
3754 gcc_checking_assert (size);
3755 return size - (size > 0);
3756 }
3757
3758 callee = edge->callee->ultimate_alias_target ();
3759
3760 /* The early inliner runs without caching; go ahead and do the dirty work. */
3761 gcc_checking_assert (edge->inline_failed);
3762 evaluate_properties_for_edge (edge, true,
3763 &clause, &known_vals, &known_contexts,
3764 &known_aggs);
3765 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3766 known_aggs, &size, NULL, NULL, NULL, vNULL);
3767 known_vals.release ();
3768 known_contexts.release ();
3769 known_aggs.release ();
3770 return size;
3771 }
3772
3773
3774 /* Estimate the hints for inlining EDGE.
3775 Only to be called via estimate_edge_hints. */
3776
3777 inline_hints
3778 do_estimate_edge_hints (struct cgraph_edge *edge)
3779 {
3780 inline_hints hints;
3781 struct cgraph_node *callee;
3782 clause_t clause;
3783 vec<tree> known_vals;
3784 vec<ipa_polymorphic_call_context> known_contexts;
3785 vec<ipa_agg_jump_function_p> known_aggs;
3786
3787 /* When we do caching, use do_estimate_edge_time to populate the entry. */
3788
3789 if (edge_growth_cache.exists ())
3790 {
3791 do_estimate_edge_time (edge);
3792 hints = edge_growth_cache[edge->uid].hints;
3793 gcc_checking_assert (hints);
3794 return hints - 1;
3795 }
3796
3797 callee = edge->callee->ultimate_alias_target ();
3798
3799 /* The early inliner runs without caching; go ahead and do the dirty work. */
3800 gcc_checking_assert (edge->inline_failed);
3801 evaluate_properties_for_edge (edge, true,
3802 &clause, &known_vals, &known_contexts,
3803 &known_aggs);
3804 estimate_node_size_and_time (callee, clause, known_vals, known_contexts,
3805 known_aggs, NULL, NULL, NULL, &hints, vNULL);
3806 known_vals.release ();
3807 known_contexts.release ();
3808 known_aggs.release ();
3809 hints |= simple_edge_hints (edge);
3810 return hints;
3811 }
3812
3813
3814 /* Estimate self time of the function NODE after inlining EDGE. */
3815
3816 int
3817 estimate_time_after_inlining (struct cgraph_node *node,
3818 struct cgraph_edge *edge)
3819 {
3820 struct inline_edge_summary *es = inline_edge_summary (edge);
3821 if (!es->predicate || !false_predicate_p (es->predicate))
3822 {
3823 gcov_type time =
3824 inline_summary (node)->time + estimate_edge_time (edge);
3825 if (time < 0)
3826 time = 0;
3827 if (time > MAX_TIME)
3828 time = MAX_TIME;
3829 return time;
3830 }
3831 return inline_summary (node)->time;
3832 }
3833
3834
3835 /* Estimate the size of NODE after inlining EDGE, which should be an
3836 edge to either NODE or a call inlined into NODE. */
3837
3838 int
3839 estimate_size_after_inlining (struct cgraph_node *node,
3840 struct cgraph_edge *edge)
3841 {
3842 struct inline_edge_summary *es = inline_edge_summary (edge);
3843 if (!es->predicate || !false_predicate_p (es->predicate))
3844 {
3845 int size = inline_summary (node)->size + estimate_edge_growth (edge);
3846 gcc_assert (size >= 0);
3847 return size;
3848 }
3849 return inline_summary (node)->size;
3850 }
3851
3852
3853 struct growth_data
3854 {
3855 struct cgraph_node *node;
3856 bool self_recursive;
3857 int growth;
3858 };
3859
3860
3861 /* Worker for do_estimate_growth. Collect growth for all callers. */
3862
3863 static bool
3864 do_estimate_growth_1 (struct cgraph_node *node, void *data)
3865 {
3866 struct cgraph_edge *e;
3867 struct growth_data *d = (struct growth_data *) data;
3868
3869 for (e = node->callers; e; e = e->next_caller)
3870 {
3871 gcc_checking_assert (e->inline_failed);
3872
3873 if (e->caller == d->node
3874 || (e->caller->global.inlined_to
3875 && e->caller->global.inlined_to == d->node))
3876 d->self_recursive = true;
3877 d->growth += estimate_edge_growth (e);
3878 }
3879 return false;
3880 }
3881
3882
3883 /* Estimate the growth caused by inlining NODE into all callers. */
3884
3885 int
3886 do_estimate_growth (struct cgraph_node *node)
3887 {
3888 struct growth_data d = { node, false, 0 };
3889 struct inline_summary *info = inline_summary (node);
3890
3891 node->call_for_symbol_thunks_and_aliases (do_estimate_growth_1, &d, true);
3892
3893 /* For self-recursive functions the growth estimation really should be
3894 infinity. We don't want to return very large values because the growth
3895 plays various roles in the badness computation fractions. Be sure not to
3896 return zero or negative growth. */
3897 if (d.self_recursive)
3898 d.growth = d.growth < info->size ? info->size : d.growth;
3899 else if (DECL_EXTERNAL (node->decl))
3900 ;
3901 else
3902 {
3903 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
3904 d.growth -= info->size;
3905 /* COMDAT functions are very often not shared across multiple units
3906 since they come from various template instantiations.
3907 Take this into account. */
3908 else if (DECL_COMDAT (node->decl)
3909 && node->can_remove_if_no_direct_calls_p ())
3910 d.growth -= (info->size
3911 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
3912 + 50) / 100;
3913 }
3914
3915 if (node_growth_cache.exists ())
3916 {
3917 if ((int) node_growth_cache.length () <= node->uid)
3918 node_growth_cache.safe_grow_cleared (symtab->cgraph_max_uid);
3919 node_growth_cache[node->uid] = d.growth + (d.growth >= 0);
3920 }
3921 return d.growth;
3922 }
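
/* Worked example for the COMDAT adjustment above (a sketch; the parameter
   value is an assumption): with info->size == 100 and
   PARAM_COMDAT_SHARING_PROBABILITY == 20, the rounded subtraction is
   (100 * (100 - 20) + 50) / 100 == 80, i.e. roughly 80% of the body is
   expected to disappear once no direct calls remain.  */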
3923
3924
3925 /* Make a cheap estimation whether the growth of NODE is likely positive,
3926 knowing EDGE_GROWTH of one particular edge.
3927 We assume that most other edges will have a similar growth
3928 and skip the computation if there are too many callers. */
3929
3930 bool
3931 growth_likely_positive (struct cgraph_node *node, int edge_growth)
3932 {
3933 int max_callers;
3934 int ret;
3935 struct cgraph_edge *e;
3936 gcc_checking_assert (edge_growth > 0);
3937
3938 /* Unlike for functions called once, we play unsafe with
3939 COMDATs. We can allow that since we know the functions
3940 under consideration are small (and thus the risk is small) and
3941 moreover the growth estimate already accounts for the fact that COMDAT
3942 functions may or may not disappear when eliminated from the
3943 current unit. With good probability, making the aggressive
3944 choice in all units is going to make the overall program
3945 smaller.
3946
3947 Consequently we ask cgraph_can_remove_if_no_direct_calls_p
3948 instead of
3949 cgraph_will_be_removed_from_program_if_no_direct_calls */
3950 if (DECL_EXTERNAL (node->decl)
3951 || !node->can_remove_if_no_direct_calls_p ())
3952 return true;
3953
3954 /* If there is a cached value, just go ahead. */
3955 if ((int)node_growth_cache.length () > node->uid
3956 && (ret = node_growth_cache[node->uid]))
3957 return ret > 0;
3958 if (!node->will_be_removed_from_program_if_no_direct_calls_p ()
3959 && (!DECL_COMDAT (node->decl)
3960 || !node->can_remove_if_no_direct_calls_p ()))
3961 return true;
3962 max_callers = inline_summary (node)->size * 4 / edge_growth + 2;
3963
3964 for (e = node->callers; e; e = e->next_caller)
3965 {
3966 max_callers--;
3967 if (!max_callers)
3968 return true;
3969 }
3970 return estimate_growth (node) > 0;
3971 }
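
/* Worked example for the caller-count cutoff above (a sketch with assumed
   numbers): for a node of size 120 and an edge_growth of 30,
   max_callers == 120 * 4 / 30 + 2 == 18, so after seeing 18 callers the
   growth is assumed positive without ever calling estimate_growth.  */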
3972
3973
3974 /* This function performs intraprocedural analysis in NODE that is required to
3975 inline indirect calls. */
3976
3977 static void
3978 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
3979 {
3980 ipa_analyze_node (node);
3981 if (dump_file && (dump_flags & TDF_DETAILS))
3982 {
3983 ipa_print_node_params (dump_file, node);
3984 ipa_print_node_jump_functions (dump_file, node);
3985 }
3986 }
3987
3988
3989 /* Compute the inline summary of function NODE. */
3990
3991 void
3992 inline_analyze_function (struct cgraph_node *node)
3993 {
3994 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
3995
3996 if (dump_file)
3997 fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
3998 node->name (), node->order);
3999 if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
4000 inline_indirect_intraprocedural_analysis (node);
4001 compute_inline_parameters (node, false);
4002 if (!optimize)
4003 {
4004 struct cgraph_edge *e;
4005 for (e = node->callees; e; e = e->next_callee)
4006 {
4007 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4008 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4009 e->call_stmt_cannot_inline_p = true;
4010 }
4011 for (e = node->indirect_calls; e; e = e->next_callee)
4012 {
4013 if (e->inline_failed == CIF_FUNCTION_NOT_CONSIDERED)
4014 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
4015 e->call_stmt_cannot_inline_p = true;
4016 }
4017 }
4018
4019 pop_cfun ();
4020 }
4021
4022
4023 /* Called when a new function is inserted into the callgraph late. */
4024
4025 static void
4026 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
4027 {
4028 inline_analyze_function (node);
4029 }
4030
4031
4032 /* Compute inline summaries for all functions in the compilation unit. */
4033
4034 void
4035 inline_generate_summary (void)
4036 {
4037 struct cgraph_node *node;
4038
4039 /* When not optimizing, do not bother to analyze. Inlining is still done
4040 because edge redirection needs to happen there. */
4041 if (!optimize && !flag_generate_lto && !flag_generate_offload && !flag_wpa)
4042 return;
4043
4044 function_insertion_hook_holder =
4045 symtab->add_cgraph_insertion_hook (&add_new_function, NULL);
4046
4047 ipa_register_cgraph_hooks ();
4048 inline_free_summary ();
4049
4050 FOR_EACH_DEFINED_FUNCTION (node)
4051 if (!node->alias)
4052 inline_analyze_function (node);
4053 }
4054
4055
4056 /* Read predicate from IB. */
4057
4058 static struct predicate
4059 read_predicate (struct lto_input_block *ib)
4060 {
4061 struct predicate out;
4062 clause_t clause;
4063 int k = 0;
4064
4065 do
4066 {
4067 gcc_assert (k <= MAX_CLAUSES);
4068 clause = out.clause[k++] = streamer_read_uhwi (ib);
4069 }
4070 while (clause);
4071
4072 /* Zero-initialize the remaining clauses in OUT. */
4073 while (k <= MAX_CLAUSES)
4074 out.clause[k++] = 0;
4075
4076 return out;
4077 }
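
/* Illustrative stream layout for the predicate format read above (and
   produced by write_predicate below): clauses are emitted as non-zero
   uhwi values followed by a terminating zero.  A predicate with clauses
   { 0x5, 0x3 } is therefore streamed as 5, 3, 0, while the always-true
   predicate (no clauses) is streamed as a single 0.  */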
4078
4079
4080 /* Read inline summary for edge E from IB. */
4081
4082 static void
4083 read_inline_edge_summary (struct lto_input_block *ib, struct cgraph_edge *e)
4084 {
4085 struct inline_edge_summary *es = inline_edge_summary (e);
4086 struct predicate p;
4087 int length, i;
4088
4089 es->call_stmt_size = streamer_read_uhwi (ib);
4090 es->call_stmt_time = streamer_read_uhwi (ib);
4091 es->loop_depth = streamer_read_uhwi (ib);
4092 p = read_predicate (ib);
4093 edge_set_predicate (e, &p);
4094 length = streamer_read_uhwi (ib);
4095 if (length)
4096 {
4097 es->param.safe_grow_cleared (length);
4098 for (i = 0; i < length; i++)
4099 es->param[i].change_prob = streamer_read_uhwi (ib);
4100 }
4101 }
4102
4103
4104 /* Stream in inline summaries from the section. */
4105
4106 static void
4107 inline_read_section (struct lto_file_decl_data *file_data, const char *data,
4108 size_t len)
4109 {
4110 const struct lto_function_header *header =
4111 (const struct lto_function_header *) data;
4112 const int cfg_offset = sizeof (struct lto_function_header);
4113 const int main_offset = cfg_offset + header->cfg_size;
4114 const int string_offset = main_offset + header->main_size;
4115 struct data_in *data_in;
4116 unsigned int i, count2, j;
4117 unsigned int f_count;
4118
4119 lto_input_block ib ((const char *) data + main_offset, header->main_size);
4120
4121 data_in =
4122 lto_data_in_create (file_data, (const char *) data + string_offset,
4123 header->string_size, vNULL);
4124 f_count = streamer_read_uhwi (&ib);
4125 for (i = 0; i < f_count; i++)
4126 {
4127 unsigned int index;
4128 struct cgraph_node *node;
4129 struct inline_summary *info;
4130 lto_symtab_encoder_t encoder;
4131 struct bitpack_d bp;
4132 struct cgraph_edge *e;
4133 predicate p;
4134
4135 index = streamer_read_uhwi (&ib);
4136 encoder = file_data->symtab_node_encoder;
4137 node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
4138 index));
4139 info = inline_summary (node);
4140
4141 info->estimated_stack_size
4142 = info->estimated_self_stack_size = streamer_read_uhwi (&ib);
4143 info->size = info->self_size = streamer_read_uhwi (&ib);
4144 info->time = info->self_time = streamer_read_uhwi (&ib);
4145
4146 bp = streamer_read_bitpack (&ib);
4147 info->inlinable = bp_unpack_value (&bp, 1);
4148
4149 count2 = streamer_read_uhwi (&ib);
4150 gcc_assert (!info->conds);
4151 for (j = 0; j < count2; j++)
4152 {
4153 struct condition c;
4154 c.operand_num = streamer_read_uhwi (&ib);
4155 c.code = (enum tree_code) streamer_read_uhwi (&ib);
4156 c.val = stream_read_tree (&ib, data_in);
4157 bp = streamer_read_bitpack (&ib);
4158 c.agg_contents = bp_unpack_value (&bp, 1);
4159 c.by_ref = bp_unpack_value (&bp, 1);
4160 if (c.agg_contents)
4161 c.offset = streamer_read_uhwi (&ib);
4162 vec_safe_push (info->conds, c);
4163 }
4164 count2 = streamer_read_uhwi (&ib);
4165 gcc_assert (!info->entry);
4166 for (j = 0; j < count2; j++)
4167 {
4168 struct size_time_entry e;
4169
4170 e.size = streamer_read_uhwi (&ib);
4171 e.time = streamer_read_uhwi (&ib);
4172 e.predicate = read_predicate (&ib);
4173
4174 vec_safe_push (info->entry, e);
4175 }
4176
4177 p = read_predicate (&ib);
4178 set_hint_predicate (&info->loop_iterations, p);
4179 p = read_predicate (&ib);
4180 set_hint_predicate (&info->loop_stride, p);
4181 p = read_predicate (&ib);
4182 set_hint_predicate (&info->array_index, p);
4183 for (e = node->callees; e; e = e->next_callee)
4184 read_inline_edge_summary (&ib, e);
4185 for (e = node->indirect_calls; e; e = e->next_callee)
4186 read_inline_edge_summary (&ib, e);
4187 }
4188
4189 lto_free_section_data (file_data, LTO_section_inline_summary, NULL, data,
4190 len);
4191 lto_data_in_delete (data_in);
4192 }
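
/* Sketch of the section layout decoded above (derived from the offset
   computations, not from separate documentation):

     [lto_function_header][cfg: header->cfg_size bytes]
     [main stream: header->main_size bytes][string table]

   Only the main stream and the string table are walked here; the cfg
   part is skipped via main_offset.  */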
4193
4194
4195 /* Read inline summary. Jump functions are shared among ipa-cp
4196 and inliner, so when ipa-cp is active, we don't need to read them
4197 twice. */
4198
4199 void
4200 inline_read_summary (void)
4201 {
4202 struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
4203 struct lto_file_decl_data *file_data;
4204 unsigned int j = 0;
4205
4206 inline_summary_alloc ();
4207
4208 while ((file_data = file_data_vec[j++]))
4209 {
4210 size_t len;
4211 const char *data = lto_get_section_data (file_data,
4212 LTO_section_inline_summary,
4213 NULL, &len);
4214 if (data)
4215 inline_read_section (file_data, data, len);
4216 else
4217 /* Fatal error here. We do not want to support compiling ltrans units
4218 with a different version of the compiler or different flags than the WPA
4219 unit, so this should never happen. */
4220 fatal_error ("ipa inline summary is missing in input file");
4221 }
4222 if (optimize)
4223 {
4224 ipa_register_cgraph_hooks ();
4225 if (!flag_ipa_cp)
4226 ipa_prop_read_jump_functions ();
4227 }
4228 function_insertion_hook_holder =
4229 symtab->add_cgraph_insertion_hook (&add_new_function, NULL);
4230 }
4231
4232
4233 /* Write predicate P to OB. */
4234
4235 static void
4236 write_predicate (struct output_block *ob, struct predicate *p)
4237 {
4238 int j;
4239 if (p)
4240 for (j = 0; p->clause[j]; j++)
4241 {
4242 gcc_assert (j < MAX_CLAUSES);
4243 streamer_write_uhwi (ob, p->clause[j]);
4244 }
4245 streamer_write_uhwi (ob, 0);
4246 }
4247
4248
4249 /* Write inline summary for edge E to OB. */
4250
4251 static void
4252 write_inline_edge_summary (struct output_block *ob, struct cgraph_edge *e)
4253 {
4254 struct inline_edge_summary *es = inline_edge_summary (e);
4255 int i;
4256
4257 streamer_write_uhwi (ob, es->call_stmt_size);
4258 streamer_write_uhwi (ob, es->call_stmt_time);
4259 streamer_write_uhwi (ob, es->loop_depth);
4260 write_predicate (ob, es->predicate);
4261 streamer_write_uhwi (ob, es->param.length ());
4262 for (i = 0; i < (int) es->param.length (); i++)
4263 streamer_write_uhwi (ob, es->param[i].change_prob);
4264 }
4265
4266
4267 /* Write inline summaries for nodes in SET.
4268 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
4269 active, we don't need to write them twice. */
4270
4271 void
4272 inline_write_summary (void)
4273 {
4274 struct cgraph_node *node;
4275 struct output_block *ob = create_output_block (LTO_section_inline_summary);
4276 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
4277 unsigned int count = 0;
4278 int i;
4279
4280 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4281 {
4282 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4283 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4284 if (cnode && cnode->definition && !cnode->alias)
4285 count++;
4286 }
4287 streamer_write_uhwi (ob, count);
4288
4289 for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
4290 {
4291 symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
4292 cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
4293 if (cnode && (node = cnode)->definition && !node->alias)
4294 {
4295 struct inline_summary *info = inline_summary (node);
4296 struct bitpack_d bp;
4297 struct cgraph_edge *edge;
4298 int i;
4299 size_time_entry *e;
4300 struct condition *c;
4301
4302 streamer_write_uhwi (ob,
4303 lto_symtab_encoder_encode (encoder, node));
4306 streamer_write_hwi (ob, info->estimated_self_stack_size);
4307 streamer_write_hwi (ob, info->self_size);
4308 streamer_write_hwi (ob, info->self_time);
4309 bp = bitpack_create (ob->main_stream);
4310 bp_pack_value (&bp, info->inlinable, 1);
4311 streamer_write_bitpack (&bp);
4312 streamer_write_uhwi (ob, vec_safe_length (info->conds));
4313 for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
4314 {
4315 streamer_write_uhwi (ob, c->operand_num);
4316 streamer_write_uhwi (ob, c->code);
4317 stream_write_tree (ob, c->val, true);
4318 bp = bitpack_create (ob->main_stream);
4319 bp_pack_value (&bp, c->agg_contents, 1);
4320 bp_pack_value (&bp, c->by_ref, 1);
4321 streamer_write_bitpack (&bp);
4322 if (c->agg_contents)
4323 streamer_write_uhwi (ob, c->offset);
4324 }
4325 streamer_write_uhwi (ob, vec_safe_length (info->entry));
4326 for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
4327 {
4328 streamer_write_uhwi (ob, e->size);
4329 streamer_write_uhwi (ob, e->time);
4330 write_predicate (ob, &e->predicate);
4331 }
4332 write_predicate (ob, info->loop_iterations);
4333 write_predicate (ob, info->loop_stride);
4334 write_predicate (ob, info->array_index);
4335 for (edge = node->callees; edge; edge = edge->next_callee)
4336 write_inline_edge_summary (ob, edge);
4337 for (edge = node->indirect_calls; edge; edge = edge->next_callee)
4338 write_inline_edge_summary (ob, edge);
4339 }
4340 }
4341 streamer_write_char_stream (ob->main_stream, 0);
4342 produce_asm (ob, NULL);
4343 destroy_output_block (ob);
4344
4345 if (optimize && !flag_ipa_cp)
4346 ipa_prop_write_jump_functions ();
4347 }
4348
4349
4350 /* Release inline summary. */
4351
4352 void
4353 inline_free_summary (void)
4354 {
4355 struct cgraph_node *node;
4356 if (function_insertion_hook_holder)
4357 symtab->remove_cgraph_insertion_hook (function_insertion_hook_holder);
4358 function_insertion_hook_holder = NULL;
4359 if (node_removal_hook_holder)
4360 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
4361 node_removal_hook_holder = NULL;
4362 if (edge_removal_hook_holder)
4363 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
4364 edge_removal_hook_holder = NULL;
4365 if (node_duplication_hook_holder)
4366 symtab->remove_cgraph_duplication_hook (node_duplication_hook_holder);
4367 node_duplication_hook_holder = NULL;
4368 if (edge_duplication_hook_holder)
4369 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
4370 edge_duplication_hook_holder = NULL;
4371 if (!inline_edge_summary_vec.exists ())
4372 return;
4373 FOR_EACH_DEFINED_FUNCTION (node)
4374 if (!node->alias)
4375 reset_inline_summary (node);
4376 vec_free (inline_summary_vec);
4377 inline_edge_summary_vec.release ();
4378 if (edge_predicate_pool)
4379 free_alloc_pool (edge_predicate_pool);
4380 edge_predicate_pool = 0;
4381 }