[gcc.git] / gcc / ipa-cp.c
1 /* Interprocedural constant propagation
2 Copyright (C) 2005-2018 Free Software Foundation, Inc.
3
4 Contributed by Razya Ladelsky <RAZYA@il.ibm.com> and Martin Jambor
5 <mjambor@suse.cz>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 /* Interprocedural constant propagation (IPA-CP).
24
25 The goal of this transformation is to
26
27 1) discover functions which are always invoked with some arguments with the
28 same known constant values and modify the functions so that the
29 subsequent optimizations can take advantage of the knowledge, and
30
31 2) partial specialization - create specialized versions of functions
32 transformed in this way if some parameters are known constants only in
33 certain contexts but the estimated tradeoff between the speedup and the
34 growth in code size is deemed good.
35
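   For example (an illustrative sketch, not code from this pass): if

     static int f (int scale) { return scale * 2; }

   is only ever called as f (16), point 1) lets the body be folded to the
   constant 32; if f (16) is merely the common case, point 2) can create a
   clone specialized for scale == 16 and redirect only the matching calls.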
36 The algorithm also propagates types and attempts to perform type based
37 devirtualization. Types are propagated much like constants.
38
39 The algorithm basically consists of three stages. In the first, functions
40 are analyzed one at a time and jump functions are constructed for all known
41 call-sites. In the second phase, the pass propagates information from the
42 jump functions across the call graph to reveal what values are available at what
43 call sites, performs estimations of effects of known values on functions and
44 their callees, and finally decides what specialized extra versions should be
45 created. In the third, the special versions materialize and appropriate
46 calls are redirected.
47
48 The algorithm used is to a certain extent based on "Interprocedural Constant
49 Propagation", by David Callahan, Keith D Cooper, Ken Kennedy, Linda Torczon,
50 Comp86, pg 152-161 and "A Methodology for Procedure Cloning" by Keith D
51 Cooper, Mary W. Hall, and Ken Kennedy.
52
53
54 First stage - intraprocedural analysis
55 =======================================
56
57 This phase computes jump_function and modification flags.
58
59 A jump function for a call-site represents the values passed as actual
60 arguments of a given call-site. In principle, there are three types of
61 values:
62
63 Pass through - the caller's formal parameter is passed as an actual
64 argument, possibly with an operation performed on it.
65 Constant - a constant is passed as an actual argument.
66 Unknown - neither of the above.
67
68 All jump function types are described in detail in ipa-prop.h, together with
69 the data structures that represent them and methods of accessing them.
70
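   As an illustration (a hypothetical caller, not code from this file), in

     static int callee (int a, int b, int c);
     int caller (int x) { return callee (x, x + 4, 7); }

   the analysis would build a plain pass-through jump function for the
   first argument, a pass-through with a PLUS_EXPR operation for the
   second and a constant jump function for the third.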
71 ipcp_generate_summary() is the main function of the first stage.
72
73 Second stage - interprocedural analysis
74 ========================================
75
76 This stage is itself divided into two phases. In the first, we propagate
77 known values over the call graph, in the second, we make cloning decisions.
78 It uses a different algorithm than the one in Callahan's original paper.
79
80 First, we traverse the functions topologically from callers to callees and,
81 for each strongly connected component (SCC), we propagate constants
82 according to previously computed jump functions. We also record what known
83 values depend on other known values and estimate local effects. Finally, we
84 propagate cumulative information about these effects from dependent values
85 to those on which they depend.
86
87 Second, we again traverse the call graph in the same topological order and
88 make clones for functions which we know are called with the same values in
89 all contexts and decide about extra specialized clones of functions just for
90 some contexts - these decisions are based on both local estimates and
91 cumulative estimates propagated from callees.
92
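   As a sketch of the two phases: if main calls g (5) and g passes its
   parameter unchanged to h, the propagation traversal discovers the value
   5 for the parameters of both g and h and records that h's value depends
   on g's; the decision traversal can then clone g (and transitively h)
   for the constant 5 wherever the cost estimates justify it.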
93 ipcp_propagate_stage() and ipcp_decision_stage() together constitute the
94 second stage.
95
96 Third phase - materialization of clones, call statement updates.
97 ================================================================
98
99 This stage is currently performed by call graph code (mainly in cgraphunit.c
100 and tree-inline.c) according to instructions inserted into the call graph by
101 the second stage. */
102
103 #include "config.h"
104 #include "system.h"
105 #include "coretypes.h"
106 #include "backend.h"
107 #include "tree.h"
108 #include "gimple-expr.h"
109 #include "predict.h"
110 #include "alloc-pool.h"
111 #include "tree-pass.h"
112 #include "cgraph.h"
113 #include "diagnostic.h"
114 #include "fold-const.h"
115 #include "gimple-fold.h"
116 #include "symbol-summary.h"
117 #include "tree-vrp.h"
118 #include "ipa-prop.h"
119 #include "tree-pretty-print.h"
120 #include "tree-inline.h"
121 #include "params.h"
122 #include "ipa-fnsummary.h"
123 #include "ipa-utils.h"
124 #include "tree-ssa-ccp.h"
125 #include "stringpool.h"
126 #include "attribs.h"
127
128 template <typename valtype> class ipcp_value;
129
130 /* Describes a particular source for an IPA-CP value. */
131
132 template <typename valtype>
133 class ipcp_value_source
134 {
135 public:
136 /* Aggregate offset of the source, negative if the source is the scalar value of
137 the argument itself. */
138 HOST_WIDE_INT offset;
139 /* The incoming edge that brought the value. */
140 cgraph_edge *cs;
141 /* If the jump function that resulted in this value was a pass-through or an
142 ancestor, this is the ipcp_value of the caller from which the described
143 value has been derived. Otherwise it is NULL. */
144 ipcp_value<valtype> *val;
145 /* Next pointer in a linked list of sources of a value. */
146 ipcp_value_source *next;
147 /* If the jump function that resulted in this value was a pass-through or an
148 ancestor, this is the index of the parameter of the caller the jump
149 function references. */
150 int index;
151 };
152
153 /* Common ancestor for all ipcp_value instantiations. */
154
155 class ipcp_value_base
156 {
157 public:
158 /* Time benefit and size cost that specializing the function for this value
159 would bring about in this function alone. */
160 int local_time_benefit, local_size_cost;
161 /* Time benefit and size cost that specializing the function for this value
162 can bring about in its callees (transitively). */
163 int prop_time_benefit, prop_size_cost;
164
165 ipcp_value_base ()
166 : local_time_benefit (0), local_size_cost (0),
167 prop_time_benefit (0), prop_size_cost (0) {}
168 };
169
170 /* Describes one particular value stored in struct ipcp_lattice. */
171
172 template <typename valtype>
173 class ipcp_value : public ipcp_value_base
174 {
175 public:
176 /* The actual value for the given parameter. */
177 valtype value;
178 /* The list of sources from which this value originates. */
179 ipcp_value_source <valtype> *sources;
180 /* Next pointers in a linked list of all values in a lattice. */
181 ipcp_value *next;
182 /* Next pointers in a linked list of values in a strongly connected component
183 of values. */
184 ipcp_value *scc_next;
185 /* Next pointers in a linked list of SCCs of values sorted topologically
186 according to their sources. */
187 ipcp_value *topo_next;
188 /* A specialized node created for this value, NULL if none has been (so far)
189 created. */
190 cgraph_node *spec_node;
191 /* Depth first search number and low link for topological sorting of
192 values. */
193 int dfs, low_link;
194 /* True if this value is currently on the topo-sort stack. */
195 bool on_stack;
196
197 ipcp_value()
198 : sources (0), next (0), scc_next (0), topo_next (0),
199 spec_node (0), dfs (0), low_link (0), on_stack (false) {}
200
201 void add_source (cgraph_edge *cs, ipcp_value *src_val, int src_idx,
202 HOST_WIDE_INT offset);
203 };
204
205 /* Lattice describing potential values of a formal parameter of a function, or
206 a part of an aggregate. TOP is represented by a lattice with zero values
207 and with contains_variable and bottom flags cleared. BOTTOM is represented
208 by a lattice with the bottom flag set. In that case, values and
209 contains_variable flag should be disregarded. */
210
211 template <typename valtype>
212 class ipcp_lattice
213 {
214 public:
215 /* The list of known values and types in this lattice. Note that values are
216 not deallocated if a lattice is set to bottom because there may be value
217 sources referencing them. */
218 ipcp_value<valtype> *values;
219 /* Number of known values and types in this lattice. */
220 int values_count;
221 /* The lattice contains a variable component (in addition to values). */
222 bool contains_variable;
223 /* The value of the lattice is bottom (i.e. variable and unusable for any
224 propagation). */
225 bool bottom;
226
227 inline bool is_single_const ();
228 inline bool set_to_bottom ();
229 inline bool set_contains_variable ();
230 bool add_value (valtype newval, cgraph_edge *cs,
231 ipcp_value<valtype> *src_val = NULL,
232 int src_idx = 0, HOST_WIDE_INT offset = -1);
233 void print (FILE * f, bool dump_sources, bool dump_benefits);
234 };
235
236 /* Lattice of tree values with an offset to describe a part of an
237 aggregate. */
238
239 class ipcp_agg_lattice : public ipcp_lattice<tree>
240 {
241 public:
242 /* Offset that is being described by this lattice. */
243 HOST_WIDE_INT offset;
244 /* Size so that we don't have to re-compute it every time we traverse the
245 list. Must correspond to TYPE_SIZE of all lat values. */
246 HOST_WIDE_INT size;
247 /* Next element of the linked list. */
248 struct ipcp_agg_lattice *next;
249 };
250
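/* For instance (hypothetical code, only to illustrate the fields above):
   if every call to f (&s) first stores 42 into a field of s at byte
   offset 4, the matching aggregate lattice would have offset == 32 (in
   bits), size equal to that field's TYPE_SIZE, and a single known value
   of 42.  */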
251 /* Lattice of known bits, only capable of holding one value.
252 Bitwise constant propagation propagates which bits of a
253 value are constant.
254 For eg:
255 int f(int x)
256 {
257 return some_op (x);
258 }
259
260 int f1(int y)
261 {
262 if (cond)
263 return f (y & 0xff);
264 else
265 return f (y & 0xf);
266 }
267
268 In the above case, the param 'x' will always have all
269 the bits above the lowest byte set to 0.
270 Hence the mask of 'x' would be 0xff; the mask
271 reflects that the low eight bits are unknown.
272 The actual propagated value is given by m_value & ~m_mask. */
273
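/* A worked example of the encoding described above: with m_value == 0x4
   and m_mask == 0x3, bits 0 and 1 are unknown, bit 2 is known to be one
   and all higher bits are known to be zero; the known part is
   m_value & ~m_mask == 0x4.  */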
274 class ipcp_bits_lattice
275 {
276 public:
277 bool bottom_p () { return m_lattice_val == IPA_BITS_VARYING; }
278 bool top_p () { return m_lattice_val == IPA_BITS_UNDEFINED; }
279 bool constant_p () { return m_lattice_val == IPA_BITS_CONSTANT; }
280 bool set_to_bottom ();
281 bool set_to_constant (widest_int, widest_int);
282
283 widest_int get_value () { return m_value; }
284 widest_int get_mask () { return m_mask; }
285
286 bool meet_with (ipcp_bits_lattice& other, unsigned, signop,
287 enum tree_code, tree);
288
289 bool meet_with (widest_int, widest_int, unsigned);
290
291 void print (FILE *);
292
293 private:
294 enum { IPA_BITS_UNDEFINED, IPA_BITS_CONSTANT, IPA_BITS_VARYING } m_lattice_val;
295
296 /* Similar to ccp_lattice_t, mask represents which bits of value are constant.
297 If a bit in mask is set to 0, then the corresponding bit in
298 value is known to be constant. */
299 widest_int m_value, m_mask;
300
301 bool meet_with_1 (widest_int, widest_int, unsigned);
302 void get_value_and_mask (tree, widest_int *, widest_int *);
303 };
304
305 /* Lattice of value ranges. */
306
307 class ipcp_vr_lattice
308 {
309 public:
310 value_range m_vr;
311
312 inline bool bottom_p () const;
313 inline bool top_p () const;
314 inline bool set_to_bottom ();
315 bool meet_with (const value_range *p_vr);
316 bool meet_with (const ipcp_vr_lattice &other);
317 void init () { m_vr.type = VR_UNDEFINED; }
318 void print (FILE * f);
319
320 private:
321 bool meet_with_1 (const value_range *other_vr);
322 };
323
324 /* Structure containing lattices for a parameter itself and for pieces of
325 aggregates that are passed in the parameter or by a reference in a parameter
326 plus some other useful flags. */
327
328 class ipcp_param_lattices
329 {
330 public:
331 /* Lattice describing the value of the parameter itself. */
332 ipcp_lattice<tree> itself;
333 /* Lattice describing the polymorphic contexts of a parameter. */
334 ipcp_lattice<ipa_polymorphic_call_context> ctxlat;
335 /* Lattices describing aggregate parts. */
336 ipcp_agg_lattice *aggs;
337 /* Lattice describing known bits. */
338 ipcp_bits_lattice bits_lattice;
339 /* Lattice describing value range. */
340 ipcp_vr_lattice m_value_range;
341 /* Number of aggregate lattices. */
342 int aggs_count;
343 /* True if aggregate data were passed by reference (as opposed to by
344 value). */
345 bool aggs_by_ref;
346 /* All aggregate lattices contain a variable component (in addition to
347 values). */
348 bool aggs_contain_variable;
349 /* The value of all aggregate lattices is bottom (i.e. variable and unusable
350 for any propagation). */
351 bool aggs_bottom;
352
353 /* There is a virtual call based on this parameter. */
354 bool virt_call;
355 };
356
357 /* Allocation pools for values and their sources in ipa-cp. */
358
359 object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
360 ("IPA-CP constant values");
361
362 object_allocator<ipcp_value<ipa_polymorphic_call_context> >
363 ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
364
365 object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
366 ("IPA-CP value sources");
367
368 object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
369 ("IPA_CP aggregate lattices");
370
371 /* Maximal count found in program. */
372
373 static profile_count max_count;
374
375 /* Original overall size of the program. */
376
377 static long overall_size, max_new_size;
378
379 /* Return the param lattices structure corresponding to the Ith formal
380 parameter of the function described by INFO. */
381 static inline struct ipcp_param_lattices *
382 ipa_get_parm_lattices (struct ipa_node_params *info, int i)
383 {
384 gcc_assert (i >= 0 && i < ipa_get_param_count (info));
385 gcc_checking_assert (!info->ipcp_orig_node);
386 gcc_checking_assert (info->lattices);
387 return &(info->lattices[i]);
388 }
389
390 /* Return the lattice corresponding to the scalar value of the Ith formal
391 parameter of the function described by INFO. */
392 static inline ipcp_lattice<tree> *
393 ipa_get_scalar_lat (struct ipa_node_params *info, int i)
394 {
395 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
396 return &plats->itself;
397 }
398
399 /* Return the lattice corresponding to the polymorphic contexts of the Ith
400 formal parameter of the function described by INFO. */
401 static inline ipcp_lattice<ipa_polymorphic_call_context> *
402 ipa_get_poly_ctx_lat (struct ipa_node_params *info, int i)
403 {
404 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
405 return &plats->ctxlat;
406 }
407
408 /* Return the lattice corresponding to the value range of the Ith formal
409 parameter of the function described by INFO. */
410
411 static inline ipcp_vr_lattice *
412 ipa_get_vr_lat (struct ipa_node_params *info, int i)
413 {
414 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
415 return &plats->m_value_range;
416 }
417
418 /* Return whether LAT is a lattice with a single constant and without an
419 undefined value. */
420
421 template <typename valtype>
422 inline bool
423 ipcp_lattice<valtype>::is_single_const ()
424 {
425 if (bottom || contains_variable || values_count != 1)
426 return false;
427 else
428 return true;
429 }
430
431 /* Print V which is extracted from a value in a lattice to F. */
432
433 static void
434 print_ipcp_constant_value (FILE * f, tree v)
435 {
436 if (TREE_CODE (v) == ADDR_EXPR
437 && TREE_CODE (TREE_OPERAND (v, 0)) == CONST_DECL)
438 {
439 fprintf (f, "& ");
440 print_generic_expr (f, DECL_INITIAL (TREE_OPERAND (v, 0)));
441 }
442 else
443 print_generic_expr (f, v);
444 }
445
446 /* Print V which is extracted from a value in a lattice to F. */
447
448 static void
449 print_ipcp_constant_value (FILE * f, ipa_polymorphic_call_context v)
450 {
451 v.dump(f, false);
452 }
453
454 /* Print a lattice LAT to F. */
455
456 template <typename valtype>
457 void
458 ipcp_lattice<valtype>::print (FILE * f, bool dump_sources, bool dump_benefits)
459 {
460 ipcp_value<valtype> *val;
461 bool prev = false;
462
463 if (bottom)
464 {
465 fprintf (f, "BOTTOM\n");
466 return;
467 }
468
469 if (!values_count && !contains_variable)
470 {
471 fprintf (f, "TOP\n");
472 return;
473 }
474
475 if (contains_variable)
476 {
477 fprintf (f, "VARIABLE");
478 prev = true;
479 if (dump_benefits)
480 fprintf (f, "\n");
481 }
482
483 for (val = values; val; val = val->next)
484 {
485 if (dump_benefits && prev)
486 fprintf (f, " ");
487 else if (!dump_benefits && prev)
488 fprintf (f, ", ");
489 else
490 prev = true;
491
492 print_ipcp_constant_value (f, val->value);
493
494 if (dump_sources)
495 {
496 ipcp_value_source<valtype> *s;
497
498 fprintf (f, " [from:");
499 for (s = val->sources; s; s = s->next)
500 fprintf (f, " %i(%f)", s->cs->caller->order,
501 s->cs->sreal_frequency ().to_double ());
502 fprintf (f, "]");
503 }
504
505 if (dump_benefits)
506 fprintf (f, " [loc_time: %i, loc_size: %i, "
507 "prop_time: %i, prop_size: %i]\n",
508 val->local_time_benefit, val->local_size_cost,
509 val->prop_time_benefit, val->prop_size_cost);
510 }
511 if (!dump_benefits)
512 fprintf (f, "\n");
513 }
514
515 void
516 ipcp_bits_lattice::print (FILE *f)
517 {
518 if (top_p ())
519 fprintf (f, " Bits unknown (TOP)\n");
520 else if (bottom_p ())
521 fprintf (f, " Bits unusable (BOTTOM)\n");
522 else
523 {
524 fprintf (f, " Bits: value = "); print_hex (get_value (), f);
525 fprintf (f, ", mask = "); print_hex (get_mask (), f);
526 fprintf (f, "\n");
527 }
528 }
529
530 /* Print value range lattice to F. */
531
532 void
533 ipcp_vr_lattice::print (FILE * f)
534 {
535 dump_value_range (f, &m_vr);
536 }
537
538 /* Print all ipcp_lattices of all functions to F. */
539
540 static void
541 print_all_lattices (FILE * f, bool dump_sources, bool dump_benefits)
542 {
543 struct cgraph_node *node;
544 int i, count;
545
546 fprintf (f, "\nLattices:\n");
547 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
548 {
549 struct ipa_node_params *info;
550
551 info = IPA_NODE_REF (node);
552 fprintf (f, " Node: %s:\n", node->dump_name ());
553 count = ipa_get_param_count (info);
554 for (i = 0; i < count; i++)
555 {
556 struct ipcp_agg_lattice *aglat;
557 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
558 fprintf (f, " param [%d]: ", i);
559 plats->itself.print (f, dump_sources, dump_benefits);
560 fprintf (f, " ctxs: ");
561 plats->ctxlat.print (f, dump_sources, dump_benefits);
562 plats->bits_lattice.print (f);
563 fprintf (f, " ");
564 plats->m_value_range.print (f);
565 fprintf (f, "\n");
566 if (plats->virt_call)
567 fprintf (f, " virt_call flag set\n");
568
569 if (plats->aggs_bottom)
570 {
571 fprintf (f, " AGGS BOTTOM\n");
572 continue;
573 }
574 if (plats->aggs_contain_variable)
575 fprintf (f, " AGGS VARIABLE\n");
576 for (aglat = plats->aggs; aglat; aglat = aglat->next)
577 {
578 fprintf (f, " %soffset " HOST_WIDE_INT_PRINT_DEC ": ",
579 plats->aggs_by_ref ? "ref " : "", aglat->offset);
580 aglat->print (f, dump_sources, dump_benefits);
581 }
582 }
583 }
584 }
585
586 /* Determine whether it is at all technically possible to create clones of NODE
587 and store this information in the ipa_node_params structure associated
588 with NODE. */
589
590 static void
591 determine_versionability (struct cgraph_node *node,
592 struct ipa_node_params *info)
593 {
594 const char *reason = NULL;
595
596 /* There are a number of generic reasons functions cannot be versioned. We
597 also cannot remove parameters if there are type attributes such as fnspec
598 present. */
599 if (node->alias || node->thunk.thunk_p)
600 reason = "alias or thunk";
601 else if (!node->local.versionable)
602 reason = "not a tree_versionable_function";
603 else if (node->get_availability () <= AVAIL_INTERPOSABLE)
604 reason = "insufficient body availability";
605 else if (!opt_for_fn (node->decl, optimize)
606 || !opt_for_fn (node->decl, flag_ipa_cp))
607 reason = "non-optimized function";
608 else if (lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (node->decl)))
609 {
610 /* Ideally we should clone the SIMD clones themselves and create
611 vector copies of them, so IPA-cp and SIMD clones can happily
612 coexist, but that may not be worth the effort. */
613 reason = "function has SIMD clones";
614 }
615 else if (lookup_attribute ("target_clones", DECL_ATTRIBUTES (node->decl)))
616 {
617 /* Ideally we should clone the target clones themselves and create
618 copies of them, so IPA-cp and target clones can happily
619 coexist, but that may not be worth the effort. */
620 reason = "function target_clones attribute";
621 }
622 /* Don't clone decls local to a comdat group; cloning breaks them, and for
623 C++ decloned constructors inlining is always better anyway. */
624 else if (node->comdat_local_p ())
625 reason = "comdat-local function";
626 else if (node->calls_comdat_local)
627 {
628 /* TODO: call is versionable if we make sure that all
629 callers are inside of a comdat group. */
630 reason = "calls comdat-local function";
631 }
632
633 /* Functions calling BUILT_IN_VA_ARG_PACK and BUILT_IN_VA_ARG_PACK_LEN
634 work only when inlined. Cloning them may still lead to better code
635 because ipa-cp will not give up on cloning further. If the function is
636 external, however, this leads to wrong code because we may end up producing
637 an offline copy of the function. */
638 if (DECL_EXTERNAL (node->decl))
639 for (cgraph_edge *edge = node->callees; !reason && edge;
640 edge = edge->next_callee)
641 if (DECL_BUILT_IN (edge->callee->decl)
642 && DECL_BUILT_IN_CLASS (edge->callee->decl) == BUILT_IN_NORMAL)
643 {
644 if (DECL_FUNCTION_CODE (edge->callee->decl) == BUILT_IN_VA_ARG_PACK)
645 reason = "external function which calls va_arg_pack";
646 if (DECL_FUNCTION_CODE (edge->callee->decl)
647 == BUILT_IN_VA_ARG_PACK_LEN)
648 reason = "external function which calls va_arg_pack_len";
649 }
650
651 if (reason && dump_file && !node->alias && !node->thunk.thunk_p)
652 fprintf (dump_file, "Function %s is not versionable, reason: %s.\n",
653 node->dump_name (), reason);
654
655 info->versionable = (reason == NULL);
656 }
657
658 /* Return true if it is at all technically possible to create clones of
659 NODE. */
660
661 static bool
662 ipcp_versionable_function_p (struct cgraph_node *node)
663 {
664 return IPA_NODE_REF (node)->versionable;
665 }
666
667 /* Structure holding accumulated information about callers of a node. */
668
669 struct caller_statistics
670 {
671 profile_count count_sum;
672 int n_calls, n_hot_calls, freq_sum;
673 };
674
675 /* Initialize fields of STAT to zeroes. */
676
677 static inline void
678 init_caller_stats (struct caller_statistics *stats)
679 {
680 stats->count_sum = profile_count::zero ();
681 stats->n_calls = 0;
682 stats->n_hot_calls = 0;
683 stats->freq_sum = 0;
684 }
685
686 /* Worker callback of cgraph_for_node_and_aliases accumulating statistics of
687 non-thunk incoming edges to NODE. */
688
689 static bool
690 gather_caller_stats (struct cgraph_node *node, void *data)
691 {
692 struct caller_statistics *stats = (struct caller_statistics *) data;
693 struct cgraph_edge *cs;
694
695 for (cs = node->callers; cs; cs = cs->next_caller)
696 if (!cs->caller->thunk.thunk_p)
697 {
698 if (cs->count.ipa ().initialized_p ())
699 stats->count_sum += cs->count.ipa ();
700 stats->freq_sum += cs->frequency ();
701 stats->n_calls++;
702 if (cs->maybe_hot_p ())
703 stats->n_hot_calls ++;
704 }
705 return false;
706
707 }
708
709 /* Return true if this NODE is a viable candidate for cloning. */
710
711 static bool
712 ipcp_cloning_candidate_p (struct cgraph_node *node)
713 {
714 struct caller_statistics stats;
715
716 gcc_checking_assert (node->has_gimple_body_p ());
717
718 if (!opt_for_fn (node->decl, flag_ipa_cp_clone))
719 {
720 if (dump_file)
721 fprintf (dump_file, "Not considering %s for cloning; "
722 "-fipa-cp-clone disabled.\n",
723 node->name ());
724 return false;
725 }
726
727 if (node->optimize_for_size_p ())
728 {
729 if (dump_file)
730 fprintf (dump_file, "Not considering %s for cloning; "
731 "optimizing it for size.\n",
732 node->name ());
733 return false;
734 }
735
736 init_caller_stats (&stats);
737 node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats, false);
738
739 if (ipa_fn_summaries->get (node)->self_size < stats.n_calls)
740 {
741 if (dump_file)
742 fprintf (dump_file, "Considering %s for cloning; code might shrink.\n",
743 node->name ());
744 return true;
745 }
746
747 /* When profile is available and function is hot, propagate into it even if
748 calls seem cold; constant propagation can improve the function's speed
749 significantly. */
750 if (max_count > profile_count::zero ())
751 {
752 if (stats.count_sum > node->count.ipa ().apply_scale (90, 100))
753 {
754 if (dump_file)
755 fprintf (dump_file, "Considering %s for cloning; "
756 "usually called directly.\n",
757 node->name ());
758 return true;
759 }
760 }
761 if (!stats.n_hot_calls)
762 {
763 if (dump_file)
764 fprintf (dump_file, "Not considering %s for cloning; no hot calls.\n",
765 node->name ());
766 return false;
767 }
768 if (dump_file)
769 fprintf (dump_file, "Considering %s for cloning.\n",
770 node->name ());
771 return true;
772 }
773
774 template <typename valtype>
775 class value_topo_info
776 {
777 public:
778 /* Head of the linked list of topologically sorted values. */
779 ipcp_value<valtype> *values_topo;
780 /* Stack for creating SCCs, represented by a linked list too. */
781 ipcp_value<valtype> *stack;
782 /* Counter driving the algorithm in add_val_to_toposort. */
783 int dfs_counter;
784
785 value_topo_info () : values_topo (NULL), stack (NULL), dfs_counter (0)
786 {}
787 void add_val (ipcp_value<valtype> *cur_val);
788 void propagate_effects ();
789 };
790
791 /* Arrays representing a topological ordering of call graph nodes and a stack
792 of nodes used during constant propagation and also data required to perform
793 topological sort of values and propagation of benefits in the determined
794 order. */
795
796 class ipa_topo_info
797 {
798 public:
799 /* Array with obtained topological order of cgraph nodes. */
800 struct cgraph_node **order;
801 /* Stack of cgraph nodes used during propagation within SCC until all values
802 in the SCC stabilize. */
803 struct cgraph_node **stack;
804 int nnodes, stack_top;
805
806 value_topo_info<tree> constants;
807 value_topo_info<ipa_polymorphic_call_context> contexts;
808
809 ipa_topo_info () : order(NULL), stack(NULL), nnodes(0), stack_top(0),
810 constants ()
811 {}
812 };
813
814 /* Allocate the arrays in TOPO and topologically sort the nodes into order. */
815
816 static void
817 build_toporder_info (struct ipa_topo_info *topo)
818 {
819 topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
820 topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
821
822 gcc_checking_assert (topo->stack_top == 0);
823 topo->nnodes = ipa_reduced_postorder (topo->order, true, true, NULL);
824 }
825
826 /* Free information about strongly connected components and the arrays in
827 TOPO. */
828
829 static void
830 free_toporder_info (struct ipa_topo_info *topo)
831 {
832 ipa_free_postorder_info ();
833 free (topo->order);
834 free (topo->stack);
835 }
836
837 /* Add NODE to the stack in TOPO, unless it is already there. */
838
839 static inline void
840 push_node_to_stack (struct ipa_topo_info *topo, struct cgraph_node *node)
841 {
842 struct ipa_node_params *info = IPA_NODE_REF (node);
843 if (info->node_enqueued)
844 return;
845 info->node_enqueued = 1;
846 topo->stack[topo->stack_top++] = node;
847 }
848
849 /* Pop a node from the stack in TOPO and return it or return NULL if the stack
850 is empty. */
851
852 static struct cgraph_node *
853 pop_node_from_stack (struct ipa_topo_info *topo)
854 {
855 if (topo->stack_top)
856 {
857 struct cgraph_node *node;
858 topo->stack_top--;
859 node = topo->stack[topo->stack_top];
860 IPA_NODE_REF (node)->node_enqueued = 0;
861 return node;
862 }
863 else
864 return NULL;
865 }
866
867 /* Set lattice LAT to bottom and return true if it previously was not set as
868 such. */
869
870 template <typename valtype>
871 inline bool
872 ipcp_lattice<valtype>::set_to_bottom ()
873 {
874 bool ret = !bottom;
875 bottom = true;
876 return ret;
877 }
878
879 /* Mark lattice as containing an unknown value and return true if it previously
880 was not marked as such. */
881
882 template <typename valtype>
883 inline bool
884 ipcp_lattice<valtype>::set_contains_variable ()
885 {
886 bool ret = !contains_variable;
887 contains_variable = true;
888 return ret;
889 }
890
891 /* Set all aggregate lattices in PLATS to bottom and return true if they were
892 not previously set as such. */
893
894 static inline bool
895 set_agg_lats_to_bottom (struct ipcp_param_lattices *plats)
896 {
897 bool ret = !plats->aggs_bottom;
898 plats->aggs_bottom = true;
899 return ret;
900 }
901
902 /* Mark all aggregate lattices in PLATS as containing an unknown value and
903 return true if they were not previously marked as such. */
904
905 static inline bool
906 set_agg_lats_contain_variable (struct ipcp_param_lattices *plats)
907 {
908 bool ret = !plats->aggs_contain_variable;
909 plats->aggs_contain_variable = true;
910 return ret;
911 }
912
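/* Meet the current value of the lattice with the lattice OTHER.  */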
913 bool
914 ipcp_vr_lattice::meet_with (const ipcp_vr_lattice &other)
915 {
916 return meet_with_1 (&other.m_vr);
917 }
918
919 /* Meet the current value of the lattice with the value range described by
920 P_VR. */
921
922 bool
923 ipcp_vr_lattice::meet_with (const value_range *p_vr)
924 {
925 return meet_with_1 (p_vr);
926 }
927
928 /* Meet the current value of the lattice with the value range described by
929 OTHER_VR. */
930
931 bool
932 ipcp_vr_lattice::meet_with_1 (const value_range *other_vr)
933 {
934 tree min = m_vr.min, max = m_vr.max;
935 value_range_type type = m_vr.type;
936
937 if (bottom_p ())
938 return false;
939
940 if (other_vr->type == VR_VARYING)
941 return set_to_bottom ();
942
943 vrp_meet (&m_vr, other_vr);
944 if (type != m_vr.type
945 || min != m_vr.min
946 || max != m_vr.max)
947 return true;
948 else
949 return false;
950 }
951
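/* To illustrate the meet above (values made up): meeting the range [1, 5]
   with [3, 10] yields the conservative union [1, 10], so meet_with_1
   reports a change; meeting [1, 5] with itself leaves the lattice as it
   was and returns false.  */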
952 /* Return true if value range information in the lattice is yet unknown. */
953
954 bool
955 ipcp_vr_lattice::top_p () const
956 {
957 return m_vr.type == VR_UNDEFINED;
958 }
959
960 /* Return true if value range information in the lattice is known to be
961 unusable. */
962
963 bool
964 ipcp_vr_lattice::bottom_p () const
965 {
966 return m_vr.type == VR_VARYING;
967 }
968
969 /* Set value range information in the lattice to bottom. Return true if it
970 previously was in a different state. */
971
972 bool
973 ipcp_vr_lattice::set_to_bottom ()
974 {
975 if (m_vr.type == VR_VARYING)
976 return false;
977 m_vr.type = VR_VARYING;
978 return true;
979 }
980
981 /* Set the lattice value to bottom if it is not bottom already. */
982
983 bool
984 ipcp_bits_lattice::set_to_bottom ()
985 {
986 if (bottom_p ())
987 return false;
988 m_lattice_val = IPA_BITS_VARYING;
989 m_value = 0;
990 m_mask = -1;
991 return true;
992 }
993
994 /* Set to constant if it isn't already. Only meant to be called
995 when switching state from TOP. */
996
997 bool
998 ipcp_bits_lattice::set_to_constant (widest_int value, widest_int mask)
999 {
1000 gcc_assert (top_p ());
1001 m_lattice_val = IPA_BITS_CONSTANT;
1002 m_value = value;
1003 m_mask = mask;
1004 return true;
1005 }
1006
1007 /* Convert OPERAND to the value/mask representation. */
1008
1009 void
1010 ipcp_bits_lattice::get_value_and_mask (tree operand, widest_int *valuep, widest_int *maskp)
1011 {
1012 wide_int get_nonzero_bits (const_tree);
1013
1014 if (TREE_CODE (operand) == INTEGER_CST)
1015 {
1016 *valuep = wi::to_widest (operand);
1017 *maskp = 0;
1018 }
1019 else
1020 {
1021 *valuep = 0;
1022 *maskp = -1;
1023 }
1024 }
1025
1026 /* Meet operation, similar to ccp_lattice_meet: we xor the old and new
1027 values and, wherever they differ at the same bit position, drop that
1028 bit to varying. Return true if the mask changed.
1029 This function assumes that the lattice value is in the CONSTANT state. */
1030
1031 bool
1032 ipcp_bits_lattice::meet_with_1 (widest_int value, widest_int mask,
1033 unsigned precision)
1034 {
1035 gcc_assert (constant_p ());
1036
1037 widest_int old_mask = m_mask;
1038 m_mask = (m_mask | mask) | (m_value ^ value);
1039
1040 if (wi::sext (m_mask, precision) == -1)
1041 return set_to_bottom ();
1042
1043 return m_mask != old_mask;
1044 }
1045
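/* A worked example of the meet above: meeting a CONSTANT lattice that has
   m_value == 0x5, m_mask == 0x2 with the pair value == 0x4, mask == 0x2
   gives m_mask = (0x2 | 0x2) | (0x5 ^ 0x4) == 0x3, i.e. bits 0 and 1
   become unknown while bit 2 stays known to be one.  */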
1046 /* Meet the bits lattice with the operand
1047 described by <VALUE, MASK, PRECISION>. */
1048
1049 bool
1050 ipcp_bits_lattice::meet_with (widest_int value, widest_int mask,
1051 unsigned precision)
1052 {
1053 if (bottom_p ())
1054 return false;
1055
1056 if (top_p ())
1057 {
1058 if (wi::sext (mask, precision) == -1)
1059 return set_to_bottom ();
1060 return set_to_constant (value, mask);
1061 }
1062
1063 return meet_with_1 (value, mask, precision);
1064 }
1065
1066 /* Meet the bits lattice with the result of bit_value_binop (other, operand)
1067 if CODE is a binary operation, or of bit_value_unop (other) if CODE is a
1068 unary op. When CODE is NOP_EXPR, no adjustment is required. */
1069
1070 bool
1071 ipcp_bits_lattice::meet_with (ipcp_bits_lattice& other, unsigned precision,
1072 signop sgn, enum tree_code code, tree operand)
1073 {
1074 if (other.bottom_p ())
1075 return set_to_bottom ();
1076
1077 if (bottom_p () || other.top_p ())
1078 return false;
1079
1080 widest_int adjusted_value, adjusted_mask;
1081
1082 if (TREE_CODE_CLASS (code) == tcc_binary)
1083 {
1084 tree type = TREE_TYPE (operand);
1085 gcc_assert (INTEGRAL_TYPE_P (type));
1086 widest_int o_value, o_mask;
1087 get_value_and_mask (operand, &o_value, &o_mask);
1088
1089 bit_value_binop (code, sgn, precision, &adjusted_value, &adjusted_mask,
1090 sgn, precision, other.get_value (), other.get_mask (),
1091 TYPE_SIGN (type), TYPE_PRECISION (type), o_value, o_mask);
1092
1093 if (wi::sext (adjusted_mask, precision) == -1)
1094 return set_to_bottom ();
1095 }
1096
1097 else if (TREE_CODE_CLASS (code) == tcc_unary)
1098 {
1099 bit_value_unop (code, sgn, precision, &adjusted_value,
1100 &adjusted_mask, sgn, precision, other.get_value (),
1101 other.get_mask ());
1102
1103 if (wi::sext (adjusted_mask, precision) == -1)
1104 return set_to_bottom ();
1105 }
1106
1107 else
1108 return set_to_bottom ();
1109
1110 if (top_p ())
1111 {
1112 if (wi::sext (adjusted_mask, precision) == -1)
1113 return set_to_bottom ();
1114 return set_to_constant (adjusted_value, adjusted_mask);
1115 }
1116 else
1117 return meet_with_1 (adjusted_value, adjusted_mask, precision);
1118 }
1119
1120 /* Mark both aggregate and scalar lattices as containing an unknown variable,
1121 return true if any of them has not been marked as such so far. */
1122
1123 static inline bool
1124 set_all_contains_variable (struct ipcp_param_lattices *plats)
1125 {
1126 bool ret;
1127 ret = plats->itself.set_contains_variable ();
1128 ret |= plats->ctxlat.set_contains_variable ();
1129 ret |= set_agg_lats_contain_variable (plats);
1130 ret |= plats->bits_lattice.set_to_bottom ();
1131 ret |= plats->m_value_range.set_to_bottom ();
1132 return ret;
1133 }
1134
1135 /* Worker of call_for_symbol_thunks_and_aliases, increment the integer DATA
1136 points to by the number of callers to NODE. */
1137
1138 static bool
1139 count_callers (cgraph_node *node, void *data)
1140 {
1141 int *caller_count = (int *) data;
1142
1143 for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
1144 /* Local thunks can be handled transparently, but if the thunk can not
1145 be optimized out, count it as a real use. */
1146 if (!cs->caller->thunk.thunk_p || !cs->caller->local.local)
1147 ++*caller_count;
1148 return false;
1149 }
1150
1151 /* Worker of call_for_symbol_thunks_and_aliases, it is supposed to be called on
1152 the one caller of some other node. Set the caller's corresponding flag. */
1153
1154 static bool
1155 set_single_call_flag (cgraph_node *node, void *)
1156 {
1157 cgraph_edge *cs = node->callers;
1158 /* Local thunks can be handled transparently, skip them. */
1159 while (cs && cs->caller->thunk.thunk_p && cs->caller->local.local)
1160 cs = cs->next_caller;
1161 if (cs)
1162 {
1163 IPA_NODE_REF (cs->caller)->node_calling_single_call = true;
1164 return true;
1165 }
1166 return false;
1167 }
1168
1169 /* Initialize ipcp_lattices. */
1170
1171 static void
1172 initialize_node_lattices (struct cgraph_node *node)
1173 {
1174 struct ipa_node_params *info = IPA_NODE_REF (node);
1175 struct cgraph_edge *ie;
1176 bool disable = false, variable = false;
1177 int i;
1178
1179 gcc_checking_assert (node->has_gimple_body_p ());
1180 if (cgraph_local_p (node))
1181 {
1182 int caller_count = 0;
1183 node->call_for_symbol_thunks_and_aliases (count_callers, &caller_count,
1184 true);
1185 gcc_checking_assert (caller_count > 0);
1186 if (caller_count == 1)
1187 node->call_for_symbol_thunks_and_aliases (set_single_call_flag,
1188 NULL, true);
1189 }
1190 else
1191 {
1192 /* When cloning is allowed, we can assume that externally visible
1193 functions are not called. We will compensate for this by cloning
1194 later. */
1195 if (ipcp_versionable_function_p (node)
1196 && ipcp_cloning_candidate_p (node))
1197 variable = true;
1198 else
1199 disable = true;
1200 }
1201
1202 for (i = 0; i < ipa_get_param_count (info); i++)
1203 {
1204 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
1205 plats->m_value_range.init ();
1206 }
1207
1208 if (disable || variable)
1209 {
1210 for (i = 0; i < ipa_get_param_count (info); i++)
1211 {
1212 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
1213 if (disable)
1214 {
1215 plats->itself.set_to_bottom ();
1216 plats->ctxlat.set_to_bottom ();
1217 set_agg_lats_to_bottom (plats);
1218 plats->bits_lattice.set_to_bottom ();
1219 plats->m_value_range.set_to_bottom ();
1220 }
1221 else
1222 set_all_contains_variable (plats);
1223 }
1224 if (dump_file && (dump_flags & TDF_DETAILS)
1225 && !node->alias && !node->thunk.thunk_p)
1226 fprintf (dump_file, "Marking all lattices of %s as %s\n",
1227 node->dump_name (), disable ? "BOTTOM" : "VARIABLE");
1228 }
1229
1230 for (ie = node->indirect_calls; ie; ie = ie->next_callee)
1231 if (ie->indirect_info->polymorphic
1232 && ie->indirect_info->param_index >= 0)
1233 {
1234 gcc_checking_assert (ie->indirect_info->param_index >= 0);
1235 ipa_get_parm_lattices (info,
1236 ie->indirect_info->param_index)->virt_call = 1;
1237 }
1238 }
1239
1240 /* Return the result of a (possibly arithmetic) pass through jump function
1241 JFUNC on the constant value INPUT. RES_TYPE is the type of the parameter
1242 to which the result is passed. Return NULL_TREE if that cannot be
1243 determined or be considered an interprocedural invariant. */
1244
1245 static tree
1246 ipa_get_jf_pass_through_result (struct ipa_jump_func *jfunc, tree input,
1247 tree res_type)
1248 {
1249 tree res;
1250
1251 if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
1252 return input;
1253 if (!is_gimple_ip_invariant (input))
1254 return NULL_TREE;
1255
1256 tree_code opcode = ipa_get_jf_pass_through_operation (jfunc);
1257 if (!res_type)
1258 {
1259 if (TREE_CODE_CLASS (opcode) == tcc_comparison)
1260 res_type = boolean_type_node;
1261 else if (expr_type_first_operand_type_p (opcode))
1262 res_type = TREE_TYPE (input);
1263 else
1264 return NULL_TREE;
1265 }
1266
1267 if (TREE_CODE_CLASS (opcode) == tcc_unary)
1268 res = fold_unary (opcode, res_type, input);
1269 else
1270 res = fold_binary (opcode, res_type, input,
1271 ipa_get_jf_pass_through_operand (jfunc));
1272
1273 if (res && !is_gimple_ip_invariant (res))
1274 return NULL_TREE;
1275
1276 return res;
1277 }
1278
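/* As an example of the folding above (hypothetical values): for a
   pass-through jump function describing X + 4 and the known constant
   INPUT == 7, fold_binary on PLUS_EXPR yields the interprocedural
   invariant 11, which is what gets propagated further.  */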
1279 /* Return the result of an ancestor jump function JFUNC on the constant value
1280 INPUT. Return NULL_TREE if that cannot be determined. */
1281
1282 static tree
1283 ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
1284 {
1285 gcc_checking_assert (TREE_CODE (input) != TREE_BINFO);
1286 if (TREE_CODE (input) == ADDR_EXPR)
1287 {
1288 tree t = TREE_OPERAND (input, 0);
1289 t = build_ref_for_offset (EXPR_LOCATION (t), t,
1290 ipa_get_jf_ancestor_offset (jfunc), false,
1291 ptr_type_node, NULL, false);
1292 return build_fold_addr_expr (t);
1293 }
1294 else
1295 return NULL_TREE;
1296 }
1297
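/* E.g. (a hypothetical value): for INPUT == &s and an ancestor offset of
   32 bits, the result is the address of the piece of s that lies four
   bytes into it, such as a field at that offset.  */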
1298 /* Determine whether JFUNC evaluates to a single known constant value and if
1299 so, return it. Otherwise return NULL. INFO describes the caller node or
1300 the one it is inlined to, so that pass-through jump functions can be
1301 evaluated. PARM_TYPE is the type of the parameter to which the result is
1302 passed. */
1303
1304 tree
1305 ipa_value_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc,
1306 tree parm_type)
1307 {
1308 if (jfunc->type == IPA_JF_CONST)
1309 return ipa_get_jf_constant (jfunc);
1310 else if (jfunc->type == IPA_JF_PASS_THROUGH
1311 || jfunc->type == IPA_JF_ANCESTOR)
1312 {
1313 tree input;
1314 int idx;
1315
1316 if (jfunc->type == IPA_JF_PASS_THROUGH)
1317 idx = ipa_get_jf_pass_through_formal_id (jfunc);
1318 else
1319 idx = ipa_get_jf_ancestor_formal_id (jfunc);
1320
1321 if (info->ipcp_orig_node)
1322 input = info->known_csts[idx];
1323 else
1324 {
1325 ipcp_lattice<tree> *lat;
1326
1327 if (!info->lattices
1328 || idx >= ipa_get_param_count (info))
1329 return NULL_TREE;
1330 lat = ipa_get_scalar_lat (info, idx);
1331 if (!lat->is_single_const ())
1332 return NULL_TREE;
1333 input = lat->values->value;
1334 }
1335
1336 if (!input)
1337 return NULL_TREE;
1338
1339 if (jfunc->type == IPA_JF_PASS_THROUGH)
1340 return ipa_get_jf_pass_through_result (jfunc, input, parm_type);
1341 else
1342 return ipa_get_jf_ancestor_result (jfunc, input);
1343 }
1344 else
1345 return NULL_TREE;
1346 }
1347
1348 /* Determine whether JFUNC evaluates to a single known polymorphic context,
1349 given that INFO describes the caller node or the one it is inlined to, CS is
1350 the call graph edge corresponding to JFUNC and CSIDX is the index of the
1351 described parameter. */
1352
1353 ipa_polymorphic_call_context
1354 ipa_context_from_jfunc (ipa_node_params *info, cgraph_edge *cs, int csidx,
1355 ipa_jump_func *jfunc)
1356 {
1357 ipa_edge_args *args = IPA_EDGE_REF (cs);
1358 ipa_polymorphic_call_context ctx;
1359 ipa_polymorphic_call_context *edge_ctx
1360 = cs ? ipa_get_ith_polymorhic_call_context (args, csidx) : NULL;
1361
1362 if (edge_ctx && !edge_ctx->useless_p ())
1363 ctx = *edge_ctx;
1364
1365 if (jfunc->type == IPA_JF_PASS_THROUGH
1366 || jfunc->type == IPA_JF_ANCESTOR)
1367 {
1368 ipa_polymorphic_call_context srcctx;
1369 int srcidx;
1370 bool type_preserved = true;
1371 if (jfunc->type == IPA_JF_PASS_THROUGH)
1372 {
1373 if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
1374 return ctx;
1375 type_preserved = ipa_get_jf_pass_through_type_preserved (jfunc);
1376 srcidx = ipa_get_jf_pass_through_formal_id (jfunc);
1377 }
1378 else
1379 {
1380 type_preserved = ipa_get_jf_ancestor_type_preserved (jfunc);
1381 srcidx = ipa_get_jf_ancestor_formal_id (jfunc);
1382 }
1383 if (info->ipcp_orig_node)
1384 {
1385 if (info->known_contexts.exists ())
1386 srcctx = info->known_contexts[srcidx];
1387 }
1388 else
1389 {
1390 if (!info->lattices
1391 || srcidx >= ipa_get_param_count (info))
1392 return ctx;
1393 ipcp_lattice<ipa_polymorphic_call_context> *lat;
1394 lat = ipa_get_poly_ctx_lat (info, srcidx);
1395 if (!lat->is_single_const ())
1396 return ctx;
1397 srcctx = lat->values->value;
1398 }
1399 if (srcctx.useless_p ())
1400 return ctx;
1401 if (jfunc->type == IPA_JF_ANCESTOR)
1402 srcctx.offset_by (ipa_get_jf_ancestor_offset (jfunc));
1403 if (!type_preserved)
1404 srcctx.possible_dynamic_type_change (cs->in_polymorphic_cdtor);
1405 srcctx.combine_with (ctx);
1406 return srcctx;
1407 }
1408
1409 return ctx;
1410 }
1411
1412 /* If checking is enabled, verify that no lattice is in the TOP state, i.e. not
1413 bottom, not containing a variable component and without any known value at
1414 the same time. */
1415
1416 DEBUG_FUNCTION void
1417 ipcp_verify_propagated_values (void)
1418 {
1419 struct cgraph_node *node;
1420
1421 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
1422 {
1423 struct ipa_node_params *info = IPA_NODE_REF (node);
1424 int i, count = ipa_get_param_count (info);
1425
1426 for (i = 0; i < count; i++)
1427 {
1428 ipcp_lattice<tree> *lat = ipa_get_scalar_lat (info, i);
1429
1430 if (!lat->bottom
1431 && !lat->contains_variable
1432 && lat->values_count == 0)
1433 {
1434 if (dump_file)
1435 {
1436 symtab->dump (dump_file);
1437 fprintf (dump_file, "\nIPA lattices after constant "
1438 "propagation, before gcc_unreachable:\n");
1439 print_all_lattices (dump_file, true, false);
1440 }
1441
1442 gcc_unreachable ();
1443 }
1444 }
1445 }
1446 }
1447
1448 /* Return true iff X and Y should be considered equal values by IPA-CP. */
1449
1450 static bool
1451 values_equal_for_ipcp_p (tree x, tree y)
1452 {
1453 gcc_checking_assert (x != NULL_TREE && y != NULL_TREE);
1454
1455 if (x == y)
1456 return true;
1457
1458 if (TREE_CODE (x) == ADDR_EXPR
1459 && TREE_CODE (y) == ADDR_EXPR
1460 && TREE_CODE (TREE_OPERAND (x, 0)) == CONST_DECL
1461 && TREE_CODE (TREE_OPERAND (y, 0)) == CONST_DECL)
1462 return operand_equal_p (DECL_INITIAL (TREE_OPERAND (x, 0)),
1463 DECL_INITIAL (TREE_OPERAND (y, 0)), 0);
1464 else
1465 return operand_equal_p (x, y, 0);
1466 }
1467
1468 /* Return true iff X and Y should be considered equal contexts by IPA-CP. */
1469
1470 static bool
1471 values_equal_for_ipcp_p (ipa_polymorphic_call_context x,
1472 ipa_polymorphic_call_context y)
1473 {
1474 return x.equal_to (y);
1475 }
1476
1477
1478 /* Add a new value source to the value represented by THIS, marking that a
1479 value comes from edge CS and (if the underlying jump function is a
1480 pass-through or an ancestor one) from a caller value SRC_VAL of a caller
1481 parameter described by SRC_INDEX. OFFSET is negative if the source was the
1482 scalar value of the parameter itself, otherwise the offset within an aggregate. */
1483
1484 template <typename valtype>
1485 void
1486 ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
1487 int src_idx, HOST_WIDE_INT offset)
1488 {
1489 ipcp_value_source<valtype> *src;
1490
1491 src = new (ipcp_sources_pool.allocate ()) ipcp_value_source<valtype>;
1492 src->offset = offset;
1493 src->cs = cs;
1494 src->val = src_val;
1495 src->index = src_idx;
1496
1497 src->next = sources;
1498 sources = src;
1499 }
1500
1501 /* Allocate a new ipcp_value holding a tree constant, initialize its value to
1502 SOURCE and clear all other fields. */
1503
1504 static ipcp_value<tree> *
1505 allocate_and_init_ipcp_value (tree source)
1506 {
1507 ipcp_value<tree> *val;
1508
1509 val = new (ipcp_cst_values_pool.allocate ()) ipcp_value<tree>();
1510 val->value = source;
1511 return val;
1512 }
1513
1514 /* Allocate a new ipcp_value holding a polymorphic context, initialize its
1515 value to SOURCE and clear all other fields. */
1516
1517 static ipcp_value<ipa_polymorphic_call_context> *
1518 allocate_and_init_ipcp_value (ipa_polymorphic_call_context source)
1519 {
1520 ipcp_value<ipa_polymorphic_call_context> *val;
1521
1522 // TODO
1523 val = new (ipcp_poly_ctx_values_pool.allocate ())
1524 ipcp_value<ipa_polymorphic_call_context>();
1525 val->value = source;
1526 return val;
1527 }
1528
1529 /* Try to add NEWVAL to LAT, potentially creating a new ipcp_value for it. CS,
1530 SRC_VAL, SRC_INDEX and OFFSET are meant for add_source and have the same
1531 meaning. OFFSET -1 means the source is scalar and not a part of an
1532 aggregate. */
1533
1534 template <typename valtype>
1535 bool
1536 ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
1537 ipcp_value<valtype> *src_val,
1538 int src_idx, HOST_WIDE_INT offset)
1539 {
1540 ipcp_value<valtype> *val;
1541
1542 if (bottom)
1543 return false;
1544
1545 for (val = values; val; val = val->next)
1546 if (values_equal_for_ipcp_p (val->value, newval))
1547 {
1548 if (ipa_edge_within_scc (cs))
1549 {
1550 ipcp_value_source<valtype> *s;
1551 for (s = val->sources; s; s = s->next)
1552 if (s->cs == cs)
1553 break;
1554 if (s)
1555 return false;
1556 }
1557
1558 val->add_source (cs, src_val, src_idx, offset);
1559 return false;
1560 }
1561
1562 if (values_count == PARAM_VALUE (PARAM_IPA_CP_VALUE_LIST_SIZE))
1563 {
1564 /* We can only free sources, not the values themselves, because sources
1565 of other values in this SCC might point to them. */
1566 for (val = values; val; val = val->next)
1567 {
1568 while (val->sources)
1569 {
1570 ipcp_value_source<valtype> *src = val->sources;
1571 val->sources = src->next;
1572 ipcp_sources_pool.remove ((ipcp_value_source<tree>*)src);
1573 }
1574 }
1575
1576 values = NULL;
1577 return set_to_bottom ();
1578 }
1579
1580 values_count++;
1581 val = allocate_and_init_ipcp_value (newval);
1582 val->add_source (cs, src_val, src_idx, offset);
1583 val->next = values;
1584 values = val;
1585 return true;
1586 }
1587
1588 /* Propagate values through a pass-through jump function JFUNC associated with
1589 edge CS, taking values from SRC_LAT and putting them into DEST_LAT. SRC_IDX
1590 is the index of the source parameter. PARM_TYPE is the type of the
1591 parameter to which the result is passed. */
1592
1593 static bool
1594 propagate_vals_across_pass_through (cgraph_edge *cs, ipa_jump_func *jfunc,
1595 ipcp_lattice<tree> *src_lat,
1596 ipcp_lattice<tree> *dest_lat, int src_idx,
1597 tree parm_type)
1598 {
1599 ipcp_value<tree> *src_val;
1600 bool ret = false;
1601
1602 /* Do not create new values when propagating within an SCC because if there
1603 are arithmetic functions with circular dependencies, there would be an
1604 infinite number of them and we would just make the lattices bottom. */
1605 if ((ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
1606 && ipa_edge_within_scc (cs))
1607 ret = dest_lat->set_contains_variable ();
1608 else
1609 for (src_val = src_lat->values; src_val; src_val = src_val->next)
1610 {
1611 tree cstval = ipa_get_jf_pass_through_result (jfunc, src_val->value,
1612 parm_type);
1613
1614 if (cstval)
1615 ret |= dest_lat->add_value (cstval, cs, src_val, src_idx);
1616 else
1617 ret |= dest_lat->set_contains_variable ();
1618 }
1619
1620 return ret;
1621 }
1622
1623 /* Propagate values through an ancestor jump function JFUNC associated with
1624 edge CS, taking values from SRC_LAT and putting them into DEST_LAT. SRC_IDX
1625 is the index of the source parameter. */
1626
1627 static bool
1628 propagate_vals_across_ancestor (struct cgraph_edge *cs,
1629 struct ipa_jump_func *jfunc,
1630 ipcp_lattice<tree> *src_lat,
1631 ipcp_lattice<tree> *dest_lat, int src_idx)
1632 {
1633 ipcp_value<tree> *src_val;
1634 bool ret = false;
1635
1636 if (ipa_edge_within_scc (cs))
1637 return dest_lat->set_contains_variable ();
1638
1639 for (src_val = src_lat->values; src_val; src_val = src_val->next)
1640 {
1641 tree t = ipa_get_jf_ancestor_result (jfunc, src_val->value);
1642
1643 if (t)
1644 ret |= dest_lat->add_value (t, cs, src_val, src_idx);
1645 else
1646 ret |= dest_lat->set_contains_variable ();
1647 }
1648
1649 return ret;
1650 }
1651
1652 /* Propagate scalar values across jump function JFUNC that is associated with
1653 edge CS and put the values into DEST_LAT. PARM_TYPE is the type of the
1654 parameter to which the result is passed. */
1655
1656 static bool
1657 propagate_scalar_across_jump_function (struct cgraph_edge *cs,
1658 struct ipa_jump_func *jfunc,
1659 ipcp_lattice<tree> *dest_lat,
1660 tree param_type)
1661 {
1662 if (dest_lat->bottom)
1663 return false;
1664
1665 if (jfunc->type == IPA_JF_CONST)
1666 {
1667 tree val = ipa_get_jf_constant (jfunc);
1668 return dest_lat->add_value (val, cs, NULL, 0);
1669 }
1670 else if (jfunc->type == IPA_JF_PASS_THROUGH
1671 || jfunc->type == IPA_JF_ANCESTOR)
1672 {
1673 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
1674 ipcp_lattice<tree> *src_lat;
1675 int src_idx;
1676 bool ret;
1677
1678 if (jfunc->type == IPA_JF_PASS_THROUGH)
1679 src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
1680 else
1681 src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
1682
1683 src_lat = ipa_get_scalar_lat (caller_info, src_idx);
1684 if (src_lat->bottom)
1685 return dest_lat->set_contains_variable ();
1686
1687 /* If we would need to clone the caller and cannot, do not propagate. */
1688 if (!ipcp_versionable_function_p (cs->caller)
1689 && (src_lat->contains_variable
1690 || (src_lat->values_count > 1)))
1691 return dest_lat->set_contains_variable ();
1692
1693 if (jfunc->type == IPA_JF_PASS_THROUGH)
1694 ret = propagate_vals_across_pass_through (cs, jfunc, src_lat,
1695 dest_lat, src_idx, param_type);
1696 else
1697 ret = propagate_vals_across_ancestor (cs, jfunc, src_lat, dest_lat,
1698 src_idx);
1699
1700 if (src_lat->contains_variable)
1701 ret |= dest_lat->set_contains_variable ();
1702
1703 return ret;
1704 }
1705
1706 /* TODO: We currently do not handle member method pointers in IPA-CP (we only
1707 use it for indirect inlining), we should propagate them too. */
1708 return dest_lat->set_contains_variable ();
1709 }
1710
1711 /* Propagate polymorphic contexts across jump function JFUNC that is
1712 associated with edge CS and describes argument IDX, into DEST_LAT. */
1713
1714 static bool
1715 propagate_context_across_jump_function (cgraph_edge *cs,
1716 ipa_jump_func *jfunc, int idx,
1717 ipcp_lattice<ipa_polymorphic_call_context> *dest_lat)
1718 {
1719 ipa_edge_args *args = IPA_EDGE_REF (cs);
1720 if (dest_lat->bottom)
1721 return false;
1722 bool ret = false;
1723 bool added_sth = false;
1724 bool type_preserved = true;
1725
1726 ipa_polymorphic_call_context edge_ctx, *edge_ctx_ptr
1727 = ipa_get_ith_polymorhic_call_context (args, idx);
1728
1729 if (edge_ctx_ptr)
1730 edge_ctx = *edge_ctx_ptr;
1731
1732 if (jfunc->type == IPA_JF_PASS_THROUGH
1733 || jfunc->type == IPA_JF_ANCESTOR)
1734 {
1735 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
1736 int src_idx;
1737 ipcp_lattice<ipa_polymorphic_call_context> *src_lat;
1738
1739 /* TODO: Once we figure out how to propagate speculations, it will
1740 probably be a good idea to switch to speculation if type_preserved is
1741 not set instead of punting. */
1742 if (jfunc->type == IPA_JF_PASS_THROUGH)
1743 {
1744 if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
1745 goto prop_fail;
1746 type_preserved = ipa_get_jf_pass_through_type_preserved (jfunc);
1747 src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
1748 }
1749 else
1750 {
1751 type_preserved = ipa_get_jf_ancestor_type_preserved (jfunc);
1752 src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
1753 }
1754
1755 src_lat = ipa_get_poly_ctx_lat (caller_info, src_idx);
1756 /* If we would need to clone the caller and cannot, do not propagate. */
1757 if (!ipcp_versionable_function_p (cs->caller)
1758 && (src_lat->contains_variable
1759 || (src_lat->values_count > 1)))
1760 goto prop_fail;
1761
1762 ipcp_value<ipa_polymorphic_call_context> *src_val;
1763 for (src_val = src_lat->values; src_val; src_val = src_val->next)
1764 {
1765 ipa_polymorphic_call_context cur = src_val->value;
1766
1767 if (!type_preserved)
1768 cur.possible_dynamic_type_change (cs->in_polymorphic_cdtor);
1769 if (jfunc->type == IPA_JF_ANCESTOR)
1770 cur.offset_by (ipa_get_jf_ancestor_offset (jfunc));
1771 /* TODO: In cases we know how the context is going to be used,
1772 we can improve the result by passing proper OTR_TYPE. */
1773 cur.combine_with (edge_ctx);
1774 if (!cur.useless_p ())
1775 {
1776 if (src_lat->contains_variable
1777 && !edge_ctx.equal_to (cur))
1778 ret |= dest_lat->set_contains_variable ();
1779 ret |= dest_lat->add_value (cur, cs, src_val, src_idx);
1780 added_sth = true;
1781 }
1782 }
1783
1784 }
1785
1786 prop_fail:
1787 if (!added_sth)
1788 {
1789 if (!edge_ctx.useless_p ())
1790 ret |= dest_lat->add_value (edge_ctx, cs);
1791 else
1792 ret |= dest_lat->set_contains_variable ();
1793 }
1794
1795 return ret;
1796 }
1797
1798 /* Propagate bits across jump function JFUNC that is associated with
1799 edge CS and update DEST_LATTICE accordingly. */
1800
1801 bool
1802 propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
1803 ipa_jump_func *jfunc,
1804 ipcp_bits_lattice *dest_lattice)
1805 {
1806 if (dest_lattice->bottom_p ())
1807 return false;
1808
1809 enum availability availability;
1810 cgraph_node *callee = cs->callee->function_symbol (&availability);
1811 struct ipa_node_params *callee_info = IPA_NODE_REF (callee);
1812 tree parm_type = ipa_get_type (callee_info, idx);
1813
1814 /* For K&R C programs, ipa_get_type() could return NULL_TREE.
1815 Avoid the transform for these cases. */
1816 if (!parm_type)
1817 {
1818 if (dump_file && (dump_flags & TDF_DETAILS))
1819 fprintf (dump_file, "Setting dest_lattice to bottom, because"
1820 " param %i type is NULL for %s\n", idx,
1821 cs->callee->name ());
1822
1823 return dest_lattice->set_to_bottom ();
1824 }
1825
1826 unsigned precision = TYPE_PRECISION (parm_type);
1827 signop sgn = TYPE_SIGN (parm_type);
1828
1829 if (jfunc->type == IPA_JF_PASS_THROUGH
1830 || jfunc->type == IPA_JF_ANCESTOR)
1831 {
1832 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
1833 tree operand = NULL_TREE;
1834 enum tree_code code;
1835 unsigned src_idx;
1836
1837 if (jfunc->type == IPA_JF_PASS_THROUGH)
1838 {
1839 code = ipa_get_jf_pass_through_operation (jfunc);
1840 src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
1841 if (code != NOP_EXPR)
1842 operand = ipa_get_jf_pass_through_operand (jfunc);
1843 }
1844 else
1845 {
1846 code = POINTER_PLUS_EXPR;
1847 src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
1848 unsigned HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc) / BITS_PER_UNIT;
1849 operand = build_int_cstu (size_type_node, offset);
1850 }
1851
1852 struct ipcp_param_lattices *src_lats
1853 = ipa_get_parm_lattices (caller_info, src_idx);
1854
1855 /* Try to propagate bits if src_lattice is bottom, but jfunc is known.
1856 For example, consider:
1857 int f(int x)
1858 {
1859 g (x & 0xff);
1860 }
1861 Assume the lattice for x is bottom. We can nevertheless propagate
1862 the known bits of x & 0xff (all bits above the low eight are zero),
1863 computed during the ccp1 pass and stored in the jump function at analysis time. */
1864
1865 if (src_lats->bits_lattice.bottom_p ()
1866 && jfunc->bits)
1867 return dest_lattice->meet_with (jfunc->bits->value, jfunc->bits->mask,
1868 precision);
1869 else
1870 return dest_lattice->meet_with (src_lats->bits_lattice, precision, sgn,
1871 code, operand);
1872 }
1873
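/* Note: the following IPA_JF_ANCESTOR branch is unreachable, since
   ancestor jump functions were already handled by the condition
   above; it is retained here verbatim.  */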
1874 else if (jfunc->type == IPA_JF_ANCESTOR)
1875 return dest_lattice->set_to_bottom ();
1876 else if (jfunc->bits)
1877 return dest_lattice->meet_with (jfunc->bits->value, jfunc->bits->mask,
1878 precision);
1879 else
1880 return dest_lattice->set_to_bottom ();
1881 }
1882
1883 /* Emulate effects of unary OPERATION and/or conversion from SRC_TYPE to
1884 DST_TYPE on value range in SRC_VR and store it to DST_VR. Return true if
1885 the result is a range or an anti-range. */
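/* For instance (an illustrative sketch only), converting the range
   [1, 10] of a 32-bit int through NOP_EXPR to unsigned char fits the
   target type, so the resulting *DST_VR is the VR_RANGE [1, 10] and
   this function returns true.  */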
1886
1887 static bool
1888 ipa_vr_operation_and_type_effects (value_range *dst_vr, value_range *src_vr,
1889 enum tree_code operation,
1890 tree dst_type, tree src_type)
1891 {
1892 memset (dst_vr, 0, sizeof (*dst_vr));
1893 extract_range_from_unary_expr (dst_vr, operation, dst_type, src_vr, src_type);
1894 if (dst_vr->type == VR_RANGE || dst_vr->type == VR_ANTI_RANGE)
1895 return true;
1896 else
1897 return false;
1898 }
1899
1900 /* Propagate value range across jump function JFUNC that is associated with
1901 edge CS with param of callee of PARAM_TYPE and update DEST_PLATS
1902 accordingly. */
1903
1904 static bool
1905 propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
1906 struct ipcp_param_lattices *dest_plats,
1907 tree param_type)
1908 {
1909 ipcp_vr_lattice *dest_lat = &dest_plats->m_value_range;
1910
1911 if (dest_lat->bottom_p ())
1912 return false;
1913
1914 if (!param_type
1915 || (!INTEGRAL_TYPE_P (param_type)
1916 && !POINTER_TYPE_P (param_type)))
1917 return dest_lat->set_to_bottom ();
1918
1919 if (jfunc->type == IPA_JF_PASS_THROUGH)
1920 {
1921 enum tree_code operation = ipa_get_jf_pass_through_operation (jfunc);
1922
1923 if (TREE_CODE_CLASS (operation) == tcc_unary)
1924 {
1925 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
1926 int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
1927 tree operand_type = ipa_get_type (caller_info, src_idx);
1928 struct ipcp_param_lattices *src_lats
1929 = ipa_get_parm_lattices (caller_info, src_idx);
1930
1931 if (src_lats->m_value_range.bottom_p ())
1932 return dest_lat->set_to_bottom ();
1933 value_range vr;
1934 if (ipa_vr_operation_and_type_effects (&vr,
1935 &src_lats->m_value_range.m_vr,
1936 operation, param_type,
1937 operand_type))
1938 return dest_lat->meet_with (&vr);
1939 }
1940 }
1941 else if (jfunc->type == IPA_JF_CONST)
1942 {
1943 tree val = ipa_get_jf_constant (jfunc);
1944 if (TREE_CODE (val) == INTEGER_CST)
1945 {
1946 val = fold_convert (param_type, val);
1947 if (TREE_OVERFLOW_P (val))
1948 val = drop_tree_overflow (val);
1949
1950 value_range tmpvr;
1951 memset (&tmpvr, 0, sizeof (tmpvr));
1952 tmpvr.type = VR_RANGE;
1953 tmpvr.min = val;
1954 tmpvr.max = val;
1955 return dest_lat->meet_with (&tmpvr);
1956 }
1957 }
1958
1959 value_range vr;
1960 if (jfunc->m_vr
1961 && ipa_vr_operation_and_type_effects (&vr, jfunc->m_vr, NOP_EXPR,
1962 param_type,
1963 TREE_TYPE (jfunc->m_vr->min)))
1964 return dest_lat->meet_with (&vr);
1965 else
1966 return dest_lat->set_to_bottom ();
1967 }
1968
1969 /* If DEST_PLATS already has aggregate items, check that aggs_by_ref matches
1970 NEW_AGGS_BY_REF and if not, mark all aggs as bottoms and return true (in all
1971 other cases, return false). If there are no aggregate items, set
1972 aggs_by_ref to NEW_AGGS_BY_REF. */
1973
1974 static bool
1975 set_check_aggs_by_ref (struct ipcp_param_lattices *dest_plats,
1976 bool new_aggs_by_ref)
1977 {
1978 if (dest_plats->aggs)
1979 {
1980 if (dest_plats->aggs_by_ref != new_aggs_by_ref)
1981 {
1982 set_agg_lats_to_bottom (dest_plats);
1983 return true;
1984 }
1985 }
1986 else
1987 dest_plats->aggs_by_ref = new_aggs_by_ref;
1988 return false;
1989 }
1990
1991 /* Walk aggregate lattices in DEST_PLATS from **AGLAT on, until **AGLAT is an
1992 already existing lattice for the given OFFSET and SIZE, marking all skipped
1993 lattices as containing variable and checking for overlaps. If there is no
1994 already existing lattice for the OFFSET and VAL_SIZE, create one, initialize
1995 it with offset, size and contains_variable set to PRE_EXISTING, and return
1996 true, unless there are already too many lattices, in which case return
1997 false. If there are overlaps, turn the whole DEST_PLATS to bottom and
1998 return false. If any skipped lattices were newly marked as containing
1999 variable, set *CHANGE to true. */
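/* An illustrative sketch of the invariant (not part of the pass): the
   list is kept sorted by offset and lattices never overlap.  With
   existing lattices at offsets 0 and 64, both of size 32, merging an
   item at offset 32 with size 32 creates a new lattice between them,
   whereas an item at offset 16 with size 32 overlaps the first lattice
   and turns all of the parameter's aggregate lattices to bottom.  */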
2000
2001 static bool
2002 merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
2003 HOST_WIDE_INT offset, HOST_WIDE_INT val_size,
2004 struct ipcp_agg_lattice ***aglat,
2005 bool pre_existing, bool *change)
2006 {
2007 gcc_checking_assert (offset >= 0);
2008
2009 while (**aglat && (**aglat)->offset < offset)
2010 {
2011 if ((**aglat)->offset + (**aglat)->size > offset)
2012 {
2013 set_agg_lats_to_bottom (dest_plats);
2014 return false;
2015 }
2016 *change |= (**aglat)->set_contains_variable ();
2017 *aglat = &(**aglat)->next;
2018 }
2019
2020 if (**aglat && (**aglat)->offset == offset)
2021 {
2022 if ((**aglat)->size != val_size
2023 || ((**aglat)->next
2024 && (**aglat)->next->offset < offset + val_size))
2025 {
2026 set_agg_lats_to_bottom (dest_plats);
2027 return false;
2028 }
2029 gcc_checking_assert (!(**aglat)->next
2030 || (**aglat)->next->offset >= offset + val_size);
2031 return true;
2032 }
2033 else
2034 {
2035 struct ipcp_agg_lattice *new_al;
2036
2037 if (**aglat && (**aglat)->offset < offset + val_size)
2038 {
2039 set_agg_lats_to_bottom (dest_plats);
2040 return false;
2041 }
2042 if (dest_plats->aggs_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
2043 return false;
2044 dest_plats->aggs_count++;
2045 new_al = ipcp_agg_lattice_pool.allocate ();
2046 memset (new_al, 0, sizeof (*new_al));
2047
2048 new_al->offset = offset;
2049 new_al->size = val_size;
2050 new_al->contains_variable = pre_existing;
2051
2052 new_al->next = **aglat;
2053 **aglat = new_al;
2054 return true;
2055 }
2056 }
2057
2058 /* Mark AGLAT and all other aggregate lattices reachable via next pointers as
2059 containing an unknown value. */
2060
2061 static bool
2062 set_chain_of_aglats_contains_variable (struct ipcp_agg_lattice *aglat)
2063 {
2064 bool ret = false;
2065 while (aglat)
2066 {
2067 ret |= aglat->set_contains_variable ();
2068 aglat = aglat->next;
2069 }
2070 return ret;
2071 }
2072
2073 /* Merge existing aggregate lattices in SRC_PLATS to DEST_PLATS, subtracting
2074 OFFSET_DELTA. CS is the call graph edge and SRC_IDX the index of the source
2075 parameter used for lattice value sources. Return true if DEST_PLATS changed
2076 in any way. */
2077
2078 static bool
2079 merge_aggregate_lattices (struct cgraph_edge *cs,
2080 struct ipcp_param_lattices *dest_plats,
2081 struct ipcp_param_lattices *src_plats,
2082 int src_idx, HOST_WIDE_INT offset_delta)
2083 {
2084 bool pre_existing = dest_plats->aggs != NULL;
2085 struct ipcp_agg_lattice **dst_aglat;
2086 bool ret = false;
2087
2088 if (set_check_aggs_by_ref (dest_plats, src_plats->aggs_by_ref))
2089 return true;
2090 if (src_plats->aggs_bottom)
2091 return set_agg_lats_contain_variable (dest_plats);
2092 if (src_plats->aggs_contain_variable)
2093 ret |= set_agg_lats_contain_variable (dest_plats);
2094 dst_aglat = &dest_plats->aggs;
2095
2096 for (struct ipcp_agg_lattice *src_aglat = src_plats->aggs;
2097 src_aglat;
2098 src_aglat = src_aglat->next)
2099 {
2100 HOST_WIDE_INT new_offset = src_aglat->offset - offset_delta;
2101
2102 if (new_offset < 0)
2103 continue;
2104 if (merge_agg_lats_step (dest_plats, new_offset, src_aglat->size,
2105 &dst_aglat, pre_existing, &ret))
2106 {
2107 struct ipcp_agg_lattice *new_al = *dst_aglat;
2108
2109 dst_aglat = &(*dst_aglat)->next;
2110 if (src_aglat->bottom)
2111 {
2112 ret |= new_al->set_contains_variable ();
2113 continue;
2114 }
2115 if (src_aglat->contains_variable)
2116 ret |= new_al->set_contains_variable ();
2117 for (ipcp_value<tree> *val = src_aglat->values;
2118 val;
2119 val = val->next)
2120 ret |= new_al->add_value (val->value, cs, val, src_idx,
2121 src_aglat->offset);
2122 }
2123 else if (dest_plats->aggs_bottom)
2124 return true;
2125 }
2126 ret |= set_chain_of_aglats_contains_variable (*dst_aglat);
2127 return ret;
2128 }
2129
2130 /* Determine whether there is anything to propagate from SRC_PLATS through a
2131 pass-through JFUNC and if so, whether it conforms to the rules about
2132 propagating values passed by reference. */
2133
2134 static bool
2135 agg_pass_through_permissible_p (struct ipcp_param_lattices *src_plats,
2136 struct ipa_jump_func *jfunc)
2137 {
2138 return src_plats->aggs
2139 && (!src_plats->aggs_by_ref
2140 || ipa_get_jf_pass_through_agg_preserved (jfunc));
2141 }
2142
2143 /* Propagate aggregate values across jump function JFUNC that is associated
2144 with edge CS and put the values into DEST_PLATS. */
2145
2146 static bool
2147 propagate_aggs_across_jump_function (struct cgraph_edge *cs,
2148 struct ipa_jump_func *jfunc,
2149 struct ipcp_param_lattices *dest_plats)
2150 {
2151 bool ret = false;
2152
2153 if (dest_plats->aggs_bottom)
2154 return false;
2155
2156 if (jfunc->type == IPA_JF_PASS_THROUGH
2157 && ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
2158 {
2159 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
2160 int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
2161 struct ipcp_param_lattices *src_plats;
2162
2163 src_plats = ipa_get_parm_lattices (caller_info, src_idx);
2164 if (agg_pass_through_permissible_p (src_plats, jfunc))
2165 {
2166 /* Currently we do not produce clobber aggregate jump
2167 functions, replace with merging when we do. */
2168 gcc_assert (!jfunc->agg.items);
2169 ret |= merge_aggregate_lattices (cs, dest_plats, src_plats,
2170 src_idx, 0);
2171 }
2172 else
2173 ret |= set_agg_lats_contain_variable (dest_plats);
2174 }
2175 else if (jfunc->type == IPA_JF_ANCESTOR
2176 && ipa_get_jf_ancestor_agg_preserved (jfunc))
2177 {
2178 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
2179 int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
2180 struct ipcp_param_lattices *src_plats;
2181
2182 src_plats = ipa_get_parm_lattices (caller_info, src_idx);
2183 if (src_plats->aggs && src_plats->aggs_by_ref)
2184 {
2185 /* Currently we do not produce clobber aggregate jump
2186 functions, replace with merging when we do. */
2187 gcc_assert (!jfunc->agg.items);
2188 ret |= merge_aggregate_lattices (cs, dest_plats, src_plats, src_idx,
2189 ipa_get_jf_ancestor_offset (jfunc));
2190 }
2191 else if (!src_plats->aggs_by_ref)
2192 ret |= set_agg_lats_to_bottom (dest_plats);
2193 else
2194 ret |= set_agg_lats_contain_variable (dest_plats);
2195 }
2196 else if (jfunc->agg.items)
2197 {
2198 bool pre_existing = dest_plats->aggs != NULL;
2199 struct ipcp_agg_lattice **aglat = &dest_plats->aggs;
2200 struct ipa_agg_jf_item *item;
2201 int i;
2202
2203 if (set_check_aggs_by_ref (dest_plats, jfunc->agg.by_ref))
2204 return true;
2205
2206 FOR_EACH_VEC_ELT (*jfunc->agg.items, i, item)
2207 {
2208 HOST_WIDE_INT val_size;
2209
2210 if (item->offset < 0)
2211 continue;
2212 gcc_checking_assert (is_gimple_ip_invariant (item->value));
2213 val_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (item->value)));
2214
2215 if (merge_agg_lats_step (dest_plats, item->offset, val_size,
2216 &aglat, pre_existing, &ret))
2217 {
2218 ret |= (*aglat)->add_value (item->value, cs, NULL, 0, 0);
2219 aglat = &(*aglat)->next;
2220 }
2221 else if (dest_plats->aggs_bottom)
2222 return true;
2223 }
2224
2225 ret |= set_chain_of_aglats_contains_variable (*aglat);
2226 }
2227 else
2228 ret |= set_agg_lats_contain_variable (dest_plats);
2229
2230 return ret;
2231 }
2232
2233 /* Return true if on the way from CS->caller to the final (non-alias and
2234 non-thunk) destination, the call passes through a thunk. */
2235
2236 static bool
2237 call_passes_through_thunk_p (cgraph_edge *cs)
2238 {
2239 cgraph_node *alias_or_thunk = cs->callee;
2240 while (alias_or_thunk->alias)
2241 alias_or_thunk = alias_or_thunk->get_alias_target ();
2242 return alias_or_thunk->thunk.thunk_p;
2243 }
2244
2245 /* Propagate constants from the caller to the callee of CS. The lattices
2246 and jump functions involved are looked up via the edge and node summaries. */
2247
2248 static bool
2249 propagate_constants_across_call (struct cgraph_edge *cs)
2250 {
2251 struct ipa_node_params *callee_info;
2252 enum availability availability;
2253 cgraph_node *callee;
2254 struct ipa_edge_args *args;
2255 bool ret = false;
2256 int i, args_count, parms_count;
2257
2258 callee = cs->callee->function_symbol (&availability);
2259 if (!callee->definition)
2260 return false;
2261 gcc_checking_assert (callee->has_gimple_body_p ());
2262 callee_info = IPA_NODE_REF (callee);
2263
2264 args = IPA_EDGE_REF (cs);
2265 args_count = ipa_get_cs_argument_count (args);
2266 parms_count = ipa_get_param_count (callee_info);
2267 if (parms_count == 0)
2268 return false;
2269
2270 /* No propagation through instrumentation thunks is available yet.
2271 It should be possible with proper mapping of call args and
2272 instrumented callee params in the propagation loop below. But
2273 this case mostly occurs when legacy code calls instrumented code
2274 and it is not a primary target for optimizations.
2275 We detect instrumentation thunks in aliases and thunks chain by
2276 checking instrumentation_clone flag for chain source and target.
2277 Going through instrumentation thunks we always have it changed
2278 from 0 to 1 and all other nodes do not change it. */
2279 if (!cs->callee->instrumentation_clone
2280 && callee->instrumentation_clone)
2281 {
2282 for (i = 0; i < parms_count; i++)
2283 ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
2284 i));
2285 return ret;
2286 }
2287
2288 /* If this call goes through a thunk we must not propagate to the first (0th)
2289 parameter. However, we might need to uncover a thunk from below a series
2290 of aliases first. */
2291 if (call_passes_through_thunk_p (cs))
2292 {
2293 ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
2294 0));
2295 i = 1;
2296 }
2297 else
2298 i = 0;
2299
2300 for (; (i < args_count) && (i < parms_count); i++)
2301 {
2302 struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
2303 struct ipcp_param_lattices *dest_plats;
2304 tree param_type = ipa_get_type (callee_info, i);
2305
2306 dest_plats = ipa_get_parm_lattices (callee_info, i);
2307 if (availability == AVAIL_INTERPOSABLE)
2308 ret |= set_all_contains_variable (dest_plats);
2309 else
2310 {
2311 ret |= propagate_scalar_across_jump_function (cs, jump_func,
2312 &dest_plats->itself,
2313 param_type);
2314 ret |= propagate_context_across_jump_function (cs, jump_func, i,
2315 &dest_plats->ctxlat);
2316 ret
2317 |= propagate_bits_across_jump_function (cs, i, jump_func,
2318 &dest_plats->bits_lattice);
2319 ret |= propagate_aggs_across_jump_function (cs, jump_func,
2320 dest_plats);
2321 if (opt_for_fn (callee->decl, flag_ipa_vrp))
2322 ret |= propagate_vr_across_jump_function (cs, jump_func,
2323 dest_plats, param_type);
2324 else
2325 ret |= dest_plats->m_value_range.set_to_bottom ();
2326 }
2327 }
2328 for (; i < parms_count; i++)
2329 ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info, i));
2330
2331 return ret;
2332 }
2333
2334 /* If an indirect edge IE can be turned into a direct one based on KNOWN_CSTS,
2335 KNOWN_CONTEXTS, KNOWN_AGGS or AGG_REPS, return the destination. The latter
2336 three can be NULL. If AGG_REPS is not NULL, KNOWN_AGGS is ignored. */
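/* An illustrative sketch (not part of the pass): for

     void foo (struct S *s) { s->callback (s); }

   where propagation has shown that every caller stores the address of
   the same function into the callback field, the aggregate lookup below
   retrieves that ADDR_EXPR and the indirect call can be made direct.
   The polymorphic path further below plays the same role for C++
   virtual calls, using the virtual table pointer instead.  */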
2337
2338 static tree
2339 ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
2340 vec<tree> known_csts,
2341 vec<ipa_polymorphic_call_context> known_contexts,
2342 vec<ipa_agg_jump_function_p> known_aggs,
2343 struct ipa_agg_replacement_value *agg_reps,
2344 bool *speculative)
2345 {
2346 int param_index = ie->indirect_info->param_index;
2347 HOST_WIDE_INT anc_offset;
2348 tree t;
2349 tree target = NULL;
2350
2351 *speculative = false;
2352
2353 if (param_index == -1
2354 || known_csts.length () <= (unsigned int) param_index)
2355 return NULL_TREE;
2356
2357 if (!ie->indirect_info->polymorphic)
2358 {
2359 tree t;
2360
2361 if (ie->indirect_info->agg_contents)
2362 {
2363 t = NULL;
2364 if (agg_reps && ie->indirect_info->guaranteed_unmodified)
2365 {
2366 while (agg_reps)
2367 {
2368 if (agg_reps->index == param_index
2369 && agg_reps->offset == ie->indirect_info->offset
2370 && agg_reps->by_ref == ie->indirect_info->by_ref)
2371 {
2372 t = agg_reps->value;
2373 break;
2374 }
2375 agg_reps = agg_reps->next;
2376 }
2377 }
2378 if (!t)
2379 {
2380 struct ipa_agg_jump_function *agg;
2381 if (known_aggs.length () > (unsigned int) param_index)
2382 agg = known_aggs[param_index];
2383 else
2384 agg = NULL;
2385 bool from_global_constant;
2386 t = ipa_find_agg_cst_for_param (agg, known_csts[param_index],
2387 ie->indirect_info->offset,
2388 ie->indirect_info->by_ref,
2389 &from_global_constant);
2390 if (t
2391 && !from_global_constant
2392 && !ie->indirect_info->guaranteed_unmodified)
2393 t = NULL_TREE;
2394 }
2395 }
2396 else
2397 t = known_csts[param_index];
2398
2399 if (t
2400 && TREE_CODE (t) == ADDR_EXPR
2401 && TREE_CODE (TREE_OPERAND (t, 0)) == FUNCTION_DECL)
2402 return TREE_OPERAND (t, 0);
2403 else
2404 return NULL_TREE;
2405 }
2406
2407 if (!opt_for_fn (ie->caller->decl, flag_devirtualize))
2408 return NULL_TREE;
2409
2410 gcc_assert (!ie->indirect_info->agg_contents);
2411 anc_offset = ie->indirect_info->offset;
2412
2413 t = NULL;
2414
2415 /* Try to work out the value of the virtual table pointer from aggregate replacements. */
2416 if (!t && agg_reps && !ie->indirect_info->by_ref)
2417 {
2418 while (agg_reps)
2419 {
2420 if (agg_reps->index == param_index
2421 && agg_reps->offset == ie->indirect_info->offset
2422 && agg_reps->by_ref)
2423 {
2424 t = agg_reps->value;
2425 break;
2426 }
2427 agg_reps = agg_reps->next;
2428 }
2429 }
2430
2431 /* Try to work out the value of the virtual table pointer from known
2432 aggregate values. */
2433 if (!t && known_aggs.length () > (unsigned int) param_index
2434 && !ie->indirect_info->by_ref)
2435 {
2436 struct ipa_agg_jump_function *agg;
2437 agg = known_aggs[param_index];
2438 t = ipa_find_agg_cst_for_param (agg, known_csts[param_index],
2439 ie->indirect_info->offset, true);
2440 }
2441
2442 /* If we found the virtual table pointer, lookup the target. */
2443 if (t)
2444 {
2445 tree vtable;
2446 unsigned HOST_WIDE_INT offset;
2447 if (vtable_pointer_value_to_vtable (t, &vtable, &offset))
2448 {
2449 bool can_refer;
2450 target = gimple_get_virt_method_for_vtable (ie->indirect_info->otr_token,
2451 vtable, offset, &can_refer);
2452 if (can_refer)
2453 {
2454 if (!target
2455 || (TREE_CODE (TREE_TYPE (target)) == FUNCTION_TYPE
2456 && DECL_FUNCTION_CODE (target) == BUILT_IN_UNREACHABLE)
2457 || !possible_polymorphic_call_target_p
2458 (ie, cgraph_node::get (target)))
2459 {
2460 /* Do not speculate builtin_unreachable, it is stupid! */
2461 if (ie->indirect_info->vptr_changed)
2462 return NULL;
2463 target = ipa_impossible_devirt_target (ie, target);
2464 }
2465 *speculative = ie->indirect_info->vptr_changed;
2466 if (!*speculative)
2467 return target;
2468 }
2469 }
2470 }
2471
2472 /* Do we know the constant value of the pointer? */
2473 if (!t)
2474 t = known_csts[param_index];
2475
2476 gcc_checking_assert (!t || TREE_CODE (t) != TREE_BINFO);
2477
2478 ipa_polymorphic_call_context context;
2479 if (known_contexts.length () > (unsigned int) param_index)
2480 {
2481 context = known_contexts[param_index];
2482 context.offset_by (anc_offset);
2483 if (ie->indirect_info->vptr_changed)
2484 context.possible_dynamic_type_change (ie->in_polymorphic_cdtor,
2485 ie->indirect_info->otr_type);
2486 if (t)
2487 {
2488 ipa_polymorphic_call_context ctx2 = ipa_polymorphic_call_context
2489 (t, ie->indirect_info->otr_type, anc_offset);
2490 if (!ctx2.useless_p ())
2491 context.combine_with (ctx2, ie->indirect_info->otr_type);
2492 }
2493 }
2494 else if (t)
2495 {
2496 context = ipa_polymorphic_call_context (t, ie->indirect_info->otr_type,
2497 anc_offset);
2498 if (ie->indirect_info->vptr_changed)
2499 context.possible_dynamic_type_change (ie->in_polymorphic_cdtor,
2500 ie->indirect_info->otr_type);
2501 }
2502 else
2503 return NULL_TREE;
2504
2505 vec <cgraph_node *>targets;
2506 bool final;
2507
2508 targets = possible_polymorphic_call_targets
2509 (ie->indirect_info->otr_type,
2510 ie->indirect_info->otr_token,
2511 context, &final);
2512 if (!final || targets.length () > 1)
2513 {
2514 struct cgraph_node *node;
2515 if (*speculative)
2516 return target;
2517 if (!opt_for_fn (ie->caller->decl, flag_devirtualize_speculatively)
2518 || ie->speculative || !ie->maybe_hot_p ())
2519 return NULL;
2520 node = try_speculative_devirtualization (ie->indirect_info->otr_type,
2521 ie->indirect_info->otr_token,
2522 context);
2523 if (node)
2524 {
2525 *speculative = true;
2526 target = node->decl;
2527 }
2528 else
2529 return NULL;
2530 }
2531 else
2532 {
2533 *speculative = false;
2534 if (targets.length () == 1)
2535 target = targets[0]->decl;
2536 else
2537 target = ipa_impossible_devirt_target (ie, NULL_TREE);
2538 }
2539
2540 if (target && !possible_polymorphic_call_target_p (ie,
2541 cgraph_node::get (target)))
2542 {
2543 if (*speculative)
2544 return NULL;
2545 target = ipa_impossible_devirt_target (ie, target);
2546 }
2547
2548 return target;
2549 }
2550
2551
2552 /* If an indirect edge IE can be turned into a direct one based on KNOWN_CSTS,
2553 KNOWN_CONTEXTS (which can be vNULL) or KNOWN_AGGS (which also can be vNULL),
2554 return the destination. */
2555
2556 tree
2557 ipa_get_indirect_edge_target (struct cgraph_edge *ie,
2558 vec<tree> known_csts,
2559 vec<ipa_polymorphic_call_context> known_contexts,
2560 vec<ipa_agg_jump_function_p> known_aggs,
2561 bool *speculative)
2562 {
2563 return ipa_get_indirect_edge_target_1 (ie, known_csts, known_contexts,
2564 known_aggs, NULL, speculative);
2565 }
2566
2567 /* Calculate devirtualization time bonus for NODE, assuming we know KNOWN_CSTS
2568 and KNOWN_CONTEXTS. */
2569
2570 static int
2571 devirtualization_time_bonus (struct cgraph_node *node,
2572 vec<tree> known_csts,
2573 vec<ipa_polymorphic_call_context> known_contexts,
2574 vec<ipa_agg_jump_function_p> known_aggs)
2575 {
2576 struct cgraph_edge *ie;
2577 int res = 0;
2578
2579 for (ie = node->indirect_calls; ie; ie = ie->next_callee)
2580 {
2581 struct cgraph_node *callee;
2582 struct ipa_fn_summary *isummary;
2583 enum availability avail;
2584 tree target;
2585 bool speculative;
2586
2587 target = ipa_get_indirect_edge_target (ie, known_csts, known_contexts,
2588 known_aggs, &speculative);
2589 if (!target)
2590 continue;
2591
2592 /* Only bare minimum benefit for clearly un-inlineable targets. */
2593 res += 1;
2594 callee = cgraph_node::get (target);
2595 if (!callee || !callee->definition)
2596 continue;
2597 callee = callee->function_symbol (&avail);
2598 if (avail < AVAIL_AVAILABLE)
2599 continue;
2600 isummary = ipa_fn_summaries->get (callee);
2601 if (!isummary->inlinable)
2602 continue;
2603
2604 /* FIXME: The values below need re-considering and perhaps also
2605 integrating into the cost metrics, at least in some very basic way. */
2606 if (isummary->size <= MAX_INLINE_INSNS_AUTO / 4)
2607 res += 31 / ((int)speculative + 1);
2608 else if (isummary->size <= MAX_INLINE_INSNS_AUTO / 2)
2609 res += 15 / ((int)speculative + 1);
2610 else if (isummary->size <= MAX_INLINE_INSNS_AUTO
2611 || DECL_DECLARED_INLINE_P (callee->decl))
2612 res += 7 / ((int)speculative + 1);
2613 }
2614
2615 return res;
2616 }
2617
2618 /* Return time bonus incurred because of HINTS. */
2619
2620 static int
2621 hint_time_bonus (ipa_hints hints)
2622 {
2623 int result = 0;
2624 if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
2625 result += PARAM_VALUE (PARAM_IPA_CP_LOOP_HINT_BONUS);
2626 if (hints & INLINE_HINT_array_index)
2627 result += PARAM_VALUE (PARAM_IPA_CP_ARRAY_INDEX_HINT_BONUS);
2628 return result;
2629 }
2630
2631 /* If there is a reason to penalize the function described by INFO in the
2632 cloning goodness evaluation, do so. */
2633
2634 static inline int64_t
2635 incorporate_penalties (ipa_node_params *info, int64_t evaluation)
2636 {
2637 if (info->node_within_scc)
2638 evaluation = (evaluation
2639 * (100 - PARAM_VALUE (PARAM_IPA_CP_RECURSION_PENALTY))) / 100;
2640
2641 if (info->node_calling_single_call)
2642 evaluation = (evaluation
2643 * (100 - PARAM_VALUE (PARAM_IPA_CP_SINGLE_CALL_PENALTY)))
2644 / 100;
2645
2646 return evaluation;
2647 }
2648
2649 /* Return true if cloning NODE is a good idea, given the estimated TIME_BENEFIT
2650 and SIZE_COST, the sum of frequencies of incoming edges to the potential new
2651 clone in FREQ_SUM and the sum of their profile counts in COUNT_SUM. */
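/* A rough worked example with made-up numbers: with TIME_BENEFIT 200,
   FREQ_SUM 15 and SIZE_COST 10 (and no IPA profile counts), the
   evaluation below is 200 * 15 / 10 = 300.  incorporate_penalties then
   scales it down, e.g. a 40% recursion penalty leaves 180, which is
   finally compared against PARAM_IPA_CP_EVAL_THRESHOLD to decide
   whether the clone is worthwhile.  */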
2652
2653 static bool
2654 good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
2655 int freq_sum, profile_count count_sum, int size_cost)
2656 {
2657 if (time_benefit == 0
2658 || !opt_for_fn (node->decl, flag_ipa_cp_clone)
2659 || node->optimize_for_size_p ())
2660 return false;
2661
2662 gcc_assert (size_cost > 0);
2663
2664 struct ipa_node_params *info = IPA_NODE_REF (node);
2665 if (max_count > profile_count::zero ())
2666 {
2667 int factor = RDIV (count_sum.probability_in
2668 (max_count).to_reg_br_prob_base ()
2669 * 1000, REG_BR_PROB_BASE);
2670 int64_t evaluation = (((int64_t) time_benefit * factor)
2671 / size_cost);
2672 evaluation = incorporate_penalties (info, evaluation);
2673
2674 if (dump_file && (dump_flags & TDF_DETAILS))
2675 {
2676 fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
2677 "size: %i, count_sum: ", time_benefit, size_cost);
2678 count_sum.dump (dump_file);
2679 fprintf (dump_file, "%s%s) -> evaluation: " "%" PRId64
2680 ", threshold: %i\n",
2681 info->node_within_scc ? ", scc" : "",
2682 info->node_calling_single_call ? ", single_call" : "",
2683 evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
2684 }
2685
2686 return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
2687 }
2688 else
2689 {
2690 int64_t evaluation = (((int64_t) time_benefit * freq_sum)
2691 / size_cost);
2692 evaluation = incorporate_penalties (info, evaluation);
2693
2694 if (dump_file && (dump_flags & TDF_DETAILS))
2695 fprintf (dump_file, " good_cloning_opportunity_p (time: %i, "
2696 "size: %i, freq_sum: %i%s%s) -> evaluation: "
2697 "%" PRId64 ", threshold: %i\n",
2698 time_benefit, size_cost, freq_sum,
2699 info->node_within_scc ? ", scc" : "",
2700 info->node_calling_single_call ? ", single_call" : "",
2701 evaluation, PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD));
2702
2703 return evaluation >= PARAM_VALUE (PARAM_IPA_CP_EVAL_THRESHOLD);
2704 }
2705 }
2706
2707 /* Return all context independent values from aggregate lattices in PLATS in a
2708 vector. Return NULL if there are none. */
2709
2710 static vec<ipa_agg_jf_item, va_gc> *
2711 context_independent_aggregate_values (struct ipcp_param_lattices *plats)
2712 {
2713 vec<ipa_agg_jf_item, va_gc> *res = NULL;
2714
2715 if (plats->aggs_bottom
2716 || plats->aggs_contain_variable
2717 || plats->aggs_count == 0)
2718 return NULL;
2719
2720 for (struct ipcp_agg_lattice *aglat = plats->aggs;
2721 aglat;
2722 aglat = aglat->next)
2723 if (aglat->is_single_const ())
2724 {
2725 struct ipa_agg_jf_item item;
2726 item.offset = aglat->offset;
2727 item.value = aglat->values->value;
2728 vec_safe_push (res, item);
2729 }
2730 return res;
2731 }
2732
2733 /* Allocate KNOWN_CSTS, KNOWN_CONTEXTS and, if non-NULL, KNOWN_AGGS and
2734 populate them with values of parameters that are known independent of the
2735 context. INFO describes the function. If REMOVABLE_PARAMS_COST is
2736 non-NULL, the movement cost of all removable parameters will be stored in
2737 it. */
2738
2739 static bool
2740 gather_context_independent_values (struct ipa_node_params *info,
2741 vec<tree> *known_csts,
2742 vec<ipa_polymorphic_call_context>
2743 *known_contexts,
2744 vec<ipa_agg_jump_function> *known_aggs,
2745 int *removable_params_cost)
2746 {
2747 int i, count = ipa_get_param_count (info);
2748 bool ret = false;
2749
2750 known_csts->create (0);
2751 known_contexts->create (0);
2752 known_csts->safe_grow_cleared (count);
2753 known_contexts->safe_grow_cleared (count);
2754 if (known_aggs)
2755 {
2756 known_aggs->create (0);
2757 known_aggs->safe_grow_cleared (count);
2758 }
2759
2760 if (removable_params_cost)
2761 *removable_params_cost = 0;
2762
2763 for (i = 0; i < count; i++)
2764 {
2765 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
2766 ipcp_lattice<tree> *lat = &plats->itself;
2767
2768 if (lat->is_single_const ())
2769 {
2770 ipcp_value<tree> *val = lat->values;
2771 gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
2772 (*known_csts)[i] = val->value;
2773 if (removable_params_cost)
2774 *removable_params_cost
2775 += estimate_move_cost (TREE_TYPE (val->value), false);
2776 ret = true;
2777 }
2778 else if (removable_params_cost
2779 && !ipa_is_param_used (info, i))
2780 *removable_params_cost
2781 += ipa_get_param_move_cost (info, i);
2782
2783 if (!ipa_is_param_used (info, i))
2784 continue;
2785
2786 ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
2787 /* Do not account known context as reason for cloning. We can see
2788 if it permits devirtualization. */
2789 if (ctxlat->is_single_const ())
2790 (*known_contexts)[i] = ctxlat->values->value;
2791
2792 if (known_aggs)
2793 {
2794 vec<ipa_agg_jf_item, va_gc> *agg_items;
2795 struct ipa_agg_jump_function *ajf;
2796
2797 agg_items = context_independent_aggregate_values (plats);
2798 ajf = &(*known_aggs)[i];
2799 ajf->items = agg_items;
2800 ajf->by_ref = plats->aggs_by_ref;
2801 ret |= agg_items != NULL;
2802 }
2803 }
2804
2805 return ret;
2806 }
2807
2808 /* The current interface in ipa-inline-analysis requires a pointer vector.
2809 Create it.
2810
2811 FIXME: That interface should be re-worked, this is slightly silly. Still,
2812 I'd like to discuss how to change it first and this demonstrates the
2813 issue. */
2814
2815 static vec<ipa_agg_jump_function_p>
2816 agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function> known_aggs)
2817 {
2818 vec<ipa_agg_jump_function_p> ret;
2819 struct ipa_agg_jump_function *ajf;
2820 int i;
2821
2822 ret.create (known_aggs.length ());
2823 FOR_EACH_VEC_ELT (known_aggs, i, ajf)
2824 ret.quick_push (ajf);
2825 return ret;
2826 }
2827
2828 /* Perform time and size measurement of NODE with the context given in
2829 KNOWN_CSTS, KNOWN_CONTEXTS and KNOWN_AGGS, calculate the benefit and cost
2830 given BASE_TIME of the node without specialization, REMOVABLE_PARAMS_COST of
2831 all context-independent removable parameters and EST_MOVE_COST of estimated
2832 movement of the considered parameter and store it into VAL. */
2833
2834 static void
2835 perform_estimation_of_a_value (cgraph_node *node, vec<tree> known_csts,
2836 vec<ipa_polymorphic_call_context> known_contexts,
2837 vec<ipa_agg_jump_function_p> known_aggs_ptrs,
2838 int removable_params_cost,
2839 int est_move_cost, ipcp_value_base *val)
2840 {
2841 int size, time_benefit;
2842 sreal time, base_time;
2843 ipa_hints hints;
2844
2845 estimate_ipcp_clone_size_and_time (node, known_csts, known_contexts,
2846 known_aggs_ptrs, &size, &time,
2847 &base_time, &hints);
2848 base_time -= time;
2849 if (base_time > 65535)
2850 base_time = 65535;
2851 time_benefit = base_time.to_int ()
2852 + devirtualization_time_bonus (node, known_csts, known_contexts,
2853 known_aggs_ptrs)
2854 + hint_time_bonus (hints)
2855 + removable_params_cost + est_move_cost;
2856
2857 gcc_checking_assert (size >= 0);
2858 /* The inliner-heuristics based estimates may think that in certain
2859 contexts some functions do not have any size at all but we want
2860 all specializations to have at least a tiny cost, if only to avoid
2861 dividing by zero. */
2862 if (size == 0)
2863 size = 1;
2864
2865 val->local_time_benefit = time_benefit;
2866 val->local_size_cost = size;
2867 }
2868
2869 /* Iterate over known values of parameters of NODE and estimate the local
2870 effects in terms of time and size they have. */
2871
2872 static void
2873 estimate_local_effects (struct cgraph_node *node)
2874 {
2875 struct ipa_node_params *info = IPA_NODE_REF (node);
2876 int i, count = ipa_get_param_count (info);
2877 vec<tree> known_csts;
2878 vec<ipa_polymorphic_call_context> known_contexts;
2879 vec<ipa_agg_jump_function> known_aggs;
2880 vec<ipa_agg_jump_function_p> known_aggs_ptrs;
2881 bool always_const;
2882 int removable_params_cost;
2883
2884 if (!count || !ipcp_versionable_function_p (node))
2885 return;
2886
2887 if (dump_file && (dump_flags & TDF_DETAILS))
2888 fprintf (dump_file, "\nEstimating effects for %s.\n", node->dump_name ());
2889
2890 always_const = gather_context_independent_values (info, &known_csts,
2891 &known_contexts, &known_aggs,
2892 &removable_params_cost);
2893 known_aggs_ptrs = agg_jmp_p_vec_for_t_vec (known_aggs);
2894 int devirt_bonus = devirtualization_time_bonus (node, known_csts,
2895 known_contexts, known_aggs_ptrs);
2896 if (always_const || devirt_bonus
2897 || (removable_params_cost && node->local.can_change_signature))
2898 {
2899 struct caller_statistics stats;
2900 ipa_hints hints;
2901 sreal time, base_time;
2902 int size;
2903
2904 init_caller_stats (&stats);
2905 node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
2906 false);
2907 estimate_ipcp_clone_size_and_time (node, known_csts, known_contexts,
2908 known_aggs_ptrs, &size, &time,
2909 &base_time, &hints);
2910 time -= devirt_bonus;
2911 time -= hint_time_bonus (hints);
2912 time -= removable_params_cost;
2913 size -= stats.n_calls * removable_params_cost;
2914
2915 if (dump_file)
2916 fprintf (dump_file, " - context independent values, size: %i, "
2917 "time_benefit: %f\n", size, (base_time - time).to_double ());
2918
2919 if (size <= 0 || node->local.local)
2920 {
2921 info->do_clone_for_all_contexts = true;
2922
2923 if (dump_file)
2924 fprintf (dump_file, " Decided to specialize for all "
2925 "known contexts, code not going to grow.\n");
2926 }
2927 else if (good_cloning_opportunity_p (node,
2928 MIN ((base_time - time).to_int (),
2929 65536),
2930 stats.freq_sum, stats.count_sum,
2931 size))
2932 {
2933 if (size + overall_size <= max_new_size)
2934 {
2935 info->do_clone_for_all_contexts = true;
2936 overall_size += size;
2937
2938 if (dump_file)
2939 fprintf (dump_file, " Decided to specialize for all "
2940 "known contexts, growth deemed beneficial.\n");
2941 }
2942 else if (dump_file && (dump_flags & TDF_DETAILS))
2943 fprintf (dump_file, " Not cloning for all contexts because "
2944 "max_new_size would be reached with %li.\n",
2945 size + overall_size);
2946 }
2947 else if (dump_file && (dump_flags & TDF_DETAILS))
2948 fprintf (dump_file, " Not cloning for all contexts because "
2949 "!good_cloning_opportunity_p.\n");
2950
2951 }
2952
2953 for (i = 0; i < count; i++)
2954 {
2955 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
2956 ipcp_lattice<tree> *lat = &plats->itself;
2957 ipcp_value<tree> *val;
2958
2959 if (lat->bottom
2960 || !lat->values
2961 || known_csts[i])
2962 continue;
2963
2964 for (val = lat->values; val; val = val->next)
2965 {
2966 gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
2967 known_csts[i] = val->value;
2968
2969 int emc = estimate_move_cost (TREE_TYPE (val->value), true);
2970 perform_estimation_of_a_value (node, known_csts, known_contexts,
2971 known_aggs_ptrs,
2972 removable_params_cost, emc, val);
2973
2974 if (dump_file && (dump_flags & TDF_DETAILS))
2975 {
2976 fprintf (dump_file, " - estimates for value ");
2977 print_ipcp_constant_value (dump_file, val->value);
2978 fprintf (dump_file, " for ");
2979 ipa_dump_param (dump_file, info, i);
2980 fprintf (dump_file, ": time_benefit: %i, size: %i\n",
2981 val->local_time_benefit, val->local_size_cost);
2982 }
2983 }
2984 known_csts[i] = NULL_TREE;
2985 }
2986
2987 for (i = 0; i < count; i++)
2988 {
2989 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
2990
2991 if (!plats->virt_call)
2992 continue;
2993
2994 ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
2995 ipcp_value<ipa_polymorphic_call_context> *val;
2996
2997 if (ctxlat->bottom
2998 || !ctxlat->values
2999 || !known_contexts[i].useless_p ())
3000 continue;
3001
3002 for (val = ctxlat->values; val; val = val->next)
3003 {
3004 known_contexts[i] = val->value;
3005 perform_estimation_of_a_value (node, known_csts, known_contexts,
3006 known_aggs_ptrs,
3007 removable_params_cost, 0, val);
3008
3009 if (dump_file && (dump_flags & TDF_DETAILS))
3010 {
3011 fprintf (dump_file, " - estimates for polymorphic context ");
3012 print_ipcp_constant_value (dump_file, val->value);
3013 fprintf (dump_file, " for ");
3014 ipa_dump_param (dump_file, info, i);
3015 fprintf (dump_file, ": time_benefit: %i, size: %i\n",
3016 val->local_time_benefit, val->local_size_cost);
3017 }
3018 }
3019 known_contexts[i] = ipa_polymorphic_call_context ();
3020 }
3021
3022 for (i = 0; i < count; i++)
3023 {
3024 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
3025 struct ipa_agg_jump_function *ajf;
3026 struct ipcp_agg_lattice *aglat;
3027
3028 if (plats->aggs_bottom || !plats->aggs)
3029 continue;
3030
3031 ajf = &known_aggs[i];
3032 for (aglat = plats->aggs; aglat; aglat = aglat->next)
3033 {
3034 ipcp_value<tree> *val;
3035 if (aglat->bottom || !aglat->values
3036 /* If the following is true, the one value is in known_aggs. */
3037 || (!plats->aggs_contain_variable
3038 && aglat->is_single_const ()))
3039 continue;
3040
3041 for (val = aglat->values; val; val = val->next)
3042 {
3043 struct ipa_agg_jf_item item;
3044
3045 item.offset = aglat->offset;
3046 item.value = val->value;
3047 vec_safe_push (ajf->items, item);
3048
3049 perform_estimation_of_a_value (node, known_csts, known_contexts,
3050 known_aggs_ptrs,
3051 removable_params_cost, 0, val);
3052
3053 if (dump_file && (dump_flags & TDF_DETAILS))
3054 {
3055 fprintf (dump_file, " - estimates for value ");
3056 print_ipcp_constant_value (dump_file, val->value);
3057 fprintf (dump_file, " for ");
3058 ipa_dump_param (dump_file, info, i);
3059 fprintf (dump_file, "[%soffset: " HOST_WIDE_INT_PRINT_DEC
3060 "]: time_benefit: %i, size: %i\n",
3061 plats->aggs_by_ref ? "ref " : "",
3062 aglat->offset,
3063 val->local_time_benefit, val->local_size_cost);
3064 }
3065
3066 ajf->items->pop ();
3067 }
3068 }
3069 }
3070
3071 for (i = 0; i < count; i++)
3072 vec_free (known_aggs[i].items);
3073
3074 known_csts.release ();
3075 known_contexts.release ();
3076 known_aggs.release ();
3077 known_aggs_ptrs.release ();
3078 }
3079
3080
3081 /* Add value CUR_VAL and all yet-unsorted values it is dependent on to the
3082 topological sort of values, using one DFS walk of Tarjan's SCC algorithm. */
3083
3084 template <typename valtype>
3085 void
3086 value_topo_info<valtype>::add_val (ipcp_value<valtype> *cur_val)
3087 {
3088 ipcp_value_source<valtype> *src;
3089
3090 if (cur_val->dfs)
3091 return;
3092
3093 dfs_counter++;
3094 cur_val->dfs = dfs_counter;
3095 cur_val->low_link = dfs_counter;
3096
3097 cur_val->topo_next = stack;
3098 stack = cur_val;
3099 cur_val->on_stack = true;
3100
3101 for (src = cur_val->sources; src; src = src->next)
3102 if (src->val)
3103 {
3104 if (src->val->dfs == 0)
3105 {
3106 add_val (src->val);
3107 if (src->val->low_link < cur_val->low_link)
3108 cur_val->low_link = src->val->low_link;
3109 }
3110 else if (src->val->on_stack
3111 && src->val->dfs < cur_val->low_link)
3112 cur_val->low_link = src->val->dfs;
3113 }
3114
3115 if (cur_val->dfs == cur_val->low_link)
3116 {
3117 ipcp_value<valtype> *v, *scc_list = NULL;
3118
3119 do
3120 {
3121 v = stack;
3122 stack = v->topo_next;
3123 v->on_stack = false;
3124
3125 v->scc_next = scc_list;
3126 scc_list = v;
3127 }
3128 while (v != cur_val);
3129
3130 cur_val->topo_next = values_topo;
3131 values_topo = cur_val;
3132 }
3133 }
3134
3135 /* Add all values in lattices associated with NODE to the topological sort if
3136 they are not there yet. */
3137
3138 static void
3139 add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
3140 {
3141 struct ipa_node_params *info = IPA_NODE_REF (node);
3142 int i, count = ipa_get_param_count (info);
3143
3144 for (i = 0; i < count; i++)
3145 {
3146 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
3147 ipcp_lattice<tree> *lat = &plats->itself;
3148 struct ipcp_agg_lattice *aglat;
3149
3150 if (!lat->bottom)
3151 {
3152 ipcp_value<tree> *val;
3153 for (val = lat->values; val; val = val->next)
3154 topo->constants.add_val (val);
3155 }
3156
3157 if (!plats->aggs_bottom)
3158 for (aglat = plats->aggs; aglat; aglat = aglat->next)
3159 if (!aglat->bottom)
3160 {
3161 ipcp_value<tree> *val;
3162 for (val = aglat->values; val; val = val->next)
3163 topo->constants.add_val (val);
3164 }
3165
3166 ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
3167 if (!ctxlat->bottom)
3168 {
3169 ipcp_value<ipa_polymorphic_call_context> *ctxval;
3170 for (ctxval = ctxlat->values; ctxval; ctxval = ctxval->next)
3171 topo->contexts.add_val (ctxval);
3172 }
3173 }
3174 }
3175
3176 /* One pass of constant propagation along the call graph edges, from callers
3177 to callees (requires topological ordering in TOPO), iterating over strongly
3178 connected components. */
3179
3180 static void
3181 propagate_constants_topo (struct ipa_topo_info *topo)
3182 {
3183 int i;
3184
3185 for (i = topo->nnodes - 1; i >= 0; i--)
3186 {
3187 unsigned j;
3188 struct cgraph_node *v, *node = topo->order[i];
3189 vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (node);
3190
3191 /* First, iteratively propagate within the strongly connected component
3192 until all lattices stabilize. */
3193 FOR_EACH_VEC_ELT (cycle_nodes, j, v)
3194 if (v->has_gimple_body_p ())
3195 push_node_to_stack (topo, v);
3196
3197 v = pop_node_from_stack (topo);
3198 while (v)
3199 {
3200 struct cgraph_edge *cs;
3201
3202 for (cs = v->callees; cs; cs = cs->next_callee)
3203 if (ipa_edge_within_scc (cs))
3204 {
3205 IPA_NODE_REF (v)->node_within_scc = true;
3206 if (propagate_constants_across_call (cs))
3207 push_node_to_stack (topo, cs->callee->function_symbol ());
3208 }
3209 v = pop_node_from_stack (topo);
3210 }
3211
3212 /* Afterwards, propagate along edges leading out of the SCC, calculate
3213 the local effects of the discovered constants and add all valid values
3214 to their topological sort. */
3215 FOR_EACH_VEC_ELT (cycle_nodes, j, v)
3216 if (v->has_gimple_body_p ())
3217 {
3218 struct cgraph_edge *cs;
3219
3220 estimate_local_effects (v);
3221 add_all_node_vals_to_toposort (v, topo);
3222 for (cs = v->callees; cs; cs = cs->next_callee)
3223 if (!ipa_edge_within_scc (cs))
3224 propagate_constants_across_call (cs);
3225 }
3226 cycle_nodes.release ();
3227 }
3228 }
3229
3230
3231 /* Return the sum of A and B if neither of them is bigger than INT_MAX/2,
3232 otherwise return the bigger of the two. */
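/* For example, safe_add (INT_MAX - 1, 10) returns INT_MAX - 1 instead
   of overflowing.  */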
3233
3234 static int
3235 safe_add (int a, int b)
3236 {
3237 if (a > INT_MAX/2 || b > INT_MAX/2)
3238 return a > b ? a : b;
3239 else
3240 return a + b;
3241 }
3242
3243
3244 /* Propagate the estimated effects of individual values along the topological
3245 order, from the dependent values to those they depend on. */
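/* An illustrative sketch (not part of the pass): if value B of some
   parameter only arises because value A is passed along a call edge
   (A is a source of B), then B's time benefit and size cost are also
   attributed to A below, provided the corresponding call edge is
   maybe hot, since specializing for A is what makes the
   specialization for B possible.  */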
3246
3247 template <typename valtype>
3248 void
3249 value_topo_info<valtype>::propagate_effects ()
3250 {
3251 ipcp_value<valtype> *base;
3252
3253 for (base = values_topo; base; base = base->topo_next)
3254 {
3255 ipcp_value_source<valtype> *src;
3256 ipcp_value<valtype> *val;
3257 int time = 0, size = 0;
3258
3259 for (val = base; val; val = val->scc_next)
3260 {
3261 time = safe_add (time,
3262 val->local_time_benefit + val->prop_time_benefit);
3263 size = safe_add (size, val->local_size_cost + val->prop_size_cost);
3264 }
3265
3266 for (val = base; val; val = val->scc_next)
3267 for (src = val->sources; src; src = src->next)
3268 if (src->val
3269 && src->cs->maybe_hot_p ())
3270 {
3271 src->val->prop_time_benefit = safe_add (time,
3272 src->val->prop_time_benefit);
3273 src->val->prop_size_cost = safe_add (size,
3274 src->val->prop_size_cost);
3275 }
3276 }
3277 }
3278
3279
3280 /* Propagate constants, polymorphic contexts and their effects from the
3281 summaries interprocedurally. */
3282
3283 static void
3284 ipcp_propagate_stage (struct ipa_topo_info *topo)
3285 {
3286 struct cgraph_node *node;
3287
3288 if (dump_file)
3289 fprintf (dump_file, "\n Propagating constants:\n\n");
3290
3291 max_count = profile_count::uninitialized ();
3292
3293 FOR_EACH_DEFINED_FUNCTION (node)
3294 {
3295 struct ipa_node_params *info = IPA_NODE_REF (node);
3296
3297 determine_versionability (node, info);
3298 if (node->has_gimple_body_p ())
3299 {
3300 info->lattices = XCNEWVEC (struct ipcp_param_lattices,
3301 ipa_get_param_count (info));
3302 initialize_node_lattices (node);
3303 }
3304 if (node->definition && !node->alias)
3305 overall_size += ipa_fn_summaries->get (node)->self_size;
3306 max_count = max_count.max (node->count.ipa ());
3307 }
3308
3309 max_new_size = overall_size;
3310 if (max_new_size < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
3311 max_new_size = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
3312 max_new_size += max_new_size * PARAM_VALUE (PARAM_IPCP_UNIT_GROWTH) / 100 + 1;
3313
3314 if (dump_file)
3315 fprintf (dump_file, "\noverall_size: %li, max_new_size: %li\n",
3316 overall_size, max_new_size);
3317
3318 propagate_constants_topo (topo);
3319 if (flag_checking)
3320 ipcp_verify_propagated_values ();
3321 topo->constants.propagate_effects ();
3322 topo->contexts.propagate_effects ();
3323
3324 if (dump_file)
3325 {
3326 fprintf (dump_file, "\nIPA lattices after all propagation:\n");
3327 print_all_lattices (dump_file, (dump_flags & TDF_DETAILS), true);
3328 }
3329 }
3330
3331 /* Discover which indirect outgoing edges of NODE, a new clone with known
3332 KNOWN_CSTS, KNOWN_CONTEXTS and AGGVALS, can be made direct, and make them so. */
3333
3334 static void
3335 ipcp_discover_new_direct_edges (struct cgraph_node *node,
3336 vec<tree> known_csts,
3337 vec<ipa_polymorphic_call_context>
3338 known_contexts,
3339 struct ipa_agg_replacement_value *aggvals)
3340 {
3341 struct cgraph_edge *ie, *next_ie;
3342 bool found = false;
3343
3344 for (ie = node->indirect_calls; ie; ie = next_ie)
3345 {
3346 tree target;
3347 bool speculative;
3348
3349 next_ie = ie->next_callee;
3350 target = ipa_get_indirect_edge_target_1 (ie, known_csts, known_contexts,
3351 vNULL, aggvals, &speculative);
3352 if (target)
3353 {
3354 bool agg_contents = ie->indirect_info->agg_contents;
3355 bool polymorphic = ie->indirect_info->polymorphic;
3356 int param_index = ie->indirect_info->param_index;
3357 struct cgraph_edge *cs = ipa_make_edge_direct_to_target (ie, target,
3358 speculative);
3359 found = true;
3360
3361 if (cs && !agg_contents && !polymorphic)
3362 {
3363 struct ipa_node_params *info = IPA_NODE_REF (node);
3364 int c = ipa_get_controlled_uses (info, param_index);
3365 if (c != IPA_UNDESCRIBED_USE)
3366 {
3367 struct ipa_ref *to_del;
3368
3369 c--;
3370 ipa_set_controlled_uses (info, param_index, c);
3371 if (dump_file && (dump_flags & TDF_DETAILS))
3372 fprintf (dump_file, " controlled uses count of param "
3373 "%i bumped down to %i\n", param_index, c);
3374 if (c == 0
3375 && (to_del = node->find_reference (cs->callee, NULL, 0)))
3376 {
3377 if (dump_file && (dump_flags & TDF_DETAILS))
3378 fprintf (dump_file, " and even removing its "
3379 "cloning-created reference\n");
3380 to_del->remove_reference ();
3381 }
3382 }
3383 }
3384 }
3385 }
3386 /* Turning indirect calls into direct ones improves the overall summary. */
3387 if (found)
3388 ipa_update_overall_fn_summary (node);
3389 }
3390
3391 /* Vectors of pointers which form linked lists of clones of an original cgraph
3392 edge. */
3393
3394 static vec<cgraph_edge *> next_edge_clone;
3395 static vec<cgraph_edge *> prev_edge_clone;
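/* An illustrative sketch (not part of the pass): if edge E with uid 5
   is cloned into E' with uid 9 and then E is cloned again into E'' with
   uid 12, the duplication hook below inserts each clone right after its
   original, so the list rooted at E is E -> E'' -> E', i.e.
   next_edge_clone[5] == E'' and next_edge_clone[12] == E', with
   prev_edge_clone mirroring the links in the other direction.  */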
3396
3397 static inline void
3398 grow_edge_clone_vectors (void)
3399 {
3400 if (next_edge_clone.length ()
3401 <= (unsigned) symtab->edges_max_uid)
3402 next_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
3403 if (prev_edge_clone.length ()
3404 <= (unsigned) symtab->edges_max_uid)
3405 prev_edge_clone.safe_grow_cleared (symtab->edges_max_uid + 1);
3406 }
3407
3408 /* Edge duplication hook to grow the appropriate linked lists in
3409 next_edge_clone and prev_edge_clone. */
3410
3411 static void
3412 ipcp_edge_duplication_hook (struct cgraph_edge *src, struct cgraph_edge *dst,
3413 void *)
3414 {
3415 grow_edge_clone_vectors ();
3416
3417 struct cgraph_edge *old_next = next_edge_clone[src->uid];
3418 if (old_next)
3419 prev_edge_clone[old_next->uid] = dst;
3420 prev_edge_clone[dst->uid] = src;
3421
3422 next_edge_clone[dst->uid] = old_next;
3423 next_edge_clone[src->uid] = dst;
3424 }
3425
3426 /* Hook that is called by cgraph.c when an edge is removed. */
3427
3428 static void
3429 ipcp_edge_removal_hook (struct cgraph_edge *cs, void *)
3430 {
3431 grow_edge_clone_vectors ();
3432
3433 struct cgraph_edge *prev = prev_edge_clone[cs->uid];
3434 struct cgraph_edge *next = next_edge_clone[cs->uid];
3435 if (prev)
3436 next_edge_clone[prev->uid] = next;
3437 if (next)
3438 prev_edge_clone[next->uid] = prev;
3439 }
3440
3441 /* See if NODE is a clone with a known aggregate value at a given OFFSET of a
3442 parameter with the given INDEX. */
3443
3444 static tree
3445 get_clone_agg_value (struct cgraph_node *node, HOST_WIDE_INT offset,
3446 int index)
3447 {
3448 struct ipa_agg_replacement_value *aggval;
3449
3450 aggval = ipa_get_agg_replacements_for_node (node);
3451 while (aggval)
3452 {
3453 if (aggval->offset == offset
3454 && aggval->index == index)
3455 return aggval->value;
3456 aggval = aggval->next;
3457 }
3458 return NULL_TREE;
3459 }
3460
3461 /* Return true if NODE is DEST or its clone for all contexts. */
3462
3463 static bool
3464 same_node_or_its_all_contexts_clone_p (cgraph_node *node, cgraph_node *dest)
3465 {
3466 if (node == dest)
3467 return true;
3468
3469 struct ipa_node_params *info = IPA_NODE_REF (node);
3470 return info->is_all_contexts_clone && info->ipcp_orig_node == dest;
3471 }
3472
3473 /* Return true if edge CS brings the value described by SRC to node DEST or
3474 its clone for all contexts. */
3475
3476 static bool
3477 cgraph_edge_brings_value_p (cgraph_edge *cs, ipcp_value_source<tree> *src,
3478 cgraph_node *dest)
3479 {
3480 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
3481 enum availability availability;
3482 cgraph_node *real_dest = cs->callee->function_symbol (&availability);
3483
3484 if (!same_node_or_its_all_contexts_clone_p (real_dest, dest)
3485 || availability <= AVAIL_INTERPOSABLE
3486 || caller_info->node_dead)
3487 return false;
3488 if (!src->val)
3489 return true;
3490
3491 if (caller_info->ipcp_orig_node)
3492 {
3493 tree t;
3494 if (src->offset == -1)
3495 t = caller_info->known_csts[src->index];
3496 else
3497 t = get_clone_agg_value (cs->caller, src->offset, src->index);
3498 return (t != NULL_TREE
3499 && values_equal_for_ipcp_p (src->val->value, t));
3500 }
3501 else
3502 {
3503 struct ipcp_agg_lattice *aglat;
3504 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
3505 src->index);
3506 if (src->offset == -1)
3507 return (plats->itself.is_single_const ()
3508 && values_equal_for_ipcp_p (src->val->value,
3509 plats->itself.values->value));
3510 else
3511 {
3512 if (plats->aggs_bottom || plats->aggs_contain_variable)
3513 return false;
3514 for (aglat = plats->aggs; aglat; aglat = aglat->next)
3515 if (aglat->offset == src->offset)
3516 return (aglat->is_single_const ()
3517 && values_equal_for_ipcp_p (src->val->value,
3518 aglat->values->value));
3519 }
3520 return false;
3521 }
3522 }
3523
3524 /* Return true if edge CS brings the value described by SRC to node DEST or
3525 its clone for all contexts. */
3526
3527 static bool
3528 cgraph_edge_brings_value_p (cgraph_edge *cs,
3529 ipcp_value_source<ipa_polymorphic_call_context> *src,
3530 cgraph_node *dest)
3531 {
3532 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
3533 cgraph_node *real_dest = cs->callee->function_symbol ();
3534
3535 if (!same_node_or_its_all_contexts_clone_p (real_dest, dest)
3536 || caller_info->node_dead)
3537 return false;
3538 if (!src->val)
3539 return true;
3540
3541 if (caller_info->ipcp_orig_node)
3542 return (caller_info->known_contexts.length () > (unsigned) src->index)
3543 && values_equal_for_ipcp_p (src->val->value,
3544 caller_info->known_contexts[src->index]);
3545
3546 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
3547 src->index);
3548 return plats->ctxlat.is_single_const ()
3549 && values_equal_for_ipcp_p (src->val->value,
3550 plats->ctxlat.values->value);
3551 }
3552
3553 /* Get the next clone in the linked list of clones of an edge. */
3554
3555 static inline struct cgraph_edge *
3556 get_next_cgraph_edge_clone (struct cgraph_edge *cs)
3557 {
3558 return next_edge_clone[cs->uid];
3559 }
3560
3561 /* Given VAL that is intended for DEST, iterate over all its sources and if
3562 they still hold, add their edge frequencies, profile counts and number into
3563 *FREQ_SUM, *COUNT_SUM and *CALLER_COUNT respectively. */
3564
3565 template <typename valtype>
3566 static bool
3567 get_info_about_necessary_edges (ipcp_value<valtype> *val, cgraph_node *dest,
3568 int *freq_sum,
3569 profile_count *count_sum, int *caller_count)
3570 {
3571 ipcp_value_source<valtype> *src;
3572 int freq = 0, count = 0;
3573 profile_count cnt = profile_count::zero ();
3574 bool hot = false;
3575
3576 for (src = val->sources; src; src = src->next)
3577 {
3578 struct cgraph_edge *cs = src->cs;
3579 while (cs)
3580 {
3581 if (cgraph_edge_brings_value_p (cs, src, dest))
3582 {
3583 count++;
3584 freq += cs->frequency ();
3585 if (cs->count.ipa ().initialized_p ())
3586 cnt += cs->count.ipa ();
3587 hot |= cs->maybe_hot_p ();
3588 }
3589 cs = get_next_cgraph_edge_clone (cs);
3590 }
3591 }
3592
3593 *freq_sum = freq;
3594 *count_sum = cnt;
3595 *caller_count = count;
3596 return hot;
3597 }
3598
3599 /* Return a vector of incoming edges that bring value VAL to node DEST. It
3600 is assumed their number is known and equal to CALLER_COUNT. */
3601
3602 template <typename valtype>
3603 static vec<cgraph_edge *>
3604 gather_edges_for_value (ipcp_value<valtype> *val, cgraph_node *dest,
3605 int caller_count)
3606 {
3607 ipcp_value_source<valtype> *src;
3608 vec<cgraph_edge *> ret;
3609
3610 ret.create (caller_count);
3611 for (src = val->sources; src; src = src->next)
3612 {
3613 struct cgraph_edge *cs = src->cs;
3614 while (cs)
3615 {
3616 if (cgraph_edge_brings_value_p (cs, src, dest))
3617 ret.quick_push (cs);
3618 cs = get_next_cgraph_edge_clone (cs);
3619 }
3620 }
3621
3622 return ret;
3623 }
3624
/* Construct a replacement map for a known VALUE of a formal parameter at
position PARM_NUM. Return it or NULL if for some reason it cannot be
created. */
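/* For instance, a hypothetical sketch: if every caller of
int foo (int a, int b) { return a + b; }
is known to pass 4 as B, the map { parm_num = 1, new_tree = 4 } instructs
the cloning machinery to substitute 4 for B, so the body of the clone can
fold to "return a + 4;". */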
3627
3628 static struct ipa_replace_map *
3629 get_replacement_map (struct ipa_node_params *info, tree value, int parm_num)
3630 {
struct ipa_replace_map *replace_map;

3634 replace_map = ggc_alloc<ipa_replace_map> ();
3635 if (dump_file)
3636 {
3637 fprintf (dump_file, " replacing ");
3638 ipa_dump_param (dump_file, info, parm_num);
3639
3640 fprintf (dump_file, " with const ");
3641 print_generic_expr (dump_file, value);
3642 fprintf (dump_file, "\n");
3643 }
3644 replace_map->old_tree = NULL;
3645 replace_map->parm_num = parm_num;
3646 replace_map->new_tree = value;
3647 replace_map->replace_p = true;
3648 replace_map->ref_p = false;
3649
3650 return replace_map;
3651 }
3652
/* Dump new profiling counts. */
3654
3655 static void
3656 dump_profile_updates (struct cgraph_node *orig_node,
3657 struct cgraph_node *new_node)
3658 {
3659 struct cgraph_edge *cs;
3660
3661 fprintf (dump_file, " setting count of the specialized node to ");
3662 new_node->count.dump (dump_file);
3663 fprintf (dump_file, "\n");
3664 for (cs = new_node->callees; cs; cs = cs->next_callee)
3665 {
3666 fprintf (dump_file, " edge to %s has count ",
3667 cs->callee->name ());
3668 cs->count.dump (dump_file);
3669 fprintf (dump_file, "\n");
3670 }
3671
3672 fprintf (dump_file, " setting count of the original node to ");
3673 orig_node->count.dump (dump_file);
3674 fprintf (dump_file, "\n");
3675 for (cs = orig_node->callees; cs; cs = cs->next_callee)
3676 {
3677 fprintf (dump_file, " edge to %s is left with ",
3678 cs->callee->name ());
3679 cs->count.dump (dump_file);
3680 fprintf (dump_file, "\n");
3681 }
3682 }
3683
3684 /* After a specialized NEW_NODE version of ORIG_NODE has been created, update
3685 their profile information to reflect this. */
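/* A hedged numeric sketch (counts are invented): if ORIG_NODE carried an
IPA count of 1000 and the callers redirected to NEW_NODE sum to 300, then
NEW_NODE receives 300, ORIG_NODE is left with the remainder of 700, and the
callee edges of the two nodes are scaled by 300/1000 and 700/1000
respectively. */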
3686
3687 static void
3688 update_profiling_info (struct cgraph_node *orig_node,
3689 struct cgraph_node *new_node)
3690 {
3691 struct cgraph_edge *cs;
3692 struct caller_statistics stats;
3693 profile_count new_sum, orig_sum;
3694 profile_count remainder, orig_node_count = orig_node->count;
3695
3696 if (!(orig_node_count.ipa () > profile_count::zero ()))
3697 return;
3698
3699 init_caller_stats (&stats);
3700 orig_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
3701 false);
3702 orig_sum = stats.count_sum;
3703 init_caller_stats (&stats);
3704 new_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
3705 false);
3706 new_sum = stats.count_sum;
3707
3708 if (orig_node_count < orig_sum + new_sum)
3709 {
3710 if (dump_file)
3711 {
3712 fprintf (dump_file, " Problem: node %s has too low count ",
3713 orig_node->dump_name ());
3714 orig_node_count.dump (dump_file);
fprintf (dump_file, " while the sum of incoming counts is ");
3716 (orig_sum + new_sum).dump (dump_file);
3717 fprintf (dump_file, "\n");
3718 }
3719
3720 orig_node_count = (orig_sum + new_sum).apply_scale (12, 10);
3721 if (dump_file)
3722 {
3723 fprintf (dump_file, " proceeding by pretending it was ");
3724 orig_node_count.dump (dump_file);
3725 fprintf (dump_file, "\n");
3726 }
3727 }
3728
3729 remainder = orig_node_count.combine_with_ipa_count (orig_node_count.ipa ()
3730 - new_sum.ipa ());
3731 new_sum = orig_node_count.combine_with_ipa_count (new_sum);
3732 orig_node->count = remainder;
3733
3734 for (cs = new_node->callees; cs; cs = cs->next_callee)
3735 cs->count = cs->count.apply_scale (new_sum, orig_node_count);
3736
3737 for (cs = orig_node->callees; cs; cs = cs->next_callee)
3738 cs->count = cs->count.apply_scale (remainder, orig_node_count);
3739
3740 if (dump_file)
3741 dump_profile_updates (orig_node, new_node);
3742 }
3743
3744 /* Update the respective profile of specialized NEW_NODE and the original
3745 ORIG_NODE after additional edges with cumulative count sum REDIRECTED_SUM
3746 have been redirected to the specialized version. */
3747
3748 static void
3749 update_specialized_profile (struct cgraph_node *new_node,
3750 struct cgraph_node *orig_node,
3751 profile_count redirected_sum)
3752 {
3753 struct cgraph_edge *cs;
3754 profile_count new_node_count, orig_node_count = orig_node->count;
3755
3756 if (dump_file)
3757 {
3758 fprintf (dump_file, " the sum of counts of redirected edges is ");
3759 redirected_sum.dump (dump_file);
3760 fprintf (dump_file, "\n");
3761 }
3762 if (!(orig_node_count > profile_count::zero ()))
3763 return;
3764
3765 gcc_assert (orig_node_count >= redirected_sum);
3766
3767 new_node_count = new_node->count;
3768 new_node->count += redirected_sum;
3769 orig_node->count -= redirected_sum;
3770
3771 for (cs = new_node->callees; cs; cs = cs->next_callee)
3772 cs->count += cs->count.apply_scale (redirected_sum, new_node_count);
3773
3774 for (cs = orig_node->callees; cs; cs = cs->next_callee)
3775 {
3776 profile_count dec = cs->count.apply_scale (redirected_sum,
3777 orig_node_count);
3778 cs->count -= dec;
3779 }
3780
3781 if (dump_file)
3782 dump_profile_updates (orig_node, new_node);
3783 }
3784
3785 /* Create a specialized version of NODE with known constants in KNOWN_CSTS,
3786 known contexts in KNOWN_CONTEXTS and known aggregate values in AGGVALS and
3787 redirect all edges in CALLERS to it. */
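/* A small hedged example: for int f (int a, int b, int c) with
known_csts = { 4, NULL, NULL } where C is never used, the clone keeps only
B in its signature (provided the signature may change); A is replaced by
the constant 4 and C is simply dropped. */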
3788
3789 static struct cgraph_node *
3790 create_specialized_node (struct cgraph_node *node,
3791 vec<tree> known_csts,
3792 vec<ipa_polymorphic_call_context> known_contexts,
3793 struct ipa_agg_replacement_value *aggvals,
3794 vec<cgraph_edge *> callers)
3795 {
3796 struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
3797 vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
3798 struct ipa_agg_replacement_value *av;
3799 struct cgraph_node *new_node;
3800 int i, count = ipa_get_param_count (info);
3801 bitmap args_to_skip;
3802
3803 gcc_assert (!info->ipcp_orig_node);
3804
3805 if (node->local.can_change_signature)
3806 {
3807 args_to_skip = BITMAP_GGC_ALLOC ();
3808 for (i = 0; i < count; i++)
3809 {
3810 tree t = known_csts[i];
3811
3812 if (t || !ipa_is_param_used (info, i))
3813 bitmap_set_bit (args_to_skip, i);
3814 }
3815 }
3816 else
3817 {
3818 args_to_skip = NULL;
3819 if (dump_file && (dump_flags & TDF_DETAILS))
3820 fprintf (dump_file, " cannot change function signature\n");
3821 }
3822
3823 for (i = 0; i < count; i++)
3824 {
3825 tree t = known_csts[i];
3826 if (t)
3827 {
3828 struct ipa_replace_map *replace_map;
3829
3830 gcc_checking_assert (TREE_CODE (t) != TREE_BINFO);
3831 replace_map = get_replacement_map (info, t, i);
3832 if (replace_map)
3833 vec_safe_push (replace_trees, replace_map);
3834 }
3835 }
3836
3837 new_node = node->create_virtual_clone (callers, replace_trees,
3838 args_to_skip, "constprop");
3839 ipa_set_node_agg_value_chain (new_node, aggvals);
3840 for (av = aggvals; av; av = av->next)
3841 new_node->maybe_create_reference (av->value, NULL);
3842
3843 if (dump_file && (dump_flags & TDF_DETAILS))
3844 {
3845 fprintf (dump_file, " the new node is %s.\n", new_node->dump_name ());
3846 if (known_contexts.exists ())
3847 {
3848 for (i = 0; i < count; i++)
3849 if (!known_contexts[i].useless_p ())
3850 {
3851 fprintf (dump_file, " known ctx %i is ", i);
3852 known_contexts[i].dump (dump_file);
3853 }
3854 }
3855 if (aggvals)
3856 ipa_dump_agg_replacement_values (dump_file, aggvals);
3857 }
3858 ipa_check_create_node_params ();
3859 update_profiling_info (node, new_node);
3860 new_info = IPA_NODE_REF (new_node);
3861 new_info->ipcp_orig_node = node;
3862 new_info->known_csts = known_csts;
3863 new_info->known_contexts = known_contexts;
3864
3865 ipcp_discover_new_direct_edges (new_node, known_csts, known_contexts, aggvals);
3866
3867 callers.release ();
3868 return new_node;
3869 }
3870
/* Given a NODE and a subset of its CALLERS, try to populate blank slots in
KNOWN_CSTS with constants that are also known for all of the CALLERS. */
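/* E.g. (hypothetical): parameter I may have no globally known constant,
yet if every edge in CALLERS passes 16 for it, the blank slot KNOWN_CSTS[I]
is filled with 16 for the clone under consideration. */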
3873
3874 static void
3875 find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
3876 vec<tree> known_csts,
3877 vec<cgraph_edge *> callers)
3878 {
3879 struct ipa_node_params *info = IPA_NODE_REF (node);
3880 int i, count = ipa_get_param_count (info);
3881
3882 for (i = 0; i < count; i++)
3883 {
3884 struct cgraph_edge *cs;
3885 tree newval = NULL_TREE;
3886 int j;
3887 bool first = true;
3888 tree type = ipa_get_type (info, i);
3889
3890 if (ipa_get_scalar_lat (info, i)->bottom || known_csts[i])
3891 continue;
3892
3893 FOR_EACH_VEC_ELT (callers, j, cs)
3894 {
3895 struct ipa_jump_func *jump_func;
3896 tree t;
3897
3898 if (i >= ipa_get_cs_argument_count (IPA_EDGE_REF (cs))
3899 || (i == 0
3900 && call_passes_through_thunk_p (cs))
3901 || (!cs->callee->instrumentation_clone
3902 && cs->callee->function_symbol ()->instrumentation_clone))
3903 {
3904 newval = NULL_TREE;
3905 break;
3906 }
3907 jump_func = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), i);
3908 t = ipa_value_from_jfunc (IPA_NODE_REF (cs->caller), jump_func, type);
3909 if (!t
3910 || (newval
3911 && !values_equal_for_ipcp_p (t, newval))
3912 || (!first && !newval))
3913 {
3914 newval = NULL_TREE;
3915 break;
3916 }
3917 else
3918 newval = t;
3919 first = false;
3920 }
3921
3922 if (newval)
3923 {
3924 if (dump_file && (dump_flags & TDF_DETAILS))
3925 {
3926 fprintf (dump_file, " adding an extra known scalar value ");
3927 print_ipcp_constant_value (dump_file, newval);
3928 fprintf (dump_file, " for ");
3929 ipa_dump_param (dump_file, info, i);
3930 fprintf (dump_file, "\n");
3931 }
3932
3933 known_csts[i] = newval;
3934 }
3935 }
3936 }
3937
/* Given a NODE and a subset of its CALLERS, try to populate blank slots in
KNOWN_CONTEXTS with polymorphic contexts that are also known for all of the
CALLERS. */
3941
3942 static void
3943 find_more_contexts_for_caller_subset (cgraph_node *node,
3944 vec<ipa_polymorphic_call_context>
3945 *known_contexts,
3946 vec<cgraph_edge *> callers)
3947 {
3948 ipa_node_params *info = IPA_NODE_REF (node);
3949 int i, count = ipa_get_param_count (info);
3950
3951 for (i = 0; i < count; i++)
3952 {
3953 cgraph_edge *cs;
3954
3955 if (ipa_get_poly_ctx_lat (info, i)->bottom
3956 || (known_contexts->exists ()
3957 && !(*known_contexts)[i].useless_p ()))
3958 continue;
3959
3960 ipa_polymorphic_call_context newval;
3961 bool first = true;
3962 int j;
3963
3964 FOR_EACH_VEC_ELT (callers, j, cs)
3965 {
3966 if (i >= ipa_get_cs_argument_count (IPA_EDGE_REF (cs)))
3967 return;
3968 ipa_jump_func *jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs),
3969 i);
3970 ipa_polymorphic_call_context ctx;
3971 ctx = ipa_context_from_jfunc (IPA_NODE_REF (cs->caller), cs, i,
3972 jfunc);
3973 if (first)
3974 {
3975 newval = ctx;
3976 first = false;
3977 }
3978 else
3979 newval.meet_with (ctx);
3980 if (newval.useless_p ())
3981 break;
3982 }
3983
3984 if (!newval.useless_p ())
3985 {
3986 if (dump_file && (dump_flags & TDF_DETAILS))
3987 {
3988 fprintf (dump_file, " adding an extra known polymorphic "
3989 "context ");
3990 print_ipcp_constant_value (dump_file, newval);
3991 fprintf (dump_file, " for ");
3992 ipa_dump_param (dump_file, info, i);
3993 fprintf (dump_file, "\n");
3994 }
3995
3996 if (!known_contexts->exists ())
3997 known_contexts->safe_grow_cleared (ipa_get_param_count (info));
3998 (*known_contexts)[i] = newval;
3999 }
4000
4001 }
4002 }
4003
/* Go through PLATS and create a vector of items consisting of the values
and offsets (less OFFSET) of the aggregate lattices that contain only a
single value. */
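/* E.g. (made-up numbers): with OFFSET 32, a single-constant lattice
holding 7 at offset 96 yields the item { offset = 64, value = 7 }. */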
4006
4007 static vec<ipa_agg_jf_item>
4008 copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
4009 {
4010 vec<ipa_agg_jf_item> res = vNULL;
4011
4012 if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
4013 return vNULL;
4014
4015 for (struct ipcp_agg_lattice *aglat = plats->aggs; aglat; aglat = aglat->next)
4016 if (aglat->is_single_const ())
4017 {
4018 struct ipa_agg_jf_item ti;
4019 ti.offset = aglat->offset - offset;
4020 ti.value = aglat->values->value;
4021 res.safe_push (ti);
4022 }
4023 return res;
4024 }
4025
4026 /* Intersect all values in INTER with single value lattices in PLATS (while
4027 subtracting OFFSET). */
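/* An illustrative sketch with invented numbers: with OFFSET 32, an INTER
item at offset 64 with value 7 survives only if a lattice entry at offset
96 holds the single equal constant 7; items with no matching lattice entry,
or whose values differ, have their value cleared to NULL_TREE. */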
4028
4029 static void
4030 intersect_with_plats (struct ipcp_param_lattices *plats,
4031 vec<ipa_agg_jf_item> *inter,
4032 HOST_WIDE_INT offset)
4033 {
4034 struct ipcp_agg_lattice *aglat;
4035 struct ipa_agg_jf_item *item;
4036 int k;
4037
4038 if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
4039 {
4040 inter->release ();
4041 return;
4042 }
4043
4044 aglat = plats->aggs;
4045 FOR_EACH_VEC_ELT (*inter, k, item)
4046 {
4047 bool found = false;
4048 if (!item->value)
4049 continue;
4050 while (aglat)
4051 {
4052 if (aglat->offset - offset > item->offset)
4053 break;
4054 if (aglat->offset - offset == item->offset)
4055 {
4056 gcc_checking_assert (item->value);
4057 if (values_equal_for_ipcp_p (item->value, aglat->values->value))
4058 found = true;
4059 break;
4060 }
4061 aglat = aglat->next;
4062 }
4063 if (!found)
4064 item->value = NULL_TREE;
4065 }
4066 }
4067
4068 /* Copy aggregate replacement values of NODE (which is an IPA-CP clone) to the
4069 vector result while subtracting OFFSET from the individual value offsets. */
4070
4071 static vec<ipa_agg_jf_item>
4072 agg_replacements_to_vector (struct cgraph_node *node, int index,
4073 HOST_WIDE_INT offset)
4074 {
4075 struct ipa_agg_replacement_value *av;
4076 vec<ipa_agg_jf_item> res = vNULL;
4077
4078 for (av = ipa_get_agg_replacements_for_node (node); av; av = av->next)
4079 if (av->index == index
4080 && (av->offset - offset) >= 0)
4081 {
4082 struct ipa_agg_jf_item item;
4083 gcc_checking_assert (av->value);
4084 item.offset = av->offset - offset;
4085 item.value = av->value;
4086 res.safe_push (item);
4087 }
4088
4089 return res;
4090 }
4091
4092 /* Intersect all values in INTER with those that we have already scheduled to
4093 be replaced in parameter number INDEX of NODE, which is an IPA-CP clone
4094 (while subtracting OFFSET). */
4095
4096 static void
4097 intersect_with_agg_replacements (struct cgraph_node *node, int index,
4098 vec<ipa_agg_jf_item> *inter,
4099 HOST_WIDE_INT offset)
4100 {
4101 struct ipa_agg_replacement_value *srcvals;
4102 struct ipa_agg_jf_item *item;
4103 int i;
4104
4105 srcvals = ipa_get_agg_replacements_for_node (node);
4106 if (!srcvals)
4107 {
4108 inter->release ();
4109 return;
4110 }
4111
4112 FOR_EACH_VEC_ELT (*inter, i, item)
4113 {
4114 struct ipa_agg_replacement_value *av;
4115 bool found = false;
4116 if (!item->value)
4117 continue;
4118 for (av = srcvals; av; av = av->next)
4119 {
4120 gcc_checking_assert (av->value);
4121 if (av->index == index
4122 && av->offset - offset == item->offset)
4123 {
4124 if (values_equal_for_ipcp_p (item->value, av->value))
4125 found = true;
4126 break;
4127 }
4128 }
4129 if (!found)
4130 item->value = NULL_TREE;
4131 }
4132 }
4133
4134 /* Intersect values in INTER with aggregate values that come along edge CS to
4135 parameter number INDEX and return it. If INTER does not actually exist yet,
4136 copy all incoming values to it. If we determine we ended up with no values
4137 whatsoever, return a released vector. */
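/* For the ancestor case handled below, offsets shift by the ancestor
offset: e.g. (hypothetical numbers) if the caller passes &s->inner and the
jump function records an offset of 64, a value known at offset 96 of S
shows up at offset 32 of the callee's parameter. */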
4138
4139 static vec<ipa_agg_jf_item>
4140 intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
4141 vec<ipa_agg_jf_item> inter)
4142 {
4143 struct ipa_jump_func *jfunc;
4144 jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), index);
4145 if (jfunc->type == IPA_JF_PASS_THROUGH
4146 && ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
4147 {
4148 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
4149 int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
4150
4151 if (caller_info->ipcp_orig_node)
4152 {
4153 struct cgraph_node *orig_node = caller_info->ipcp_orig_node;
4154 struct ipcp_param_lattices *orig_plats;
4155 orig_plats = ipa_get_parm_lattices (IPA_NODE_REF (orig_node),
4156 src_idx);
4157 if (agg_pass_through_permissible_p (orig_plats, jfunc))
4158 {
4159 if (!inter.exists ())
4160 inter = agg_replacements_to_vector (cs->caller, src_idx, 0);
4161 else
4162 intersect_with_agg_replacements (cs->caller, src_idx,
4163 &inter, 0);
4164 }
4165 else
4166 {
4167 inter.release ();
4168 return vNULL;
4169 }
4170 }
4171 else
4172 {
4173 struct ipcp_param_lattices *src_plats;
4174 src_plats = ipa_get_parm_lattices (caller_info, src_idx);
4175 if (agg_pass_through_permissible_p (src_plats, jfunc))
4176 {
4177 /* Currently we do not produce clobber aggregate jump
4178 functions, adjust when we do. */
4179 gcc_checking_assert (!jfunc->agg.items);
4180 if (!inter.exists ())
4181 inter = copy_plats_to_inter (src_plats, 0);
4182 else
4183 intersect_with_plats (src_plats, &inter, 0);
4184 }
4185 else
4186 {
4187 inter.release ();
4188 return vNULL;
4189 }
4190 }
4191 }
4192 else if (jfunc->type == IPA_JF_ANCESTOR
4193 && ipa_get_jf_ancestor_agg_preserved (jfunc))
4194 {
4195 struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
4196 int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
4197 struct ipcp_param_lattices *src_plats;
4198 HOST_WIDE_INT delta = ipa_get_jf_ancestor_offset (jfunc);
4199
4200 if (caller_info->ipcp_orig_node)
4201 {
4202 if (!inter.exists ())
4203 inter = agg_replacements_to_vector (cs->caller, src_idx, delta);
4204 else
4205 intersect_with_agg_replacements (cs->caller, src_idx, &inter,
4206 delta);
4207 }
4208 else
4209 {
4210 src_plats = ipa_get_parm_lattices (caller_info, src_idx);
4211 /* Currently we do not produce clobber aggregate jump
4212 functions, adjust when we do. */
4213 gcc_checking_assert (!src_plats->aggs || !jfunc->agg.items);
4214 if (!inter.exists ())
4215 inter = copy_plats_to_inter (src_plats, delta);
4216 else
4217 intersect_with_plats (src_plats, &inter, delta);
4218 }
4219 }
4220 else if (jfunc->agg.items)
4221 {
4222 struct ipa_agg_jf_item *item;
4223 int k;
4224
4225 if (!inter.exists ())
4226 for (unsigned i = 0; i < jfunc->agg.items->length (); i++)
4227 inter.safe_push ((*jfunc->agg.items)[i]);
4228 else
4229 FOR_EACH_VEC_ELT (inter, k, item)
4230 {
4231 int l = 0;
4232 bool found = false;
4233
4234 if (!item->value)
4235 continue;
4236
4237 while ((unsigned) l < jfunc->agg.items->length ())
4238 {
4239 struct ipa_agg_jf_item *ti;
4240 ti = &(*jfunc->agg.items)[l];
4241 if (ti->offset > item->offset)
4242 break;
4243 if (ti->offset == item->offset)
4244 {
4245 gcc_checking_assert (ti->value);
4246 if (values_equal_for_ipcp_p (item->value,
4247 ti->value))
4248 found = true;
4249 break;
4250 }
4251 l++;
4252 }
4253 if (!found)
4254 item->value = NULL;
4255 }
4256 }
4257 else
4258 {
4259 inter.release ();
4260 return vec<ipa_agg_jf_item>();
4261 }
4262 return inter;
4263 }
4264
4265 /* Look at edges in CALLERS and collect all known aggregate values that arrive
4266 from all of them. */
4267
4268 static struct ipa_agg_replacement_value *
4269 find_aggregate_values_for_callers_subset (struct cgraph_node *node,
4270 vec<cgraph_edge *> callers)
4271 {
4272 struct ipa_node_params *dest_info = IPA_NODE_REF (node);
4273 struct ipa_agg_replacement_value *res;
4274 struct ipa_agg_replacement_value **tail = &res;
4275 struct cgraph_edge *cs;
4276 int i, j, count = ipa_get_param_count (dest_info);
4277
4278 FOR_EACH_VEC_ELT (callers, j, cs)
4279 {
4280 int c = ipa_get_cs_argument_count (IPA_EDGE_REF (cs));
4281 if (c < count)
4282 count = c;
4283 }
4284
4285 for (i = 0; i < count; i++)
4286 {
4287 struct cgraph_edge *cs;
4288 vec<ipa_agg_jf_item> inter = vNULL;
4289 struct ipa_agg_jf_item *item;
4290 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
4291 int j;
4292
4293 /* Among other things, the following check should deal with all by_ref
4294 mismatches. */
4295 if (plats->aggs_bottom)
4296 continue;
4297
4298 FOR_EACH_VEC_ELT (callers, j, cs)
4299 {
4300 inter = intersect_aggregates_with_edge (cs, i, inter);
4301
4302 if (!inter.exists ())
4303 goto next_param;
4304 }
4305
4306 FOR_EACH_VEC_ELT (inter, j, item)
4307 {
4308 struct ipa_agg_replacement_value *v;
4309
4310 if (!item->value)
4311 continue;
4312
4313 v = ggc_alloc<ipa_agg_replacement_value> ();
4314 v->index = i;
4315 v->offset = item->offset;
4316 v->value = item->value;
4317 v->by_ref = plats->aggs_by_ref;
4318 *tail = v;
4319 tail = &v->next;
4320 }
4321
4322 next_param:
4323 if (inter.exists ())
4324 inter.release ();
4325 }
4326 *tail = NULL;
4327 return res;
4328 }
4329
4330 /* Turn KNOWN_AGGS into a list of aggregate replacement values. */
4331
4332 static struct ipa_agg_replacement_value *
4333 known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function> known_aggs)
4334 {
4335 struct ipa_agg_replacement_value *res;
4336 struct ipa_agg_replacement_value **tail = &res;
4337 struct ipa_agg_jump_function *aggjf;
4338 struct ipa_agg_jf_item *item;
4339 int i, j;
4340
4341 FOR_EACH_VEC_ELT (known_aggs, i, aggjf)
4342 FOR_EACH_VEC_SAFE_ELT (aggjf->items, j, item)
4343 {
4344 struct ipa_agg_replacement_value *v;
4345 v = ggc_alloc<ipa_agg_replacement_value> ();
4346 v->index = i;
4347 v->offset = item->offset;
4348 v->value = item->value;
4349 v->by_ref = aggjf->by_ref;
4350 *tail = v;
4351 tail = &v->next;
4352 }
4353 *tail = NULL;
4354 return res;
4355 }
4356
4357 /* Determine whether CS also brings all scalar values that the NODE is
4358 specialized for. */
4359
4360 static bool
4361 cgraph_edge_brings_all_scalars_for_node (struct cgraph_edge *cs,
4362 struct cgraph_node *node)
4363 {
4364 struct ipa_node_params *dest_info = IPA_NODE_REF (node);
4365 int count = ipa_get_param_count (dest_info);
4366 struct ipa_node_params *caller_info;
4367 struct ipa_edge_args *args;
4368 int i;
4369
4370 caller_info = IPA_NODE_REF (cs->caller);
4371 args = IPA_EDGE_REF (cs);
4372 for (i = 0; i < count; i++)
4373 {
4374 struct ipa_jump_func *jump_func;
4375 tree val, t;
4376
4377 val = dest_info->known_csts[i];
4378 if (!val)
4379 continue;
4380
4381 if (i >= ipa_get_cs_argument_count (args))
4382 return false;
4383 jump_func = ipa_get_ith_jump_func (args, i);
4384 t = ipa_value_from_jfunc (caller_info, jump_func,
4385 ipa_get_type (dest_info, i));
4386 if (!t || !values_equal_for_ipcp_p (val, t))
4387 return false;
4388 }
4389 return true;
4390 }
4391
4392 /* Determine whether CS also brings all aggregate values that NODE is
4393 specialized for. */
4394 static bool
4395 cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
4396 struct cgraph_node *node)
4397 {
4398 struct ipa_node_params *orig_caller_info = IPA_NODE_REF (cs->caller);
4399 struct ipa_node_params *orig_node_info;
4400 struct ipa_agg_replacement_value *aggval;
4401 int i, ec, count;
4402
4403 aggval = ipa_get_agg_replacements_for_node (node);
4404 if (!aggval)
4405 return true;
4406
4407 count = ipa_get_param_count (IPA_NODE_REF (node));
4408 ec = ipa_get_cs_argument_count (IPA_EDGE_REF (cs));
4409 if (ec < count)
4410 for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (av->index >= ec)
4412 return false;
4413
4414 orig_node_info = IPA_NODE_REF (IPA_NODE_REF (node)->ipcp_orig_node);
4415 if (orig_caller_info->ipcp_orig_node)
4416 orig_caller_info = IPA_NODE_REF (orig_caller_info->ipcp_orig_node);
4417
4418 for (i = 0; i < count; i++)
4419 {
4420 static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
4421 struct ipcp_param_lattices *plats;
4422 bool interesting = false;
4423 for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (av->index == i)
4425 {
4426 interesting = true;
4427 break;
4428 }
4429 if (!interesting)
4430 continue;
4431
plats = ipa_get_parm_lattices (orig_node_info, i);
4433 if (plats->aggs_bottom)
4434 return false;
4435
4436 values = intersect_aggregates_with_edge (cs, i, values);
4437 if (!values.exists ())
4438 return false;
4439
4440 for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (av->index == i)
4442 {
4443 struct ipa_agg_jf_item *item;
4444 int j;
4445 bool found = false;
4446 FOR_EACH_VEC_ELT (values, j, item)
4447 if (item->value
4448 && item->offset == av->offset
4449 && values_equal_for_ipcp_p (item->value, av->value))
4450 {
4451 found = true;
4452 break;
4453 }
4454 if (!found)
4455 {
4456 values.release ();
4457 return false;
4458 }
4459 }
4460 }
4461 return true;
4462 }
4463
/* Given an original NODE and a VAL for which we have already created a
specialized clone, look whether there are incoming edges that still lead
into the old node but now also bring the requested value and also conform
to all other criteria, so that they can be redirected to the specialized
node. This function can therefore redirect the final edge in an SCC. */
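/* E.g. with a self-recursive NODE: only after all external callers that
pass the value have been redirected to the clone does the recursive edge
itself start bringing the value, at which point this function can redirect
it as well and leave the original node without relevant callers. */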
4469
4470 template <typename valtype>
4471 static void
4472 perhaps_add_new_callers (cgraph_node *node, ipcp_value<valtype> *val)
4473 {
4474 ipcp_value_source<valtype> *src;
4475 profile_count redirected_sum = profile_count::zero ();
4476
4477 for (src = val->sources; src; src = src->next)
4478 {
4479 struct cgraph_edge *cs = src->cs;
4480 while (cs)
4481 {
4482 if (cgraph_edge_brings_value_p (cs, src, node)
4483 && cgraph_edge_brings_all_scalars_for_node (cs, val->spec_node)
4484 && cgraph_edge_brings_all_agg_vals_for_node (cs, val->spec_node))
4485 {
4486 if (dump_file)
4487 fprintf (dump_file, " - adding an extra caller %s of %s\n",
4488 cs->caller->dump_name (),
4489 val->spec_node->dump_name ());
4490
4491 cs->redirect_callee_duplicating_thunks (val->spec_node);
4492 val->spec_node->expand_all_artificial_thunks ();
4493 if (cs->count.ipa ().initialized_p ())
4494 redirected_sum = redirected_sum + cs->count.ipa ();
4495 }
4496 cs = get_next_cgraph_edge_clone (cs);
4497 }
4498 }
4499
4500 if (redirected_sum.nonzero_p ())
4501 update_specialized_profile (val->spec_node, node, redirected_sum);
4502 }
4503
4504 /* Return true if KNOWN_CONTEXTS contain at least one useful context. */
4505
4506 static bool
4507 known_contexts_useful_p (vec<ipa_polymorphic_call_context> known_contexts)
4508 {
4509 ipa_polymorphic_call_context *ctx;
4510 int i;
4511
4512 FOR_EACH_VEC_ELT (known_contexts, i, ctx)
4513 if (!ctx->useless_p ())
4514 return true;
4515 return false;
4516 }
4517
/* Return a copy of KNOWN_CONTEXTS if it contains at least one useful
context, otherwise return vNULL. */
4519
4520 static vec<ipa_polymorphic_call_context>
4521 copy_useful_known_contexts (vec<ipa_polymorphic_call_context> known_contexts)
4522 {
4523 if (known_contexts_useful_p (known_contexts))
4524 return known_contexts.copy ();
4525 else
4526 return vNULL;
4527 }
4528
4529 /* Copy KNOWN_CSTS and modify the copy according to VAL and INDEX. If
4530 non-empty, replace KNOWN_CONTEXTS with its copy too. */
4531
4532 static void
4533 modify_known_vectors_with_val (vec<tree> *known_csts,
4534 vec<ipa_polymorphic_call_context> *known_contexts,
4535 ipcp_value<tree> *val,
4536 int index)
4537 {
4538 *known_csts = known_csts->copy ();
4539 *known_contexts = copy_useful_known_contexts (*known_contexts);
4540 (*known_csts)[index] = val->value;
4541 }
4542
4543 /* Replace KNOWN_CSTS with its copy. Also copy KNOWN_CONTEXTS and modify the
4544 copy according to VAL and INDEX. */
4545
4546 static void
4547 modify_known_vectors_with_val (vec<tree> *known_csts,
4548 vec<ipa_polymorphic_call_context> *known_contexts,
4549 ipcp_value<ipa_polymorphic_call_context> *val,
4550 int index)
4551 {
4552 *known_csts = known_csts->copy ();
4553 *known_contexts = known_contexts->copy ();
4554 (*known_contexts)[index] = val->value;
4555 }
4556
4557 /* Return true if OFFSET indicates this was not an aggregate value or there is
4558 a replacement equivalent to VALUE, INDEX and OFFSET among those in the
4559 AGGVALS list. */
4560
4561 DEBUG_FUNCTION bool
4562 ipcp_val_agg_replacement_ok_p (ipa_agg_replacement_value *aggvals,
4563 int index, HOST_WIDE_INT offset, tree value)
4564 {
4565 if (offset == -1)
4566 return true;
4567
4568 while (aggvals)
4569 {
4570 if (aggvals->index == index
4571 && aggvals->offset == offset
4572 && values_equal_for_ipcp_p (aggvals->value, value))
4573 return true;
4574 aggvals = aggvals->next;
4575 }
4576 return false;
4577 }
4578
/* Return true iff OFFSET is minus one, because the source of a polymorphic
call context cannot be an aggregate value. */
4581
4582 DEBUG_FUNCTION bool
4583 ipcp_val_agg_replacement_ok_p (ipa_agg_replacement_value *,
int, HOST_WIDE_INT offset,
4585 ipa_polymorphic_call_context)
4586 {
4587 return offset == -1;
4588 }
4589
/* Decide whether to create a special version of NODE for value VAL of parameter
4591 at the given INDEX. If OFFSET is -1, the value is for the parameter itself,
4592 otherwise it is stored at the given OFFSET of the parameter. KNOWN_CSTS,
4593 KNOWN_CONTEXTS and KNOWN_AGGS describe the other already known values. */
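/* A worked size check mirroring the test below (numbers are invented): if
overall_size is 900, val->local_size_cost is 200 and max_new_size is 1000,
the candidate is rejected because 900 + 200 exceeds 1000. */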
4594
4595 template <typename valtype>
4596 static bool
4597 decide_about_value (struct cgraph_node *node, int index, HOST_WIDE_INT offset,
4598 ipcp_value<valtype> *val, vec<tree> known_csts,
4599 vec<ipa_polymorphic_call_context> known_contexts)
4600 {
4601 struct ipa_agg_replacement_value *aggvals;
4602 int freq_sum, caller_count;
4603 profile_count count_sum;
4604 vec<cgraph_edge *> callers;
4605
4606 if (val->spec_node)
4607 {
4608 perhaps_add_new_callers (node, val);
4609 return false;
4610 }
4611 else if (val->local_size_cost + overall_size > max_new_size)
4612 {
4613 if (dump_file && (dump_flags & TDF_DETAILS))
4614 fprintf (dump_file, " Ignoring candidate value because "
4615 "max_new_size would be reached with %li.\n",
4616 val->local_size_cost + overall_size);
4617 return false;
4618 }
4619 else if (!get_info_about_necessary_edges (val, node, &freq_sum, &count_sum,
4620 &caller_count))
4621 return false;
4622
4623 if (dump_file && (dump_flags & TDF_DETAILS))
4624 {
4625 fprintf (dump_file, " - considering value ");
4626 print_ipcp_constant_value (dump_file, val->value);
4627 fprintf (dump_file, " for ");
4628 ipa_dump_param (dump_file, IPA_NODE_REF (node), index);
4629 if (offset != -1)
4630 fprintf (dump_file, ", offset: " HOST_WIDE_INT_PRINT_DEC, offset);
4631 fprintf (dump_file, " (caller_count: %i)\n", caller_count);
4632 }
4633
4634 if (!good_cloning_opportunity_p (node, val->local_time_benefit,
4635 freq_sum, count_sum,
4636 val->local_size_cost)
4637 && !good_cloning_opportunity_p (node,
4638 val->local_time_benefit
4639 + val->prop_time_benefit,
4640 freq_sum, count_sum,
4641 val->local_size_cost
4642 + val->prop_size_cost))
4643 return false;
4644
4645 if (dump_file)
4646 fprintf (dump_file, " Creating a specialized node of %s.\n",
4647 node->dump_name ());
4648
4649 callers = gather_edges_for_value (val, node, caller_count);
4650 if (offset == -1)
4651 modify_known_vectors_with_val (&known_csts, &known_contexts, val, index);
4652 else
4653 {
4654 known_csts = known_csts.copy ();
4655 known_contexts = copy_useful_known_contexts (known_contexts);
4656 }
4657 find_more_scalar_values_for_callers_subset (node, known_csts, callers);
4658 find_more_contexts_for_caller_subset (node, &known_contexts, callers);
4659 aggvals = find_aggregate_values_for_callers_subset (node, callers);
4660 gcc_checking_assert (ipcp_val_agg_replacement_ok_p (aggvals, index,
4661 offset, val->value));
4662 val->spec_node = create_specialized_node (node, known_csts, known_contexts,
4663 aggvals, callers);
4664 overall_size += val->local_size_cost;
4665
4666 /* TODO: If for some lattice there is only one other known value
4667 left, make a special node for it too. */
4668
4669 return true;
4670 }
4671
4672 /* Decide whether and what specialized clones of NODE should be created. */
4673
4674 static bool
4675 decide_whether_version_node (struct cgraph_node *node)
4676 {
4677 struct ipa_node_params *info = IPA_NODE_REF (node);
4678 int i, count = ipa_get_param_count (info);
4679 vec<tree> known_csts;
4680 vec<ipa_polymorphic_call_context> known_contexts;
4681 vec<ipa_agg_jump_function> known_aggs = vNULL;
4682 bool ret = false;
4683
4684 if (count == 0)
4685 return false;
4686
4687 if (dump_file && (dump_flags & TDF_DETAILS))
4688 fprintf (dump_file, "\nEvaluating opportunities for %s.\n",
4689 node->dump_name ());
4690
4691 gather_context_independent_values (info, &known_csts, &known_contexts,
4692 info->do_clone_for_all_contexts ? &known_aggs
4693 : NULL, NULL);
4694
for (i = 0; i < count; i++)
4696 {
4697 struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
4698 ipcp_lattice<tree> *lat = &plats->itself;
4699 ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
4700
4701 if (!lat->bottom
4702 && !known_csts[i])
4703 {
4704 ipcp_value<tree> *val;
4705 for (val = lat->values; val; val = val->next)
4706 ret |= decide_about_value (node, i, -1, val, known_csts,
4707 known_contexts);
4708 }
4709
4710 if (!plats->aggs_bottom)
4711 {
4712 struct ipcp_agg_lattice *aglat;
4713 ipcp_value<tree> *val;
4714 for (aglat = plats->aggs; aglat; aglat = aglat->next)
4715 if (!aglat->bottom && aglat->values
4716 /* If the following is false, the one value is in
4717 known_aggs. */
4718 && (plats->aggs_contain_variable
4719 || !aglat->is_single_const ()))
4720 for (val = aglat->values; val; val = val->next)
4721 ret |= decide_about_value (node, i, aglat->offset, val,
4722 known_csts, known_contexts);
4723 }
4724
4725 if (!ctxlat->bottom
4726 && known_contexts[i].useless_p ())
4727 {
4728 ipcp_value<ipa_polymorphic_call_context> *val;
4729 for (val = ctxlat->values; val; val = val->next)
4730 ret |= decide_about_value (node, i, -1, val, known_csts,
4731 known_contexts);
4732 }
4733
4734 info = IPA_NODE_REF (node);
4735 }
4736
4737 if (info->do_clone_for_all_contexts)
4738 {
4739 struct cgraph_node *clone;
4740 vec<cgraph_edge *> callers;
4741
4742 if (dump_file)
4743 fprintf (dump_file, " - Creating a specialized node of %s "
4744 "for all known contexts.\n", node->dump_name ());
4745
4746 callers = node->collect_callers ();
4747
4748 if (!known_contexts_useful_p (known_contexts))
4749 {
4750 known_contexts.release ();
4751 known_contexts = vNULL;
4752 }
4753 clone = create_specialized_node (node, known_csts, known_contexts,
4754 known_aggs_to_agg_replacement_list (known_aggs),
4755 callers);
4756 info = IPA_NODE_REF (node);
4757 info->do_clone_for_all_contexts = false;
4758 IPA_NODE_REF (clone)->is_all_contexts_clone = true;
4759 for (i = 0; i < count; i++)
4760 vec_free (known_aggs[i].items);
4761 known_aggs.release ();
4762 ret = true;
4763 }
4764 else
4765 {
4766 known_csts.release ();
4767 known_contexts.release ();
4768 }
4769
4770 return ret;
4771 }
4772
4773 /* Transitively mark all callees of NODE within the same SCC as not dead. */
4774
4775 static void
4776 spread_undeadness (struct cgraph_node *node)
4777 {
4778 struct cgraph_edge *cs;
4779
4780 for (cs = node->callees; cs; cs = cs->next_callee)
4781 if (ipa_edge_within_scc (cs))
4782 {
4783 struct cgraph_node *callee;
4784 struct ipa_node_params *info;
4785
4786 callee = cs->callee->function_symbol (NULL);
4787 info = IPA_NODE_REF (callee);
4788
4789 if (info->node_dead)
4790 {
4791 info->node_dead = 0;
4792 spread_undeadness (callee);
4793 }
4794 }
4795 }
4796
4797 /* Return true if NODE has a caller from outside of its SCC that is not
4798 dead. Worker callback for cgraph_for_node_and_aliases. */
4799
4800 static bool
4801 has_undead_caller_from_outside_scc_p (struct cgraph_node *node,
4802 void *data ATTRIBUTE_UNUSED)
4803 {
4804 struct cgraph_edge *cs;
4805
4806 for (cs = node->callers; cs; cs = cs->next_caller)
4807 if (cs->caller->thunk.thunk_p
4808 && cs->caller->call_for_symbol_thunks_and_aliases
4809 (has_undead_caller_from_outside_scc_p, NULL, true))
4810 return true;
4811 else if (!ipa_edge_within_scc (cs)
4812 && !IPA_NODE_REF (cs->caller)->node_dead)
4813 return true;
4814 return false;
4815 }
4816
4817
4818 /* Identify nodes within the same SCC as NODE which are no longer needed
4819 because of new clones and will be removed as unreachable. */
4820
4821 static void
4822 identify_dead_nodes (struct cgraph_node *node)
4823 {
4824 struct cgraph_node *v;
4825 for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
4826 if (v->local.local
4827 && !v->call_for_symbol_thunks_and_aliases
4828 (has_undead_caller_from_outside_scc_p, NULL, true))
4829 IPA_NODE_REF (v)->node_dead = 1;
4830
4831 for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
4832 if (!IPA_NODE_REF (v)->node_dead)
4833 spread_undeadness (v);
4834
4835 if (dump_file && (dump_flags & TDF_DETAILS))
4836 {
4837 for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
4838 if (IPA_NODE_REF (v)->node_dead)
4839 fprintf (dump_file, " Marking node as dead: %s.\n", v->dump_name ());
4840 }
4841 }
4842
4843 /* The decision stage. Iterate over the topological order of call graph nodes
4844 TOPO and make specialized clones if deemed beneficial. */
4845
4846 static void
4847 ipcp_decision_stage (struct ipa_topo_info *topo)
4848 {
4849 int i;
4850
4851 if (dump_file)
4852 fprintf (dump_file, "\nIPA decision stage:\n\n");
4853
4854 for (i = topo->nnodes - 1; i >= 0; i--)
4855 {
4856 struct cgraph_node *node = topo->order[i];
4857 bool change = false, iterate = true;
4858
4859 while (iterate)
4860 {
4861 struct cgraph_node *v;
4862 iterate = false;
4863 for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
4864 if (v->has_gimple_body_p ()
4865 && ipcp_versionable_function_p (v))
4866 iterate |= decide_whether_version_node (v);
4867
4868 change |= iterate;
4869 }
4870 if (change)
4871 identify_dead_nodes (node);
4872 }
4873 }
4874
4875 /* Look up all the bits information that we have discovered and copy it over
4876 to the transformation summary. */
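/* A small illustration (values invented): a stored pair value == 0x4,
mask == 0x3 means all bits except the two least significant ones are known
and match 0x4, i.e. the parameter is one of 4, 5, 6 or 7. */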
4877
4878 static void
4879 ipcp_store_bits_results (void)
4880 {
4881 cgraph_node *node;
4882
4883 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
4884 {
4885 ipa_node_params *info = IPA_NODE_REF (node);
4886 bool dumped_sth = false;
4887 bool found_useful_result = false;
4888
4889 if (!opt_for_fn (node->decl, flag_ipa_bit_cp))
4890 {
4891 if (dump_file)
fprintf (dump_file, "Not considering %s for ipa bitwise propagation"
"; -fipa-bit-cp: disabled.\n",
4894 node->name ());
4895 continue;
4896 }
4897
4898 if (info->ipcp_orig_node)
4899 info = IPA_NODE_REF (info->ipcp_orig_node);
4900
4901 unsigned count = ipa_get_param_count (info);
4902 for (unsigned i = 0; i < count; i++)
4903 {
4904 ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
4905 if (plats->bits_lattice.constant_p ())
4906 {
4907 found_useful_result = true;
4908 break;
4909 }
4910 }
4911
4912 if (!found_useful_result)
4913 continue;
4914
4915 ipcp_grow_transformations_if_necessary ();
4916 ipcp_transformation_summary *ts = ipcp_get_transformation_summary (node);
4917 vec_safe_reserve_exact (ts->bits, count);
4918
4919 for (unsigned i = 0; i < count; i++)
4920 {
4921 ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
4922 ipa_bits *jfbits;
4923
4924 if (plats->bits_lattice.constant_p ())
4925 jfbits
4926 = ipa_get_ipa_bits_for_value (plats->bits_lattice.get_value (),
4927 plats->bits_lattice.get_mask ());
4928 else
4929 jfbits = NULL;
4930
4931 ts->bits->quick_push (jfbits);
4932 if (!dump_file || !jfbits)
4933 continue;
4934 if (!dumped_sth)
4935 {
4936 fprintf (dump_file, "Propagated bits info for function %s:\n",
4937 node->dump_name ());
4938 dumped_sth = true;
4939 }
4940 fprintf (dump_file, " param %i: value = ", i);
4941 print_hex (jfbits->value, dump_file);
4942 fprintf (dump_file, ", mask = ");
4943 print_hex (jfbits->mask, dump_file);
4944 fprintf (dump_file, "\n");
4945 }
4946 }
4947 }
4948
4949 /* Look up all VR information that we have discovered and copy it over
4950 to the transformation summary. */
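/* For instance (hypothetical): a parameter whose lattice settled on the
range [1, 99] is stored as { known = true, type = VR_RANGE, min = 1,
max = 99 }, whereas bottom or top lattices are stored as unknown
VR_VARYING entries. */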
4951
4952 static void
4953 ipcp_store_vr_results (void)
4954 {
4955 cgraph_node *node;
4956
4957 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
4958 {
4959 ipa_node_params *info = IPA_NODE_REF (node);
4960 bool found_useful_result = false;
4961
4962 if (!opt_for_fn (node->decl, flag_ipa_vrp))
4963 {
4964 if (dump_file)
fprintf (dump_file, "Not considering %s for VR discovery "
"and propagation; -fipa-vrp: disabled.\n",
4967 node->name ());
4968 continue;
4969 }
4970
4971 if (info->ipcp_orig_node)
4972 info = IPA_NODE_REF (info->ipcp_orig_node);
4973
4974 unsigned count = ipa_get_param_count (info);
4975 for (unsigned i = 0; i < count; i++)
4976 {
4977 ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
4978 if (!plats->m_value_range.bottom_p ()
4979 && !plats->m_value_range.top_p ())
4980 {
4981 found_useful_result = true;
4982 break;
4983 }
4984 }
4985 if (!found_useful_result)
4986 continue;
4987
4988 ipcp_grow_transformations_if_necessary ();
4989 ipcp_transformation_summary *ts = ipcp_get_transformation_summary (node);
4990 vec_safe_reserve_exact (ts->m_vr, count);
4991
4992 for (unsigned i = 0; i < count; i++)
4993 {
4994 ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
4995 ipa_vr vr;
4996
4997 if (!plats->m_value_range.bottom_p ()
4998 && !plats->m_value_range.top_p ())
4999 {
5000 vr.known = true;
5001 vr.type = plats->m_value_range.m_vr.type;
5002 vr.min = wi::to_wide (plats->m_value_range.m_vr.min);
5003 vr.max = wi::to_wide (plats->m_value_range.m_vr.max);
5004 }
5005 else
5006 {
5007 vr.known = false;
5008 vr.type = VR_VARYING;
5009 vr.min = vr.max = wi::zero (INT_TYPE_SIZE);
5010 }
5011 ts->m_vr->quick_push (vr);
5012 }
5013 }
5014 }
5015
5016 /* The IPCP driver. */
5017
5018 static unsigned int
5019 ipcp_driver (void)
5020 {
5021 struct cgraph_2edge_hook_list *edge_duplication_hook_holder;
5022 struct cgraph_edge_hook_list *edge_removal_hook_holder;
5023 struct ipa_topo_info topo;
5024
5025 ipa_check_create_node_params ();
5026 ipa_check_create_edge_args ();
5027 grow_edge_clone_vectors ();
5028 edge_duplication_hook_holder
5029 = symtab->add_edge_duplication_hook (&ipcp_edge_duplication_hook, NULL);
5030 edge_removal_hook_holder
5031 = symtab->add_edge_removal_hook (&ipcp_edge_removal_hook, NULL);
5032
5033 if (dump_file)
5034 {
5035 fprintf (dump_file, "\nIPA structures before propagation:\n");
5036 if (dump_flags & TDF_DETAILS)
5037 ipa_print_all_params (dump_file);
5038 ipa_print_all_jump_functions (dump_file);
5039 }
5040
5041 /* Topological sort. */
5042 build_toporder_info (&topo);
5043 /* Do the interprocedural propagation. */
5044 ipcp_propagate_stage (&topo);
5045 /* Decide what constant propagation and cloning should be performed. */
5046 ipcp_decision_stage (&topo);
5047 /* Store results of bits propagation. */
5048 ipcp_store_bits_results ();
5049 /* Store results of value range propagation. */
5050 ipcp_store_vr_results ();
5051
5052 /* Free all IPCP structures. */
5053 free_toporder_info (&topo);
5054 next_edge_clone.release ();
5055 prev_edge_clone.release ();
5056 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
5057 symtab->remove_edge_duplication_hook (edge_duplication_hook_holder);
5058 ipa_free_all_structures_after_ipa_cp ();
5059 if (dump_file)
5060 fprintf (dump_file, "\nIPA constant propagation end\n");
5061 return 0;
5062 }
5063
5064 /* Initialization and computation of IPCP data structures. This is the initial
5065 intraprocedural analysis of functions, which gathers information to be
5066 propagated later on. */
5067
5068 static void
5069 ipcp_generate_summary (void)
5070 {
5071 struct cgraph_node *node;
5072
5073 if (dump_file)
5074 fprintf (dump_file, "\nIPA constant propagation start:\n");
5075 ipa_register_cgraph_hooks ();
5076
5077 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
5078 ipa_analyze_node (node);
5079 }
5080
5081 /* Write ipcp summary for nodes in SET. */
5082
5083 static void
5084 ipcp_write_summary (void)
5085 {
5086 ipa_prop_write_jump_functions ();
5087 }
5088
5089 /* Read ipcp summary. */
5090
5091 static void
5092 ipcp_read_summary (void)
5093 {
5094 ipa_prop_read_jump_functions ();
5095 }
5096
5097 namespace {
5098
5099 const pass_data pass_data_ipa_cp =
5100 {
5101 IPA_PASS, /* type */
5102 "cp", /* name */
5103 OPTGROUP_NONE, /* optinfo_flags */
5104 TV_IPA_CONSTANT_PROP, /* tv_id */
5105 0, /* properties_required */
5106 0, /* properties_provided */
5107 0, /* properties_destroyed */
5108 0, /* todo_flags_start */
5109 ( TODO_dump_symtab | TODO_remove_functions ), /* todo_flags_finish */
5110 };
5111
5112 class pass_ipa_cp : public ipa_opt_pass_d
5113 {
5114 public:
5115 pass_ipa_cp (gcc::context *ctxt)
5116 : ipa_opt_pass_d (pass_data_ipa_cp, ctxt,
5117 ipcp_generate_summary, /* generate_summary */
5118 ipcp_write_summary, /* write_summary */
5119 ipcp_read_summary, /* read_summary */
5120 ipcp_write_transformation_summaries, /*
5121 write_optimization_summary */
5122 ipcp_read_transformation_summaries, /*
5123 read_optimization_summary */
5124 NULL, /* stmt_fixup */
5125 0, /* function_transform_todo_flags_start */
5126 ipcp_transform_function, /* function_transform */
5127 NULL) /* variable_transform */
5128 {}
5129
5130 /* opt_pass methods: */
5131 virtual bool gate (function *)
5132 {
5133 /* FIXME: We should remove the optimize check after we ensure we never run
5134 IPA passes when not optimizing. */
5135 return (flag_ipa_cp && optimize) || in_lto_p;
5136 }
5137
5138 virtual unsigned int execute (function *) { return ipcp_driver (); }
5139
5140 }; // class pass_ipa_cp
5141
5142 } // anon namespace
5143
5144 ipa_opt_pass_d *
5145 make_pass_ipa_cp (gcc::context *ctxt)
5146 {
5147 return new pass_ipa_cp (ctxt);
5148 }
5149
5150 /* Reset all state within ipa-cp.c so that we can rerun the compiler
5151 within the same process. For use by toplev::finalize. */
5152
5153 void
5154 ipa_cp_c_finalize (void)
5155 {
5156 max_count = profile_count::uninitialized ();
5157 overall_size = 0;
5158 max_new_size = 0;
5159 }