1 /* Interprocedural Identical Code Folding pass
2 Copyright (C) 2014-2015 Free Software Foundation, Inc.
3
4 Contributed by Jan Hubicka <hubicka@ucw.cz> and Martin Liska <mliska@suse.cz>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Interprocedural Identical Code Folding for functions and
23 read-only variables.
24
25 The goal of this transformation is to discover functions and read-only
26 variables that have exactly the same semantics.
27
28 In case of functions,
29 we can either create a virtual clone or a simple function wrapper
30 that calls the equivalent function. If the function is only locally visible,
31 all function calls can be redirected. For read-only variables, we create
32 aliases if possible.
33
34 The optimization pass proceeds as follows:
35 1) All functions and read-only variables are visited and an internal
36 data structure, either sem_function or sem_variable, is created.
37 2) For every symbol from the previous step, VAR_DECL and FUNCTION_DECL are
38 saved and matched to the corresponding sem_items.
39 3) These declarations are ignored for the equality check and are handled
40 by the value numbering algorithm published by Alpern and Zadeck in 1992.
41 4) We compute a hash value for each symbol.
42 5) Congruence classes are created based on the hash value. If hash values are
43 equal, the equals function is called and symbols are deeply compared.
44 We must prove that all SSA names, declarations and other items
45 correspond.
46 6) Value numbering is executed for these classes. At the end of the process
47 all symbol members in the remaining classes can be merged.
48 7) The merge operation creates an alias in case of read-only variables. For
49 a callgraph node, we must decide if we can redirect local calls,
50 create an alias or a thunk. A small illustrative example follows below.
51
52 */
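/* Illustrative example (not part of the pass itself): with -fipa-icf the two
   functions below have identical GIMPLE bodies, fall into one congruence
   class and can be merged, e.g. by emitting "bar" as an alias of "foo" or by
   turning "bar" into a wrapper that calls "foo":

     static int foo (int x) { return x * x + 1; }
     static int bar (int y) { return y * y + 1; }

   Local callers of "bar" can also be redirected to call "foo" directly.  */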
53
54 #include "config.h"
55 #include "system.h"
56 #include <list>
57 #include "coretypes.h"
58 #include "hash-set.h"
59 #include "machmode.h"
60 #include "vec.h"
61 #include "double-int.h"
62 #include "input.h"
63 #include "alias.h"
64 #include "symtab.h"
65 #include "options.h"
66 #include "wide-int.h"
67 #include "inchash.h"
68 #include "tree.h"
69 #include "fold-const.h"
70 #include "predict.h"
71 #include "tm.h"
72 #include "hard-reg-set.h"
73 #include "function.h"
74 #include "dominance.h"
75 #include "cfg.h"
76 #include "basic-block.h"
77 #include "tree-ssa-alias.h"
78 #include "internal-fn.h"
79 #include "gimple-expr.h"
80 #include "is-a.h"
81 #include "gimple.h"
82 #include "hashtab.h"
83 #include "rtl.h"
84 #include "flags.h"
85 #include "statistics.h"
86 #include "real.h"
87 #include "fixed-value.h"
88 #include "insn-config.h"
89 #include "expmed.h"
90 #include "dojump.h"
91 #include "explow.h"
92 #include "calls.h"
93 #include "emit-rtl.h"
94 #include "varasm.h"
95 #include "stmt.h"
96 #include "expr.h"
97 #include "gimple-iterator.h"
98 #include "gimple-ssa.h"
99 #include "tree-cfg.h"
100 #include "tree-phinodes.h"
101 #include "stringpool.h"
102 #include "tree-ssanames.h"
103 #include "tree-dfa.h"
104 #include "tree-pass.h"
105 #include "gimple-pretty-print.h"
106 #include "hash-map.h"
107 #include "plugin-api.h"
108 #include "ipa-ref.h"
109 #include "cgraph.h"
110 #include "alloc-pool.h"
111 #include "symbol-summary.h"
112 #include "ipa-prop.h"
113 #include "ipa-inline.h"
114 #include "cfgloop.h"
115 #include "except.h"
116 #include "hash-table.h"
117 #include "coverage.h"
118 #include "attribs.h"
119 #include "print-tree.h"
120 #include "lto-streamer.h"
121 #include "data-streamer.h"
122 #include "ipa-utils.h"
123 #include "ipa-icf-gimple.h"
124 #include "ipa-icf.h"
125 #include "stor-layout.h"
126 #include "dbgcnt.h"
127
128 using namespace ipa_icf_gimple;
129
130 namespace ipa_icf {
131
132 /* Initialization and computation of symtab node hash, their data
133 are propagated later on. */
134
135 static sem_item_optimizer *optimizer = NULL;
136
137 /* Constructor. */
138
139 symbol_compare_collection::symbol_compare_collection (symtab_node *node)
140 {
141 m_references.create (0);
142 m_interposables.create (0);
143
144 ipa_ref *ref;
145
146 if (is_a <varpool_node *> (node) && DECL_VIRTUAL_P (node->decl))
147 return;
148
149 for (unsigned i = 0; node->iterate_reference (i, ref); i++)
150 {
151 if (ref->address_matters_p ())
152 m_references.safe_push (ref->referred);
153
154 if (ref->referred->get_availability () <= AVAIL_INTERPOSABLE)
155 {
156 if (ref->address_matters_p ())
157 m_references.safe_push (ref->referred);
158 else
159 m_interposables.safe_push (ref->referred);
160 }
161 }
162
163 if (is_a <cgraph_node *> (node))
164 {
165 cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
166
167 for (cgraph_edge *e = cnode->callees; e; e = e->next_callee)
168 if (e->callee->get_availability () <= AVAIL_INTERPOSABLE)
169 m_interposables.safe_push (e->callee);
170 }
171 }
172
173 /* Constructor for key value pair, where _ITEM is key and _INDEX is a target. */
174
175 sem_usage_pair::sem_usage_pair (sem_item *_item, unsigned int _index):
176 item (_item), index (_index)
177 {
178 }
179
180 /* Semantic item constructor for a node of _TYPE, where STACK is used
181 for bitmap memory allocation. */
182
183 sem_item::sem_item (sem_item_type _type,
184 bitmap_obstack *stack): type(_type), hash(0)
185 {
186 setup (stack);
187 }
188
189 /* Semantic item constructor for a node of _TYPE, where STACK is used
190 for bitmap memory allocation. The item is based on symtab node _NODE
191 with computed _HASH. */
192
193 sem_item::sem_item (sem_item_type _type, symtab_node *_node,
194 hashval_t _hash, bitmap_obstack *stack): type(_type),
195 node (_node), hash (_hash)
196 {
197 decl = node->decl;
198 setup (stack);
199 }
200
201 /* Add reference to a semantic TARGET. */
202
203 void
204 sem_item::add_reference (sem_item *target)
205 {
206 refs.safe_push (target);
207 unsigned index = refs.length ();
208 target->usages.safe_push (new sem_usage_pair(this, index));
209 bitmap_set_bit (target->usage_index_bitmap, index);
210 refs_set.add (target->node);
211 }
212
213 /* Initialize internal data structures. Bitmap STACK is used for
214 bitmap memory allocation process. */
215
216 void
217 sem_item::setup (bitmap_obstack *stack)
218 {
219 gcc_checking_assert (node);
220
221 refs.create (0);
222 tree_refs.create (0);
223 usages.create (0);
224 usage_index_bitmap = BITMAP_ALLOC (stack);
225 }
226
227 sem_item::~sem_item ()
228 {
229 for (unsigned i = 0; i < usages.length (); i++)
230 delete usages[i];
231
232 refs.release ();
233 tree_refs.release ();
234 usages.release ();
235
236 BITMAP_FREE (usage_index_bitmap);
237 }
238
239 /* Dump function for debugging purpose. */
240
241 DEBUG_FUNCTION void
242 sem_item::dump (void)
243 {
244 if (dump_file)
245 {
246 fprintf (dump_file, "[%s] %s (%u) (tree:%p)\n", type == FUNC ? "func" : "var",
247 node->name(), node->order, (void *) node->decl);
248 fprintf (dump_file, " hash: %u\n", get_hash ());
249 fprintf (dump_file, " references: ");
250
251 for (unsigned i = 0; i < refs.length (); i++)
252 fprintf (dump_file, "%s%s ", refs[i]->node->name (),
253 i < refs.length() - 1 ? "," : "");
254
255 fprintf (dump_file, "\n");
256 }
257 }
258
259 /* Return true if target supports alias symbols. */
260
261 bool
262 sem_item::target_supports_symbol_aliases_p (void)
263 {
264 #if !defined (ASM_OUTPUT_DEF) || (!defined(ASM_OUTPUT_WEAK_ALIAS) && !defined (ASM_WEAKEN_DECL))
265 return false;
266 #else
267 return true;
268 #endif
269 }
270
271 /* Semantic function constructor that uses STACK as bitmap memory stack. */
272
273 sem_function::sem_function (bitmap_obstack *stack): sem_item (FUNC, stack),
274 m_checker (NULL), m_compared_func (NULL)
275 {
276 arg_types.create (0);
277 bb_sizes.create (0);
278 bb_sorted.create (0);
279 }
280
281 /* Constructor based on callgraph node _NODE with computed hash _HASH.
282 Bitmap STACK is used for memory allocation. */
283 sem_function::sem_function (cgraph_node *node, hashval_t hash,
284 bitmap_obstack *stack):
285 sem_item (FUNC, node, hash, stack),
286 m_checker (NULL), m_compared_func (NULL)
287 {
288 arg_types.create (0);
289 bb_sizes.create (0);
290 bb_sorted.create (0);
291 }
292
293 sem_function::~sem_function ()
294 {
295 for (unsigned i = 0; i < bb_sorted.length (); i++)
296 delete (bb_sorted[i]);
297
298 arg_types.release ();
299 bb_sizes.release ();
300 bb_sorted.release ();
301 }
302
303 /* Calculates hash value based on a BASIC_BLOCK. */
304
305 hashval_t
306 sem_function::get_bb_hash (const sem_bb *basic_block)
307 {
308 inchash::hash hstate;
309
310 hstate.add_int (basic_block->nondbg_stmt_count);
311 hstate.add_int (basic_block->edge_count);
312
313 return hstate.end ();
314 }
315
316 /* Reference-independent hash function. */
317
318 hashval_t
319 sem_function::get_hash (void)
320 {
321 if(!hash)
322 {
323 inchash::hash hstate;
324 hstate.add_int (177454); /* Random number for function type. */
325
326 hstate.add_int (arg_count);
327 hstate.add_int (cfg_checksum);
328 hstate.add_int (gcode_hash);
329
330 for (unsigned i = 0; i < bb_sorted.length (); i++)
331 hstate.merge_hash (get_bb_hash (bb_sorted[i]));
332
333 for (unsigned i = 0; i < bb_sizes.length (); i++)
334 hstate.add_int (bb_sizes[i]);
335
336
337 /* Add common features of declaration itself. */
338 if (DECL_FUNCTION_SPECIFIC_TARGET (decl))
339 hstate.add_wide_int
340 (cl_target_option_hash
341 (TREE_TARGET_OPTION (DECL_FUNCTION_SPECIFIC_TARGET (decl))));
342 if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl))
343 hstate.add_wide_int (cl_optimization_hash
344 (TREE_OPTIMIZATION (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl))));
345 hstate.add_flag (DECL_CXX_CONSTRUCTOR_P (decl));
346 hstate.add_flag (DECL_CXX_DESTRUCTOR_P (decl));
347
348 hash = hstate.end ();
349 }
350
351 return hash;
352 }
353
354 /* Return true if A1 and A2 represent equivalent function attribute lists.
355 Based on comp_type_attributes. */
356
357 bool
358 sem_item::compare_attributes (const_tree a1, const_tree a2)
359 {
360 const_tree a;
361 if (a1 == a2)
362 return true;
363 for (a = a1; a != NULL_TREE; a = TREE_CHAIN (a))
364 {
365 const struct attribute_spec *as;
366 const_tree attr;
367
368 as = lookup_attribute_spec (get_attribute_name (a));
369 /* TODO: We can introduce as->affects_decl_identity
370 and as->affects_decl_reference_identity if attribute mismatch
371 gets a common reason to give up on merging. It may not be worth
372 the effort.
373 For example returns_nonnull affects only references, while
374 optimize attribute can be ignored because it is already lowered
375 into flags representation and compared separately. */
376 if (!as)
377 continue;
378
379 attr = lookup_attribute (as->name, CONST_CAST_TREE (a2));
380 if (!attr || !attribute_value_equal (a, attr))
381 break;
382 }
383 if (!a)
384 {
385 for (a = a2; a != NULL_TREE; a = TREE_CHAIN (a))
386 {
387 const struct attribute_spec *as;
388
389 as = lookup_attribute_spec (get_attribute_name (a));
390 if (!as)
391 continue;
392
393 if (!lookup_attribute (as->name, CONST_CAST_TREE (a1)))
394 break;
395 /* We don't need to compare trees again, as we did this
396 already in first loop. */
397 }
398 if (!a)
399 return true;
400 }
401 /* TODO: As in comp_type_attributes we may want to introduce target hook. */
402 return false;
403 }
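/* Illustrative example (assumption, not taken from the pass): for
   compare_attributes the lists

     __attribute__((returns_nonnull, unused))   vs.
     __attribute__((unused, returns_nonnull))

   are considered equivalent because every known attribute present on one side
   is looked up on the other, while a returns_nonnull attribute present on only
   one side makes the comparison fail.  Unknown attributes (those without an
   attribute_spec) are skipped.  */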
404
405 /* Compare properties of symbols N1 and N2 that do not affect semantics of
406 the symbol itself but affect semantics of its references from USED_BY (which
407 may be NULL if it is unknown). If the comparison fails, the symbols
408 can still be merged but any symbols referring to them can't.
409
410 If ADDRESS is true, do extra checking needed for IPA_REF_ADDR.
411
412 TODO: We can also split attributes to those that determine codegen of
413 a function body/variable constructor itself and those that are used when
414 referring to it. */
415
416 bool
417 sem_item::compare_referenced_symbol_properties (symtab_node *used_by,
418 symtab_node *n1,
419 symtab_node *n2,
420 bool address)
421 {
422 if (is_a <cgraph_node *> (n1))
423 {
424 /* Inline properties matter: we do not want to merge uses of an inline
425 function into uses of a normal function because the inline hint would be lost.
426 We can, however, merge an inline function into a noinline one because the
427 alias will keep its DECL_DECLARED_INLINE flag.
428
429 Also ignore the inline flag when optimizing for size or when the function
430 is known not to be inlinable.
431
432 TODO: the optimize_size checks can also be assumed to be true if the
433 unit has no !optimize_size functions. */
434
435 if ((!used_by || address || !is_a <cgraph_node *> (used_by)
436 || !opt_for_fn (used_by->decl, optimize_size))
437 && !opt_for_fn (n1->decl, optimize_size)
438 && n1->get_availability () > AVAIL_INTERPOSABLE
439 && (!DECL_UNINLINABLE (n1->decl) || !DECL_UNINLINABLE (n2->decl)))
440 {
441 if (DECL_DISREGARD_INLINE_LIMITS (n1->decl)
442 != DECL_DISREGARD_INLINE_LIMITS (n2->decl))
443 return return_false_with_msg
444 ("DECL_DISREGARD_INLINE_LIMITS are different");
445
446 if (DECL_DECLARED_INLINE_P (n1->decl)
447 != DECL_DECLARED_INLINE_P (n2->decl))
448 return return_false_with_msg ("inline attributes are different");
449 }
450
451 if (DECL_IS_OPERATOR_NEW (n1->decl)
452 != DECL_IS_OPERATOR_NEW (n2->decl))
453 return return_false_with_msg ("operator new flags are different");
454 }
455
456 /* Merging two definitions with a reference to equivalent vtables, but
457 belonging to different types, may result in ipa-polymorphic-call analysis
458 giving a wrong answer about the dynamic type of an instance. */
459 if (is_a <varpool_node *> (n1))
460 {
461 if ((DECL_VIRTUAL_P (n1->decl) || DECL_VIRTUAL_P (n2->decl))
462 && (DECL_VIRTUAL_P (n1->decl) != DECL_VIRTUAL_P (n2->decl)
463 || !types_must_be_same_for_odr (DECL_CONTEXT (n1->decl),
464 DECL_CONTEXT (n2->decl)))
465 && (!used_by || !is_a <cgraph_node *> (used_by) || address
466 || opt_for_fn (used_by->decl, flag_devirtualize)))
467 return return_false_with_msg
468 ("references to virtual tables can not be merged");
469
470 if (address && DECL_ALIGN (n1->decl) != DECL_ALIGN (n2->decl))
471 return return_false_with_msg ("alignment mismatch");
472
473 /* For functions we compare attributes in equals_wpa, because we do
474 not know what attributes may cause codegen differences, but for
475 variables just compare attributes for references - the codegen
476 for constructors is affected only by those attributes that we lower
477 to explicit representation (such as DECL_ALIGN or DECL_SECTION). */
478 if (!compare_attributes (DECL_ATTRIBUTES (n1->decl),
479 DECL_ATTRIBUTES (n2->decl)))
480 return return_false_with_msg ("different var decl attributes");
481 if (comp_type_attributes (TREE_TYPE (n1->decl),
482 TREE_TYPE (n2->decl)) != 1)
483 return return_false_with_msg ("different var type attributes");
484 }
485
486 /* When matching virtual tables, be sure to also match information
487 relevant for polymorphic call analysis. */
488 if (used_by && is_a <varpool_node *> (used_by)
489 && DECL_VIRTUAL_P (used_by->decl))
490 {
491 if (DECL_VIRTUAL_P (n1->decl) != DECL_VIRTUAL_P (n2->decl))
492 return return_false_with_msg ("virtual flag mismatch");
493 if (DECL_VIRTUAL_P (n1->decl) && is_a <cgraph_node *> (n1)
494 && (DECL_FINAL_P (n1->decl) != DECL_FINAL_P (n2->decl)))
495 return return_false_with_msg ("final flag mismatch");
496 }
497 return true;
498 }
499
500 /* Hash properties that are compared by compare_referenced_symbol_properties. */
501
502 void
503 sem_item::hash_referenced_symbol_properties (symtab_node *ref,
504 inchash::hash &hstate,
505 bool address)
506 {
507 if (is_a <cgraph_node *> (ref))
508 {
509 if ((type != FUNC || address || !opt_for_fn (decl, optimize_size))
510 && !opt_for_fn (ref->decl, optimize_size)
511 && !DECL_UNINLINABLE (ref->decl))
512 {
513 hstate.add_flag (DECL_DISREGARD_INLINE_LIMITS (ref->decl));
514 hstate.add_flag (DECL_DECLARED_INLINE_P (ref->decl));
515 }
516 hstate.add_flag (DECL_IS_OPERATOR_NEW (ref->decl));
517 }
518 else if (is_a <varpool_node *> (ref))
519 {
520 hstate.add_flag (DECL_VIRTUAL_P (ref->decl));
521 if (address)
522 hstate.add_int (DECL_ALIGN (ref->decl));
523 }
524 }
525
526
527 /* For given symbol table nodes N1 and N2, we check that the FUNCTION_DECLs
528 point to the same function. The comparison can be skipped if IGNORED_NODES
529 contains these nodes. ADDRESS indicates whether the address is taken. */
530
531 bool
532 sem_item::compare_symbol_references (
533 hash_map <symtab_node *, sem_item *> &ignored_nodes,
534 symtab_node *n1, symtab_node *n2, bool address)
535 {
536 enum availability avail1, avail2;
537
538 if (n1 == n2)
539 return true;
540
541 /* Never match variable and function. */
542 if (is_a <varpool_node *> (n1) != is_a <varpool_node *> (n2))
543 return false;
544
545 if (!compare_referenced_symbol_properties (node, n1, n2, address))
546 return false;
547 if (address && n1->equal_address_to (n2) == 1)
548 return true;
549 if (!address && n1->semantically_equivalent_p (n2))
550 return true;
551
552 n1 = n1->ultimate_alias_target (&avail1);
553 n2 = n2->ultimate_alias_target (&avail2);
554
555 if (avail1 >= AVAIL_INTERPOSABLE && ignored_nodes.get (n1)
556 && avail2 >= AVAIL_INTERPOSABLE && ignored_nodes.get (n2))
557 return true;
558
559 return return_false_with_msg ("different references");
560 }
561
562 /* If cgraph edges E1 and E2 are indirect calls, verify that
563 ECF flags are the same. */
564
565 bool sem_function::compare_edge_flags (cgraph_edge *e1, cgraph_edge *e2)
566 {
567 if (e1->indirect_info && e2->indirect_info)
568 {
569 int e1_flags = e1->indirect_info->ecf_flags;
570 int e2_flags = e2->indirect_info->ecf_flags;
571
572 if (e1_flags != e2_flags)
573 return return_false_with_msg ("ECF flags are different");
574 }
575 else if (e1->indirect_info || e2->indirect_info)
576 return false;
577
578 return true;
579 }
580
581 /* Return true if parameter I may be used. */
582
583 bool
584 sem_function::param_used_p (unsigned int i)
585 {
586 if (ipa_node_params_sum == NULL)
587 return false;
588
589 struct ipa_node_params *parms_info = IPA_NODE_REF (get_node ());
590
591 if (parms_info->descriptors.is_empty ()
592 || parms_info->descriptors.length () <= i)
593 return true;
594
595 return ipa_is_param_used (IPA_NODE_REF (get_node ()), i);
596 }
597
598 /* Fast equality function based on the knowledge available at WPA time. */
599
600 bool
601 sem_function::equals_wpa (sem_item *item,
602 hash_map <symtab_node *, sem_item *> &ignored_nodes)
603 {
604 gcc_assert (item->type == FUNC);
605 cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
606 cgraph_node *cnode2 = dyn_cast <cgraph_node *> (item->node);
607
608 m_compared_func = static_cast<sem_function *> (item);
609
610 if (arg_types.length () != m_compared_func->arg_types.length ())
611 return return_false_with_msg ("different number of arguments");
612
613 if (cnode->thunk.thunk_p != cnode2->thunk.thunk_p)
614 return return_false_with_msg ("thunk_p mismatch");
615
616 if (cnode->thunk.thunk_p)
617 {
618 if (cnode->thunk.fixed_offset != cnode2->thunk.fixed_offset)
619 return return_false_with_msg ("thunk fixed_offset mismatch");
620 if (cnode->thunk.virtual_value != cnode2->thunk.virtual_value)
621 return return_false_with_msg ("thunk virtual_value mismatch");
622 if (cnode->thunk.this_adjusting != cnode2->thunk.this_adjusting)
623 return return_false_with_msg ("thunk this_adjusting mismatch");
624 if (cnode->thunk.virtual_offset_p != cnode2->thunk.virtual_offset_p)
625 return return_false_with_msg ("thunk virtual_offset_p mismatch");
626 if (cnode->thunk.add_pointer_bounds_args
627 != cnode2->thunk.add_pointer_bounds_args)
628 return return_false_with_msg ("thunk add_pointer_bounds_args mismatch");
629 }
630
631 /* Compare special function DECL attributes. */
632 if (DECL_FUNCTION_PERSONALITY (decl)
633 != DECL_FUNCTION_PERSONALITY (item->decl))
634 return return_false_with_msg ("function personalities are different");
635
636 if (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl)
637 != DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (item->decl))
638 return return_false_with_msg ("instrument function entry exit "
639 "attributes are different");
640
641 if (DECL_NO_LIMIT_STACK (decl) != DECL_NO_LIMIT_STACK (item->decl))
642 return return_false_with_msg ("no stack limit attributes are different");
643
644 if (DECL_CXX_CONSTRUCTOR_P (decl) != DECL_CXX_CONSTRUCTOR_P (item->decl))
645 return return_false_with_msg ("DECL_CXX_CONSTRUCTOR mismatch");
646
647 if (DECL_CXX_DESTRUCTOR_P (decl) != DECL_CXX_DESTRUCTOR_P (item->decl))
648 return return_false_with_msg ("DECL_CXX_DESTRUCTOR mismatch");
649
650 /* TODO: pure/const flags mostly matter only for references, except for
651 the fact that codegen takes the LOOPING flag as a hint that loops are
652 finite. We may arrange the code to always pick the leader that has the least
653 specified flags and then this can go into comparing symbol properties. */
654 if (flags_from_decl_or_type (decl) != flags_from_decl_or_type (item->decl))
655 return return_false_with_msg ("decl_or_type flags are different");
656
657 /* Do not match polymorphic constructors of different types. Their calls
658 type the memory location for ipa-polymorphic-call analysis and we do not want
659 it to get confused by a wrong type. */
660 if (DECL_CXX_CONSTRUCTOR_P (decl)
661 && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
662 {
663 if (TREE_CODE (TREE_TYPE (item->decl)) != METHOD_TYPE)
664 return return_false_with_msg ("DECL_CXX_CONSTRUCTOR type mismatch");
665 else if (!func_checker::compatible_polymorphic_types_p
666 (method_class_type (TREE_TYPE (decl)),
667 method_class_type (TREE_TYPE (item->decl)), false))
668 return return_false_with_msg ("ctor polymorphic type mismatch");
669 }
670
671 /* Checking function TARGET and OPTIMIZATION flags. */
672 cl_target_option *tar1 = target_opts_for_fn (decl);
673 cl_target_option *tar2 = target_opts_for_fn (item->decl);
674
675 if (tar1 != tar2 && !cl_target_option_eq (tar1, tar2))
676 {
677 if (dump_file && (dump_flags & TDF_DETAILS))
678 {
679 fprintf (dump_file, "target flags difference");
680 cl_target_option_print_diff (dump_file, 2, tar1, tar2);
681 }
682
683 return return_false_with_msg ("Target flags are different");
684 }
685
686 cl_optimization *opt1 = opts_for_fn (decl);
687 cl_optimization *opt2 = opts_for_fn (item->decl);
688
689 if (opt1 != opt2 && memcmp (opt1, opt2, sizeof(cl_optimization)))
690 {
691 if (dump_file && (dump_flags & TDF_DETAILS))
692 {
693 fprintf (dump_file, "optimization flags difference");
694 cl_optimization_print_diff (dump_file, 2, opt1, opt2);
695 }
696
697 return return_false_with_msg ("optimization flags are different");
698 }
699
700 /* Result type checking. */
701 if (!func_checker::compatible_types_p (result_type,
702 m_compared_func->result_type))
703 return return_false_with_msg ("result types are different");
704
705 /* Checking types of arguments. */
706 for (unsigned i = 0; i < arg_types.length (); i++)
707 {
708 /* This guard is here for function pointers with attributes (pr59927.c). */
709 if (!arg_types[i] || !m_compared_func->arg_types[i])
710 return return_false_with_msg ("NULL argument type");
711
712 /* We always need to match types so we are sure the calling conventions
713 are compatible. */
714 if (!func_checker::compatible_types_p (arg_types[i],
715 m_compared_func->arg_types[i]))
716 return return_false_with_msg ("argument type is different");
717
718 /* For used arguments we need to do a bit more work. */
719 if (!param_used_p (i))
720 continue;
721 if (POINTER_TYPE_P (arg_types[i])
722 && (TYPE_RESTRICT (arg_types[i])
723 != TYPE_RESTRICT (m_compared_func->arg_types[i])))
724 return return_false_with_msg ("argument restrict flag mismatch");
725 /* nonnull_arg_p implies a non-zero range for REFERENCE types. */
726 if (POINTER_TYPE_P (arg_types[i])
727 && TREE_CODE (arg_types[i])
728 != TREE_CODE (m_compared_func->arg_types[i])
729 && opt_for_fn (decl, flag_delete_null_pointer_checks))
730 return return_false_with_msg ("pointer wrt reference mismatch");
731 }
732
733 if (node->num_references () != item->node->num_references ())
734 return return_false_with_msg ("different number of references");
735
736 /* Checking function attributes.
737 This is quadratic in the number of attributes. */
738 if (comp_type_attributes (TREE_TYPE (decl),
739 TREE_TYPE (item->decl)) != 1)
740 return return_false_with_msg ("different type attributes");
741 if (!compare_attributes (DECL_ATTRIBUTES (decl),
742 DECL_ATTRIBUTES (item->decl)))
743 return return_false_with_msg ("different decl attributes");
744
745 /* The type of the THIS pointer types the memory location used by
746 ipa-polymorphic-call analysis. */
747 if (opt_for_fn (decl, flag_devirtualize)
748 && (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE
749 || TREE_CODE (TREE_TYPE (item->decl)) == METHOD_TYPE)
750 && param_used_p (0)
751 && compare_polymorphic_p ())
752 {
753 if (TREE_CODE (TREE_TYPE (decl)) != TREE_CODE (TREE_TYPE (item->decl)))
754 return return_false_with_msg ("METHOD_TYPE and FUNCTION_TYPE mismatch");
755 if (!func_checker::compatible_polymorphic_types_p
756 (method_class_type (TREE_TYPE (decl)),
757 method_class_type (TREE_TYPE (item->decl)), false))
758 return return_false_with_msg ("THIS pointer ODR type mismatch");
759 }
760
761 ipa_ref *ref = NULL, *ref2 = NULL;
762 for (unsigned i = 0; node->iterate_reference (i, ref); i++)
763 {
764 item->node->iterate_reference (i, ref2);
765
766 if (ref->use != ref2->use)
767 return return_false_with_msg ("reference use mismatch");
768
769 if (!compare_symbol_references (ignored_nodes, ref->referred,
770 ref2->referred,
771 ref->address_matters_p ()))
772 return false;
773 }
774
775 cgraph_edge *e1 = dyn_cast <cgraph_node *> (node)->callees;
776 cgraph_edge *e2 = dyn_cast <cgraph_node *> (item->node)->callees;
777
778 while (e1 && e2)
779 {
780 if (!compare_symbol_references (ignored_nodes, e1->callee,
781 e2->callee, false))
782 return false;
783 if (!compare_edge_flags (e1, e2))
784 return false;
785
786 e1 = e1->next_callee;
787 e2 = e2->next_callee;
788 }
789
790 if (e1 || e2)
791 return return_false_with_msg ("different number of calls");
792
793 e1 = dyn_cast <cgraph_node *> (node)->indirect_calls;
794 e2 = dyn_cast <cgraph_node *> (item->node)->indirect_calls;
795
796 while (e1 && e2)
797 {
798 if (!compare_edge_flags (e1, e2))
799 return false;
800
801 e1 = e1->next_callee;
802 e2 = e2->next_callee;
803 }
804
805 if (e1 || e2)
806 return return_false_with_msg ("different number of indirect calls");
807
808 return true;
809 }
810
811 /* Update hash by address sensitive references. We iterate over all
812 sensitive references (address_matters_p) and we hash the ultimate alias
813 target of these nodes, which can improve the semantic item hash.
814
815 Also hash in referenced symbols properties. This can be done at any time
816 (as the properties should not change), but it is convenient to do it here
817 while we walk the references anyway. */
818
819 void
820 sem_item::update_hash_by_addr_refs (hash_map <symtab_node *,
821 sem_item *> &m_symtab_node_map)
822 {
823 ipa_ref* ref;
824 inchash::hash hstate (hash);
825
826 for (unsigned i = 0; node->iterate_reference (i, ref); i++)
827 {
828 hstate.add_int (ref->use);
829 hash_referenced_symbol_properties (ref->referred, hstate,
830 ref->use == IPA_REF_ADDR);
831 if (ref->address_matters_p () || !m_symtab_node_map.get (ref->referred))
832 hstate.add_int (ref->referred->ultimate_alias_target ()->order);
833 }
834
835 if (is_a <cgraph_node *> (node))
836 {
837 for (cgraph_edge *e = dyn_cast <cgraph_node *> (node)->callers; e;
838 e = e->next_caller)
839 {
840 sem_item **result = m_symtab_node_map.get (e->callee);
841 hash_referenced_symbol_properties (e->callee, hstate, false);
842 if (!result)
843 hstate.add_int (e->callee->ultimate_alias_target ()->order);
844 }
845 }
846
847 hash = hstate.end ();
848 }
849
850 /* Update hash by computed local hash values taken from different
851 semantic items.
852 TODO: stronger SCC based hashing would be desirable here. */
853
854 void
855 sem_item::update_hash_by_local_refs (hash_map <symtab_node *,
856 sem_item *> &m_symtab_node_map)
857 {
858 ipa_ref* ref;
859 inchash::hash state (hash);
860
861 for (unsigned j = 0; node->iterate_reference (j, ref); j++)
862 {
863 sem_item **result = m_symtab_node_map.get (ref->referring);
864 if (result)
865 state.merge_hash ((*result)->hash);
866 }
867
868 if (type == FUNC)
869 {
870 for (cgraph_edge *e = dyn_cast <cgraph_node *> (node)->callees; e;
871 e = e->next_callee)
872 {
873 sem_item **result = m_symtab_node_map.get (e->caller);
874 if (result)
875 state.merge_hash ((*result)->hash);
876 }
877 }
878
879 global_hash = state.end ();
880 }
881
882 /* Return true if the item equals ITEM given as argument. */
883
884 bool
885 sem_function::equals (sem_item *item,
886 hash_map <symtab_node *, sem_item *> &)
887 {
888 gcc_assert (item->type == FUNC);
889 bool eq = equals_private (item);
890
891 if (m_checker != NULL)
892 {
893 delete m_checker;
894 m_checker = NULL;
895 }
896
897 if (dump_file && (dump_flags & TDF_DETAILS))
898 fprintf (dump_file,
899 "Equals called for:%s:%s (%u:%u) (%s:%s) with result: %s\n\n",
900 xstrdup_for_dump (node->name()),
901 xstrdup_for_dump (item->node->name ()),
902 node->order,
903 item->node->order,
904 xstrdup_for_dump (node->asm_name ()),
905 xstrdup_for_dump (item->node->asm_name ()),
906 eq ? "true" : "false");
907
908 return eq;
909 }
910
911 /* Processes function equality comparison. */
912
913 bool
914 sem_function::equals_private (sem_item *item)
915 {
916 if (item->type != FUNC)
917 return false;
918
919 basic_block bb1, bb2;
920 edge e1, e2;
921 edge_iterator ei1, ei2;
922 bool result = true;
923 tree arg1, arg2;
924
925 m_compared_func = static_cast<sem_function *> (item);
926
927 gcc_assert (decl != item->decl);
928
929 if (bb_sorted.length () != m_compared_func->bb_sorted.length ()
930 || edge_count != m_compared_func->edge_count
931 || cfg_checksum != m_compared_func->cfg_checksum)
932 return return_false ();
933
934 m_checker = new func_checker (decl, m_compared_func->decl,
935 compare_polymorphic_p (),
936 false,
937 &refs_set,
938 &m_compared_func->refs_set);
939 for (arg1 = DECL_ARGUMENTS (decl),
940 arg2 = DECL_ARGUMENTS (m_compared_func->decl);
941 arg1; arg1 = DECL_CHAIN (arg1), arg2 = DECL_CHAIN (arg2))
942 if (!m_checker->compare_decl (arg1, arg2))
943 return return_false ();
944
945 if (!dyn_cast <cgraph_node *> (node)->has_gimple_body_p ())
946 return true;
947
948 /* Fill-up label dictionary. */
949 for (unsigned i = 0; i < bb_sorted.length (); ++i)
950 {
951 m_checker->parse_labels (bb_sorted[i]);
952 m_checker->parse_labels (m_compared_func->bb_sorted[i]);
953 }
954
955 /* Checking all basic blocks. */
956 for (unsigned i = 0; i < bb_sorted.length (); ++i)
957 if(!m_checker->compare_bb (bb_sorted[i], m_compared_func->bb_sorted[i]))
958 return return_false();
959
960 dump_message ("All BBs are equal\n");
961
962 auto_vec <int> bb_dict;
963
964 /* Basic block edges check. */
965 for (unsigned i = 0; i < bb_sorted.length (); ++i)
966 {
967 bb1 = bb_sorted[i]->bb;
968 bb2 = m_compared_func->bb_sorted[i]->bb;
969
970 ei2 = ei_start (bb2->preds);
971
972 for (ei1 = ei_start (bb1->preds); ei_cond (ei1, &e1); ei_next (&ei1))
973 {
974 ei_cond (ei2, &e2);
975
976 if (e1->flags != e2->flags)
977 return return_false_with_msg ("flags comparison returns false");
978
979 if (!bb_dict_test (&bb_dict, e1->src->index, e2->src->index))
980 return return_false_with_msg ("edge comparison returns false");
981
982 if (!bb_dict_test (&bb_dict, e1->dest->index, e2->dest->index))
983 return return_false_with_msg ("BB comparison returns false");
984
985 if (!m_checker->compare_edge (e1, e2))
986 return return_false_with_msg ("edge comparison returns false");
987
988 ei_next (&ei2);
989 }
990 }
991
992 /* Basic block PHI nodes comparison. */
993 for (unsigned i = 0; i < bb_sorted.length (); i++)
994 if (!compare_phi_node (bb_sorted[i]->bb, m_compared_func->bb_sorted[i]->bb))
995 return return_false_with_msg ("PHI node comparison returns false");
996
997 return result;
998 }
999
1000 /* Set LOCAL_P of NODE to true if DATA is non-NULL.
1001 Helper for call_for_symbol_thunks_and_aliases. */
1002
1003 static bool
1004 set_local (cgraph_node *node, void *data)
1005 {
1006 node->local.local = data != NULL;
1007 return false;
1008 }
1009
1010 /* Set TREE_ADDRESSABLE of NODE to true.
1011 Helper for call_for_symbol_thunks_and_aliases. */
1012
1013 static bool
1014 set_addressable (varpool_node *node, void *)
1015 {
1016 TREE_ADDRESSABLE (node->decl) = 1;
1017 return false;
1018 }
1019
1020 /* Clear DECL_RTL of NODE.
1021 Helper for call_for_symbol_thunks_and_aliases. */
1022
1023 static bool
1024 clear_decl_rtl (symtab_node *node, void *)
1025 {
1026 SET_DECL_RTL (node->decl, NULL);
1027 return false;
1028 }
1029
1030 /* Redirect all callers of N and its aliases to TO. Remove aliases if
1031 possible. Return number of redirections made. */
1032
1033 static int
1034 redirect_all_callers (cgraph_node *n, cgraph_node *to)
1035 {
1036 int nredirected = 0;
1037 ipa_ref *ref;
1038 cgraph_edge *e = n->callers;
1039
1040 while (e)
1041 {
1042 /* Redirecting thunks to interposable symbols or symbols in other sections
1043 may not be supported by target output code. Play safe for now and
1044 punt on redirection. */
1045 if (!e->caller->thunk.thunk_p)
1046 {
1047 struct cgraph_edge *nexte = e->next_caller;
1048 e->redirect_callee (to);
1049 e = nexte;
1050 nredirected++;
1051 }
1052 else
1053 e = e->next_callee;
1054 }
1055 for (unsigned i = 0; n->iterate_direct_aliases (i, ref);)
1056 {
1057 bool removed = false;
1058 cgraph_node *n_alias = dyn_cast <cgraph_node *> (ref->referring);
1059
1060 if ((DECL_COMDAT_GROUP (n->decl)
1061 && (DECL_COMDAT_GROUP (n->decl)
1062 == DECL_COMDAT_GROUP (n_alias->decl)))
1063 || (n_alias->get_availability () > AVAIL_INTERPOSABLE
1064 && n->get_availability () > AVAIL_INTERPOSABLE))
1065 {
1066 nredirected += redirect_all_callers (n_alias, to);
1067 if (n_alias->can_remove_if_no_direct_calls_p ()
1068 && !n_alias->call_for_symbol_and_aliases (cgraph_node::has_thunk_p,
1069 NULL, true)
1070 && !n_alias->has_aliases_p ())
1071 n_alias->remove ();
1072 }
1073 if (!removed)
1074 i++;
1075 }
1076 return nredirected;
1077 }
1078
1079 /* Merges instance with an ALIAS_ITEM, where alias, thunk or redirection can
1080 be applied. */
1081
1082 bool
1083 sem_function::merge (sem_item *alias_item)
1084 {
1085 gcc_assert (alias_item->type == FUNC);
1086
1087 sem_function *alias_func = static_cast<sem_function *> (alias_item);
1088
1089 cgraph_node *original = get_node ();
1090 cgraph_node *local_original = NULL;
1091 cgraph_node *alias = alias_func->get_node ();
1092
1093 bool create_wrapper = false;
1094 bool create_alias = false;
1095 bool redirect_callers = false;
1096 bool remove = false;
1097
1098 bool original_discardable = false;
1099 bool original_discarded = false;
1100
1101 bool original_address_matters = original->address_matters_p ();
1102 bool alias_address_matters = alias->address_matters_p ();
1103
1104 if (DECL_EXTERNAL (alias->decl))
1105 {
1106 if (dump_file)
1107 fprintf (dump_file, "Not unifying; alias is external.\n\n");
1108 return false;
1109 }
1110
1111 if (DECL_NO_INLINE_WARNING_P (original->decl)
1112 != DECL_NO_INLINE_WARNING_P (alias->decl))
1113 {
1114 if (dump_file)
1115 fprintf (dump_file,
1116 "Not unifying; "
1117 "DECL_NO_INLINE_WARNING mismatch.\n\n");
1118 return false;
1119 }
1120
1121 /* Do not attempt to mix functions from different user sections;
1122 we do not know what the user intends with those. */
1123 if (((DECL_SECTION_NAME (original->decl) && !original->implicit_section)
1124 || (DECL_SECTION_NAME (alias->decl) && !alias->implicit_section))
1125 && DECL_SECTION_NAME (original->decl) != DECL_SECTION_NAME (alias->decl))
1126 {
1127 if (dump_file)
1128 fprintf (dump_file,
1129 "Not unifying; "
1130 "original and alias are in different sections.\n\n");
1131 return false;
1132 }
1133
1134 /* See if original is in a section that can be discarded if the main
1135 symbol is not used. */
1136
1137 if (original->can_be_discarded_p ())
1138 original_discardable = true;
1139 /* Also consider the case where we have resolution info and we know that
1140 original's definition is not going to be used. In this case we can not
1141 create an alias to the original. */
1142 if (node->resolution != LDPR_UNKNOWN
1143 && !decl_binds_to_current_def_p (node->decl))
1144 original_discardable = original_discarded = true;
1145
1146 /* Creating a symtab alias is the optimal way to merge.
1147 It however can not be used in the following cases:
1148
1149 1) if ORIGINAL and ALIAS may possibly be compared for address equality.
1150 2) if ORIGINAL is in a section that may be discarded by the linker or if
1151 it is an external function where we can not create an alias
1152 (ORIGINAL_DISCARDABLE).
1153 3) if the target does not support symbol aliases.
1154 4) if original and alias lie in different comdat groups.
1155
1156 If we can not produce an alias, we will turn ALIAS into a WRAPPER of ORIGINAL
1157 and/or redirect all callers from ALIAS to ORIGINAL. */
1158 if ((original_address_matters && alias_address_matters)
1159 || (original_discardable
1160 && (!DECL_COMDAT_GROUP (alias->decl)
1161 || (DECL_COMDAT_GROUP (alias->decl)
1162 != DECL_COMDAT_GROUP (original->decl))))
1163 || original_discarded
1164 || !sem_item::target_supports_symbol_aliases_p ()
1165 || DECL_COMDAT_GROUP (alias->decl) != DECL_COMDAT_GROUP (original->decl))
1166 {
1167 /* First see if we can produce wrapper. */
1168
1169 /* Symbol properties that matter for references must be preserved.
1170 TODO: We can produce wrapper, but we need to produce alias of ORIGINAL
1171 with proper properties. */
1172 if (!sem_item::compare_referenced_symbol_properties (NULL, original, alias,
1173 alias->address_taken))
1174 {
1175 if (dump_file)
1176 fprintf (dump_file,
1177 "Wrapper cannot be created because referenced symbol "
1178 "properties mismatch\n");
1179 }
1180 /* Do not turn a function in one comdat group into a wrapper to another
1181 comdat group. Another compiler producing the body of the
1182 other comdat group may make the opposite decision and with unfortunate
1183 linker choices this may close a loop. */
1184 else if (DECL_COMDAT_GROUP (original->decl)
1185 && DECL_COMDAT_GROUP (alias->decl)
1186 && (DECL_COMDAT_GROUP (alias->decl)
1187 != DECL_COMDAT_GROUP (original->decl)))
1188 {
1189 if (dump_file)
1190 fprintf (dump_file,
1191 "Wrapper cannot be created because of COMDAT\n");
1192 }
1193 else if (DECL_STATIC_CHAIN (alias->decl))
1194 {
1195 if (dump_file)
1196 fprintf (dump_file,
1197 "Can not create wrapper of nested functions.\n");
1198 }
1199 /* TODO: We can also deal with variadic functions never calling
1200 VA_START. */
1201 else if (stdarg_p (TREE_TYPE (alias->decl)))
1202 {
1203 if (dump_file)
1204 fprintf (dump_file,
1205 "can not create wrapper of stdarg function.\n");
1206 }
1207 else if (inline_summaries
1208 && inline_summaries->get (alias)->self_size <= 2)
1209 {
1210 if (dump_file)
1211 fprintf (dump_file, "Wrapper creation is not "
1212 "profitable (function is too small).\n");
1213 }
1214 /* If the user took care to mark the function noinline, assume it is
1215 somewhat special and do not try to turn it into a wrapper that can
1216 not be undone by the inliner. */
1217 else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (alias->decl)))
1218 {
1219 if (dump_file)
1220 fprintf (dump_file, "Wrappers are not created for noinline.\n");
1221 }
1222 else
1223 create_wrapper = true;
1224
1225 /* We can redirect local calls in the case that both alias and original
1226 are not interposable. */
1227 redirect_callers
1228 = alias->get_availability () > AVAIL_INTERPOSABLE
1229 && original->get_availability () > AVAIL_INTERPOSABLE
1230 && !alias->instrumented_version;
1231 /* TODO: We can redirect, but we need to produce alias of ORIGINAL
1232 with proper properties. */
1233 if (!sem_item::compare_referenced_symbol_properties (NULL, original, alias,
1234 alias->address_taken))
1235 redirect_callers = false;
1236
1237 if (!redirect_callers && !create_wrapper)
1238 {
1239 if (dump_file)
1240 fprintf (dump_file, "Not unifying; can not redirect callers nor "
1241 "produce wrapper\n\n");
1242 return false;
1243 }
1244
1245 /* Work out the symbol the wrapper should call.
1246 If ORIGINAL is interposable, we need to call a local alias.
1247 Also produce local alias (if possible) as an optimization.
1248
1249 Local aliases can not be created inside comdat groups because that
1250 prevents inlining. */
1251 if (!original_discardable && !original->get_comdat_group ())
1252 {
1253 local_original
1254 = dyn_cast <cgraph_node *> (original->noninterposable_alias ());
1255 if (!local_original
1256 && original->get_availability () > AVAIL_INTERPOSABLE)
1257 local_original = original;
1258 }
1259 /* If we can not use a local alias, fall back to the original
1260 when possible. */
1261 else if (original->get_availability () > AVAIL_INTERPOSABLE)
1262 local_original = original;
1263
1264 /* If original is COMDAT local, we can not really redirect calls outside
1265 of its comdat group to it. */
1266 if (original->comdat_local_p ())
1267 redirect_callers = false;
1268 if (!local_original)
1269 {
1270 if (dump_file)
1271 fprintf (dump_file, "Not unifying; "
1272 "can not produce local alias.\n\n");
1273 return false;
1274 }
1275
1276 if (!redirect_callers && !create_wrapper)
1277 {
1278 if (dump_file)
1279 fprintf (dump_file, "Not unifying; "
1280 "can not redirect callers nor produce a wrapper\n\n");
1281 return false;
1282 }
1283 if (!create_wrapper
1284 && !alias->call_for_symbol_and_aliases (cgraph_node::has_thunk_p,
1285 NULL, true)
1286 && !alias->can_remove_if_no_direct_calls_p ())
1287 {
1288 if (dump_file)
1289 fprintf (dump_file, "Not unifying; can not make wrapper and "
1290 "function has other uses than direct calls\n\n");
1291 return false;
1292 }
1293 }
1294 else
1295 create_alias = true;
1296
1297 if (redirect_callers)
1298 {
1299 int nredirected = redirect_all_callers (alias, local_original);
1300
1301 if (nredirected)
1302 {
1303 alias->icf_merged = true;
1304 local_original->icf_merged = true;
1305
1306 if (dump_file && nredirected)
1307 fprintf (dump_file, "%i local calls have been "
1308 "redirected.\n", nredirected);
1309 }
1310
1311 /* If all callers were redirected, do not produce a wrapper. */
1312 if (alias->can_remove_if_no_direct_calls_p ()
1313 && !alias->has_aliases_p ())
1314 {
1315 create_wrapper = false;
1316 remove = true;
1317 }
1318 gcc_assert (!create_alias);
1319 }
1320 else if (create_alias)
1321 {
1322 alias->icf_merged = true;
1323
1324 /* Remove the function's body. */
1325 ipa_merge_profiles (original, alias);
1326 alias->release_body (true);
1327 alias->reset ();
1328 /* The global symbol may have already produced RTL; clear it. */
1329 ((symtab_node *)alias)->call_for_symbol_and_aliases (clear_decl_rtl,
1330 NULL, true);
1331
1332 /* Create the alias. */
1333 cgraph_node::create_alias (alias_func->decl, decl);
1334 alias->resolve_alias (original);
1335
1336 original->call_for_symbol_thunks_and_aliases
1337 (set_local, (void *)(size_t) original->local_p (), true);
1338
1339 if (dump_file)
1340 fprintf (dump_file, "Unified; Function alias has been created.\n\n");
1341 }
1342 if (create_wrapper)
1343 {
1344 gcc_assert (!create_alias);
1345 alias->icf_merged = true;
1346 local_original->icf_merged = true;
1347
1348 ipa_merge_profiles (local_original, alias, true);
1349 alias->create_wrapper (local_original);
1350
1351 if (dump_file)
1352 fprintf (dump_file, "Unified; Wrapper has been created.\n\n");
1353 }
1354
1355 /* It's possible that redirection can hit thunks that block
1356 redirection opportunities. */
1357 gcc_assert (alias->icf_merged || remove || redirect_callers);
1358 original->icf_merged = true;
1359
1360 /* Inform the inliner about cross-module merging. */
1361 if ((original->lto_file_data || alias->lto_file_data)
1362 && original->lto_file_data != alias->lto_file_data)
1363 local_original->merged = original->merged = true;
1364
1365 if (remove)
1366 {
1367 ipa_merge_profiles (original, alias);
1368 alias->release_body ();
1369 alias->reset ();
1370 alias->body_removed = true;
1371 alias->icf_merged = true;
1372 if (dump_file)
1373 fprintf (dump_file, "Unified; Function body was removed.\n");
1374 }
1375
1376 return true;
1377 }
1378
1379 /* Semantic item initialization function. */
1380
1381 void
1382 sem_function::init (void)
1383 {
1384 if (in_lto_p)
1385 get_node ()->get_untransformed_body ();
1386
1387 tree fndecl = node->decl;
1388 function *func = DECL_STRUCT_FUNCTION (fndecl);
1389
1390 gcc_assert (func);
1391 gcc_assert (SSANAMES (func));
1392
1393 ssa_names_size = SSANAMES (func)->length ();
1394 node = node;
1395
1396 decl = fndecl;
1397 region_tree = func->eh->region_tree;
1398
1399 /* Iterate over all function arguments. */
1400 arg_count = count_formal_params (fndecl);
1401
1402 edge_count = n_edges_for_fn (func);
1403 cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
1404 if (!cnode->thunk.thunk_p)
1405 {
1406 cfg_checksum = coverage_compute_cfg_checksum (func);
1407
1408 inchash::hash hstate;
1409
1410 basic_block bb;
1411 FOR_EACH_BB_FN (bb, func)
1412 {
1413 unsigned nondbg_stmt_count = 0;
1414
1415 edge e;
1416 for (edge_iterator ei = ei_start (bb->preds); ei_cond (ei, &e);
1417 ei_next (&ei))
1418 cfg_checksum = iterative_hash_host_wide_int (e->flags,
1419 cfg_checksum);
1420
1421 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1422 gsi_next (&gsi))
1423 {
1424 gimple stmt = gsi_stmt (gsi);
1425
1426 if (gimple_code (stmt) != GIMPLE_DEBUG
1427 && gimple_code (stmt) != GIMPLE_PREDICT)
1428 {
1429 hash_stmt (stmt, hstate);
1430 nondbg_stmt_count++;
1431 }
1432 }
1433
1434 gcode_hash = hstate.end ();
1435 bb_sizes.safe_push (nondbg_stmt_count);
1436
1437 /* Insert the basic block into the bb_sorted vector. */
1438 sem_bb *semantic_bb = new sem_bb (bb, nondbg_stmt_count,
1439 EDGE_COUNT (bb->preds)
1440 + EDGE_COUNT (bb->succs));
1441
1442 bb_sorted.safe_push (semantic_bb);
1443 }
1444 }
1445 else
1446 {
1447 cfg_checksum = 0;
1448 inchash::hash hstate;
1449 hstate.add_wide_int (cnode->thunk.fixed_offset);
1450 hstate.add_wide_int (cnode->thunk.virtual_value);
1451 hstate.add_flag (cnode->thunk.this_adjusting);
1452 hstate.add_flag (cnode->thunk.virtual_offset_p);
1453 hstate.add_flag (cnode->thunk.add_pointer_bounds_args);
1454 gcode_hash = hstate.end ();
1455 }
1456
1457 parse_tree_args ();
1458 }
1459
1460 /* Accumulate to HSTATE a hash of expression EXP.
1461 Identical to inchash::add_expr, but guaranteed to be stable across LTO
1462 and DECL equality classes. */
1463
1464 void
1465 sem_item::add_expr (const_tree exp, inchash::hash &hstate)
1466 {
1467 if (exp == NULL_TREE)
1468 {
1469 hstate.merge_hash (0);
1470 return;
1471 }
1472
1473 /* Handled components can be matched in a careful way, proving equivalence
1474 even if they syntactically differ. Just skip them. */
1475 STRIP_NOPS (exp);
1476 while (handled_component_p (exp))
1477 exp = TREE_OPERAND (exp, 0);
1478
1479 enum tree_code code = TREE_CODE (exp);
1480 hstate.add_int (code);
1481
1482 switch (code)
1483 {
1484 /* Use inchash::add_expr for everything that is LTO stable. */
1485 case VOID_CST:
1486 case INTEGER_CST:
1487 case REAL_CST:
1488 case FIXED_CST:
1489 case STRING_CST:
1490 case COMPLEX_CST:
1491 case VECTOR_CST:
1492 inchash::add_expr (exp, hstate);
1493 break;
1494 case CONSTRUCTOR:
1495 {
1496 unsigned HOST_WIDE_INT idx;
1497 tree value;
1498
1499 hstate.add_wide_int (int_size_in_bytes (TREE_TYPE (exp)));
1500
1501 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
1502 if (value)
1503 add_expr (value, hstate);
1504 break;
1505 }
1506 case ADDR_EXPR:
1507 case FDESC_EXPR:
1508 add_expr (get_base_address (TREE_OPERAND (exp, 0)), hstate);
1509 break;
1510 case SSA_NAME:
1511 case VAR_DECL:
1512 case CONST_DECL:
1513 case PARM_DECL:
1514 hstate.add_wide_int (int_size_in_bytes (TREE_TYPE (exp)));
1515 break;
1516 case MEM_REF:
1517 case POINTER_PLUS_EXPR:
1518 case MINUS_EXPR:
1519 case RANGE_EXPR:
1520 add_expr (TREE_OPERAND (exp, 0), hstate);
1521 add_expr (TREE_OPERAND (exp, 1), hstate);
1522 break;
1523 case PLUS_EXPR:
1524 {
1525 inchash::hash one, two;
1526 add_expr (TREE_OPERAND (exp, 0), one);
1527 add_expr (TREE_OPERAND (exp, 1), two);
1528 hstate.add_commutative (one, two);
1529 }
1530 break;
1531 CASE_CONVERT:
1532 hstate.add_wide_int (int_size_in_bytes (TREE_TYPE (exp)));
1533 return add_expr (TREE_OPERAND (exp, 0), hstate);
1534 default:
1535 break;
1536 }
1537 }
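/* Illustrative note (assumption, not taken from the sources): because
   add_expr strips handled components and ADDR_EXPR hashes only the base
   address, expressions such as

     &s.first_field   and   &s.second_field

   receive the same hash here; they are only told apart later by the deep
   comparison in the equals functions, so this merely keeps the hash stable
   across LTO and DECL equality classes.  */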
1538
1539 /* Accumulate to HSTATE a hash of type TYPE.
1540 Types that may end up being compatible after LTO type merging need to have
1541 the same hash. */
1542
1543 void
1544 sem_item::add_type (const_tree type, inchash::hash &hstate)
1545 {
1546 if (type == NULL_TREE)
1547 {
1548 hstate.merge_hash (0);
1549 return;
1550 }
1551
1552 type = TYPE_MAIN_VARIANT (type);
1553 if (TYPE_CANONICAL (type))
1554 type = TYPE_CANONICAL (type);
1555
1556 if (!AGGREGATE_TYPE_P (type))
1557 hstate.add_int (TYPE_MODE (type));
1558
1559 if (TREE_CODE (type) == COMPLEX_TYPE)
1560 {
1561 hstate.add_int (COMPLEX_TYPE);
1562 sem_item::add_type (TREE_TYPE (type), hstate);
1563 }
1564 else if (INTEGRAL_TYPE_P (type))
1565 {
1566 hstate.add_int (INTEGER_TYPE);
1567 hstate.add_flag (TYPE_UNSIGNED (type));
1568 hstate.add_int (TYPE_PRECISION (type));
1569 }
1570 else if (VECTOR_TYPE_P (type))
1571 {
1572 hstate.add_int (VECTOR_TYPE);
1573 hstate.add_int (TYPE_PRECISION (type));
1574 sem_item::add_type (TREE_TYPE (type), hstate);
1575 }
1576 else if (TREE_CODE (type) == ARRAY_TYPE)
1577 {
1578 hstate.add_int (ARRAY_TYPE);
1579 /* Do not hash size, so complete and incomplete types can match. */
1580 sem_item::add_type (TREE_TYPE (type), hstate);
1581 }
1582 else if (RECORD_OR_UNION_TYPE_P (type))
1583 {
1584 hashval_t *val = optimizer->m_type_hash_cache.get (type);
1585
1586 if (!val)
1587 {
1588 inchash::hash hstate2;
1589 unsigned nf;
1590 tree f;
1591 hashval_t hash;
1592
1593 hstate2.add_int (RECORD_TYPE);
1594 gcc_assert (COMPLETE_TYPE_P (type));
1595
1596 for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
1597 if (TREE_CODE (f) == FIELD_DECL)
1598 {
1599 add_type (TREE_TYPE (f), hstate2);
1600 nf++;
1601 }
1602
1603 hstate2.add_int (nf);
1604 hash = hstate2.end ();
1605 hstate.add_wide_int (hash);
1606 optimizer->m_type_hash_cache.put (type, hash);
1607 }
1608 else
1609 hstate.add_wide_int (*val);
1610 }
1611 }
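/* Illustrative note (assumption, not taken from the sources): with the
   hashing above, "int" and "unsigned int" fall into different buckets
   because TYPE_UNSIGNED and TYPE_PRECISION are mixed in, while "int[]" and
   "int[16]" hash the same because the array size is deliberately not hashed,
   so complete and incomplete array types can still match.  */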
1612
1613 /* Improve accumulated hash for HSTATE based on a gimple statement STMT. */
1614
1615 void
1616 sem_function::hash_stmt (gimple stmt, inchash::hash &hstate)
1617 {
1618 enum gimple_code code = gimple_code (stmt);
1619
1620 hstate.add_int (code);
1621
1622 switch (code)
1623 {
1624 case GIMPLE_SWITCH:
1625 add_expr (gimple_switch_index (as_a <gswitch *> (stmt)), hstate);
1626 break;
1627 case GIMPLE_ASSIGN:
1628 hstate.add_int (gimple_assign_rhs_code (stmt));
1629 if (commutative_tree_code (gimple_assign_rhs_code (stmt))
1630 || commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
1631 {
1632 inchash::hash one, two;
1633
1634 add_expr (gimple_assign_rhs1 (stmt), one);
1635 add_type (TREE_TYPE (gimple_assign_rhs1 (stmt)), one);
1636 add_expr (gimple_assign_rhs2 (stmt), two);
1637 hstate.add_commutative (one, two);
1638 if (commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
1639 {
1640 add_expr (gimple_assign_rhs3 (stmt), hstate);
1641 add_type (TREE_TYPE (gimple_assign_rhs3 (stmt)), hstate);
1642 }
1643 add_expr (gimple_assign_lhs (stmt), hstate);
1644 add_type (TREE_TYPE (gimple_assign_lhs (stmt)), two);
1645 break;
1646 }
1647 /* ... fall through ... */
1648 case GIMPLE_CALL:
1649 case GIMPLE_ASM:
1650 case GIMPLE_COND:
1651 case GIMPLE_GOTO:
1652 case GIMPLE_RETURN:
1653 /* All these statements are equivalent if their operands are. */
1654 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
1655 {
1656 add_expr (gimple_op (stmt, i), hstate);
1657 if (gimple_op (stmt, i))
1658 add_type (TREE_TYPE (gimple_op (stmt, i)), hstate);
1659 }
1660 default:
1661 break;
1662 }
1663 }
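/* Illustrative note (assumption, not taken from the sources): the operand
   hashes of a commutative operation are combined with add_commutative, which
   is order-independent, so e.g.

     x = a + b;   and   x = b + a;

   contribute the same value to the function hash and the two statements can
   still end up in the same congruence class.  */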
1664
1665
1666 /* Return true if polymorphic comparison must be processed. */
1667
1668 bool
1669 sem_function::compare_polymorphic_p (void)
1670 {
1671 struct cgraph_edge *e;
1672
1673 if (!opt_for_fn (get_node ()->decl, flag_devirtualize))
1674 return false;
1675 if (get_node ()->indirect_calls != NULL)
1676 return true;
1677 /* TODO: We can do simple propagation determining what calls may lead to
1678 a polymorphic call. */
1679 for (e = get_node ()->callees; e; e = e->next_callee)
1680 if (e->callee->definition
1681 && opt_for_fn (e->callee->decl, flag_devirtualize))
1682 return true;
1683 return false;
1684 }
1685
1686 /* For a given call graph NODE, the function constructs a new
1687 semantic function item. */
1688
1689 sem_function *
1690 sem_function::parse (cgraph_node *node, bitmap_obstack *stack)
1691 {
1692 tree fndecl = node->decl;
1693 function *func = DECL_STRUCT_FUNCTION (fndecl);
1694
1695 if (!func || (!node->has_gimple_body_p () && !node->thunk.thunk_p))
1696 return NULL;
1697
1698 if (lookup_attribute_by_prefix ("omp ", DECL_ATTRIBUTES (node->decl)) != NULL)
1699 return NULL;
1700
1701 sem_function *f = new sem_function (node, 0, stack);
1702
1703 f->init ();
1704
1705 return f;
1706 }
1707
1708 /* Parses function arguments and result type. */
1709
1710 void
1711 sem_function::parse_tree_args (void)
1712 {
1713 tree result;
1714
1715 if (arg_types.exists ())
1716 arg_types.release ();
1717
1718 arg_types.create (4);
1719 tree fnargs = DECL_ARGUMENTS (decl);
1720
1721 for (tree parm = fnargs; parm; parm = DECL_CHAIN (parm))
1722 arg_types.safe_push (DECL_ARG_TYPE (parm));
1723
1724 /* Function result type. */
1725 result = DECL_RESULT (decl);
1726 result_type = result ? TREE_TYPE (result) : NULL;
1727
1728 /* During WPA, we can get the argument types the following way. */
1729 if (!fnargs)
1730 {
1731 tree type = TYPE_ARG_TYPES (TREE_TYPE (decl));
1732 for (tree parm = type; parm; parm = TREE_CHAIN (parm))
1733 arg_types.safe_push (TYPE_CANONICAL (TREE_VALUE (parm)));
1734
1735 result_type = TREE_TYPE (TREE_TYPE (decl));
1736 }
1737 }
1738
1739 /* For given basic blocks BB1 and BB2 (from functions FUNC1 and FUNC2),
1740 return true if the PHI nodes are semantically equivalent in these blocks. */
1741
1742 bool
1743 sem_function::compare_phi_node (basic_block bb1, basic_block bb2)
1744 {
1745 gphi_iterator si1, si2;
1746 gphi *phi1, *phi2;
1747 unsigned size1, size2, i;
1748 tree t1, t2;
1749 edge e1, e2;
1750
1751 gcc_assert (bb1 != NULL);
1752 gcc_assert (bb2 != NULL);
1753
1754 si2 = gsi_start_phis (bb2);
1755 for (si1 = gsi_start_phis (bb1); !gsi_end_p (si1);
1756 gsi_next (&si1))
1757 {
1758 gsi_next_nonvirtual_phi (&si1);
1759 gsi_next_nonvirtual_phi (&si2);
1760
1761 if (gsi_end_p (si1) && gsi_end_p (si2))
1762 break;
1763
1764 if (gsi_end_p (si1) || gsi_end_p (si2))
1765 return return_false();
1766
1767 phi1 = si1.phi ();
1768 phi2 = si2.phi ();
1769
1770 tree phi_result1 = gimple_phi_result (phi1);
1771 tree phi_result2 = gimple_phi_result (phi2);
1772
1773 if (!m_checker->compare_operand (phi_result1, phi_result2))
1774 return return_false_with_msg ("PHI results are different");
1775
1776 size1 = gimple_phi_num_args (phi1);
1777 size2 = gimple_phi_num_args (phi2);
1778
1779 if (size1 != size2)
1780 return return_false ();
1781
1782 for (i = 0; i < size1; ++i)
1783 {
1784 t1 = gimple_phi_arg (phi1, i)->def;
1785 t2 = gimple_phi_arg (phi2, i)->def;
1786
1787 if (!m_checker->compare_operand (t1, t2))
1788 return return_false ();
1789
1790 e1 = gimple_phi_arg_edge (phi1, i);
1791 e2 = gimple_phi_arg_edge (phi2, i);
1792
1793 if (!m_checker->compare_edge (e1, e2))
1794 return return_false ();
1795 }
1796
1797 gsi_next (&si2);
1798 }
1799
1800 return true;
1801 }
1802
1803 /* Returns true if tree T can be compared as a handled component. */
1804
1805 bool
1806 sem_function::icf_handled_component_p (tree t)
1807 {
1808 tree_code tc = TREE_CODE (t);
1809
1810 return (handled_component_p (t)
1811 || tc == ADDR_EXPR || tc == MEM_REF || tc == OBJ_TYPE_REF);
1812 }
1813
1814 /* Returns true if the basic block dictionary BB_DICT maps the SOURCE index
1815 to TARGET; a previously unseen SOURCE is recorded as mapping to TARGET. */
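/* Note that both indexes are shifted by one inside the function, so that a
   zero entry in BB_DICT can represent a slot that has not been assigned yet.  */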
1816
1817 bool
1818 sem_function::bb_dict_test (vec<int> *bb_dict, int source, int target)
1819 {
1820 source++;
1821 target++;
1822
1823 if (bb_dict->length () <= (unsigned)source)
1824 bb_dict->safe_grow_cleared (source + 1);
1825
1826 if ((*bb_dict)[source] == 0)
1827 {
1828 (*bb_dict)[source] = target;
1829 return true;
1830 }
1831 else
1832 return (*bb_dict)[source] == target;
1833 }
1834
1835
1836 /* Semantic variable constructor that uses STACK as bitmap memory stack. */
1837
1838 sem_variable::sem_variable (bitmap_obstack *stack): sem_item (VAR, stack)
1839 {
1840 }
1841
1842 /* Constructor based on varpool node _NODE with computed hash _HASH.
1843 Bitmap STACK is used for memory allocation. */
1844
1845 sem_variable::sem_variable (varpool_node *node, hashval_t _hash,
1846 bitmap_obstack *stack): sem_item(VAR,
1847 node, _hash, stack)
1848 {
1849 gcc_checking_assert (node);
1850 gcc_checking_assert (get_node ());
1851 }
1852
1853 /* Fast equality function based on knowledge available in WPA. */
1854
1855 bool
1856 sem_variable::equals_wpa (sem_item *item,
1857 hash_map <symtab_node *, sem_item *> &ignored_nodes)
1858 {
1859 gcc_assert (item->type == VAR);
1860
1861 if (node->num_references () != item->node->num_references ())
1862 return return_false_with_msg ("different number of references");
1863
1864 if (DECL_TLS_MODEL (decl) || DECL_TLS_MODEL (item->decl))
1865 return return_false_with_msg ("TLS model");
1866
1867 /* DECL_ALIGN is safe to merge, because we will always choose the largest
1868 alignment out of all aliases. */
1869
1870 if (DECL_VIRTUAL_P (decl) != DECL_VIRTUAL_P (item->decl))
1871 return return_false_with_msg ("Virtual flag mismatch");
1872
1873 if (DECL_SIZE (decl) != DECL_SIZE (item->decl)
1874 && ((!DECL_SIZE (decl) || !DECL_SIZE (item->decl))
1875 || !operand_equal_p (DECL_SIZE (decl),
1876 DECL_SIZE (item->decl), OEP_ONLY_CONST)))
1877 return return_false_with_msg ("size mismatch");
1878
1879 /* Do not attempt to mix data from different user sections;
1880 we do not know what the user intends with those. */
1881 if (((DECL_SECTION_NAME (decl) && !node->implicit_section)
1882 || (DECL_SECTION_NAME (item->decl) && !item->node->implicit_section))
1883 && DECL_SECTION_NAME (decl) != DECL_SECTION_NAME (item->decl))
1884 return return_false_with_msg ("user section mismatch");
1885
1886 if (DECL_IN_TEXT_SECTION (decl) != DECL_IN_TEXT_SECTION (item->decl))
1887 return return_false_with_msg ("text section");
1888
1889 ipa_ref *ref = NULL, *ref2 = NULL;
1890 for (unsigned i = 0; node->iterate_reference (i, ref); i++)
1891 {
1892 item->node->iterate_reference (i, ref2);
1893
1894 if (ref->use != ref2->use)
1895 return return_false_with_msg ("reference use mismatch");
1896
1897 if (!compare_symbol_references (ignored_nodes,
1898 ref->referred, ref2->referred,
1899 ref->address_matters_p ()))
1900 return false;
1901 }
1902
1903 return true;
1904 }
1905
1906 /* Returns true if the item is equal to ITEM given as argument. */
1907
1908 bool
1909 sem_variable::equals (sem_item *item,
1910 hash_map <symtab_node *, sem_item *> &)
1911 {
1912 gcc_assert (item->type == VAR);
1913 bool ret;
1914
1915 if (DECL_INITIAL (decl) == error_mark_node && in_lto_p)
1916 dyn_cast <varpool_node *>(node)->get_constructor ();
1917 if (DECL_INITIAL (item->decl) == error_mark_node && in_lto_p)
1918 dyn_cast <varpool_node *>(item->node)->get_constructor ();
1919
1920 /* As seen in PR ipa/65303 we have to compare the types of the variables. */
1921 if (!func_checker::compatible_types_p (TREE_TYPE (decl),
1922 TREE_TYPE (item->decl)))
1923 return return_false_with_msg ("variable types are different");
1924
1925 ret = sem_variable::equals (DECL_INITIAL (decl),
1926 DECL_INITIAL (item->node->decl));
1927 if (dump_file && (dump_flags & TDF_DETAILS))
1928 fprintf (dump_file,
1929 "Equals called for vars:%s:%s (%u:%u) (%s:%s) with result: %s\n\n",
1930 xstrdup_for_dump (node->name()),
1931 xstrdup_for_dump (item->node->name ()),
1932 node->order, item->node->order,
1933 xstrdup_for_dump (node->asm_name ()),
1934 xstrdup_for_dump (item->node->asm_name ()), ret ? "true" : "false");
1935
1936 return ret;
1937 }
1938
1939 /* Compares trees T1 and T2 for semantic equality. */
1940
1941 bool
1942 sem_variable::equals (tree t1, tree t2)
1943 {
1944 if (!t1 || !t2)
1945 return return_with_debug (t1 == t2);
1946 if (t1 == t2)
1947 return true;
1948 tree_code tc1 = TREE_CODE (t1);
1949 tree_code tc2 = TREE_CODE (t2);
1950
1951 if (tc1 != tc2)
1952 return return_false_with_msg ("TREE_CODE mismatch");
1953
1954 switch (tc1)
1955 {
1956 case CONSTRUCTOR:
1957 {
1958 vec<constructor_elt, va_gc> *v1, *v2;
1959 unsigned HOST_WIDE_INT idx;
1960
1961 enum tree_code typecode = TREE_CODE (TREE_TYPE (t1));
1962 if (typecode != TREE_CODE (TREE_TYPE (t2)))
1963 return return_false_with_msg ("constructor type mismatch");
1964
1965 if (typecode == ARRAY_TYPE)
1966 {
1967 HOST_WIDE_INT size_1 = int_size_in_bytes (TREE_TYPE (t1));
1968 /* For arrays, check that the sizes all match. */
1969 if (TYPE_MODE (TREE_TYPE (t1)) != TYPE_MODE (TREE_TYPE (t2))
1970 || size_1 == -1
1971 || size_1 != int_size_in_bytes (TREE_TYPE (t2)))
1972 return return_false_with_msg ("constructor array size mismatch");
1973 }
1974 else if (!func_checker::compatible_types_p (TREE_TYPE (t1),
1975 TREE_TYPE (t2)))
1976 return return_false_with_msg ("constructor type incompatible");
1977
1978 v1 = CONSTRUCTOR_ELTS (t1);
1979 v2 = CONSTRUCTOR_ELTS (t2);
1980 if (vec_safe_length (v1) != vec_safe_length (v2))
1981 return return_false_with_msg ("constructor number of elts mismatch");
1982
1983 for (idx = 0; idx < vec_safe_length (v1); ++idx)
1984 {
1985 constructor_elt *c1 = &(*v1)[idx];
1986 constructor_elt *c2 = &(*v2)[idx];
1987
1988 /* Check that each value is the same... */
1989 if (!sem_variable::equals (c1->value, c2->value))
1990 return false;
1991 /* ... and that they apply to the same fields! */
1992 if (!sem_variable::equals (c1->index, c2->index))
1993 return false;
1994 }
1995 return true;
1996 }
1997 case MEM_REF:
1998 {
1999 tree x1 = TREE_OPERAND (t1, 0);
2000 tree x2 = TREE_OPERAND (t2, 0);
2001 tree y1 = TREE_OPERAND (t1, 1);
2002 tree y2 = TREE_OPERAND (t2, 1);
2003
2004 if (!func_checker::compatible_types_p (TREE_TYPE (x1), TREE_TYPE (x2)))
2005 return return_false ();
2006
2007 /* Type of the offset on MEM_REF does not matter. */
2008 return return_with_debug (sem_variable::equals (x1, x2)
2009 && wi::to_offset (y1)
2010 == wi::to_offset (y2));
2011 }
2012 case ADDR_EXPR:
2013 case FDESC_EXPR:
2014 {
2015 tree op1 = TREE_OPERAND (t1, 0);
2016 tree op2 = TREE_OPERAND (t2, 0);
2017 return sem_variable::equals (op1, op2);
2018 }
2019 /* References to other vars/decls are compared using ipa-ref. */
2020 case FUNCTION_DECL:
2021 case VAR_DECL:
2022 if (decl_in_symtab_p (t1) && decl_in_symtab_p (t2))
2023 return true;
2024 return return_false_with_msg ("Declaration mismatch");
2025 case CONST_DECL:
2026 /* TODO: We can check CONST_DECL by its DECL_INITIAL, but for that we
2027 need to process its VAR/FUNCTION references without relying on ipa-ref
2028 compare. */
2029 case FIELD_DECL:
2030 case LABEL_DECL:
2031 return return_false_with_msg ("Declaration mismatch");
2032 case INTEGER_CST:
2033 /* Integer constants are the same only if their types have the same width. */
2034 if (TYPE_PRECISION (TREE_TYPE (t1)) != TYPE_PRECISION (TREE_TYPE (t2)))
2035 return return_false_with_msg ("INTEGER_CST precision mismatch");
2036 if (TYPE_MODE (TREE_TYPE (t1)) != TYPE_MODE (TREE_TYPE (t2)))
2037 return return_false_with_msg ("INTEGER_CST mode mismatch");
2038 return return_with_debug (tree_int_cst_equal (t1, t2));
2039 case STRING_CST:
2040 if (TYPE_MODE (TREE_TYPE (t1)) != TYPE_MODE (TREE_TYPE (t2)))
2041 return return_false_with_msg ("STRING_CST mode mismatch");
2042 if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2))
2043 return return_false_with_msg ("STRING_CST length mismatch");
2044 if (memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
2045 TREE_STRING_LENGTH (t1)))
2046 return return_false_with_msg ("STRING_CST mismatch");
2047 return true;
2048 case FIXED_CST:
2049 /* Fixed constants are the same only if their types have the same width. */
2050 if (TYPE_PRECISION (TREE_TYPE (t1)) != TYPE_PRECISION (TREE_TYPE (t2)))
2051 return return_false_with_msg ("FIXED_CST precision mismatch");
2052
2053 return return_with_debug (FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (t1),
2054 TREE_FIXED_CST (t2)));
2055 case COMPLEX_CST:
2056 return (sem_variable::equals (TREE_REALPART (t1), TREE_REALPART (t2))
2057 && sem_variable::equals (TREE_IMAGPART (t1), TREE_IMAGPART (t2)));
2058 case REAL_CST:
2059 /* Real constants are the same only if their types have the same width. */
2060 if (TYPE_PRECISION (TREE_TYPE (t1)) != TYPE_PRECISION (TREE_TYPE (t2)))
2061 return return_false_with_msg ("REAL_CST precision mismatch");
2062 return return_with_debug (REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1),
2063 TREE_REAL_CST (t2)));
2064 case VECTOR_CST:
2065 {
2066 unsigned i;
2067
2068 if (VECTOR_CST_NELTS (t1) != VECTOR_CST_NELTS (t2))
2069 return return_false_with_msg ("VECTOR_CST nelts mismatch");
2070
2071 for (i = 0; i < VECTOR_CST_NELTS (t1); ++i)
2072 if (!sem_variable::equals (VECTOR_CST_ELT (t1, i),
2073 VECTOR_CST_ELT (t2, i)))
2074 return 0;
2075
2076 return 1;
2077 }
2078 case ARRAY_REF:
2079 case ARRAY_RANGE_REF:
2080 {
2081 tree x1 = TREE_OPERAND (t1, 0);
2082 tree x2 = TREE_OPERAND (t2, 0);
2083 tree y1 = TREE_OPERAND (t1, 1);
2084 tree y2 = TREE_OPERAND (t2, 1);
2085
2086 if (!sem_variable::equals (x1, x2) || !sem_variable::equals (y1, y2))
2087 return false;
2088 if (!sem_variable::equals (array_ref_low_bound (t1),
2089 array_ref_low_bound (t2)))
2090 return false;
2091 if (!sem_variable::equals (array_ref_element_size (t1),
2092 array_ref_element_size (t2)))
2093 return false;
2094 return true;
2095 }
2096
2097 case COMPONENT_REF:
2098 case POINTER_PLUS_EXPR:
2099 case PLUS_EXPR:
2100 case MINUS_EXPR:
2101 case RANGE_EXPR:
2102 {
2103 tree x1 = TREE_OPERAND (t1, 0);
2104 tree x2 = TREE_OPERAND (t2, 0);
2105 tree y1 = TREE_OPERAND (t1, 1);
2106 tree y2 = TREE_OPERAND (t2, 1);
2107
2108 return sem_variable::equals (x1, x2) && sem_variable::equals (y1, y2);
2109 }
2110
2111 CASE_CONVERT:
2112 case VIEW_CONVERT_EXPR:
2113 if (!func_checker::compatible_types_p (TREE_TYPE (t1), TREE_TYPE (t2)))
2114 return return_false ();
2115 return sem_variable::equals (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
2116 case ERROR_MARK:
2117 return return_false_with_msg ("ERROR_MARK");
2118 default:
2119 return return_false_with_msg ("Unknown TREE code reached");
2120 }
2121 }
2122
2123 /* Parser function that visits a varpool NODE. */
2124
2125 sem_variable *
2126 sem_variable::parse (varpool_node *node, bitmap_obstack *stack)
2127 {
2128 if (TREE_THIS_VOLATILE (node->decl) || DECL_HARD_REGISTER (node->decl)
2129 || node->alias)
2130 return NULL;
2131
2132 sem_variable *v = new sem_variable (node, 0, stack);
2133
2134 v->init ();
2135
2136 return v;
2137 }
2138
2139 /* Hash function that is independent of the symbol's references. */
2140
2141 hashval_t
2142 sem_variable::get_hash (void)
2143 {
2144 if (hash)
2145 return hash;
2146
2147 /* All WPA streamed in symbols should have their hashes computed at compile
2148 time. At this point, the constructor may not be in memory at all.
2149 DECL_INITIAL (decl) would be error_mark_node in that case. */
2150 gcc_assert (!node->lto_file_data);
2151 tree ctor = DECL_INITIAL (decl);
2152 inchash::hash hstate;
2153
2154 hstate.add_int (456346417);
2155 if (DECL_SIZE (decl) && tree_fits_shwi_p (DECL_SIZE (decl)))
2156 hstate.add_wide_int (tree_to_shwi (DECL_SIZE (decl)));
2157 add_expr (ctor, hstate);
2158 hash = hstate.end ();
2159
2160 return hash;
2161 }
2162
2163 /* Merges instance with an ALIAS_ITEM, where alias, thunk or redirection can
2164 be applied. */
2165
2166 bool
2167 sem_variable::merge (sem_item *alias_item)
2168 {
2169 gcc_assert (alias_item->type == VAR);
2170
2171 if (!sem_item::target_supports_symbol_aliases_p ())
2172 {
2173 if (dump_file)
2174 fprintf (dump_file, "Not unifying; "
2175 "Symbol aliases are not supported by target\n\n");
2176 return false;
2177 }
2178
2179 if (DECL_EXTERNAL (alias_item->decl))
2180 {
2181 if (dump_file)
2182 fprintf (dump_file, "Not unifying; alias is external.\n\n");
2183 return false;
2184 }
2185
2186 sem_variable *alias_var = static_cast<sem_variable *> (alias_item);
2187
2188 varpool_node *original = get_node ();
2189 varpool_node *alias = alias_var->get_node ();
2190 bool original_discardable = false;
2191
2192 bool original_address_matters = original->address_matters_p ();
2193 bool alias_address_matters = alias->address_matters_p ();
2194
2195 /* See if original is in a section that can be discarded if the main
2196 symbol is not used.
2197 Also consider the case where we have resolution info and we know that
2198 the original's definition is not going to be used. In this case we cannot
2199 create an alias to the original. */
2200 if (original->can_be_discarded_p ()
2201 || (node->resolution != LDPR_UNKNOWN
2202 && !decl_binds_to_current_def_p (node->decl)))
2203 original_discardable = true;
2204
2205 gcc_assert (!TREE_ASM_WRITTEN (alias->decl));
2206
2207 /* Constant pool machinery is not quite ready for aliases.
2208 TODO: varasm code contains logic for merging DECL_IN_CONSTANT_POOL.
2209 For LTO, merging does not happen, which is an important missing feature.
2210 We can enable merging with LTO if the DECL_IN_CONSTANT_POOL
2211 flag is dropped and a non-local symbol name is assigned. */
2212 if (DECL_IN_CONSTANT_POOL (alias->decl)
2213 || DECL_IN_CONSTANT_POOL (original->decl))
2214 {
2215 if (dump_file)
2216 fprintf (dump_file,
2217 "Not unifying; constant pool variables.\n\n");
2218 return false;
2219 }
2220
2221 /* Do not attempt to mix variables from different user sections;
2222 we do not know what the user intends with those. */
2223 if (((DECL_SECTION_NAME (original->decl) && !original->implicit_section)
2224 || (DECL_SECTION_NAME (alias->decl) && !alias->implicit_section))
2225 && DECL_SECTION_NAME (original->decl) != DECL_SECTION_NAME (alias->decl))
2226 {
2227 if (dump_file)
2228 fprintf (dump_file,
2229 "Not unifying; "
2230 "original and alias are in different sections.\n\n");
2231 return false;
2232 }
2233
2234 /* We cannot merge if address comparison matters. */
2235 if (original_address_matters && alias_address_matters
2236 && flag_merge_constants < 2)
2237 {
2238 if (dump_file)
2239 fprintf (dump_file,
2240 "Not unifying; "
2241 "address of original and alias may be compared.\n\n");
2242 return false;
2243 }
2244 if (DECL_COMDAT_GROUP (original->decl) != DECL_COMDAT_GROUP (alias->decl))
2245 {
2246 if (dump_file)
2247 fprintf (dump_file, "Not unifying; alias cannot be created; "
2248 "across comdat group boundary\n\n");
2249
2250 return false;
2251 }
2252
2253 if (original_discardable)
2254 {
2255 if (dump_file)
2256 fprintf (dump_file, "Not unifying; alias cannot be created; "
2257 "target is discardable\n\n");
2258
2259 return false;
2260 }
2261 else
2262 {
2263 gcc_assert (!original->alias);
2264 gcc_assert (!alias->alias);
2265
2266 alias->analyzed = false;
2267
2268 DECL_INITIAL (alias->decl) = NULL;
2269 ((symtab_node *)alias)->call_for_symbol_and_aliases (clear_decl_rtl,
2270 NULL, true);
2271 alias->need_bounds_init = false;
2272 alias->remove_all_references ();
2273 if (TREE_ADDRESSABLE (alias->decl))
2274 original->call_for_symbol_and_aliases (set_addressable, NULL, true);
2275
2276 varpool_node::create_alias (alias_var->decl, decl);
2277 alias->resolve_alias (original);
2278
2279 if (dump_file)
2280 fprintf (dump_file, "Unified; Variable alias has been created.\n\n");
2281
2282 return true;
2283 }
2284 }
2285
2286 /* Dump symbol to FILE. */
2287
2288 void
2289 sem_variable::dump_to_file (FILE *file)
2290 {
2291 gcc_assert (file);
2292
2293 print_node (file, "", decl, 0);
2294 fprintf (file, "\n\n");
2295 }
2296
2297 unsigned int sem_item_optimizer::class_id = 0;
2298
2299 sem_item_optimizer::sem_item_optimizer (): worklist (0), m_classes (0),
2300 m_classes_count (0), m_cgraph_node_hooks (NULL), m_varpool_node_hooks (NULL)
2301 {
2302 m_items.create (0);
2303 bitmap_obstack_initialize (&m_bmstack);
2304 }
2305
2306 sem_item_optimizer::~sem_item_optimizer ()
2307 {
2308 for (unsigned int i = 0; i < m_items.length (); i++)
2309 delete m_items[i];
2310
2311 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
2312 it != m_classes.end (); ++it)
2313 {
2314 for (unsigned int i = 0; i < (*it)->classes.length (); i++)
2315 delete (*it)->classes[i];
2316
2317 (*it)->classes.release ();
2318 free (*it);
2319 }
2320
2321 m_items.release ();
2322
2323 bitmap_obstack_release (&m_bmstack);
2324 }
2325
2326 /* Write IPA ICF summary for symbols. */
2327
2328 void
2329 sem_item_optimizer::write_summary (void)
2330 {
2331 unsigned int count = 0;
2332
2333 output_block *ob = create_output_block (LTO_section_ipa_icf);
2334 lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
2335 ob->symbol = NULL;
2336
2337 /* Calculate number of symbols to be serialized. */
2338 for (lto_symtab_encoder_iterator lsei = lsei_start_in_partition (encoder);
2339 !lsei_end_p (lsei);
2340 lsei_next_in_partition (&lsei))
2341 {
2342 symtab_node *node = lsei_node (lsei);
2343
2344 if (m_symtab_node_map.get (node))
2345 count++;
2346 }
2347
2348 streamer_write_uhwi (ob, count);
2349
2350 /* Process all of the symbols. */
2351 for (lto_symtab_encoder_iterator lsei = lsei_start_in_partition (encoder);
2352 !lsei_end_p (lsei);
2353 lsei_next_in_partition (&lsei))
2354 {
2355 symtab_node *node = lsei_node (lsei);
2356
2357 sem_item **item = m_symtab_node_map.get (node);
2358
2359 if (item && *item)
2360 {
2361 int node_ref = lto_symtab_encoder_encode (encoder, node);
2362 streamer_write_uhwi_stream (ob->main_stream, node_ref);
2363
2364 streamer_write_uhwi (ob, (*item)->get_hash ());
2365 }
2366 }
2367
2368 streamer_write_char_stream (ob->main_stream, 0);
2369 produce_asm (ob, NULL);
2370 destroy_output_block (ob);
2371 }
2372
2373 /* Reads a section from LTO stream file FILE_DATA. Input block for DATA
2374 contains LEN bytes. */
2375
2376 void
2377 sem_item_optimizer::read_section (lto_file_decl_data *file_data,
2378 const char *data, size_t len)
2379 {
2380 const lto_function_header *header =
2381 (const lto_function_header *) data;
2382 const int cfg_offset = sizeof (lto_function_header);
2383 const int main_offset = cfg_offset + header->cfg_size;
2384 const int string_offset = main_offset + header->main_size;
2385 data_in *data_in;
2386 unsigned int i;
2387 unsigned int count;
2388
2389 lto_input_block ib_main ((const char *) data + main_offset, 0,
2390 header->main_size, file_data->mode_table);
2391
2392 data_in =
2393 lto_data_in_create (file_data, (const char *) data + string_offset,
2394 header->string_size, vNULL);
2395
2396 count = streamer_read_uhwi (&ib_main);
2397
2398 for (i = 0; i < count; i++)
2399 {
2400 unsigned int index;
2401 symtab_node *node;
2402 lto_symtab_encoder_t encoder;
2403
2404 index = streamer_read_uhwi (&ib_main);
2405 encoder = file_data->symtab_node_encoder;
2406 node = lto_symtab_encoder_deref (encoder, index);
2407
2408 hashval_t hash = streamer_read_uhwi (&ib_main);
2409
2410 gcc_assert (node->definition);
2411
2412 if (dump_file)
2413 fprintf (dump_file, "Symbol added:%s (tree: %p, uid:%u)\n",
2414 node->asm_name (), (void *) node->decl, node->order);
2415
2416 if (is_a<cgraph_node *> (node))
2417 {
2418 cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
2419
2420 m_items.safe_push (new sem_function (cnode, hash, &m_bmstack));
2421 }
2422 else
2423 {
2424 varpool_node *vnode = dyn_cast <varpool_node *> (node);
2425
2426 m_items.safe_push (new sem_variable (vnode, hash, &m_bmstack));
2427 }
2428 }
2429
2430 lto_free_section_data (file_data, LTO_section_ipa_icf, NULL, data,
2431 len);
2432 lto_data_in_delete (data_in);
2433 }
2434
2435 /* Read IPA ICF summary for symbols. */
2436
2437 void
2438 sem_item_optimizer::read_summary (void)
2439 {
2440 lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
2441 lto_file_decl_data *file_data;
2442 unsigned int j = 0;
2443
2444 while ((file_data = file_data_vec[j++]))
2445 {
2446 size_t len;
2447 const char *data = lto_get_section_data (file_data,
2448 LTO_section_ipa_icf, NULL, &len);
2449
2450 if (data)
2451 read_section (file_data, data, len);
2452 }
2453 }
2454
2455 /* Register callgraph and varpool hooks. */
2456
2457 void
2458 sem_item_optimizer::register_hooks (void)
2459 {
2460 if (!m_cgraph_node_hooks)
2461 m_cgraph_node_hooks = symtab->add_cgraph_removal_hook
2462 (&sem_item_optimizer::cgraph_removal_hook, this);
2463
2464 if (!m_varpool_node_hooks)
2465 m_varpool_node_hooks = symtab->add_varpool_removal_hook
2466 (&sem_item_optimizer::varpool_removal_hook, this);
2467 }
2468
2469 /* Unregister callgraph and varpool hooks. */
2470
2471 void
2472 sem_item_optimizer::unregister_hooks (void)
2473 {
2474 if (m_cgraph_node_hooks)
2475 symtab->remove_cgraph_removal_hook (m_cgraph_node_hooks);
2476
2477 if (m_varpool_node_hooks)
2478 symtab->remove_varpool_removal_hook (m_varpool_node_hooks);
2479 }
2480
2481 /* Adds class CLS to a hash table keyed by hash value. */
2482
2483 void
2484 sem_item_optimizer::add_class (congruence_class *cls)
2485 {
2486 gcc_assert (cls->members.length ());
2487
2488 congruence_class_group *group = get_group_by_hash (
2489 cls->members[0]->get_hash (),
2490 cls->members[0]->type);
2491 group->classes.safe_push (cls);
2492 }
2493
2494 /* Gets a congruence class group based on given HASH value and TYPE. */
2495
2496 congruence_class_group *
2497 sem_item_optimizer::get_group_by_hash (hashval_t hash, sem_item_type type)
2498 {
2499 congruence_class_group *item = XNEW (congruence_class_group);
2500 item->hash = hash;
2501 item->type = type;
2502
2503 congruence_class_group **slot = m_classes.find_slot (item, INSERT);
2504
2505 if (*slot)
2506 free (item);
2507 else
2508 {
2509 item->classes.create (1);
2510 *slot = item;
2511 }
2512
2513 return *slot;
2514 }
2515
2516 /* Callgraph removal hook called for a NODE with a custom DATA. */
2517
2518 void
2519 sem_item_optimizer::cgraph_removal_hook (cgraph_node *node, void *data)
2520 {
2521 sem_item_optimizer *optimizer = (sem_item_optimizer *) data;
2522 optimizer->remove_symtab_node (node);
2523 }
2524
2525 /* Varpool removal hook called for a NODE with a custom DATA. */
2526
2527 void
2528 sem_item_optimizer::varpool_removal_hook (varpool_node *node, void *data)
2529 {
2530 sem_item_optimizer *optimizer = (sem_item_optimizer *) data;
2531 optimizer->remove_symtab_node (node);
2532 }
2533
2534 /* Remove symtab NODE triggered by symtab removal hooks. */
2535
2536 void
2537 sem_item_optimizer::remove_symtab_node (symtab_node *node)
2538 {
2539 gcc_assert (!m_classes.elements());
2540
2541 m_removed_items_set.add (node);
2542 }
2543
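/* Removes a semantic ITEM: drops its entry from the map of symtab nodes and
   releases its memory.  */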
2544 void
2545 sem_item_optimizer::remove_item (sem_item *item)
2546 {
2547 if (m_symtab_node_map.get (item->node))
2548 m_symtab_node_map.remove (item->node);
2549 delete item;
2550 }
2551
2552 /* Removes all callgraph and varpool nodes that are marked by symtab
2553 as deleted. */
2554
2555 void
2556 sem_item_optimizer::filter_removed_items (void)
2557 {
2558 auto_vec <sem_item *> filtered;
2559
2560 for (unsigned int i = 0; i < m_items.length(); i++)
2561 {
2562 sem_item *item = m_items[i];
2563
2564 if (m_removed_items_set.contains (item->node))
2565 {
2566 remove_item (item);
2567 continue;
2568 }
2569
2570 if (item->type == FUNC)
2571 {
2572 cgraph_node *cnode = static_cast <sem_function *>(item)->get_node ();
2573
2574 if (in_lto_p && (cnode->alias || cnode->body_removed))
2575 remove_item (item);
2576 else
2577 filtered.safe_push (item);
2578 }
2579 else /* VAR. */
2580 {
2581 if (!flag_ipa_icf_variables)
2582 remove_item (item);
2583 else
2584 {
2585 /* Filter out non-readonly variables. */
2586 tree decl = item->decl;
2587 if (TREE_READONLY (decl))
2588 filtered.safe_push (item);
2589 else
2590 remove_item (item);
2591 }
2592 }
2593 }
2594
2595 /* Clean-up of released semantic items. */
2596
2597 m_items.release ();
2598 for (unsigned int i = 0; i < filtered.length(); i++)
2599 m_items.safe_push (filtered[i]);
2600 }
2601
2602 /* Optimizer entry point. Returns true if a merge operation was processed. */
2605
2606 bool
2607 sem_item_optimizer::execute (void)
2608 {
2609 filter_removed_items ();
2610 unregister_hooks ();
2611
2612 build_graph ();
2613 update_hash_by_addr_refs ();
2614 build_hash_based_classes ();
2615
2616 if (dump_file)
2617 fprintf (dump_file, "Dump after hash based groups\n");
2618 dump_cong_classes ();
2619
2620 for (unsigned int i = 0; i < m_items.length(); i++)
2621 m_items[i]->init_wpa ();
2622
2623 subdivide_classes_by_equality (true);
2624
2625 if (dump_file)
2626 fprintf (dump_file, "Dump after WPA based types groups\n");
2627
2628 dump_cong_classes ();
2629
2630 process_cong_reduction ();
2631 verify_classes ();
2632
2633 if (dump_file)
2634 fprintf (dump_file, "Dump after callgraph-based congruence reduction\n");
2635
2636 dump_cong_classes ();
2637
2638 parse_nonsingleton_classes ();
2639 subdivide_classes_by_equality ();
2640
2641 if (dump_file)
2642 fprintf (dump_file, "Dump after full equality comparison of groups\n");
2643
2644 dump_cong_classes ();
2645
2646 unsigned int prev_class_count = m_classes_count;
2647
2648 process_cong_reduction ();
2649 dump_cong_classes ();
2650 verify_classes ();
2651 bool merged_p = merge_classes (prev_class_count);
2652
2653 if (dump_file && (dump_flags & TDF_DETAILS))
2654 symtab_node::dump_table (dump_file);
2655
2656 return merged_p;
2657 }
2658
2659 /* Function responsible for visiting all potential functions and
2660 read-only variables that can be merged. */
2661
2662 void
2663 sem_item_optimizer::parse_funcs_and_vars (void)
2664 {
2665 cgraph_node *cnode;
2666
2667 if (flag_ipa_icf_functions)
2668 FOR_EACH_DEFINED_FUNCTION (cnode)
2669 {
2670 sem_function *f = sem_function::parse (cnode, &m_bmstack);
2671 if (f)
2672 {
2673 m_items.safe_push (f);
2674 m_symtab_node_map.put (cnode, f);
2675
2676 if (dump_file)
2677 fprintf (dump_file, "Parsed function:%s\n", f->node->asm_name ());
2678
2679 if (dump_file && (dump_flags & TDF_DETAILS))
2680 f->dump_to_file (dump_file);
2681 }
2682 else if (dump_file)
2683 fprintf (dump_file, "Not parsed function:%s\n", cnode->asm_name ());
2684 }
2685
2686 varpool_node *vnode;
2687
2688 if (flag_ipa_icf_variables)
2689 FOR_EACH_DEFINED_VARIABLE (vnode)
2690 {
2691 sem_variable *v = sem_variable::parse (vnode, &m_bmstack);
2692
2693 if (v)
2694 {
2695 m_items.safe_push (v);
2696 m_symtab_node_map.put (vnode, v);
2697 }
2698 }
2699 }
2700
2701 /* Makes pairing between a congruence class CLS and semantic ITEM. */
2702
2703 void
2704 sem_item_optimizer::add_item_to_class (congruence_class *cls, sem_item *item)
2705 {
2706 item->index_in_class = cls->members.length ();
2707 cls->members.safe_push (item);
2708 item->cls = cls;
2709 }
2710
2711 /* For each semantic item, append hash values of references. */
2712
2713 void
2714 sem_item_optimizer::update_hash_by_addr_refs ()
2715 {
2716 /* First, append sensitive references and the class type to the hash if it
2717 needs to be matched for ODR. */
2718 for (unsigned i = 0; i < m_items.length (); i++)
2719 {
2720 m_items[i]->update_hash_by_addr_refs (m_symtab_node_map);
2721 if (m_items[i]->type == FUNC)
2722 {
2723 if (TREE_CODE (TREE_TYPE (m_items[i]->decl)) == METHOD_TYPE
2724 && contains_polymorphic_type_p
2725 (method_class_type (TREE_TYPE (m_items[i]->decl)))
2726 && (DECL_CXX_CONSTRUCTOR_P (m_items[i]->decl)
2727 || (static_cast<sem_function *> (m_items[i])->param_used_p (0)
2728 && static_cast<sem_function *> (m_items[i])
2729 ->compare_polymorphic_p ())))
2730 {
2731 tree class_type
2732 = method_class_type (TREE_TYPE (m_items[i]->decl));
2733 inchash::hash hstate (m_items[i]->hash);
2734
2735 if (TYPE_NAME (class_type)
2736 && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (class_type)))
2737 hstate.add_wide_int
2738 (IDENTIFIER_HASH_VALUE
2739 (DECL_ASSEMBLER_NAME (TYPE_NAME (class_type))));
2740
2741 m_items[i]->hash = hstate.end ();
2742 }
2743 }
2744 }
2745
2746 /* Once all symbols have an enhanced hash value, we can append the
2747 hash values of symbols that are seen by IPA ICF and are
2748 referenced by a semantic item. Newly computed values
2749 are saved to the global_hash member variable. */
2750 for (unsigned i = 0; i < m_items.length (); i++)
2751 m_items[i]->update_hash_by_local_refs (m_symtab_node_map);
2752
2753 /* Global hash values replace the current hash values. */
2754 for (unsigned i = 0; i < m_items.length (); i++)
2755 m_items[i]->hash = m_items[i]->global_hash;
2756 }
2757
2758 /* Congruence classes are built by hash value. */
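/* All items that share a hash value and item type start out in a single
   congruence class; later stages subdivide these initial classes.  */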
2759
2760 void
2761 sem_item_optimizer::build_hash_based_classes (void)
2762 {
2763 for (unsigned i = 0; i < m_items.length (); i++)
2764 {
2765 sem_item *item = m_items[i];
2766
2767 congruence_class_group *group = get_group_by_hash (item->hash,
2768 item->type);
2769
2770 if (!group->classes.length ())
2771 {
2772 m_classes_count++;
2773 group->classes.safe_push (new congruence_class (class_id++));
2774 }
2775
2776 add_item_to_class (group->classes[0], item);
2777 }
2778 }
2779
2780 /* Build references according to call graph. */
2781
2782 void
2783 sem_item_optimizer::build_graph (void)
2784 {
2785 for (unsigned i = 0; i < m_items.length (); i++)
2786 {
2787 sem_item *item = m_items[i];
2788 m_symtab_node_map.put (item->node, item);
2789 }
2790
2791 for (unsigned i = 0; i < m_items.length (); i++)
2792 {
2793 sem_item *item = m_items[i];
2794
2795 if (item->type == FUNC)
2796 {
2797 cgraph_node *cnode = dyn_cast <cgraph_node *> (item->node);
2798
2799 cgraph_edge *e = cnode->callees;
2800 while (e)
2801 {
2802 sem_item **slot = m_symtab_node_map.get
2803 (e->callee->ultimate_alias_target ());
2804 if (slot)
2805 item->add_reference (*slot);
2806
2807 e = e->next_callee;
2808 }
2809 }
2810
2811 ipa_ref *ref = NULL;
2812 for (unsigned i = 0; item->node->iterate_reference (i, ref); i++)
2813 {
2814 sem_item **slot = m_symtab_node_map.get
2815 (ref->referred->ultimate_alias_target ());
2816 if (slot)
2817 item->add_reference (*slot);
2818 }
2819 }
2820 }
2821
2822 /* Initialize semantic items in all classes with more than one member.
2823 In case of WPA, this loads the function bodies. */
2824
2825 void
2826 sem_item_optimizer::parse_nonsingleton_classes (void)
2827 {
2828 unsigned int init_called_count = 0;
2829
2830 for (unsigned i = 0; i < m_items.length (); i++)
2831 if (m_items[i]->cls->members.length () > 1)
2832 {
2833 m_items[i]->init ();
2834 init_called_count++;
2835 }
2836
2837 if (dump_file)
2838 fprintf (dump_file, "Init called for %u items (%.2f%%).\n", init_called_count,
2839 m_items.length () ? 100.0f * init_called_count / m_items.length (): 0.0f);
2840 }
2841
2842 /* Equality function for semantic items is used to subdivide existing
2843 classes. If IN_WPA, fast equality function is invoked. */
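/* The subdivision takes the first member of a class as a representative and
   compares every other member against it and against the representatives of
   classes already split off from this one; a new class is opened whenever no
   existing representative matches.  */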
2844
2845 void
2846 sem_item_optimizer::subdivide_classes_by_equality (bool in_wpa)
2847 {
2848 for (hash_table <congruence_class_group_hash>::iterator it = m_classes.begin ();
2849 it != m_classes.end (); ++it)
2850 {
2851 unsigned int class_count = (*it)->classes.length ();
2852
2853 for (unsigned i = 0; i < class_count; i++)
2854 {
2855 congruence_class *c = (*it)->classes [i];
2856
2857 if (c->members.length() > 1)
2858 {
2859 auto_vec <sem_item *> new_vector;
2860
2861 sem_item *first = c->members[0];
2862 new_vector.safe_push (first);
2863
2864 unsigned class_split_first = (*it)->classes.length ();
2865
2866 for (unsigned j = 1; j < c->members.length (); j++)
2867 {
2868 sem_item *item = c->members[j];
2869
2870 bool equals = in_wpa ? first->equals_wpa (item,
2871 m_symtab_node_map) : first->equals (item, m_symtab_node_map);
2872
2873 if (equals)
2874 new_vector.safe_push (item);
2875 else
2876 {
2877 bool integrated = false;
2878
2879 for (unsigned k = class_split_first; k < (*it)->classes.length (); k++)
2880 {
2881 sem_item *x = (*it)->classes[k]->members[0];
2882 bool equals = in_wpa ? x->equals_wpa (item,
2883 m_symtab_node_map) : x->equals (item, m_symtab_node_map);
2884
2885 if (equals)
2886 {
2887 integrated = true;
2888 add_item_to_class ((*it)->classes[k], item);
2889
2890 break;
2891 }
2892 }
2893
2894 if (!integrated)
2895 {
2896 congruence_class *c = new congruence_class (class_id++);
2897 m_classes_count++;
2898 add_item_to_class (c, item);
2899
2900 (*it)->classes.safe_push (c);
2901 }
2902 }
2903 }
2904
2905 /* Replace the members of the just-split class with new_vector. */
2906 c->members.release ();
2907 c->members.create (new_vector.length ());
2908
2909 for (unsigned int j = 0; j < new_vector.length (); j++)
2910 add_item_to_class (c, new_vector[j]);
2911 }
2912 }
2913 }
2914
2915 verify_classes ();
2916 }
2917
2918 /* Subdivide classes by address references that members of the class
2919 reference. An example can be a pair of functions that take the address
2920 of another function. If these addresses are different, the class
2921 is split. */
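/* Members are grouped by a symbol_compare_collection built from their
   references; every distinct collection yields its own congruence class.  */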
2922
2923 unsigned
2924 sem_item_optimizer::subdivide_classes_by_sensitive_refs ()
2925 {
2926 typedef hash_map <symbol_compare_collection *, vec <sem_item *>,
2927 symbol_compare_hashmap_traits> subdivide_hash_map;
2928
2929 unsigned newly_created_classes = 0;
2930
2931 for (hash_table <congruence_class_group_hash>::iterator it = m_classes.begin ();
2932 it != m_classes.end (); ++it)
2933 {
2934 unsigned int class_count = (*it)->classes.length ();
2935 auto_vec<congruence_class *> new_classes;
2936
2937 for (unsigned i = 0; i < class_count; i++)
2938 {
2939 congruence_class *c = (*it)->classes [i];
2940
2941 if (c->members.length() > 1)
2942 {
2943 subdivide_hash_map split_map;
2944
2945 for (unsigned j = 0; j < c->members.length (); j++)
2946 {
2947 sem_item *source_node = c->members[j];
2948
2949 symbol_compare_collection *collection = new symbol_compare_collection (source_node->node);
2950
2951 bool existed;
2952 vec <sem_item *> *slot = &split_map.get_or_insert (collection,
2953 &existed);
2954 gcc_checking_assert (slot);
2955
2956 slot->safe_push (source_node);
2957
2958 if (existed)
2959 delete collection;
2960 }
2961
2962 /* If the map contains more than one key, we have to split the map
2963 appropriately. */
2964 if (split_map.elements () != 1)
2965 {
2966 bool first_class = true;
2967
2968 for (subdivide_hash_map::iterator it2 = split_map.begin ();
2969 it2 != split_map.end (); ++it2)
2970 {
2971 congruence_class *new_cls;
2972 new_cls = new congruence_class (class_id++);
2973
2974 for (unsigned k = 0; k < (*it2).second.length (); k++)
2975 add_item_to_class (new_cls, (*it2).second[k]);
2976
2977 worklist_push (new_cls);
2978 newly_created_classes++;
2979
2980 if (first_class)
2981 {
2982 (*it)->classes[i] = new_cls;
2983 first_class = false;
2984 }
2985 else
2986 {
2987 new_classes.safe_push (new_cls);
2988 m_classes_count++;
2989 }
2990 }
2991 }
2992
2993 /* Release memory. */
2994 for (subdivide_hash_map::iterator it2 = split_map.begin ();
2995 it2 != split_map.end (); ++it2)
2996 {
2997 delete (*it2).first;
2998 (*it2).second.release ();
2999 }
3000 }
3001 }
3002
3003 for (unsigned i = 0; i < new_classes.length (); i++)
3004 (*it)->classes.safe_push (new_classes[i]);
3005 }
3006
3007 return newly_created_classes;
3008 }
3009
3010 /* Verify congruence classes if checking is enabled. */
3011
3012 void
3013 sem_item_optimizer::verify_classes (void)
3014 {
3015 #if ENABLE_CHECKING
3016 for (hash_table <congruence_class_group_hash>::iterator it = m_classes.begin ();
3017 it != m_classes.end (); ++it)
3018 {
3019 for (unsigned int i = 0; i < (*it)->classes.length (); i++)
3020 {
3021 congruence_class *cls = (*it)->classes[i];
3022
3023 gcc_checking_assert (cls);
3024 gcc_checking_assert (cls->members.length () > 0);
3025
3026 for (unsigned int j = 0; j < cls->members.length (); j++)
3027 {
3028 sem_item *item = cls->members[j];
3029
3030 gcc_checking_assert (item);
3031 gcc_checking_assert (item->cls == cls);
3032
3033 for (unsigned k = 0; k < item->usages.length (); k++)
3034 {
3035 sem_usage_pair *usage = item->usages[k];
3036 gcc_checking_assert (usage->item->index_in_class <
3037 usage->item->cls->members.length ());
3038 }
3039 }
3040 }
3041 }
3042 #endif
3043 }
3044
3045 /* Split map traverse function used to dispose of the map: for each
3046 congruence class key, the associated bitmap B is released. The remaining
3047 arguments are unused. */
3048
3049 bool
3050 sem_item_optimizer::release_split_map (congruence_class * const &,
3051 bitmap const &b, traverse_split_pair *)
3052 {
3053 bitmap bmp = b;
3054
3055 BITMAP_FREE (bmp);
3056
3057 return true;
3058 }
3059
3060 /* Processes a split operation for the congruence class CLS,
3061 where bitmap B marks the members to be separated. PAIR carries
3062 the optimizer and the splitter class. */
3063
3064 bool
3065 sem_item_optimizer::traverse_congruence_split (congruence_class * const &cls,
3066 bitmap const &b, traverse_split_pair *pair)
3067 {
3068 sem_item_optimizer *optimizer = pair->optimizer;
3069 const congruence_class *splitter_cls = pair->cls;
3070
3071 /* If the number of set bits is greater than zero and less than the number
3072 of members, the group will be split. */
3073 unsigned popcount = bitmap_count_bits (b);
3074
3075 if (popcount > 0 && popcount < cls->members.length ())
3076 {
3077 congruence_class* newclasses[2] = { new congruence_class (class_id++), new congruence_class (class_id++) };
3078
3079 for (unsigned int i = 0; i < cls->members.length (); i++)
3080 {
3081 int target = bitmap_bit_p (b, i);
3082 congruence_class *tc = newclasses[target];
3083
3084 add_item_to_class (tc, cls->members[i]);
3085 }
3086
3087 #ifdef ENABLE_CHECKING
3088 for (unsigned int i = 0; i < 2; i++)
3089 gcc_checking_assert (newclasses[i]->members.length ());
3090 #endif
3091
3092 if (splitter_cls == cls)
3093 optimizer->splitter_class_removed = true;
3094
3095 /* Remove the old class from the worklist if present. */
3096 bool in_worklist = cls->in_worklist;
3097
3098 if (in_worklist)
3099 cls->in_worklist = false;
3100
3101 congruence_class_group g;
3102 g.hash = cls->members[0]->get_hash ();
3103 g.type = cls->members[0]->type;
3104
3105 congruence_class_group *slot = optimizer->m_classes.find(&g);
3106
3107 for (unsigned int i = 0; i < slot->classes.length (); i++)
3108 if (slot->classes[i] == cls)
3109 {
3110 slot->classes.ordered_remove (i);
3111 break;
3112 }
3113
3114 /* The new classes are inserted and integrated into the work list. */
3115 for (unsigned int i = 0; i < 2; i++)
3116 optimizer->add_class (newclasses[i]);
3117
3118 /* Two classes replace one, so the count is incremented just by one. */
3119 optimizer->m_classes_count++;
3120
3121 /* If the old class was present in the worklist, we remove it
3122 and replace it with both newly created classes. */
3123 if (in_worklist)
3124 for (unsigned int i = 0; i < 2; i++)
3125 optimizer->worklist_push (newclasses[i]);
3126 else /* Just the smaller class is inserted. */
3127 {
3128 unsigned int smaller_index = newclasses[0]->members.length () <
3129 newclasses[1]->members.length () ?
3130 0 : 1;
3131 optimizer->worklist_push (newclasses[smaller_index]);
3132 }
3133
3134 if (dump_file && (dump_flags & TDF_DETAILS))
3135 {
3136 fprintf (dump_file, "  congruence class split:\n");
3137 cls->dump (dump_file, 4);
3138
3139 fprintf (dump_file, " newly created groups:\n");
3140 for (unsigned int i = 0; i < 2; i++)
3141 newclasses[i]->dump (dump_file, 4);
3142 }
3143
3144 /* Release the class if it is not present in the work list. */
3145 if (!in_worklist)
3146 delete cls;
3147 }
3148
3149
3150 return true;
3151 }
3152
3153 /* Tests whether the class CLS, used at usage position INDEX, splits any
3154 congruence classes. Bitmap stack m_bmstack is used for bitmap allocation. */
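/* For every member of CLS, its recorded usages at position INDEX are
   examined and, in a bitmap kept per referring congruence class, the members
   of that class which participate are marked.  Any class whose bitmap is
   neither empty nor full is then split by traverse_congruence_split.  */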
3155
3156 void
3157 sem_item_optimizer::do_congruence_step_for_index (congruence_class *cls,
3158 unsigned int index)
3159 {
3160 hash_map <congruence_class *, bitmap> split_map;
3161
3162 for (unsigned int i = 0; i < cls->members.length (); i++)
3163 {
3164 sem_item *item = cls->members[i];
3165
3166 /* Iterate over all usages of the item that occur at position INDEX. */
3167 for (unsigned int j = 0; j < item->usages.length (); j++)
3168 {
3169 sem_usage_pair *usage = item->usages[j];
3170
3171 if (usage->index != index)
3172 continue;
3173
3174 bitmap *slot = split_map.get (usage->item->cls);
3175 bitmap b;
3176
3177 if (!slot)
3178 {
3179 b = BITMAP_ALLOC (&m_bmstack);
3180 split_map.put (usage->item->cls, b);
3181 }
3182 else
3183 b = *slot;
3184
3185 #if ENABLE_CHECKING
3186 gcc_checking_assert (usage->item->cls);
3187 gcc_checking_assert (usage->item->index_in_class <
3188 usage->item->cls->members.length ());
3189 #endif
3190
3191 bitmap_set_bit (b, usage->item->index_in_class);
3192 }
3193 }
3194
3195 traverse_split_pair pair;
3196 pair.optimizer = this;
3197 pair.cls = cls;
3198
3199 splitter_class_removed = false;
3200 split_map.traverse
3201 <traverse_split_pair *, sem_item_optimizer::traverse_congruence_split> (&pair);
3202
3203 /* Bitmap clean-up. */
3204 split_map.traverse
3205 <traverse_split_pair *, sem_item_optimizer::release_split_map> (NULL);
3206 }
3207
3208 /* Every usage of a congruence class CLS is a candidate that can split the
3209 collection of classes. Bitmap stack BMSTACK is used for bitmap
3210 allocation. */
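/* The usage bitmap accumulates every position at which some member of CLS
   is used; each such position is then tried as a potential splitter,
   stopping early if CLS itself has been split.  */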
3211
3212 void
3213 sem_item_optimizer::do_congruence_step (congruence_class *cls)
3214 {
3215 bitmap_iterator bi;
3216 unsigned int i;
3217
3218 bitmap usage = BITMAP_ALLOC (&m_bmstack);
3219
3220 for (unsigned int i = 0; i < cls->members.length (); i++)
3221 bitmap_ior_into (usage, cls->members[i]->usage_index_bitmap);
3222
3223 EXECUTE_IF_SET_IN_BITMAP (usage, 0, i, bi)
3224 {
3225 if (dump_file && (dump_flags & TDF_DETAILS))
3226 fprintf (dump_file, "  processing congruence step for class: %u, index: %u\n",
3227 cls->id, i);
3228
3229 do_congruence_step_for_index (cls, i);
3230
3231 if (splitter_class_removed)
3232 break;
3233 }
3234
3235 BITMAP_FREE (usage);
3236 }
3237
3238 /* Adds a newly created congruence class CLS to worklist. */
3239
3240 void
3241 sem_item_optimizer::worklist_push (congruence_class *cls)
3242 {
3243 /* Return if the class CLS is already present in the work list. */
3244 if (cls->in_worklist)
3245 return;
3246
3247 cls->in_worklist = true;
3248 worklist.push_back (cls);
3249 }
3250
3251 /* Pops a class from worklist. */
3252
3253 congruence_class *
3254 sem_item_optimizer::worklist_pop (void)
3255 {
3256 congruence_class *cls;
3257
3258 while (!worklist.empty ())
3259 {
3260 cls = worklist.front ();
3261 worklist.pop_front ();
3262 if (cls->in_worklist)
3263 {
3264 cls->in_worklist = false;
3265
3266 return cls;
3267 }
3268 else
3269 {
3270 /* Work list item was already intended to be removed.
3271 The only reason for doing it is to split a class.
3272 Thus, the class CLS is deleted. */
3273 delete cls;
3274 }
3275 }
3276
3277 return NULL;
3278 }
3279
3280 /* Iterative congruence reduction function. */
3281
3282 void
3283 sem_item_optimizer::process_cong_reduction (void)
3284 {
3285 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
3286 it != m_classes.end (); ++it)
3287 for (unsigned i = 0; i < (*it)->classes.length (); i++)
3288 if ((*it)->classes[i]->is_class_used ())
3289 worklist_push ((*it)->classes[i]);
3290
3291 if (dump_file)
3292 fprintf (dump_file, "Worklist has been filled with: %lu\n",
3293 (unsigned long) worklist.size ());
3294
3295 if (dump_file && (dump_flags & TDF_DETAILS))
3296 fprintf (dump_file, "Congruence class reduction\n");
3297
3298 congruence_class *cls;
3299
3300 /* Process complete congruence reduction. */
3301 while ((cls = worklist_pop ()) != NULL)
3302 do_congruence_step (cls);
3303
3304 /* Subdivide newly created classes according to references. */
3305 unsigned new_classes = subdivide_classes_by_sensitive_refs ();
3306
3307 if (dump_file)
3308 fprintf (dump_file, "Address reference subdivision created: %u "
3309 "new classes.\n", new_classes);
3310 }
3311
3312 /* Debug function that prints all information about congruence classes. */
3313
3314 void
3315 sem_item_optimizer::dump_cong_classes (void)
3316 {
3317 if (!dump_file)
3318 return;
3319
3320 fprintf (dump_file,
3321 "Congruence classes: %u (unique hash values: %lu), with total: %u items\n",
3322 m_classes_count, (unsigned long) m_classes.elements(), m_items.length ());
3323
3324 /* Histogram calculation. */
3325 unsigned int max_index = 0;
3326 unsigned int* histogram = XCNEWVEC (unsigned int, m_items.length () + 1);
3327
3328 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
3329 it != m_classes.end (); ++it)
3330
3331 for (unsigned i = 0; i < (*it)->classes.length (); i++)
3332 {
3333 unsigned int c = (*it)->classes[i]->members.length ();
3334 histogram[c]++;
3335
3336 if (c > max_index)
3337 max_index = c;
3338 }
3339
3340 fprintf (dump_file,
3341 "Class size histogram [num of members]: number of classes\n");
3342
3343 for (unsigned int i = 0; i <= max_index; i++)
3344 if (histogram[i])
3345 fprintf (dump_file, "[%u]: %u classes\n", i, histogram[i]);
3346
3347 fprintf (dump_file, "\n\n");
3348
3349
3350 if (dump_flags & TDF_DETAILS)
3351 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
3352 it != m_classes.end (); ++it)
3353 {
3354 fprintf (dump_file, "  group with %u classes:\n", (*it)->classes.length ());
3355
3356 for (unsigned i = 0; i < (*it)->classes.length (); i++)
3357 {
3358 (*it)->classes[i]->dump (dump_file, 4);
3359
3360 if (i < (*it)->classes.length () - 1)
3361 fprintf (dump_file, " ");
3362 }
3363 }
3364
3365 free (histogram);
3366 }
3367
3368 /* After reduction is done, we can declare all items in a group
3369 to be equal. PREV_CLASS_COUNT is the number of classes
3370 before reduction. True is returned if there's a merge operation
3371 processed. */
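/* The first member of every non-singleton class acts as the merge source;
   all remaining members are merged into it unless the no_icf attribute or
   the debug counter prevents it.  */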
3372
3373 bool
3374 sem_item_optimizer::merge_classes (unsigned int prev_class_count)
3375 {
3376 unsigned int item_count = m_items.length ();
3377 unsigned int class_count = m_classes_count;
3378 unsigned int equal_items = item_count - class_count;
3379
3380 unsigned int non_singular_classes_count = 0;
3381 unsigned int non_singular_classes_sum = 0;
3382
3383 bool merged_p = false;
3384
3385 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
3386 it != m_classes.end (); ++it)
3387 for (unsigned int i = 0; i < (*it)->classes.length (); i++)
3388 {
3389 congruence_class *c = (*it)->classes[i];
3390 if (c->members.length () > 1)
3391 {
3392 non_singular_classes_count++;
3393 non_singular_classes_sum += c->members.length ();
3394 }
3395 }
3396
3397 if (dump_file)
3398 {
3399 fprintf (dump_file, "\nItem count: %u\n", item_count);
3400 fprintf (dump_file, "Congruent classes before: %u, after: %u\n",
3401 prev_class_count, class_count);
3402 fprintf (dump_file, "Average class size before: %.2f, after: %.2f\n",
3403 prev_class_count ? 1.0f * item_count / prev_class_count : 0.0f,
3404 class_count ? 1.0f * item_count / class_count : 0.0f);
3405 fprintf (dump_file, "Average non-singular class size: %.2f, count: %u\n",
3406 non_singular_classes_count ? 1.0f * non_singular_classes_sum /
3407 non_singular_classes_count : 0.0f,
3408 non_singular_classes_count);
3409 fprintf (dump_file, "Equal symbols: %u\n", equal_items);
3410 fprintf (dump_file, "Fraction of visited symbols: %.2f%%\n\n",
3411 item_count ? 100.0f * equal_items / item_count : 0.0f);
3412 }
3413
3414 for (hash_table<congruence_class_group_hash>::iterator it = m_classes.begin ();
3415 it != m_classes.end (); ++it)
3416 for (unsigned int i = 0; i < (*it)->classes.length (); i++)
3417 {
3418 congruence_class *c = (*it)->classes[i];
3419
3420 if (c->members.length () == 1)
3421 continue;
3422
3423 gcc_assert (c->members.length ());
3424
3425 sem_item *source = c->members[0];
3426
3427 for (unsigned int j = 1; j < c->members.length (); j++)
3428 {
3429 sem_item *alias = c->members[j];
3430
3431 if (dump_file)
3432 {
3433 fprintf (dump_file, "Semantic equality hit:%s->%s\n",
3434 xstrdup_for_dump (source->node->name ()),
3435 xstrdup_for_dump (alias->node->name ()));
3436 fprintf (dump_file, "Assembler symbol names:%s->%s\n",
3437 xstrdup_for_dump (source->node->asm_name ()),
3438 xstrdup_for_dump (alias->node->asm_name ()));
3439 }
3440
3441 if (lookup_attribute ("no_icf", DECL_ATTRIBUTES (alias->decl)))
3442 {
3443 if (dump_file)
3444 fprintf (dump_file,
3445 "Merge operation is skipped due to no_icf "
3446 "attribute.\n\n");
3447
3448 continue;
3449 }
3450
3451 if (dump_file && (dump_flags & TDF_DETAILS))
3452 {
3453 source->dump_to_file (dump_file);
3454 alias->dump_to_file (dump_file);
3455 }
3456
3457 if (dbg_cnt (merged_ipa_icf))
3458 merged_p |= source->merge (alias);
3459 }
3460 }
3461
3462 return merged_p;
3463 }
3464
3465 /* Dump function prints all class members to a FILE with an INDENT. */
3466
3467 void
3468 congruence_class::dump (FILE *file, unsigned int indent) const
3469 {
3470 FPRINTF_SPACES (file, indent, "class with id: %u, hash: %u, items: %u\n",
3471 id, members[0]->get_hash (), members.length ());
3472
3473 FPUTS_SPACES (file, indent + 2, "");
3474 for (unsigned i = 0; i < members.length (); i++)
3475 fprintf (file, "%s(%p/%u) ", members[i]->node->asm_name (),
3476 (void *) members[i]->decl,
3477 members[i]->node->order);
3478
3479 fprintf (file, "\n");
3480 }
3481
3482 /* Returns true if there's a member that is used from another group. */
3483
3484 bool
3485 congruence_class::is_class_used (void)
3486 {
3487 for (unsigned int i = 0; i < members.length (); i++)
3488 if (members[i]->usages.length ())
3489 return true;
3490
3491 return false;
3492 }
3493
3494 /* Generate pass summary for IPA ICF pass. */
3495
3496 static void
3497 ipa_icf_generate_summary (void)
3498 {
3499 if (!optimizer)
3500 optimizer = new sem_item_optimizer ();
3501
3502 optimizer->register_hooks ();
3503 optimizer->parse_funcs_and_vars ();
3504 }
3505
3506 /* Write pass summary for IPA ICF pass. */
3507
3508 static void
3509 ipa_icf_write_summary (void)
3510 {
3511 gcc_assert (optimizer);
3512
3513 optimizer->write_summary ();
3514 }
3515
3516 /* Read pass summary for IPA ICF pass. */
3517
3518 static void
3519 ipa_icf_read_summary (void)
3520 {
3521 if (!optimizer)
3522 optimizer = new sem_item_optimizer ();
3523
3524 optimizer->read_summary ();
3525 optimizer->register_hooks ();
3526 }
3527
3528 /* Semantic equality execution function. */
3529
3530 static unsigned int
3531 ipa_icf_driver (void)
3532 {
3533 gcc_assert (optimizer);
3534
3535 bool merged_p = optimizer->execute ();
3536
3537 delete optimizer;
3538 optimizer = NULL;
3539
3540 return merged_p ? TODO_remove_functions : 0;
3541 }
3542
3543 const pass_data pass_data_ipa_icf =
3544 {
3545 IPA_PASS, /* type */
3546 "icf", /* name */
3547 OPTGROUP_IPA, /* optinfo_flags */
3548 TV_IPA_ICF, /* tv_id */
3549 0, /* properties_required */
3550 0, /* properties_provided */
3551 0, /* properties_destroyed */
3552 0, /* todo_flags_start */
3553 0, /* todo_flags_finish */
3554 };
3555
3556 class pass_ipa_icf : public ipa_opt_pass_d
3557 {
3558 public:
3559 pass_ipa_icf (gcc::context *ctxt)
3560 : ipa_opt_pass_d (pass_data_ipa_icf, ctxt,
3561 ipa_icf_generate_summary, /* generate_summary */
3562 ipa_icf_write_summary, /* write_summary */
3563 ipa_icf_read_summary, /* read_summary */
3564 NULL, /* write_optimization_summary */
3566 NULL, /* read_optimization_summary */
3568 NULL, /* stmt_fixup */
3569 0, /* function_transform_todo_flags_start */
3570 NULL, /* function_transform */
3571 NULL) /* variable_transform */
3572 {}
3573
3574 /* opt_pass methods: */
3575 virtual bool gate (function *)
3576 {
3577 return in_lto_p || flag_ipa_icf_variables || flag_ipa_icf_functions;
3578 }
3579
3580 virtual unsigned int execute (function *)
3581 {
3582 return ipa_icf_driver ();
3583 }
3584 }; // class pass_ipa_icf
3585
3586 } // ipa_icf namespace
3587
3588 ipa_opt_pass_d *
3589 make_pass_ipa_icf (gcc::context *ctxt)
3590 {
3591 return new ipa_icf::pass_ipa_icf (ctxt);
3592 }