/* Basic IPA utilities for type inheritance graph construction and
devirtualization.
- Copyright (C) 2013-2015 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "input.h"
-#include "alias.h"
-#include "symtab.h"
+#include "backend.h"
+#include "rtl.h"
#include "tree.h"
+#include "gimple.h"
+#include "alloc-pool.h"
+#include "tree-pass.h"
+#include "cgraph.h"
+#include "lto-streamer.h"
#include "fold-const.h"
#include "print-tree.h"
#include "calls.h"
-#include "predict.h"
-#include "basic-block.h"
-#include "is-a.h"
-#include "plugin-api.h"
-#include "hard-reg-set.h"
-#include "function.h"
-#include "ipa-ref.h"
-#include "cgraph.h"
-#include "rtl.h"
-#include "flags.h"
-#include "insn-config.h"
-#include "expmed.h"
-#include "dojump.h"
-#include "explow.h"
-#include "emit-rtl.h"
-#include "varasm.h"
-#include "stmt.h"
-#include "expr.h"
-#include "tree-pass.h"
-#include "target.h"
-#include "tree-pretty-print.h"
#include "ipa-utils.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
#include "gimple-fold.h"
-#include "gimple-expr.h"
-#include "gimple.h"
-#include "alloc-pool.h"
#include "symbol-summary.h"
+#include "tree-vrp.h"
#include "ipa-prop.h"
-#include "ipa-inline.h"
-#include "diagnostic.h"
-#include "tree-dfa.h"
+#include "ipa-fnsummary.h"
#include "demangle.h"
#include "dbgcnt.h"
#include "gimple-pretty-print.h"
-#include "stor-layout.h"
#include "intl.h"
-#include "streamer-hooks.h"
-#include "lto-streamer.h"
+#include "stringpool.h"
+#include "attribs.h"
/* Hash based set of pairs of types. */
-typedef struct
+struct type_pair
{
tree first;
tree second;
-} type_pair;
+};
-struct pair_traits : default_hashset_traits
+template <>
+struct default_hash_traits <type_pair>
+ : typed_noop_remove <type_pair>
{
+ GTY((skip)) typedef type_pair value_type;
+ GTY((skip)) typedef type_pair compare_type;
static hashval_t
hash (type_pair p)
{
};
static bool odr_types_equivalent_p (tree, tree, bool, bool *,
- hash_set<type_pair,pair_traits> *,
+ hash_set<type_pair> *,
location_t, location_t);
static bool odr_violation_reported = false;
bool rtti_broken;
};
-/* Return true if T is a type with linkage defined. */
-
-bool
-type_with_linkage_p (const_tree t)
-{
- /* Builtin types do not define linkage, their TYPE_CONTEXT is NULL. */
- if (!TYPE_CONTEXT (t)
- || !TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL
- || !TYPE_STUB_DECL (t))
- return false;
-
- /* In LTO do not get confused by non-C++ produced types or types built
- with -fno-lto-odr-type-merigng. */
- if (in_lto_p)
- {
- /* To support -fno-lto-odr-type-merigng recognize types with vtables
- to have linkage. */
- if (RECORD_OR_UNION_TYPE_P (t)
- && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
- return true;
- /* Do not accept any other types - we do not know if they were produced
- by C++ FE. */
- if (!DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
- return false;
- }
-
- return (RECORD_OR_UNION_TYPE_P (t)
- || TREE_CODE (t) == ENUMERAL_TYPE);
-}
-
-/* Return true if T is in anonymous namespace.
- This works only on those C++ types with linkage defined. */
-
-bool
-type_in_anonymous_namespace_p (const_tree t)
-{
- gcc_assert (type_with_linkage_p (t));
-
- /* Keep -fno-lto-odr-type-merging working by recognizing classes with vtables
- properly into anonymous namespaces. */
- if (RECORD_OR_UNION_TYPE_P (t)
- && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
- return (TYPE_STUB_DECL (t) && !TREE_PUBLIC (TYPE_STUB_DECL (t)));
-
- if (TYPE_STUB_DECL (t) && !TREE_PUBLIC (TYPE_STUB_DECL (t)))
- {
- /* C++ FE uses magic <anon> as assembler names of anonymous types.
- verify that this match with type_in_anonymous_namespace_p. */
-#ifdef ENABLE_CHECKING
- if (in_lto_p)
- gcc_assert (!strcmp ("<anon>",
- IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))));
-#endif
- return true;
- }
- return false;
-}
-
-/* Return true of T is type with One Definition Rule info attached.
- It means that either it is anonymous type or it has assembler name
- set. */
-
-bool
-odr_type_p (const_tree t)
-{
- /* We do not have this information when not in LTO, but we do not need
- to care, since it is used only for type merging. */
- gcc_checking_assert (in_lto_p || flag_lto);
-
- /* To support -fno-lto-odr-type-merging consider types with vtables ODR. */
- if (type_with_linkage_p (t) && type_in_anonymous_namespace_p (t))
- return true;
-
- if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
- && (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t))))
- {
-#ifdef ENABLE_CHECKING
- /* C++ FE uses magic <anon> as assembler names of anonymous types.
- verify that this match with type_in_anonymous_namespace_p. */
- gcc_assert (!type_with_linkage_p (t)
- || strcmp ("<anon>",
- IDENTIFIER_POINTER
- (DECL_ASSEMBLER_NAME (TYPE_NAME (t))))
- || type_in_anonymous_namespace_p (t));
-#endif
- return true;
- }
- return false;
-}
-
/* Return TRUE if all derived types of T are known and thus
we may consider the walk of derived type complete.
/* Hash used to unify ODR types based on their mangled name and for anonymous
namespace types. */
-struct odr_name_hasher
+struct odr_name_hasher : pointer_hash <odr_type_d>
{
- typedef odr_type_d *value_type;
typedef union tree_node *compare_type;
static inline hashval_t hash (const odr_type_d *);
static inline bool equal (const odr_type_d *, const tree_node *);
v = TREE_OPERAND (TREE_OPERAND (v, 0), 0);
}
- hstate.add_wide_int (IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (v)));
+ hstate.add_hwi (IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (v)));
return hstate.end ();
}
When STRICT is true, we compare types by their names for purposes of
ODR violation warnings. When strict is false, we consider variants
- equivalent, becuase it is all that matters for devirtualization machinery.
+ equivalent, because it is all that matters for devirtualization machinery.
*/
bool
return false;
if (TREE_CODE (type1) == RECORD_TYPE
&& (TYPE_BINFO (type1) == NULL_TREE)
- != (TYPE_BINFO (type1) == NULL_TREE))
+ != (TYPE_BINFO (type2) == NULL_TREE))
return false;
if (TREE_CODE (type1) == RECORD_TYPE && TYPE_BINFO (type1)
&& (BINFO_VTABLE (TYPE_BINFO (type1)) == NULL_TREE)
static bool
odr_subtypes_equivalent_p (tree t1, tree t2,
- hash_set<type_pair,pair_traits> *visited,
+ hash_set<type_pair> *visited,
location_t loc1, location_t loc2)
{
{
if (!types_same_for_odr (t1, t2, true))
return false;
- /* Limit recursion: If subtypes are ODR types and we know
- that they are same, be happy. */
- if (!odr_type_p (t1) || !get_odr_type (t1, true)->odr_violated)
+ /* Limit recursion: if subtypes are ODR types and we know that they are
+ same, be happy. We need to call get_odr_type on both subtypes since
+ we don't know which among t1 and t2 defines the common ODR type and
+ therefore which call will report the ODR violation, if any. */
+ if (!odr_type_p (t1)
+ || !odr_type_p (t2)
+ || (!get_odr_type (t1, true)->odr_violated
+ && !get_odr_type (t2, true)->odr_violated))
return true;
}
return odr_types_equivalent_p (t1, t2, false, NULL, visited, loc1, loc2);
}
+/* Return true if DECL1 and DECL2 are identical methods. Consider
+ name equivalent to name.localalias.xyz. */
+
+static bool
+methods_equal_p (tree decl1, tree decl2)
+{
+ /* Fast path: identical assembler names are trivially equal. */
+ if (DECL_ASSEMBLER_NAME (decl1) == DECL_ASSEMBLER_NAME (decl2))
+ return true;
+ /* Otherwise compare only the part of each mangled name before the
+ first symbol-suffix separator, so "name" matches a local-alias
+ variant such as "name.localalias.N". */
+ const char sep = symbol_table::symbol_suffix_separator ();
+
+ const char *name1 = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl1));
+ const char *ptr1 = strchr (name1, sep);
+ int len1 = ptr1 ? ptr1 - name1 : strlen (name1);
+
+ const char *name2 = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl2));
+ const char *ptr2 = strchr (name2, sep);
+ int len2 = ptr2 ? ptr2 - name2 : strlen (name2);
+
+ /* Differing prefix lengths can never compare equal. */
+ if (len1 != len2)
+ return false;
+ return !strncmp (name1, name2, len1);
+}
+
/* Compare two virtual tables, PREVAILING and VTABLE and output ODR
violation warnings. */
accept the other case. */
while (!end2
&& (end1
- || (DECL_ASSEMBLER_NAME (ref1->referred->decl)
- != DECL_ASSEMBLER_NAME (ref2->referred->decl)
+ || (methods_equal_p (ref1->referred->decl,
+ ref2->referred->decl)
&& TREE_CODE (ref1->referred->decl) == FUNCTION_DECL))
&& TREE_CODE (ref2->referred->decl) != FUNCTION_DECL)
{
}
while (!end1
&& (end2
- || (DECL_ASSEMBLER_NAME (ref2->referred->decl)
- != DECL_ASSEMBLER_NAME (ref1->referred->decl)
+ || (methods_equal_p (ref2->referred->decl, ref1->referred->decl)
&& TREE_CODE (ref2->referred->decl) == FUNCTION_DECL))
&& TREE_CODE (ref1->referred->decl) != FUNCTION_DECL)
{
if (!end1 && !end2)
{
- if (DECL_ASSEMBLER_NAME (ref1->referred->decl)
- == DECL_ASSEMBLER_NAME (ref2->referred->decl))
+ if (methods_equal_p (ref1->referred->decl, ref2->referred->decl))
continue;
class_type->odr_violated = true;
if (TREE_CODE (ref1->referred->decl)
!= TREE_CODE (ref2->referred->decl))
{
- if (TREE_CODE (ref1->referred->decl) == VAR_DECL)
+ if (VAR_P (ref1->referred->decl))
end1 = true;
- else if (TREE_CODE (ref2->referred->decl) == VAR_DECL)
+ else if (VAR_P (ref2->referred->decl))
end2 = true;
}
}
inform (DECL_SOURCE_LOCATION
(TYPE_NAME (DECL_CONTEXT (prevailing->decl))),
"the conflicting type defined in another translation "
- "unit has virtual table table with more entries");
+ "unit has virtual table with more entries");
}
}
return;
"unit");
gcc_assert (TREE_CODE (ref2->referred->decl)
== FUNCTION_DECL);
- inform (DECL_SOURCE_LOCATION (ref1->referred->decl),
- "virtual method %qD", ref1->referred->decl);
- inform (DECL_SOURCE_LOCATION (ref2->referred->decl),
+ inform (DECL_SOURCE_LOCATION
+ (ref1->referred->ultimate_alias_target ()->decl),
+ "virtual method %qD",
+ ref1->referred->ultimate_alias_target ()->decl);
+ inform (DECL_SOURCE_LOCATION
+ (ref2->referred->ultimate_alias_target ()->decl),
"ought to match virtual method %qD but does not",
- ref2->referred->decl);
+ ref2->referred->ultimate_alias_target ()->decl);
}
else
inform (DECL_SOURCE_LOCATION
(TYPE_NAME (DECL_CONTEXT (prevailing->decl))),
"the conflicting type defined in another translation "
- "unit has virtual table table with different contents");
+ "unit has virtual table with different contents");
return;
}
}
if (name1 && name2 && strcmp (name1, name2))
{
inform (loc_t1,
- "type name %<%s%> should match type name %<%s%>",
+ "type name %qs should match type name %qs",
name1, name2);
if (loc_t2_useful)
inform (loc_t2,
if (types_odr_comparable (t1, t2, true)
&& types_same_for_odr (t1, t2, true))
inform (loc_t1,
- "type %qT itself violate the C++ One Definition Rule", t1);
+ "type %qT itself violates the C++ One Definition Rule", t1);
/* Prevent pointless warnings like "struct aa" should match "struct aa". */
else if (TYPE_NAME (t1) == TYPE_NAME (t2)
&& TREE_CODE (t1) == TREE_CODE (t2) && !loc_t2_useful)
static bool
odr_types_equivalent_p (tree t1, tree t2, bool warn, bool *warned,
- hash_set<type_pair,pair_traits> *visited,
+ hash_set<type_pair> *visited,
location_t loc1, location_t loc2)
{
/* Check first for the obvious case of pointer identity. */
if (DECL_ARTIFICIAL (f1))
break;
warn_odr (t1, t2, f1, f2, warn, warned,
- G_("fields has different layout "
+ G_("fields have different layout "
"in another translation unit"));
return false;
}
return false;
}
- if ((TYPE_MAIN_VARIANT (t1) == t1 || TYPE_MAIN_VARIANT (t2) == t2)
- && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t1))
- && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t2))
- && odr_type_p (TYPE_MAIN_VARIANT (t1))
- && odr_type_p (TYPE_MAIN_VARIANT (t2))
- && (TYPE_METHODS (TYPE_MAIN_VARIANT (t1))
- != TYPE_METHODS (TYPE_MAIN_VARIANT (t2))))
- {
- /* Currently free_lang_data sets TYPE_METHODS to error_mark_node
- if it is non-NULL so this loop will never realy execute. */
- if (TYPE_METHODS (TYPE_MAIN_VARIANT (t1)) != error_mark_node
- && TYPE_METHODS (TYPE_MAIN_VARIANT (t2)) != error_mark_node)
- for (f1 = TYPE_METHODS (TYPE_MAIN_VARIANT (t1)),
- f2 = TYPE_METHODS (TYPE_MAIN_VARIANT (t2));
- f1 && f2 ; f1 = DECL_CHAIN (f1), f2 = DECL_CHAIN (f2))
- {
- if (DECL_ASSEMBLER_NAME (f1) != DECL_ASSEMBLER_NAME (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("a different method of same type "
- "is defined in another "
- "translation unit"));
- return false;
- }
- if (DECL_VIRTUAL_P (f1) != DECL_VIRTUAL_P (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("s definition that differs by virtual "
- "keyword in another translation unit"));
- return false;
- }
- if (DECL_VINDEX (f1) != DECL_VINDEX (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("virtual table layout differs "
- "in another translation unit"));
- return false;
- }
- if (odr_subtypes_equivalent_p (TREE_TYPE (f1),
- TREE_TYPE (f2), visited,
- loc1, loc2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("method with incompatible type is "
- "defined in another translation unit"));
- return false;
- }
- }
- if ((f1 == NULL) != (f2 == NULL))
- {
- warn_odr (t1, t2, NULL, NULL, warn, warned,
- G_("a type with different number of methods "
- "is defined in another translation unit"));
- return false;
- }
- }
}
break;
}
bool
odr_types_equivalent_p (tree type1, tree type2)
{
- hash_set<type_pair,pair_traits> visited;
+ gcc_checking_assert (odr_or_derived_type_p (type1)
+ && odr_or_derived_type_p (type2));
-#ifdef ENABLE_CHECKING
- gcc_assert (odr_or_derived_type_p (type1) && odr_or_derived_type_p (type2));
-#endif
+ hash_set<type_pair> visited;
return odr_types_equivalent_p (type1, type2, false, NULL,
&visited, UNKNOWN_LOCATION, UNKNOWN_LOCATION);
}
}
if (prevail)
- {
- tree tmp = type;
-
- type = val->type;
- val->type = tmp;
- }
+ std::swap (val->type, type);
val->types_set->add (type);
bool base_mismatch = false;
unsigned int i;
bool warned = false;
- hash_set<type_pair,pair_traits> visited;
+ hash_set<type_pair> visited;
gcc_assert (in_lto_p);
vec_safe_push (val->types, type);
}
}
- /* Next compare memory layout. */
+ /* Next compare memory layout.
+ The DECL_SOURCE_LOCATIONs in this invocation came from LTO streaming.
+ We must apply the location cache to ensure that they are valid
+ before we can pass them to odr_types_equivalent_p (PR lto/83121). */
+ if (lto_location_cache::current_cache)
+ lto_location_cache::current_cache->apply_location_cache ();
if (!odr_types_equivalent_p (val->type, type,
!flag_ltrans && !val->odr_violated && !warned,
&warned, &visited,
merge = false;
odr_violation_reported = true;
val->odr_violated = true;
- if (symtab->dump_file)
- {
- fprintf (symtab->dump_file, "ODR violation\n");
-
- print_node (symtab->dump_file, "", val->type, 0);
- putc ('\n',symtab->dump_file);
- print_node (symtab->dump_file, "", type, 0);
- putc ('\n',symtab->dump_file);
- }
}
gcc_assert (val->odr_violated || !odr_must_violate);
/* Sanity check that all bases will be build same way again. */
-#ifdef ENABLE_CHECKING
- if (COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type)
+ if (flag_checking
+ && COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type)
&& TREE_CODE (val->type) == RECORD_TYPE
&& TREE_CODE (type) == RECORD_TYPE
&& TYPE_BINFO (val->type) && TYPE_BINFO (type)
j++;
}
}
-#endif
/* Regularize things a little. During LTO same types may come with
if (slot && *slot)
{
val = *slot;
-#ifdef ENABLE_CHECKING
- if (in_lto_p && can_be_vtable_hashed_p (type))
+ if (flag_checking
+ && in_lto_p && can_be_vtable_hashed_p (type))
{
hash = hash_odr_vtable (type);
vtable_slot = odr_vtable_hash->find_slot_with_hash (type, hash,
gcc_assert (!vtable_slot || *vtable_slot == *slot);
vtable_slot = NULL;
}
-#endif
}
else if (*vtable_slot)
val = *vtable_slot;
/* Be sure we did not recorded any derived types; these may need
renumbering too. */
gcc_assert (val->derived_types.length() == 0);
- if (odr_types_ptr)
- val->id = odr_types.length ();
+ val->id = odr_types.length ();
vec_safe_push (odr_types_ptr, val);
}
return val;
{
struct symtab_node *n;
FILE *inheritance_dump_file;
- int flags;
+ dump_flags_t flags;
if (odr_hash)
return;
odr_vtable_hash = new odr_vtable_hash_type (23);
/* We reconstruct the graph starting of types of all methods seen in the
- the unit. */
+ unit. */
FOR_EACH_SYMBOL (n)
if (is_a <cgraph_node *> (n)
&& DECL_VIRTUAL_P (n->decl)
if ((ref->use == IPA_REF_ALIAS
&& referenced_from_vtable_p (dyn_cast<cgraph_node *> (ref->referring)))
|| (ref->use == IPA_REF_ADDR
- && TREE_CODE (ref->referring->decl) == VAR_DECL
+ && VAR_P (ref->referring->decl)
&& DECL_VIRTUAL_P (ref->referring->decl)))
{
found = true;
return found;
}
+/* Return true if TARGET is a declaration of the runtime hook
+ __cxa_pure_virtual. Matched by DECL_NAME; declarations with
+ METHOD_TYPE are rejected since the runtime stub is not a method. */
+
+static bool
+is_cxa_pure_virtual_p (tree target)
+{
+ return target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE
+ && DECL_NAME (target)
+ && id_equal (DECL_NAME (target),
+ "__cxa_pure_virtual");
+}
+
/* If TARGET has associated node, record it in the NODES array.
CAN_REFER specify if program can refer to the target directly.
if TARGET is unknown (NULL) or it can not be inserted (for example because
{
struct cgraph_node *target_node, *alias_target;
enum availability avail;
+ bool pure_virtual = is_cxa_pure_virtual_p (target);
- /* cxa_pure_virtual and __builtin_unreachable do not need to be added into
+ /* __builtin_unreachable does not need to be added into
list of targets; the runtime effect of calling them is undefined.
Only "real" virtual methods should be accounted. */
- if (target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE)
+ if (target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE && !pure_virtual)
return;
if (!can_refer)
??? Maybe it would make sense to be more aggressive for LTO even
elsewhere. */
if (!flag_ltrans
+ && !pure_virtual
&& type_in_anonymous_namespace_p (DECL_CONTEXT (target))
&& (!target_node
|| !referenced_from_vtable_p (target_node)))
{
gcc_assert (!target_node->global.inlined_to);
gcc_assert (target_node->real_symbol_p ());
+ /* When sanitizing, do not assume that __cxa_pure_virtual is not called
+ by valid program. */
+ if (flag_sanitize & SANITIZE_UNREACHABLE)
+ ;
+ /* Only add pure virtual if it is the only possible target. This way
+ we will preserve the diagnostics about pure virtual called in many
+ cases without disabling optimization in other. */
+ else if (pure_virtual)
+ {
+ if (nodes.length ())
+ return;
+ }
+ /* If we found a real target, take away cxa_pure_virtual. */
+ else if (!pure_virtual && nodes.length () == 1
+ && is_cxa_pure_virtual_p (nodes[0]->decl))
+ nodes.pop ();
+ if (pure_virtual && nodes.length ())
+ return;
if (!inserted->add (target))
{
cached_polymorphic_call_targets->add (target_node);
nodes.safe_push (target_node);
}
}
- else if (completep
- && (!type_in_anonymous_namespace_p
- (DECL_CONTEXT (target))
- || flag_ltrans))
+ else if (!completep)
+ ;
+ /* We have definition of __cxa_pure_virtual that is not accessible (it is
+ optimized out or partitioned to another unit) so we cannot add it. When
+ not sanitizing, there is nothing to do.
+ Otherwise declare the list incomplete. */
+ else if (pure_virtual)
+ {
+ if (flag_sanitize & SANITIZE_UNREACHABLE)
+ *completep = false;
+ }
+ else if (flag_ltrans
+ || !type_in_anonymous_namespace_p (DECL_CONTEXT (target)))
*completep = false;
}
/* Polymorphic call target cache helpers. */
-struct polymorphic_call_target_hasher
+struct polymorphic_call_target_hasher
+ : pointer_hash <polymorphic_call_target_d>
{
- typedef polymorphic_call_target_d *value_type;
- typedef polymorphic_call_target_d *compare_type;
static inline hashval_t hash (const polymorphic_call_target_d *);
static inline bool equal (const polymorphic_call_target_d *,
const polymorphic_call_target_d *);
{
inchash::hash hstate (odr_query->otr_token);
- hstate.add_wide_int (odr_query->type->id);
+ hstate.add_hwi (odr_query->type->id);
hstate.merge_hash (TYPE_UID (odr_query->context.outer_type));
- hstate.add_wide_int (odr_query->context.offset);
+ hstate.add_hwi (odr_query->context.offset);
if (odr_query->context.speculative_outer_type)
{
hstate.merge_hash (TYPE_UID (odr_query->context.speculative_outer_type));
- hstate.add_wide_int (odr_query->context.speculative_offset);
+ hstate.add_hwi (odr_query->context.speculative_offset);
}
hstate.add_flag (odr_query->speculative);
hstate.add_flag (odr_query->context.maybe_in_construction);
{
tree type;
int count;
- gcov_type dyn_count;
+ profile_count dyn_count;
};
/* Record about how many calls would benefit from given method to be final. */
{
tree decl;
int count;
- gcov_type dyn_count;
+ profile_count dyn_count;
};
/* Information about type and decl warnings. */
struct final_warning_record
{
- gcov_type dyn_count;
- vec<odr_type_warn_count> type_warnings;
+ /* If needed grow type_warnings vector and initialize new decl_warn_count
+ to have dyn_count set to profile_count::zero (). */
+ void grow_type_warnings (unsigned newlen);
+
+ profile_count dyn_count;
+ auto_vec<odr_type_warn_count> type_warnings;
hash_map<tree, decl_warn_count> decl_warnings;
};
+
+/* Grow TYPE_WARNINGS to NEWLEN entries if it is shorter. New entries are
+ zero-initialized except dyn_count, which is set to profile_count::zero ().
+ NOTE(review): NEWLEN is an exclusive bound — after the call, valid
+ indices are 0 .. NEWLEN-1; callers indexing entry I must pass I+1. */
+void
+final_warning_record::grow_type_warnings (unsigned newlen)
+{
+ unsigned len = type_warnings.length ();
+ if (newlen > len)
+ {
+ type_warnings.safe_grow_cleared (newlen);
+ for (unsigned i = len; i < newlen; i++)
+ type_warnings[i].dyn_count = profile_count::zero ();
+ }
+}
+
struct final_warning_record *final_warning_records;
/* Return vector containing possible targets of polymorphic call of type
if ((*slot)->type_warning && final_warning_records)
{
final_warning_records->type_warnings[(*slot)->type_warning - 1].count++;
- final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
- += final_warning_records->dyn_count;
+ if (!final_warning_records->type_warnings
+ [(*slot)->type_warning - 1].dyn_count.initialized_p ())
+ final_warning_records->type_warnings
+ [(*slot)->type_warning - 1].dyn_count = profile_count::zero ();
+ if (final_warning_records->dyn_count > 0)
+ final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
+ = final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
+ + final_warning_records->dyn_count;
}
if (!speculative && (*slot)->decl_warning && final_warning_records)
{
struct decl_warn_count *c =
final_warning_records->decl_warnings.get ((*slot)->decl_warning);
c->count++;
- c->dyn_count += final_warning_records->dyn_count;
+ if (final_warning_records->dyn_count > 0)
+ c->dyn_count += final_warning_records->dyn_count;
}
return (*slot)->targets;
}
if (!outer_type->all_derivations_known)
{
- if (!speculative && final_warning_records)
+ if (!speculative && final_warning_records
+ && nodes.length () == 1
+ && TREE_CODE (TREE_TYPE (nodes[0]->decl)) == METHOD_TYPE)
{
if (complete
- && nodes.length () == 1
&& warn_suggest_final_types
&& !outer_type->derived_types.length ())
{
- if (outer_type->id >= (int)final_warning_records->type_warnings.length ())
- final_warning_records->type_warnings.safe_grow_cleared
- (odr_types.length ());
+ final_warning_records->grow_type_warnings
+ (outer_type->id);
final_warning_records->type_warnings[outer_type->id].count++;
+ if (!final_warning_records->type_warnings
+ [outer_type->id].dyn_count.initialized_p ())
+ final_warning_records->type_warnings
+ [outer_type->id].dyn_count = profile_count::zero ();
final_warning_records->type_warnings[outer_type->id].dyn_count
+= final_warning_records->dyn_count;
final_warning_records->type_warnings[outer_type->id].type
}
if (complete
&& warn_suggest_final_methods
- && nodes.length () == 1
&& types_same_for_odr (DECL_CONTEXT (nodes[0]->decl),
outer_type->type))
{
char *name = NULL;
if (in_lto_p)
name = cplus_demangle_v3 (targets[i]->asm_name (), 0);
- fprintf (f, " %s/%i", name ? name : targets[i]->name (), targets[i]->order);
+ fprintf (f, " %s/%i", name ? name : targets[i]->name (),
+ targets[i]->order);
if (in_lto_p)
free (name);
if (!targets[i]->definition)
fprintf (f, " Speculative targets:");
dump_targets (f, targets);
}
- gcc_assert (targets.length () <= len);
+ /* Ugly: during callgraph construction the target cache may get populated
+ before all targets are found. While this is harmless (because all local
+ types are discovered and only in those case we devirtualize fully and we
+ don't do speculative devirtualization before IPA stage) it triggers
+ assert here when dumping at that stage also populates the cache with
+ speculative targets. Quietly ignore this. */
+ gcc_assert (symtab->state < IPA_SSA || targets.length () <= len);
fprintf (f, "\n");
}
bool final;
if (TREE_CODE (TREE_TYPE (n->decl)) == FUNCTION_TYPE
- && ((fcode = DECL_FUNCTION_CODE (n->decl))
- == BUILT_IN_UNREACHABLE
+ && ((fcode = DECL_FUNCTION_CODE (n->decl)) == BUILT_IN_UNREACHABLE
|| fcode == BUILT_IN_TRAP))
return true;
+ if (is_cxa_pure_virtual_p (n->decl))
+ return true;
+
if (!odr_hash)
return true;
targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final);
bool
possible_polymorphic_call_target_p (tree ref,
- gimple stmt,
+ gimple *stmt,
struct cgraph_node *n)
{
ipa_polymorphic_call_context context (current_function_decl, ref, stmt);
free_polymorphic_call_targets_hash ();
timevar_push (TV_IPA_INHERITANCE);
/* We reconstruct the graph starting from types of all methods seen in the
- the unit. */
+ unit. */
FOR_EACH_FUNCTION (n)
if (DECL_VIRTUAL_P (n->decl)
&& !n->definition
if (warn_suggest_final_methods || warn_suggest_final_types)
{
final_warning_records = new (final_warning_record);
- final_warning_records->type_warnings = vNULL;
- final_warning_records->type_warnings.safe_grow_cleared (odr_types.length ());
+ final_warning_records->dyn_count = profile_count::zero ();
+ final_warning_records->grow_type_warnings (odr_types.length ());
free_polymorphic_call_targets_hash ();
}
if (!opt_for_fn (n->decl, flag_devirtualize))
continue;
if (dump_file && n->indirect_calls)
- fprintf (dump_file, "\n\nProcesing function %s/%i\n",
- n->name (), n->order);
+ fprintf (dump_file, "\n\nProcesing function %s\n",
+ n->dump_name ());
for (e = n->indirect_calls; e; e = e->next_callee)
if (e->indirect_info->polymorphic)
{
bool final;
if (final_warning_records)
- final_warning_records->dyn_count = e->count;
+ final_warning_records->dyn_count = e->count.ipa ();
vec <cgraph_node *>targets
= possible_polymorphic_call_targets
{
location_t locus = gimple_location_safe (e->call_stmt);
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, locus,
- "speculatively devirtualizing call in %s/%i to %s/%i\n",
- n->name (), n->order,
- likely_target->name (),
- likely_target->order);
+ "speculatively devirtualizing call "
+ "in %s to %s\n",
+ n->dump_name (),
+ likely_target->dump_name ());
}
if (!likely_target->can_be_discarded_p ())
{
nconverted++;
update = true;
e->make_speculative
- (likely_target, e->count * 8 / 10, e->frequency * 8 / 10);
+ (likely_target, e->count.apply_scale (8, 10));
}
}
if (update)
- inline_update_overall_summary (n);
+ ipa_update_overall_fn_summary (n);
}
if (warn_suggest_final_methods || warn_suggest_final_types)
{
{
tree type = final_warning_records->type_warnings[i].type;
int count = final_warning_records->type_warnings[i].count;
- long long dyn_count
+ profile_count dyn_count
= final_warning_records->type_warnings[i].dyn_count;
- if (!dyn_count)
+ if (!(dyn_count > 0))
warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
OPT_Wsuggest_final_types, count,
"Declaring type %qD final "
"executed %lli times",
type,
count,
- dyn_count);
+ (long long) dyn_count.to_gcov_type ());
}
}
if (warn_suggest_final_methods)
{
- vec<const decl_warn_count*> decl_warnings_vec = vNULL;
+ auto_vec<const decl_warn_count*> decl_warnings_vec;
final_warning_records->decl_warnings.traverse
<vec<const decl_warn_count *> *, add_decl_warning> (&decl_warnings_vec);
{
tree decl = decl_warnings_vec[i]->decl;
int count = decl_warnings_vec[i]->count;
- long long dyn_count = decl_warnings_vec[i]->dyn_count;
+ profile_count dyn_count
+ = decl_warnings_vec[i]->dyn_count;
- if (!dyn_count)
+ if (!(dyn_count > 0))
if (DECL_CXX_DESTRUCTOR_P (decl))
warning_n (DECL_SOURCE_LOCATION (decl),
OPT_Wsuggest_final_methods, count,
"Declaring virtual destructor of %qD final "
"would enable devirtualization of %i calls "
"executed %lli times",
- DECL_CONTEXT (decl), count, dyn_count);
+ DECL_CONTEXT (decl), count,
+ (long long)dyn_count.to_gcov_type ());
else
warning_n (DECL_SOURCE_LOCATION (decl),
OPT_Wsuggest_final_methods, count,
"Declaring method %qD final "
"would enable devirtualization of %i calls "
"executed %lli times",
- decl, count, dyn_count);
+ decl, count,
+ (long long)dyn_count.to_gcov_type ());
}
}
-
+
delete (final_warning_records);
final_warning_records = 0;
}