/* Basic IPA utilities for type inheritance graph construction and
devirtualization.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
#include "ipa-utils.h"
#include "gimple-fold.h"
#include "symbol-summary.h"
+#include "tree-vrp.h"
#include "ipa-prop.h"
-#include "ipa-inline.h"
+#include "ipa-fnsummary.h"
#include "demangle.h"
#include "dbgcnt.h"
#include "gimple-pretty-print.h"
#include "intl.h"
+#include "stringpool.h"
+#include "attribs.h"
/* Hash based set of pairs of types. */
struct type_pair
};
template <>
-struct default_hash_traits <type_pair> : typed_noop_remove <type_pair>
+struct default_hash_traits <type_pair>
+ : typed_noop_remove <type_pair>
{
- typedef type_pair value_type;
- typedef type_pair compare_type;
+ GTY((skip)) typedef type_pair value_type;
+ GTY((skip)) typedef type_pair compare_type;
static hashval_t
hash (type_pair p)
{
v = TREE_OPERAND (TREE_OPERAND (v, 0), 0);
}
- hstate.add_wide_int (IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (v)));
+ hstate.add_hwi (IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME (v)));
return hstate.end ();
}
{
if (!types_same_for_odr (t1, t2, true))
return false;
- /* Limit recursion: If subtypes are ODR types and we know
- that they are same, be happy. */
- if (!odr_type_p (t1) || !get_odr_type (t1, true)->odr_violated)
+ /* Limit recursion: if subtypes are ODR types and we know that they are
+ same, be happy. We need to call get_odr_type on both subtypes since
+ we don't know which among t1 and t2 defines the common ODR type and
+ therefore which call will report the ODR violation, if any. */
+ if (!odr_type_p (t1)
+ || !odr_type_p (t2)
+ || (!get_odr_type (t1, true)->odr_violated
+ && !get_odr_type (t2, true)->odr_violated))
return true;
}
if (TREE_CODE (ref1->referred->decl)
!= TREE_CODE (ref2->referred->decl))
{
- if (TREE_CODE (ref1->referred->decl) == VAR_DECL)
+ if (VAR_P (ref1->referred->decl))
end1 = true;
- else if (TREE_CODE (ref2->referred->decl) == VAR_DECL)
+ else if (VAR_P (ref2->referred->decl))
end2 = true;
}
}
if (name1 && name2 && strcmp (name1, name2))
{
inform (loc_t1,
- "type name %<%s%> should match type name %<%s%>",
+ "type name %qs should match type name %qs",
name1, name2);
if (loc_t2_useful)
inform (loc_t2,
if (types_odr_comparable (t1, t2, true)
&& types_same_for_odr (t1, t2, true))
inform (loc_t1,
- "type %qT itself violate the C++ One Definition Rule", t1);
+ "type %qT itself violates the C++ One Definition Rule", t1);
/* Prevent pointless warnings like "struct aa" should match "struct aa". */
else if (TYPE_NAME (t1) == TYPE_NAME (t2)
&& TREE_CODE (t1) == TREE_CODE (t2) && !loc_t2_useful)
if (DECL_ARTIFICIAL (f1))
break;
warn_odr (t1, t2, f1, f2, warn, warned,
- G_("fields has different layout "
+ G_("fields have different layout "
"in another translation unit"));
return false;
}
return false;
}
- if ((TYPE_MAIN_VARIANT (t1) == t1 || TYPE_MAIN_VARIANT (t2) == t2)
- && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t1))
- && COMPLETE_TYPE_P (TYPE_MAIN_VARIANT (t2))
- && odr_type_p (TYPE_MAIN_VARIANT (t1))
- && odr_type_p (TYPE_MAIN_VARIANT (t2))
- && (TYPE_METHODS (TYPE_MAIN_VARIANT (t1))
- != TYPE_METHODS (TYPE_MAIN_VARIANT (t2))))
- {
- /* Currently free_lang_data sets TYPE_METHODS to error_mark_node
- if it is non-NULL so this loop will never realy execute. */
- if (TYPE_METHODS (TYPE_MAIN_VARIANT (t1)) != error_mark_node
- && TYPE_METHODS (TYPE_MAIN_VARIANT (t2)) != error_mark_node)
- for (f1 = TYPE_METHODS (TYPE_MAIN_VARIANT (t1)),
- f2 = TYPE_METHODS (TYPE_MAIN_VARIANT (t2));
- f1 && f2 ; f1 = DECL_CHAIN (f1), f2 = DECL_CHAIN (f2))
- {
- if (DECL_ASSEMBLER_NAME (f1) != DECL_ASSEMBLER_NAME (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("a different method of same type "
- "is defined in another "
- "translation unit"));
- return false;
- }
- if (DECL_VIRTUAL_P (f1) != DECL_VIRTUAL_P (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("s definition that differs by virtual "
- "keyword in another translation unit"));
- return false;
- }
- if (DECL_VINDEX (f1) != DECL_VINDEX (f2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("virtual table layout differs "
- "in another translation unit"));
- return false;
- }
- if (odr_subtypes_equivalent_p (TREE_TYPE (f1),
- TREE_TYPE (f2), visited,
- loc1, loc2))
- {
- warn_odr (t1, t2, f1, f2, warn, warned,
- G_("method with incompatible type is "
- "defined in another translation unit"));
- return false;
- }
- }
- if ((f1 == NULL) != (f2 == NULL))
- {
- warn_odr (t1, t2, NULL, NULL, warn, warned,
- G_("a type with different number of methods "
- "is defined in another translation unit"));
- return false;
- }
- }
}
break;
}
}
}
- /* Next compare memory layout. */
+ /* Next compare memory layout.
+ The DECL_SOURCE_LOCATIONs in this invocation came from LTO streaming.
+ We must apply the location cache to ensure that they are valid
+ before we can pass them to odr_types_equivalent_p (PR lto/83121). */
+ if (lto_location_cache::current_cache)
+ lto_location_cache::current_cache->apply_location_cache ();
if (!odr_types_equivalent_p (val->type, type,
!flag_ltrans && !val->odr_violated && !warned,
&warned, &visited,
/* Be sure we did not recorded any derived types; these may need
renumbering too. */
gcc_assert (val->derived_types.length() == 0);
- if (odr_types_ptr)
- val->id = odr_types.length ();
+ val->id = odr_types.length ();
vec_safe_push (odr_types_ptr, val);
}
return val;
{
struct symtab_node *n;
FILE *inheritance_dump_file;
- int flags;
+ dump_flags_t flags;
if (odr_hash)
return;
if ((ref->use == IPA_REF_ALIAS
&& referenced_from_vtable_p (dyn_cast<cgraph_node *> (ref->referring)))
|| (ref->use == IPA_REF_ADDR
- && TREE_CODE (ref->referring->decl) == VAR_DECL
+ && VAR_P (ref->referring->decl)
&& DECL_VIRTUAL_P (ref->referring->decl)))
{
found = true;
{
return target && TREE_CODE (TREE_TYPE (target)) != METHOD_TYPE
&& DECL_NAME (target)
- && !strcmp (IDENTIFIER_POINTER (DECL_NAME (target)),
+ && id_equal (DECL_NAME (target),
"__cxa_pure_virtual");
}
nodes.safe_push (target_node);
}
}
- else if (completep
- && (!type_in_anonymous_namespace_p
- (DECL_CONTEXT (target))
- || flag_ltrans))
+ else if (!completep)
+ ;
+ /* We have a definition of __cxa_pure_virtual that is not accessible (it is
+ optimized out or partitioned to another unit), so we cannot add it. When
+ not sanitizing, there is nothing to do.
+ Otherwise declare the list incomplete. */
+ else if (pure_virtual)
+ {
+ if (flag_sanitize & SANITIZE_UNREACHABLE)
+ *completep = false;
+ }
+ else if (flag_ltrans
+ || !type_in_anonymous_namespace_p (DECL_CONTEXT (target)))
*completep = false;
}
{
inchash::hash hstate (odr_query->otr_token);
- hstate.add_wide_int (odr_query->type->id);
+ hstate.add_hwi (odr_query->type->id);
hstate.merge_hash (TYPE_UID (odr_query->context.outer_type));
- hstate.add_wide_int (odr_query->context.offset);
+ hstate.add_hwi (odr_query->context.offset);
if (odr_query->context.speculative_outer_type)
{
hstate.merge_hash (TYPE_UID (odr_query->context.speculative_outer_type));
- hstate.add_wide_int (odr_query->context.speculative_offset);
+ hstate.add_hwi (odr_query->context.speculative_offset);
}
hstate.add_flag (odr_query->speculative);
hstate.add_flag (odr_query->context.maybe_in_construction);
{
tree type;
int count;
- gcov_type dyn_count;
+ profile_count dyn_count;
};
/* Record about how many calls would benefit from given method to be final. */
{
tree decl;
int count;
- gcov_type dyn_count;
+ profile_count dyn_count;
};
/* Information about type and decl warnings. */
struct final_warning_record
{
- gcov_type dyn_count;
+ /* If needed grow type_warnings vector and initialize new decl_warn_count
+ to have dyn_count set to profile_count::zero (). */
+ void grow_type_warnings (unsigned newlen);
+
+ profile_count dyn_count;
auto_vec<odr_type_warn_count> type_warnings;
hash_map<tree, decl_warn_count> decl_warnings;
};
+
+/* Grow TYPE_WARNINGS so it has at least NEWLEN entries.  Each newly
+   added entry gets its dyn_count set to profile_count::zero (), since
+   safe_grow_cleared alone does not establish an initialized count.  */
+void
+final_warning_record::grow_type_warnings (unsigned newlen)
+{
+  unsigned len = type_warnings.length ();
+  if (newlen > len)
+    {
+      /* Zero-fill the new slots, then give each a well-defined count.  */
+      type_warnings.safe_grow_cleared (newlen);
+      for (unsigned i = len; i < newlen; i++)
+	type_warnings[i].dyn_count = profile_count::zero ();
+    }
+}
+
struct final_warning_record *final_warning_records;
/* Return vector containing possible targets of polymorphic call of type
if ((*slot)->type_warning && final_warning_records)
{
final_warning_records->type_warnings[(*slot)->type_warning - 1].count++;
- final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
- += final_warning_records->dyn_count;
+ if (!final_warning_records->type_warnings
+ [(*slot)->type_warning - 1].dyn_count.initialized_p ())
+ final_warning_records->type_warnings
+ [(*slot)->type_warning - 1].dyn_count = profile_count::zero ();
+ if (final_warning_records->dyn_count > 0)
+ final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
+ = final_warning_records->type_warnings[(*slot)->type_warning - 1].dyn_count
+ + final_warning_records->dyn_count;
}
if (!speculative && (*slot)->decl_warning && final_warning_records)
{
struct decl_warn_count *c =
final_warning_records->decl_warnings.get ((*slot)->decl_warning);
c->count++;
- c->dyn_count += final_warning_records->dyn_count;
+ if (final_warning_records->dyn_count > 0)
+ c->dyn_count += final_warning_records->dyn_count;
}
return (*slot)->targets;
}
&& warn_suggest_final_types
&& !outer_type->derived_types.length ())
{
- if (outer_type->id >= (int)final_warning_records->type_warnings.length ())
- final_warning_records->type_warnings.safe_grow_cleared
- (odr_types.length ());
+ final_warning_records->grow_type_warnings
+ (outer_type->id);
final_warning_records->type_warnings[outer_type->id].count++;
+ if (!final_warning_records->type_warnings
+ [outer_type->id].dyn_count.initialized_p ())
+ final_warning_records->type_warnings
+ [outer_type->id].dyn_count = profile_count::zero ();
final_warning_records->type_warnings[outer_type->id].dyn_count
+= final_warning_records->dyn_count;
final_warning_records->type_warnings[outer_type->id].type
char *name = NULL;
if (in_lto_p)
name = cplus_demangle_v3 (targets[i]->asm_name (), 0);
- fprintf (f, " %s/%i", name ? name : targets[i]->name (), targets[i]->order);
+ fprintf (f, " %s/%i", name ? name : targets[i]->name (),
+ targets[i]->order);
if (in_lto_p)
free (name);
if (!targets[i]->definition)
fprintf (f, " Speculative targets:");
dump_targets (f, targets);
}
- gcc_assert (targets.length () <= len);
+ /* Ugly: during callgraph construction the target cache may get populated
+ before all targets are found. While this is harmless (because all local
+ types are discovered and only in those cases do we devirtualize fully and we
+ don't do speculative devirtualization before IPA stage) it triggers
+ an assert here when dumping at that stage also populates the cache with
+ speculative targets. Quietly ignore this. */
+ gcc_assert (symtab->state < IPA_SSA || targets.length () <= len);
fprintf (f, "\n");
}
if (warn_suggest_final_methods || warn_suggest_final_types)
{
final_warning_records = new (final_warning_record);
- final_warning_records->type_warnings.safe_grow_cleared (odr_types.length ());
+ final_warning_records->dyn_count = profile_count::zero ();
+ final_warning_records->grow_type_warnings (odr_types.length ());
free_polymorphic_call_targets_hash ();
}
if (!opt_for_fn (n->decl, flag_devirtualize))
continue;
if (dump_file && n->indirect_calls)
- fprintf (dump_file, "\n\nProcesing function %s/%i\n",
- n->name (), n->order);
+ fprintf (dump_file, "\n\nProcesing function %s\n",
+ n->dump_name ());
for (e = n->indirect_calls; e; e = e->next_callee)
if (e->indirect_info->polymorphic)
{
bool final;
if (final_warning_records)
- final_warning_records->dyn_count = e->count;
+ final_warning_records->dyn_count = e->count.ipa ();
vec <cgraph_node *>targets
= possible_polymorphic_call_targets
{
location_t locus = gimple_location_safe (e->call_stmt);
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, locus,
- "speculatively devirtualizing call in %s/%i to %s/%i\n",
- n->name (), n->order,
- likely_target->name (),
- likely_target->order);
+ "speculatively devirtualizing call "
+ "in %s to %s\n",
+ n->dump_name (),
+ likely_target->dump_name ());
}
if (!likely_target->can_be_discarded_p ())
{
nconverted++;
update = true;
e->make_speculative
- (likely_target, e->count * 8 / 10, e->frequency * 8 / 10);
+ (likely_target, e->count.apply_scale (8, 10));
}
}
if (update)
- inline_update_overall_summary (n);
+ ipa_update_overall_fn_summary (n);
}
if (warn_suggest_final_methods || warn_suggest_final_types)
{
{
tree type = final_warning_records->type_warnings[i].type;
int count = final_warning_records->type_warnings[i].count;
- long long dyn_count
+ profile_count dyn_count
= final_warning_records->type_warnings[i].dyn_count;
- if (!dyn_count)
+ if (!(dyn_count > 0))
warning_n (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
OPT_Wsuggest_final_types, count,
"Declaring type %qD final "
"executed %lli times",
type,
count,
- dyn_count);
+ (long long) dyn_count.to_gcov_type ());
}
}
{
tree decl = decl_warnings_vec[i]->decl;
int count = decl_warnings_vec[i]->count;
- long long dyn_count = decl_warnings_vec[i]->dyn_count;
+ profile_count dyn_count
+ = decl_warnings_vec[i]->dyn_count;
- if (!dyn_count)
+ if (!(dyn_count > 0))
if (DECL_CXX_DESTRUCTOR_P (decl))
warning_n (DECL_SOURCE_LOCATION (decl),
OPT_Wsuggest_final_methods, count,
"Declaring virtual destructor of %qD final "
"would enable devirtualization of %i calls "
"executed %lli times",
- DECL_CONTEXT (decl), count, dyn_count);
+ DECL_CONTEXT (decl), count,
+ (long long)dyn_count.to_gcov_type ());
else
warning_n (DECL_SOURCE_LOCATION (decl),
OPT_Wsuggest_final_methods, count,
"Declaring method %qD final "
"would enable devirtualization of %i calls "
"executed %lli times",
- decl, count, dyn_count);
+ decl, count,
+ (long long)dyn_count.to_gcov_type ());
}
}