namespace ipa_icf {
+/* Initialization and computation of symtab node hash; their data
+   are propagated later on.  */
+
+static sem_item_optimizer *optimizer = NULL;
+
/* Constructor. */
symbol_compare_collection::symbol_compare_collection (symtab_node *node)
for (unsigned i = 0; i < bb_sizes.length (); i++)
hstate.add_int (bb_sizes[i]);
+
+  /* Add common features of the declaration itself.  */
+  if (DECL_FUNCTION_SPECIFIC_TARGET (decl))
+    hstate.add_wide_int
+      (cl_target_option_hash
+	 (TREE_TARGET_OPTION (DECL_FUNCTION_SPECIFIC_TARGET (decl))));
+  if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl))
+    /* Feed the optimization-option hash into HSTATE just like the
+       target-option hash above; previously the value was computed as a
+       bare expression statement and silently discarded, so functions
+       compiled with different optimization attributes could still hash
+       (and thus bucket) together.  */
+    hstate.add_wide_int
+      (cl_optimization_hash
+	 (TREE_OPTIMIZATION (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl))));
+  hstate.add_flag (DECL_DISREGARD_INLINE_LIMITS (decl));
+  hstate.add_flag (DECL_DECLARED_INLINE_P (decl));
+  hstate.add_flag (DECL_IS_OPERATOR_NEW (decl));
+  hstate.add_flag (DECL_CXX_CONSTRUCTOR_P (decl));
+  hstate.add_flag (DECL_CXX_DESTRUCTOR_P (decl));
+
  hash = hstate.end ();
}
return return_false_with_msg ("no stack limit attributes are different");
if (DECL_CXX_CONSTRUCTOR_P (decl) != DECL_CXX_CONSTRUCTOR_P (item->decl))
- return return_false_with_msg ("DELC_CXX_CONSTRUCTOR mismatch");
+ return return_false_with_msg ("DECL_CXX_CONSTRUCTOR mismatch");
if (DECL_CXX_DESTRUCTOR_P (decl) != DECL_CXX_DESTRUCTOR_P (item->decl))
- return return_false_with_msg ("DELC_CXX_DESTRUCTOR mismatch");
+ return return_false_with_msg ("DECL_CXX_DESTRUCTOR mismatch");
if (flags_from_decl_or_type (decl) != flags_from_decl_or_type (item->decl))
return return_false_with_msg ("decl_or_type flags are different");
}
}
+/* Accumulate to HSTATE a hash of type TYPE.
+   Types that may end up being compatible after LTO type merging need to
+   have the same hash.  */
+
+void
+sem_item::add_type (const_tree type, inchash::hash &hstate)
+{
+  /* A missing type contributes a fixed value, so callers may pass
+     NULL_TREE unconditionally.  */
+  if (type == NULL_TREE)
+    {
+      hstate.merge_hash (0);
+      return;
+    }
+
+  /* Hash the main variant (and the canonical type when one exists) so
+     type variants that merge at LTO time hash identically.  */
+  type = TYPE_MAIN_VARIANT (type);
+  if (TYPE_CANONICAL (type))
+    type = TYPE_CANONICAL (type);
+
+  if (!AGGREGATE_TYPE_P (type))
+    hstate.add_int (TYPE_MODE (type));
+
+  if (TREE_CODE (type) == COMPLEX_TYPE)
+    {
+      hstate.add_int (COMPLEX_TYPE);
+      sem_item::add_type (TREE_TYPE (type), hstate);
+    }
+  else if (INTEGRAL_TYPE_P (type))
+    {
+      /* All integral types hash as INTEGER_TYPE plus signedness and
+	 precision; finer distinctions (enum vs. int, etc.) are
+	 deliberately not hashed.  */
+      hstate.add_int (INTEGER_TYPE);
+      hstate.add_flag (TYPE_UNSIGNED (type));
+      hstate.add_int (TYPE_PRECISION (type));
+    }
+  else if (VECTOR_TYPE_P (type))
+    {
+      hstate.add_int (VECTOR_TYPE);
+      hstate.add_int (TYPE_PRECISION (type));
+      sem_item::add_type (TREE_TYPE (type), hstate);
+    }
+  else if (TREE_CODE (type) == ARRAY_TYPE)
+    {
+      hstate.add_int (ARRAY_TYPE);
+      /* Do not hash size, so complete and incomplete types can match.  */
+      sem_item::add_type (TREE_TYPE (type), hstate);
+    }
+  else if (RECORD_OR_UNION_TYPE_P (type))
+    {
+      /* Record/union hashes are memoized in the global OPTIMIZER's
+	 m_type_hash_cache.  NOTE(review): assumes OPTIMIZER is non-NULL
+	 whenever hashing runs — confirm against pass initialization.  */
+      hashval_t *val = optimizer->m_type_hash_cache.get (type);
+
+      if (!val)
+	{
+	  inchash::hash hstate2;
+	  unsigned nf;
+	  tree f;
+	  hashval_t hash;
+
+	  /* Unions hash like records: only the count and the types of
+	     FIELD_DECLs matter, not field names or offsets.  */
+	  hstate2.add_int (RECORD_TYPE);
+	  gcc_assert (COMPLETE_TYPE_P (type));
+
+	  for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
+	    if (TREE_CODE (f) == FIELD_DECL)
+	      {
+		add_type (TREE_TYPE (f), hstate2);
+		nf++;
+	      }
+
+	  hstate2.add_int (nf);
+	  hash = hstate2.end ();
+	  hstate.add_wide_int (hash);
+	  optimizer->m_type_hash_cache.put (type, hash);
+	}
+      else
+	hstate.add_wide_int (*val);
+    }
+}
+
/* Improve accumulated hash for HSTATE based on a gimple statement STMT. */
void
switch (code)
{
+ case GIMPLE_SWITCH:
+ add_expr (gimple_switch_index (as_a <gswitch *> (stmt)), hstate);
+ break;
case GIMPLE_ASSIGN:
+ hstate.add_int (gimple_assign_rhs_code (stmt));
if (commutative_tree_code (gimple_assign_rhs_code (stmt))
|| commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
{
inchash::hash one, two;
add_expr (gimple_assign_rhs1 (stmt), one);
+ add_type (TREE_TYPE (gimple_assign_rhs1 (stmt)), one);
add_expr (gimple_assign_rhs2 (stmt), two);
hstate.add_commutative (one, two);
+ if (commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
+ {
+ add_expr (gimple_assign_rhs3 (stmt), hstate);
+ add_type (TREE_TYPE (gimple_assign_rhs3 (stmt)), hstate);
+ }
add_expr (gimple_assign_lhs (stmt), hstate);
+ add_type (TREE_TYPE (gimple_assign_lhs (stmt)), two);
break;
}
/* ... fall through ... */
case GIMPLE_RETURN:
/* All these statements are equivalent if their operands are. */
for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
- add_expr (gimple_op (stmt, i), hstate);
+ {
+ add_expr (gimple_op (stmt, i), hstate);
+ if (gimple_op (stmt, i))
+ add_type (TREE_TYPE (gimple_op (stmt, i)), hstate);
+ }
default:
break;
}
{
struct cgraph_edge *e;
- if (!opt_for_fn (decl, flag_devirtualize))
+ if (!opt_for_fn (get_node ()->decl, flag_devirtualize))
return false;
- if (get_node ()->indirect_calls != NULL
- || m_compared_func->get_node ()->indirect_calls != NULL)
+ if (get_node ()->indirect_calls != NULL)
return true;
/* TODO: We can do simple propagation determining what calls may lead to
a polymorphic call. */
- for (e = m_compared_func->get_node ()->callees; e; e = e->next_callee)
+ for (e = get_node ()->callees; e; e = e->next_callee)
if (e->callee->definition
&& opt_for_fn (e->callee->decl, flag_devirtualize))
return true;
void
sem_item_optimizer::update_hash_by_addr_refs ()
{
- /* First, append to hash sensitive references. */
+  /* First, append to hash sensitive references and the class type if it
+     needs to be matched for ODR.  */
for (unsigned i = 0; i < m_items.length (); i++)
- m_items[i]->update_hash_by_addr_refs (m_symtab_node_map);
+ {
+ m_items[i]->update_hash_by_addr_refs (m_symtab_node_map);
+ if (m_items[i]->type == FUNC)
+ {
+ if (TREE_CODE (TREE_TYPE (m_items[i]->decl)) == METHOD_TYPE
+ && contains_polymorphic_type_p
+ (method_class_type (TREE_TYPE (m_items[i]->decl)))
+ && (DECL_CXX_CONSTRUCTOR_P (m_items[i]->decl)
+ || ((!flag_ipa_cp
+ || ipa_is_param_used (
+ IPA_NODE_REF
+ (dyn_cast <cgraph_node *>(m_items[i]->node)), 0))
+ && static_cast<sem_function *> (m_items[i])
+ ->compare_polymorphic_p ())))
+ {
+ tree class_type
+ = method_class_type (TREE_TYPE (m_items[i]->decl));
+ inchash::hash hstate (m_items[i]->hash);
+
+ if (TYPE_NAME (class_type)
+ && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (class_type)))
+ hstate.add_wide_int
+ (IDENTIFIER_HASH_VALUE
+ (DECL_ASSEMBLER_NAME (TYPE_NAME (class_type))));
+
+ m_items[i]->hash = hstate.end ();
+ }
+ }
+ }
/* Once all symbols have enhanced hash value, we can append
hash values of symbols that are seen by IPA ICF and are
return false;
}
-/* Initialization and computation of symtab node hash, there data
- are propagated later on. */
-
-static sem_item_optimizer *optimizer = NULL;
-
/* Generate pass summary for IPA ICF pass. */
static void