/* Interprocedural Identical Code Folding pass
- Copyright (C) 2014-2019 Free Software Foundation, Inc.
+ Copyright (C) 2014-2020 Free Software Foundation, Inc.
Contributed by Jan Hubicka <hubicka@ucw.cz> and Martin Liska <mliska@suse.cz>
#include "coverage.h"
#include "gimple-pretty-print.h"
#include "data-streamer.h"
+#include "tree-streamer.h"
#include "fold-const.h"
#include "calls.h"
#include "varasm.h"
#include "attribs.h"
#include "print-tree.h"
#include "ipa-utils.h"
+#include "tree-ssa-alias-compare.h"
#include "ipa-icf-gimple.h"
#include "fibonacci_heap.h"
#include "ipa-icf.h"
#include "stor-layout.h"
#include "dbgcnt.h"
#include "tree-vector-builder.h"
+#include "symtab-thunks.h"
+#include "alias.h"
using namespace ipa_icf_gimple;
/* Semantic function constructor that uses STACK as bitmap memory stack. */
sem_function::sem_function (bitmap_obstack *stack)
-: sem_item (FUNC, stack), m_checker (NULL), m_compared_func (NULL)
+ : sem_item (FUNC, stack), memory_access_types (), m_alias_sets_hash (0),
+ m_checker (NULL), m_compared_func (NULL)
{
bb_sizes.create (0);
bb_sorted.create (0);
}
sem_function::sem_function (cgraph_node *node, bitmap_obstack *stack)
-: sem_item (FUNC, node, stack), m_checker (NULL), m_compared_func (NULL)
+ : sem_item (FUNC, node, stack), memory_access_types (),
+ m_alias_sets_hash (0), m_checker (NULL), m_compared_func (NULL)
{
bb_sizes.create (0);
bb_sorted.create (0);
/* Compare properties of symbols N1 and N2 that does not affect semantics of
symbol itself but affects semantics of its references from USED_BY (which
- may be NULL if it is unknown). If comparsion is false, symbols
+ may be NULL if it is unknown). If comparison is false, symbols
can still be merged but any symbols referring them can't.
If ADDRESS is true, do extra checking needed for IPA_REF_ADDR.
if (DECL_IS_OPERATOR_NEW_P (n1->decl)
!= DECL_IS_OPERATOR_NEW_P (n2->decl))
return return_false_with_msg ("operator new flags are different");
+
+ if (DECL_IS_REPLACEABLE_OPERATOR (n1->decl)
+ != DECL_IS_REPLACEABLE_OPERATOR (n2->decl))
+ return return_false_with_msg ("replaceable operator flags are different");
}
/* Merging two definitions with a reference to equivalent vtables, but
m_compared_func = static_cast<sem_function *> (item);
- if (cnode->thunk.thunk_p != cnode2->thunk.thunk_p)
- return return_false_with_msg ("thunk_p mismatch");
+ if (cnode->thunk != cnode2->thunk)
+ return return_false_with_msg ("thunk mismatch");
+ if (cnode->former_thunk_p () != cnode2->former_thunk_p ())
+ return return_false_with_msg ("former_thunk_p mismatch");
- if (cnode->thunk.thunk_p)
- {
- if (cnode->thunk.fixed_offset != cnode2->thunk.fixed_offset)
- return return_false_with_msg ("thunk fixed_offset mismatch");
- if (cnode->thunk.virtual_value != cnode2->thunk.virtual_value)
- return return_false_with_msg ("thunk virtual_value mismatch");
- if (cnode->thunk.indirect_offset != cnode2->thunk.indirect_offset)
- return return_false_with_msg ("thunk indirect_offset mismatch");
- if (cnode->thunk.this_adjusting != cnode2->thunk.this_adjusting)
- return return_false_with_msg ("thunk this_adjusting mismatch");
- if (cnode->thunk.virtual_offset_p != cnode2->thunk.virtual_offset_p)
- return return_false_with_msg ("thunk virtual_offset_p mismatch");
- }
+ if ((cnode->thunk || cnode->former_thunk_p ())
+ && thunk_info::get (cnode) != thunk_info::get (cnode2))
+ return return_false_with_msg ("thunk_info mismatch");
/* Compare special function DECL attributes. */
if (DECL_FUNCTION_PERSONALITY (decl)
if (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl)
!= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (item->decl))
- return return_false_with_msg ("intrument function entry exit "
+ return return_false_with_msg ("instrument function entry exit "
"attributes are different");
if (DECL_NO_LIMIT_STACK (decl) != DECL_NO_LIMIT_STACK (item->decl))
type memory location for ipa-polymorphic-call and we do not want
it to get confused by wrong type. */
if (DECL_CXX_CONSTRUCTOR_P (decl)
+ && opt_for_fn (decl, flag_devirtualize)
&& TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
{
if (TREE_CODE (TREE_TYPE (item->decl)) != METHOD_TYPE)
- return return_false_with_msg ("DECL_CXX_CONSTURCTOR type mismatch");
+ return return_false_with_msg ("DECL_CXX_CONSTRUCTOR type mismatch");
else if (!func_checker::compatible_polymorphic_types_p
(TYPE_METHOD_BASETYPE (TREE_TYPE (decl)),
TYPE_METHOD_BASETYPE (TREE_TYPE (item->decl)), false))
}
/* Update hash by address sensitive references. We iterate over all
- sensitive references (address_matters_p) and we hash ultime alias
+ sensitive references (address_matters_p) and we hash ultimate alias
target of these nodes, which can improve a semantic item hash.
Also hash in referenced symbols properties. This can be done at any time
m_checker = new func_checker (decl, m_compared_func->decl,
false,
+ opt_for_fn (m_compared_func->decl,
+ flag_strict_aliasing),
&refs_set,
&m_compared_func->refs_set);
arg1 = DECL_ARGUMENTS (decl);
/* Redirecting thunks to interposable symbols or symbols in other sections
may not be supported by target output code. Play safe for now and
punt on redirection. */
- if (!e->caller->thunk.thunk_p)
+ if (!e->caller->thunk)
{
struct cgraph_edge *nexte = e->next_caller;
e->redirect_callee (to);
}
/* Do not turn function in one comdat group into wrapper to another
comdat group. Other compiler producing the body of the
- another comdat group may make opossite decision and with unfortunate
+ another comdat group may make opposite decision and with unfortunate
linker choices this may close a loop. */
else if (DECL_COMDAT_GROUP (original->decl)
&& DECL_COMDAT_GROUP (alias->decl)
else
create_wrapper = true;
- /* We can redirect local calls in the case both alias and orignal
+ /* We can redirect local calls in the case both alias and original
are not interposable. */
redirect_callers
= alias->get_availability () > AVAIL_INTERPOSABLE
edge_count = n_edges_for_fn (func);
cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
- if (!cnode->thunk.thunk_p)
+ if (!cnode->thunk)
{
cfg_checksum = coverage_compute_cfg_checksum (func);
else
{
cfg_checksum = 0;
- inchash::hash hstate;
- hstate.add_hwi (cnode->thunk.fixed_offset);
- hstate.add_hwi (cnode->thunk.virtual_value);
- hstate.add_flag (cnode->thunk.this_adjusting);
- hstate.add_flag (cnode->thunk.virtual_offset_p);
- gcode_hash = hstate.end ();
+ gcode_hash = thunk_info::get (cnode)->hash ();
}
m_checker = NULL;
{
case GIMPLE_SWITCH:
m_checker->hash_operand (gimple_switch_index (as_a <gswitch *> (stmt)),
- hstate, 0);
+ hstate, 0, func_checker::OP_NORMAL);
break;
case GIMPLE_ASSIGN:
hstate.add_int (gimple_assign_rhs_code (stmt));
- if (commutative_tree_code (gimple_assign_rhs_code (stmt))
- || commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
- {
- m_checker->hash_operand (gimple_assign_rhs1 (stmt), hstate, 0);
- m_checker->hash_operand (gimple_assign_rhs2 (stmt), hstate, 0);
- if (commutative_ternary_tree_code (gimple_assign_rhs_code (stmt)))
- m_checker->hash_operand (gimple_assign_rhs3 (stmt), hstate, 0);
- m_checker->hash_operand (gimple_assign_lhs (stmt), hstate, 0);
- }
/* fall through */
case GIMPLE_CALL:
case GIMPLE_ASM:
case GIMPLE_COND:
case GIMPLE_GOTO:
case GIMPLE_RETURN:
- /* All these statements are equivalent if their operands are. */
- for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
- m_checker->hash_operand (gimple_op (stmt, i), hstate, 0);
- /* Consider nocf_check attribute in hash as it affects code
- generation. */
- if (code == GIMPLE_CALL
- && flag_cf_protection & CF_BRANCH)
- hstate.add_flag (gimple_call_nocf_check_p (as_a <gcall *> (stmt)));
+ {
+ func_checker::operand_access_type_map map (5);
+ func_checker::classify_operands (stmt, &map);
+
+ /* All these statements are equivalent if their operands are. */
+ for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
+ {
+ func_checker::operand_access_type
+ access_type = func_checker::get_operand_access_type
+ (&map, gimple_op (stmt, i));
+ m_checker->hash_operand (gimple_op (stmt, i), hstate, 0,
+ access_type);
+ /* For memory accesses when hashing for LTO streaming record
+ base and ref alias ptr types so we can compare them at WPA
+ time without having to read actual function body. */
+ if (access_type == func_checker::OP_MEMORY
+ && lto_streaming_expected_p ()
+ && flag_strict_aliasing)
+ {
+ ao_ref ref;
+
+ ao_ref_init (&ref, gimple_op (stmt, i));
+ tree t = ao_ref_alias_ptr_type (&ref);
+ if (!variably_modified_type_p (t, NULL_TREE))
+ memory_access_types.safe_push (t);
+ t = ao_ref_base_alias_ptr_type (&ref);
+ if (!variably_modified_type_p (t, NULL_TREE))
+ memory_access_types.safe_push (t);
+ }
+ }
+ /* Consider nocf_check attribute in hash as it affects code
+ generation. */
+ if (code == GIMPLE_CALL
+ && flag_cf_protection & CF_BRANCH)
+ hstate.add_flag (gimple_call_nocf_check_p (as_a <gcall *> (stmt)));
+ }
+ break;
default:
break;
}
tree fndecl = node->decl;
function *func = DECL_STRUCT_FUNCTION (fndecl);
- if (!func || (!node->has_gimple_body_p () && !node->thunk.thunk_p))
+ if (!func || (!node->has_gimple_body_p () && !node->thunk))
return NULL;
if (lookup_attribute_by_prefix ("omp ", DECL_ATTRIBUTES (node->decl)) != NULL)
tree phi_result1 = gimple_phi_result (phi1);
tree phi_result2 = gimple_phi_result (phi2);
- if (!m_checker->compare_operand (phi_result1, phi_result2))
+ if (!m_checker->compare_operand (phi_result1, phi_result2,
+ func_checker::OP_NORMAL))
return return_false_with_msg ("PHI results are different");
size1 = gimple_phi_num_args (phi1);
t1 = gimple_phi_arg (phi1, i)->def;
t2 = gimple_phi_arg (phi2, i)->def;
- if (!m_checker->compare_operand (t1, t2))
+ if (!m_checker->compare_operand (t1, t2, func_checker::OP_NORMAL))
return return_false ();
e1 = gimple_phi_arg_edge (phi1, i);
target++;
if (bb_dict->length () <= (unsigned)source)
- bb_dict->safe_grow_cleared (source + 1);
+ bb_dict->safe_grow_cleared (source + 1, true);
if ((*bb_dict)[source] == 0)
{
return false;
}
- /* We cannot merge if address comparsion metters. */
+ /* We cannot merge if address comparison matters. */
if (alias_address_matters && flag_merge_constants < 2)
{
if (dump_enabled_p ())
streamer_write_uhwi_stream (ob->main_stream, node_ref);
streamer_write_uhwi (ob, (*item)->get_hash ());
+
+ if ((*item)->type == FUNC)
+ {
+ sem_function *fn = static_cast<sem_function *> (*item);
+ streamer_write_uhwi (ob, fn->memory_access_types.length ());
+ for (unsigned i = 0; i < fn->memory_access_types.length (); i++)
+ stream_write_tree (ob, fn->memory_access_types[i], true);
+ }
}
}
cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
sem_function *fn = new sem_function (cnode, &m_bmstack);
+ unsigned count = streamer_read_uhwi (&ib_main);
+ inchash::hash hstate (0);
+ if (flag_incremental_link == INCREMENTAL_LINK_LTO)
+ fn->memory_access_types.reserve_exact (count);
+ for (unsigned i = 0; i < count; i++)
+ {
+ tree type = stream_read_tree (&ib_main, data_in);
+ hstate.add_int (get_deref_alias_set (type));
+ if (flag_incremental_link == INCREMENTAL_LINK_LTO)
+ fn->memory_access_types.quick_push (type);
+ }
+ fn->m_alias_sets_hash = hstate.end ();
fn->set_hash (hash);
m_items.safe_push (fn);
}
build_graph ();
update_hash_by_addr_refs ();
+ update_hash_by_memory_access_type ();
build_hash_based_classes ();
if (dump_file)
= TYPE_METHOD_BASETYPE (TREE_TYPE (m_items[i]->decl));
inchash::hash hstate (m_items[i]->get_hash ());
+ /* Hash ODR types by mangled name if it is defined.
+ If not we know that type is anonymous or free_lang_data
+ was not run and in that case type main variants are
+ unique. */
if (TYPE_NAME (class_type)
- && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (class_type)))
+ && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (class_type))
+ && !type_in_anonymous_namespace_p
+ (class_type))
hstate.add_hwi
(IDENTIFIER_HASH_VALUE
(DECL_ASSEMBLER_NAME (TYPE_NAME (class_type))));
+ else
+ {
+ gcc_checking_assert
+ (!in_lto_p
+ || type_in_anonymous_namespace_p (class_type));
+ hstate.add_hwi (TYPE_UID (TYPE_MAIN_VARIANT (class_type)));
+ }
m_items[i]->set_hash (hstate.end ());
}
m_items[i]->set_hash (m_items[i]->global_hash);
}
+/* Mix the per-function alias-set hash (m_alias_sets_hash, accumulated
+   from the streamed-in memory access types) into each function's hash,
+   so functions whose memory accesses use different alias sets land in
+   different congruence classes before the equality walk.  */
+
+void
+sem_item_optimizer::update_hash_by_memory_access_type ()
+{
+ for (unsigned i = 0; i < m_items.length (); i++)
+ {
+ if (m_items[i]->type == FUNC)
+ {
+ sem_function *fn = static_cast<sem_function *> (m_items[i]);
+ inchash::hash hstate (fn->get_hash ());
+ hstate.add_int (fn->m_alias_sets_hash);
+ fn->set_hash (hstate.end ());
+ }
+ }
+}
+
/* Congruence classes are built by hash value. */
void
"class: %u)\n", (unsigned long) m_classes.elements (),
m_items.length (), m_items.length () - single_element_classes);
fprintf (dump_file,
- "Class size histogram [num of members]: number of classe number "
- "of classess\n");
+ "Class size histogram [number of members]: number of classes\n");
for (unsigned int i = 0; i <= max_index; i++)
if (histogram[i])
fprintf (dump_file, "%6u: %6u\n", i, histogram[i]);
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
"Semantic equality hit:%s->%s\n",
- xstrdup_for_dump (source->node->name ()),
- xstrdup_for_dump (alias->node->name ()));
+ source->node->dump_name (),
+ alias->node->dump_name ());
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
"Assembler symbol names:%s->%s\n",
- xstrdup_for_dump (source->node->asm_name ()),
- xstrdup_for_dump (alias->node->asm_name ()));
+ source->node->dump_asm_name (),
+ alias->node->dump_asm_name ());
}
if (lookup_attribute ("no_icf", DECL_ATTRIBUTES (alias->decl)))
{
if (dump_file)
fprintf (dump_file, " Setting points-to UID of [%s] as %d\n",
- xstrdup_for_dump (ref->referring->asm_name ()), uid);
+ ref->referring->dump_asm_name (), uid);
SET_DECL_PT_UID (ref->referring->decl, uid);
set_alias_uids (ref->referring, uid);
fixup_pt_set (&SSA_NAME_PTR_INFO (name)->pt);
fixup_pt_set (&fn->gimple_df->escaped);
- /* The above get's us to 99% I guess, at least catching the
+ /* The above gets us to 99% I guess, at least catching the
address compares. Below also gets us aliasing correct
but as said we're giving leeway to the situation with
readonly vars anyway, so ... */
optimizer->register_hooks ();
}
-/* Semantic equality exection function. */
+/* Semantic equality execution function. */
static unsigned int
ipa_icf_driver (void)