+2019-09-20 Martin Jambor <mjambor@suse.cz>
+
+ * coretypes.h (cgraph_edge): Declare.
+ * ipa-param-manipulation.c: Rewrite.
+ * ipa-param-manipulation.h: Likewise.
+ * Makefile.in (GTFILES): Added ipa-param-manipulation.h and ipa-sra.c.
+ (OBJS): Added ipa-sra.o.
+ * cgraph.h (ipa_replace_map): Removed fields old_tree, replace_p
+ and ref_p, added fields param_adjustments and performed_splits.
+ (struct cgraph_clone_info): Removed args_to_skip and
+ combined_args_to_skip, new field param_adjustments.
+ (cgraph_node::create_clone): Changed parameters to use
+ ipa_param_adjustments.
+ (cgraph_node::create_virtual_clone): Likewise.
+ (cgraph_node::create_virtual_clone_with_body): Likewise.
+ (tree_function_versioning): Likewise.
+ (cgraph_build_function_type_skip_args): Removed.
+ * cgraph.c (cgraph_edge::redirect_call_stmt_to_callee): Convert to
+ using ipa_param_adjustments.
+ (clone_of_p): Likewise.
+ * cgraphclones.c (cgraph_build_function_type_skip_args): Removed.
+ (build_function_decl_skip_args): Likewise.
+ (duplicate_thunk_for_node): Adjust parameters using
+ ipa_param_body_adjustments, copy param_adjustments instead of
+ args_to_skip.
+ (cgraph_node::create_clone): Convert to using ipa_param_adjustments.
+ (cgraph_node::create_virtual_clone): Likewise.
+ (cgraph_node::create_version_clone_with_body): Likewise.
+ (cgraph_materialize_clone): Likewise.
+ (symbol_table::materialize_all_clones): Likewise.
+ * ipa-fnsummary.c (ipa_fn_summary_t::duplicate): Simplify
+ ipa_replace_map check.
+ * ipa-cp.c (get_replacement_map): Do not initialize removed fields.
+ (initialize_node_lattices): Make aware that some parameters might have
+ already been removed.
+ (want_remove_some_param_p): New function.
+ (create_specialized_node): Convert to using ipa_param_adjustments and
+ deal with possibly pre-existing adjustments.
+ * lto-cgraph.c (output_cgraph_opt_summary_p): Likewise.
+ (output_node_opt_summary): Do not stream removed fields. Stream
+ parameter adjustments instead of arguments to skip.
+ (input_node_opt_summary): Likewise.
+ * lto-section-in.c (lto_section_name): Added ipa-sra section.
+ * lto-streamer.h (lto_section_type): Likewise.
+ * tree-inline.h (copy_body_data): New fields killed_new_ssa_names and
+ param_body_adjs.
+ (copy_decl_to_var): Declare.
+ * tree-inline.c (update_clone_info): Do not remap old_tree.
+ (remap_gimple_stmt): Use ipa_param_body_adjustments to modify gimple
+ statements, walk all extra generated statements and remap their
+ operands.
+ (redirect_all_calls): Add killed SSA names to a hash set.
+ (remap_ssa_name): Do not remap killed SSA names.
+ (copy_arguments_for_versioning): Renamed to copy_arguments_nochange,
+ half of functionality moved to ipa_param_body_adjustments.
+ (copy_decl_to_var): Make exported.
+ (copy_body): Destroy killed_new_ssa_names hash set.
+ (expand_call_inline): Remap performed splits.
+ (update_clone_info): Likewise.
+ (tree_function_versioning): Simplify tree_map processing. Updated to
+ accept ipa_param_adjustments and use ipa_param_body_adjustments.
+ * omp-simd-clone.c (simd_clone_vector_of_formal_parm_types): Adjust
+ for the new interface.
+ (simd_clone_clauses_extract): Likewise, make args an auto_vec.
+ (simd_clone_compute_base_data_type): Likewise.
+ (simd_clone_init_simd_arrays): Adjust for the new interface.
+ (simd_clone_adjust_argument_types): Likewise.
+ (struct modify_stmt_info): Likewise.
+ (ipa_simd_modify_stmt_ops): Likewise.
+ (ipa_simd_modify_function_body): Likewise.
+ (simd_clone_adjust): Likewise.
+ * tree-sra.c: Removed IPA-SRA. Include tree-sra.h.
+ (type_internals_preclude_sra_p): Make public.
+ * tree-sra.h: New file.
+ * ipa-inline-transform.c (save_inline_function_body): Update to
+ reflect new tree_function_versioning signature.
+ * ipa-prop.c (adjust_agg_replacement_values): Use a helper from
+ ipa_param_adjustments to get current parameter indices.
+ (ipcp_modif_dom_walker::before_dom_children): Likewise.
+ (ipcp_update_bits): Likewise.
+ (ipcp_update_vr): Likewise.
+ * ipa-split.c (split_function): Convert to using ipa_param_adjustments.
+ * ipa-sra.c: New file.
+ * multiple_target.c (create_target_clone): Update to reflect new type
+ of create_version_clone_with_body.
+ * trans-mem.c (ipa_tm_create_version): Update to reflect new type of
+ tree_function_versioning.
+ (modify_function): Update to reflect new type of
+ tree_function_versioning.
+ * params.def (PARAM_IPA_SRA_MAX_REPLACEMENTS): New.
+ * passes.def: Remove old IPA-SRA and add new one.
+ * tree-pass.h (make_pass_early_ipa_sra): Remove declaration.
+ (make_pass_ipa_sra): Declare.
+ * dbgcnt.def: Removed eipa_sra. Added ipa_sra_params and
+ ipa_sra_retvalues.
+ * doc/invoke.texi (ipa-sra-max-replacements): New.
+
2019-09-19 Martin Sebor <msebor@redhat.com>
PR middle-end/91631
init-regs.o \
internal-fn.o \
ipa-cp.o \
+ ipa-sra.o \
ipa-devirt.o \
ipa-fnsummary.o \
ipa-polymorphic-call.o \
$(srcdir)/reload.h $(srcdir)/caller-save.c $(srcdir)/symtab.c \
$(srcdir)/alias.c $(srcdir)/bitmap.c $(srcdir)/cselib.c $(srcdir)/cgraph.c \
$(srcdir)/ipa-prop.c $(srcdir)/ipa-cp.c $(srcdir)/ipa-utils.h \
- $(srcdir)/dbxout.c \
+ $(srcdir)/ipa-param-manipulation.h $(srcdir)/ipa-sra.c $(srcdir)/dbxout.c \
$(srcdir)/signop.h \
$(srcdir)/dwarf2out.h \
$(srcdir)/dwarf2asm.c \
if (flag_checking && decl)
{
cgraph_node *node = cgraph_node::get (decl);
- gcc_assert (!node || !node->clone.combined_args_to_skip);
+ gcc_assert (!node || !node->clone.param_adjustments);
}
if (symtab->dump_file)
fprintf (symtab->dump_file, "updating call of %s -> %s: ",
e->caller->dump_name (), e->callee->dump_name ());
print_gimple_stmt (symtab->dump_file, e->call_stmt, 0, dump_flags);
- if (e->callee->clone.combined_args_to_skip)
+ if (e->callee->clone.param_adjustments)
+ e->callee->clone.param_adjustments->dump (symtab->dump_file);
+ unsigned performed_len
+ = vec_safe_length (e->caller->clone.performed_splits);
+ if (performed_len > 0)
+ fprintf (symtab->dump_file, "Performed splits records:\n");
+ for (unsigned i = 0; i < performed_len; i++)
{
- fprintf (symtab->dump_file, " combined args to skip: ");
- dump_bitmap (symtab->dump_file,
- e->callee->clone.combined_args_to_skip);
+ ipa_param_performed_split *sm
+ = &(*e->caller->clone.performed_splits)[i];
+ print_node_brief (symtab->dump_file, " dummy_decl: ", sm->dummy_decl,
+ TDF_UID);
+ fprintf (symtab->dump_file, ", unit_offset: %u\n", sm->unit_offset);
}
}
- if (e->callee->clone.combined_args_to_skip)
+ if (ipa_param_adjustments *padjs = e->callee->clone.param_adjustments)
{
- int lp_nr;
+ /* We need to defer cleaning EH info on the new statement to
+ fixup-cfg. We may not have dominator information at this point
+ and thus would end up with unreachable blocks and have no way
+ to communicate that we need to run CFG cleanup then. */
+ int lp_nr = lookup_stmt_eh_lp (e->call_stmt);
+ if (lp_nr != 0)
+ remove_stmt_from_eh_lp (e->call_stmt);
- new_stmt = e->call_stmt;
- if (e->callee->clone.combined_args_to_skip)
- new_stmt
- = gimple_call_copy_skip_args (new_stmt,
- e->callee->clone.combined_args_to_skip);
tree old_fntype = gimple_call_fntype (e->call_stmt);
- gimple_call_set_fndecl (new_stmt, e->callee->decl);
+ new_stmt = padjs->modify_call (e->call_stmt,
+ e->caller->clone.performed_splits,
+ e->callee->decl, false);
cgraph_node *origin = e->callee;
while (origin->clone_of)
origin = origin->clone_of;
gimple_call_set_fntype (new_stmt, TREE_TYPE (e->callee->decl));
else
{
- bitmap skip = e->callee->clone.combined_args_to_skip;
- tree t = cgraph_build_function_type_skip_args (old_fntype, skip,
- false);
- gimple_call_set_fntype (new_stmt, t);
- }
-
- if (gimple_vdef (new_stmt)
- && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME)
- SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
-
- gsi = gsi_for_stmt (e->call_stmt);
-
- /* For optimized away parameters, add on the caller side
- before the call
- DEBUG D#X => parm_Y(D)
- stmts and associate D#X with parm in decl_debug_args_lookup
- vector to say for debug info that if parameter parm had been passed,
- it would have value parm_Y(D). */
- if (e->callee->clone.combined_args_to_skip && MAY_HAVE_DEBUG_BIND_STMTS)
- {
- vec<tree, va_gc> **debug_args
- = decl_debug_args_lookup (e->callee->decl);
- tree old_decl = gimple_call_fndecl (e->call_stmt);
- if (debug_args && old_decl)
- {
- tree parm;
- unsigned i = 0, num;
- unsigned len = vec_safe_length (*debug_args);
- unsigned nargs = gimple_call_num_args (e->call_stmt);
- for (parm = DECL_ARGUMENTS (old_decl), num = 0;
- parm && num < nargs;
- parm = DECL_CHAIN (parm), num++)
- if (bitmap_bit_p (e->callee->clone.combined_args_to_skip, num)
- && is_gimple_reg (parm))
- {
- unsigned last = i;
-
- while (i < len && (**debug_args)[i] != DECL_ORIGIN (parm))
- i += 2;
- if (i >= len)
- {
- i = 0;
- while (i < last
- && (**debug_args)[i] != DECL_ORIGIN (parm))
- i += 2;
- if (i >= last)
- continue;
- }
- tree ddecl = (**debug_args)[i + 1];
- tree arg = gimple_call_arg (e->call_stmt, num);
- if (!useless_type_conversion_p (TREE_TYPE (ddecl),
- TREE_TYPE (arg)))
- {
- tree rhs1;
- if (!fold_convertible_p (TREE_TYPE (ddecl), arg))
- continue;
- if (TREE_CODE (arg) == SSA_NAME
- && gimple_assign_cast_p (SSA_NAME_DEF_STMT (arg))
- && (rhs1
- = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (arg)))
- && useless_type_conversion_p (TREE_TYPE (ddecl),
- TREE_TYPE (rhs1)))
- arg = rhs1;
- else
- arg = fold_convert (TREE_TYPE (ddecl), arg);
- }
-
- gimple *def_temp
- = gimple_build_debug_bind (ddecl, unshare_expr (arg),
- e->call_stmt);
- gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
- }
- }
+ tree new_fntype = padjs->build_new_function_type (old_fntype, true);
+ gimple_call_set_fntype (new_stmt, new_fntype);
}
- gsi_replace (&gsi, new_stmt, false);
- /* We need to defer cleaning EH info on the new statement to
- fixup-cfg. We may not have dominator information at this point
- and thus would end up with unreachable blocks and have no way
- to communicate that we need to run CFG cleanup then. */
- lp_nr = lookup_stmt_eh_lp (e->call_stmt);
if (lp_nr != 0)
- {
- remove_stmt_from_eh_lp (e->call_stmt);
- add_stmt_to_eh_lp (new_stmt, lp_nr);
- }
+ add_stmt_to_eh_lp (new_stmt, lp_nr);
}
else
{
return true;
node = node->callees->callee->ultimate_alias_target ();
- if (!node2->clone.args_to_skip
- || !bitmap_bit_p (node2->clone.args_to_skip, 0))
+ if (!node2->clone.param_adjustments
+ || node2->clone.param_adjustments->first_param_intact_p ())
return false;
if (node2->former_clone_of == node->decl)
return true;
#include "profile-count.h"
#include "ipa-ref.h"
#include "plugin-api.h"
+#include "ipa-param-manipulation.h"
extern void debuginfo_early_init (void);
extern void debuginfo_init (void);
will be replaced by another tree while versioning. */
struct GTY(()) ipa_replace_map
{
- /* The tree that will be replaced. */
- tree old_tree;
/* The new (replacing) tree. */
tree new_tree;
/* Parameter number to replace, when old_tree is NULL. */
int parm_num;
- /* True when a substitution should be done, false otherwise. */
- bool replace_p;
- /* True when we replace a reference to old_tree. */
- bool ref_p;
};
struct GTY(()) cgraph_clone_info
{
+ /* Constants discovered by IPA-CP, i.e. which parameter should be replaced
+ with what. */
vec<ipa_replace_map *, va_gc> *tree_map;
- bitmap args_to_skip;
- bitmap combined_args_to_skip;
+ /* Parameter modification that IPA-SRA decided to perform. */
+ ipa_param_adjustments *param_adjustments;
+ /* Lists of dummy-decl and offset pairs representing split formal parameters
+ in the caller. Offsets of all new replacements are enumerated, those
+ coming from the same original parameter have the same dummy decl stored
+ along with them.
+
+ Dummy decls sit in call statement arguments followed by new parameter
+ decls (or their SSA names) in between (caller) clone materialization and
+ call redirection. Redirection then recognizes the dummy variable and
+ together with the stored offsets can reconstruct what exactly the new
+ parameter decls represent and can leave in place only those that the
+ callee expects. */
+ vec<ipa_param_performed_split, va_gc> *performed_splits;
};
enum cgraph_simd_clone_arg_type
vec<cgraph_edge *> redirect_callers,
bool call_duplication_hook,
cgraph_node *new_inlined_to,
- bitmap args_to_skip, const char *suffix = NULL);
+ ipa_param_adjustments *param_adjustments,
+ const char *suffix = NULL);
/* Create callgraph node clone with new declaration. The actual body will be
copied later at compilation stage. The name of the new clone will be
constructed from the name of the original node, SUFFIX and NUM_SUFFIX. */
cgraph_node *create_virtual_clone (vec<cgraph_edge *> redirect_callers,
vec<ipa_replace_map *, va_gc> *tree_map,
- bitmap args_to_skip, const char * suffix,
- unsigned num_suffix);
+ ipa_param_adjustments *param_adjustments,
+ const char * suffix, unsigned num_suffix);
/* cgraph node being removed from symbol table; see if its entry can be
replaced by other inline clone. */
Return the new version's cgraph node. */
cgraph_node *create_version_clone_with_body
(vec<cgraph_edge *> redirect_callers,
- vec<ipa_replace_map *, va_gc> *tree_map, bitmap args_to_skip,
- bool skip_return, bitmap bbs_to_copy, basic_block new_entry_block,
- const char *clone_name, tree target_attributes = NULL_TREE);
+ vec<ipa_replace_map *, va_gc> *tree_map,
+ ipa_param_adjustments *param_adjustments,
+ bitmap bbs_to_copy, basic_block new_entry_block, const char *clone_name,
+ tree target_attributes = NULL_TREE);
/* Insert a new cgraph_function_version_info node into cgraph_fnver_htab
corresponding to cgraph_node. */
tree clone_function_name (tree decl, const char *suffix);
void tree_function_versioning (tree, tree, vec<ipa_replace_map *, va_gc> *,
- bool, bitmap, bool, bitmap, basic_block);
+ ipa_param_adjustments *,
+ bool, bitmap, basic_block);
void dump_callgraph_transformation (const cgraph_node *original,
const cgraph_node *clone,
const char *suffix);
-tree cgraph_build_function_type_skip_args (tree orig_type, bitmap args_to_skip,
- bool skip_return);
-
/* In cgraphbuild.c */
int compute_call_stmt_bb_frequency (tree, basic_block bb);
void record_references_in_initializer (tree, bool);
return new_edge;
}
-/* Build variant of function type ORIG_TYPE skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true. */
-
-tree
-cgraph_build_function_type_skip_args (tree orig_type, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_type = NULL;
- tree args, new_args = NULL;
- tree new_reversed;
- int i = 0;
-
- for (args = TYPE_ARG_TYPES (orig_type); args && args != void_list_node;
- args = TREE_CHAIN (args), i++)
- if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
- new_args = tree_cons (NULL_TREE, TREE_VALUE (args), new_args);
-
- new_reversed = nreverse (new_args);
- if (args)
- {
- if (new_reversed)
- TREE_CHAIN (new_args) = void_list_node;
- else
- new_reversed = void_list_node;
- }
-
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || !args_to_skip
- || !bitmap_bit_p (args_to_skip, 0))
- {
- new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
- }
- else
- {
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- }
-
- if (skip_return)
- TREE_TYPE (new_type) = void_type_node;
-
- return new_type;
-}
-
-/* Build variant of function decl ORIG_DECL skipping ARGS_TO_SKIP and the
- return value if SKIP_RETURN is true.
-
- Arguments from DECL_ARGUMENTS list can't be removed now, since they are
- linked by TREE_CHAIN directly. The caller is responsible for eliminating
- them when they are being duplicated (i.e. copy_arguments_for_versioning). */
-
-static tree
-build_function_decl_skip_args (tree orig_decl, bitmap args_to_skip,
- bool skip_return)
-{
- tree new_decl = copy_node (orig_decl);
- tree new_type;
-
- new_type = TREE_TYPE (orig_decl);
- if (prototype_p (new_type)
- || (skip_return && !VOID_TYPE_P (TREE_TYPE (new_type))))
- new_type
- = cgraph_build_function_type_skip_args (new_type, args_to_skip,
- skip_return);
- TREE_TYPE (new_decl) = new_type;
-
- /* For declarations setting DECL_VINDEX (i.e. methods)
- we expect first argument to be THIS pointer. */
- if (args_to_skip && bitmap_bit_p (args_to_skip, 0))
- DECL_VINDEX (new_decl) = NULL_TREE;
-
- /* When signature changes, we need to clear builtin info. */
- if (fndecl_built_in_p (new_decl)
- && args_to_skip
- && !bitmap_empty_p (args_to_skip))
- set_decl_built_in_function (new_decl, NOT_BUILT_IN, 0);
- /* The FE might have information and assumptions about the other
- arguments. */
- DECL_LANG_SPECIFIC (new_decl) = NULL;
- return new_decl;
-}
-
/* Set flags of NEW_NODE and its decl. NEW_NODE is a newly created private
clone or its thunk. */
return cs->caller;
tree new_decl;
- if (!node->clone.args_to_skip)
- new_decl = copy_node (thunk->decl);
- else
+ if (node->clone.param_adjustments)
{
/* We do not need to duplicate this_adjusting thunks if we have removed
this. */
if (thunk->thunk.this_adjusting
- && bitmap_bit_p (node->clone.args_to_skip, 0))
+ && !node->clone.param_adjustments->first_param_intact_p ())
return node;
- new_decl = build_function_decl_skip_args (thunk->decl,
- node->clone.args_to_skip,
- false);
- }
-
- tree *link = &DECL_ARGUMENTS (new_decl);
- int i = 0;
- for (tree pd = DECL_ARGUMENTS (thunk->decl); pd; pd = DECL_CHAIN (pd), i++)
- {
- if (!node->clone.args_to_skip
- || !bitmap_bit_p (node->clone.args_to_skip, i))
- {
- tree nd = copy_node (pd);
- DECL_CONTEXT (nd) = new_decl;
- *link = nd;
- link = &DECL_CHAIN (nd);
- }
+ new_decl = copy_node (thunk->decl);
+ ipa_param_body_adjustments body_adj (node->clone.param_adjustments,
+ new_decl);
+ body_adj.modify_formal_parameters ();
}
- *link = NULL_TREE;
+ else
+ new_decl = copy_node (thunk->decl);
gcc_checking_assert (!DECL_STRUCT_FUNCTION (new_decl));
gcc_checking_assert (!DECL_INITIAL (new_decl));
new_thunk->thunk = thunk->thunk;
new_thunk->unique_name = in_lto_p;
new_thunk->former_clone_of = thunk->decl;
- new_thunk->clone.args_to_skip = node->clone.args_to_skip;
- new_thunk->clone.combined_args_to_skip = node->clone.combined_args_to_skip;
+ new_thunk->clone.param_adjustments = node->clone.param_adjustments;
cgraph_edge *e = new_thunk->create_edge (node, NULL, new_thunk->count);
symtab->call_edge_duplication_hooks (thunk->callees, e);
If the new node is being inlined into another one, NEW_INLINED_TO should be
the outline function the new one is (even indirectly) inlined to. All hooks
will see this in node's global.inlined_to, when invoked. Can be NULL if the
- node is not inlined. */
+ node is not inlined.
+
+ If PARAM_ADJUSTMENTS is non-NULL, the parameter manipulation information
+ will be overwritten by the new structure. Otherwise the new node will
+ share parameter manipulation information with the original node. */
cgraph_node *
cgraph_node::create_clone (tree new_decl, profile_count prof_count,
vec<cgraph_edge *> redirect_callers,
bool call_duplication_hook,
cgraph_node *new_inlined_to,
- bitmap args_to_skip, const char *suffix)
+ ipa_param_adjustments *param_adjustments,
+ const char *suffix)
{
cgraph_node *new_node = symtab->create_empty ();
cgraph_edge *e;
new_node->merged_comdat = merged_comdat;
new_node->thunk = thunk;
+ if (param_adjustments)
+ new_node->clone.param_adjustments = param_adjustments;
+ else
+ new_node->clone.param_adjustments = clone.param_adjustments;
new_node->clone.tree_map = NULL;
- new_node->clone.args_to_skip = args_to_skip;
+ new_node->clone.performed_splits = vec_safe_copy (clone.performed_splits);
new_node->split_part = split_part;
- if (!args_to_skip)
- new_node->clone.combined_args_to_skip = clone.combined_args_to_skip;
- else if (clone.combined_args_to_skip)
- {
- new_node->clone.combined_args_to_skip = BITMAP_GGC_ALLOC ();
- bitmap_ior (new_node->clone.combined_args_to_skip,
- clone.combined_args_to_skip, args_to_skip);
- }
- else
- new_node->clone.combined_args_to_skip = args_to_skip;
FOR_EACH_VEC_ELT (redirect_callers, i, e)
{
cgraph_node *
cgraph_node::create_virtual_clone (vec<cgraph_edge *> redirect_callers,
vec<ipa_replace_map *, va_gc> *tree_map,
- bitmap args_to_skip, const char * suffix,
- unsigned num_suffix)
+ ipa_param_adjustments *param_adjustments,
+ const char * suffix, unsigned num_suffix)
{
tree old_decl = decl;
cgraph_node *new_node = NULL;
char *name;
gcc_checking_assert (local.versionable);
- gcc_assert (local.can_change_signature || !args_to_skip);
+ /* TODO: It would be nice if we could recognize that param_adjustments do not
+ actually perform any changes, but at the moment let's require it simply
+ does not exist. */
+ gcc_assert (local.can_change_signature || !param_adjustments);
/* Make a new FUNCTION_DECL tree node */
- if (!args_to_skip)
+ if (!param_adjustments)
new_decl = copy_node (old_decl);
else
- new_decl = build_function_decl_skip_args (old_decl, args_to_skip, false);
+ new_decl = param_adjustments->adjust_decl (old_decl);
/* These pointers represent function body and will be populated only when clone
is materialized. */
SET_DECL_RTL (new_decl, NULL);
new_node = create_clone (new_decl, count, false,
- redirect_callers, false, NULL, args_to_skip, suffix);
+ redirect_callers, false, NULL, param_adjustments,
+ suffix);
/* Update the properties.
Make clone visible only within this translation unit. Make sure
cgraph_node *
cgraph_node::create_version_clone_with_body
(vec<cgraph_edge *> redirect_callers,
- vec<ipa_replace_map *, va_gc> *tree_map, bitmap args_to_skip,
- bool skip_return, bitmap bbs_to_copy, basic_block new_entry_block,
- const char *suffix, tree target_attributes)
+ vec<ipa_replace_map *, va_gc> *tree_map,
+ ipa_param_adjustments *param_adjustments,
+ bitmap bbs_to_copy, basic_block new_entry_block, const char *suffix,
+ tree target_attributes)
{
tree old_decl = decl;
cgraph_node *new_version_node = NULL;
if (!tree_versionable_function_p (old_decl))
return NULL;
- gcc_assert (local.can_change_signature || !args_to_skip);
+ /* TODO: Restore an assert that we do not change signature if
+ local.can_change_signature is false. We cannot just check that
+ param_adjustments is NULL because unfortunately ipa-split removes return
+ values from such functions. */
/* Make a new FUNCTION_DECL tree node for the new version. */
- if (!args_to_skip && !skip_return)
- new_decl = copy_node (old_decl);
+ if (param_adjustments)
+ new_decl = param_adjustments->adjust_decl (old_decl);
else
- new_decl
- = build_function_decl_skip_args (old_decl, args_to_skip, skip_return);
+ new_decl = copy_node (old_decl);
/* Generate a new name for the new version. */
DECL_NAME (new_decl) = clone_function_name_numbered (old_decl, suffix);
new_version_node->ipa_transforms_to_apply
= ipa_transforms_to_apply.copy ();
/* Copy the OLD_VERSION_NODE function tree to the new version. */
- tree_function_versioning (old_decl, new_decl, tree_map, false, args_to_skip,
- skip_return, bbs_to_copy, new_entry_block);
+ tree_function_versioning (old_decl, new_decl, tree_map, param_adjustments,
+ false, bbs_to_copy, new_entry_block);
/* Update the new version's properties.
Make The new version visible only within this translation unit. Make sure
node->former_clone_of = node->clone_of->former_clone_of;
/* Copy the OLD_VERSION_NODE function tree to the new version. */
tree_function_versioning (node->clone_of->decl, node->decl,
- node->clone.tree_map, true,
- node->clone.args_to_skip, false,
- NULL, NULL);
+ node->clone.tree_map, node->clone.param_adjustments,
+ true, NULL, NULL);
if (symtab->dump_file)
{
dump_function_to_file (node->clone_of->decl, symtab->dump_file,
{
ipa_replace_map *replace_info;
replace_info = (*node->clone.tree_map)[i];
- print_generic_expr (symtab->dump_file,
- replace_info->old_tree);
- fprintf (symtab->dump_file, " -> ");
+ fprintf (symtab->dump_file, "%i -> ",
+ (*node->clone.tree_map)[i]->parm_num);
print_generic_expr (symtab->dump_file,
replace_info->new_tree);
- fprintf (symtab->dump_file, "%s%s;",
- replace_info->replace_p ? "(replace)":"",
- replace_info->ref_p ? "(ref)":"");
}
fprintf (symtab->dump_file, "\n");
}
- if (node->clone.args_to_skip)
- {
- fprintf (symtab->dump_file, " args_to_skip: ");
- dump_bitmap (symtab->dump_file,
- node->clone.args_to_skip);
- }
- if (node->clone.args_to_skip)
- {
- fprintf (symtab->dump_file, " combined_args_to_skip:");
- dump_bitmap (symtab->dump_file, node->clone.combined_args_to_skip);
- }
+ if (node->clone.param_adjustments)
+ node->clone.param_adjustments->dump (symtab->dump_file);
}
cgraph_materialize_clone (node);
stabilized = false;
struct symtab_node;
struct cgraph_node;
struct varpool_node;
+struct cgraph_edge;
union section;
typedef union section section;
DEBUG_COUNTER (dse)
DEBUG_COUNTER (dse1)
DEBUG_COUNTER (dse2)
-DEBUG_COUNTER (eipa_sra)
DEBUG_COUNTER (gcse2_delete)
DEBUG_COUNTER (global_alloc_at_func)
DEBUG_COUNTER (global_alloc_at_reg)
DEBUG_COUNTER (if_after_reload)
DEBUG_COUNTER (if_conversion)
DEBUG_COUNTER (if_conversion_tree)
+DEBUG_COUNTER (ipa_sra_params)
+DEBUG_COUNTER (ipa_sra_retvalues)
DEBUG_COUNTER (ira_move)
DEBUG_COUNTER (local_alloc_for_sched)
DEBUG_COUNTER (merged_ipa_icf)
@option{ipa-sra-ptr-growth-factor} times the size of the original
pointer parameter.
+@item ipa-sra-max-replacements
+Maximum pieces of an aggregate that IPA-SRA tracks. As a
+consequence, it is also the maximum number of replacements of a formal
+parameter.
+
@item sra-max-scalarization-size-Ospeed
@itemx sra-max-scalarization-size-Osize
The two Scalar Reduction of Aggregates passes (SRA and IPA-SRA) aim to
int i;
gcc_checking_assert (node->has_gimple_body_p ());
- if (node->local.local)
+
+ if (!ipa_get_param_count (info))
+ disable = true;
+ else if (node->local.local)
{
int caller_count = 0;
node->call_for_symbol_thunks_and_aliases (count_callers, &caller_count,
disable = true;
}
- for (i = 0; i < ipa_get_param_count (info); i++)
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ && !node->alias && !node->thunk.thunk_p)
{
- class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- plats->m_value_range.init ();
+ fprintf (dump_file, "Initializing lattices of %s\n",
+ node->dump_name ());
+ if (disable || variable)
+ fprintf (dump_file, " Marking all lattices as %s\n",
+ disable ? "BOTTOM" : "VARIABLE");
}
- if (disable || variable)
+ auto_vec<bool, 16> surviving_params;
+ bool pre_modified = false;
+ if (!disable && node->clone.param_adjustments)
{
- for (i = 0; i < ipa_get_param_count (info); i++)
+ /* At the moment all IPA optimizations should use the number of
+ parameters of the prevailing decl as the m_always_copy_start.
+ Handling any other value would complicate the code below, so for the
+ time being let's only assert it is so. */
+ gcc_assert ((node->clone.param_adjustments->m_always_copy_start
+ == ipa_get_param_count (info))
+ || node->clone.param_adjustments->m_always_copy_start < 0);
+
+ pre_modified = true;
+ node->clone.param_adjustments->get_surviving_params (&surviving_params);
+
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ && !node->alias && !node->thunk.thunk_p)
{
- class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
- if (disable)
+ bool first = true;
+ for (int j = 0; j < ipa_get_param_count (info); j++)
{
- plats->itself.set_to_bottom ();
- plats->ctxlat.set_to_bottom ();
- set_agg_lats_to_bottom (plats);
- plats->bits_lattice.set_to_bottom ();
- plats->m_value_range.set_to_bottom ();
+ if (j < (int) surviving_params.length ()
+ && surviving_params[j])
+ continue;
+ if (first)
+ {
+ fprintf (dump_file,
+ " The following parameters are dead on arrival:");
+ first = false;
+ }
+ fprintf (dump_file, " %u", j);
}
- else
+ if (!first)
+ fprintf (dump_file, "\n");
+ }
+ }
+
+ for (i = 0; i < ipa_get_param_count (info); i++)
+ {
+ ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ if (disable
+ || (pre_modified && (surviving_params.length () <= (unsigned) i
+ || !surviving_params[i])))
+ {
+ plats->itself.set_to_bottom ();
+ plats->ctxlat.set_to_bottom ();
+ set_agg_lats_to_bottom (plats);
+ plats->bits_lattice.set_to_bottom ();
+ plats->m_value_range.set_to_bottom ();
+ }
+ else
+ {
+ plats->m_value_range.init ();
+ if (variable)
set_all_contains_variable (plats);
}
- if (dump_file && (dump_flags & TDF_DETAILS)
- && !node->alias && !node->thunk.thunk_p)
- fprintf (dump_file, "Marking all lattices of %s as %s\n",
- node->dump_name (), disable ? "BOTTOM" : "VARIABLE");
}
for (ie = node->indirect_calls; ie; ie = ie->next_callee)
print_generic_expr (dump_file, value);
fprintf (dump_file, "\n");
}
- replace_map->old_tree = NULL;
replace_map->parm_num = parm_num;
replace_map->new_tree = value;
- replace_map->replace_p = true;
- replace_map->ref_p = false;
-
return replace_map;
}
dump_profile_updates (orig_node, new_node);
}
+/* Return true if we would like to remove a parameter from NODE when cloning it
+   with KNOWN_CSTS scalar constants.  */
+
+static bool
+want_remove_some_param_p (cgraph_node *node, vec<tree> known_csts)
+{
+  auto_vec<bool, 16> surviving;
+  bool filled_vec = false;
+  ipa_node_params *info = IPA_NODE_REF (node);
+  int i, count = ipa_get_param_count (info);
+
+  for (i = 0; i < count; i++)
+    {
+      /* Parameter I is only removable if it is a known constant or unused;
+	 otherwise move on to the next one.  */
+      if (!known_csts[i] && ipa_is_param_used (info, i))
+	continue;
+
+      if (!filled_vec)
+	{
+	  /* With no previous adjustments every parameter still survives, so
+	     removing this one would be a change.  */
+	  if (!node->clone.param_adjustments)
+	    return true;
+	  node->clone.param_adjustments->get_surviving_params (&surviving);
+	  filled_vec = true;
+	}
+      /* Only a parameter that survived previous IPA transformations can be
+	 removed now.  The bounds check must come first; the previous
+	 condition (surviving.length () < (unsigned) i) was inverted and
+	 would have read past the end of the vector.  */
+      if ((unsigned) i < surviving.length () && surviving[i])
+	return true;
+    }
+  return false;
+}
+
/* Create a specialized version of NODE with known constants in KNOWN_CSTS,
known contexts in KNOWN_CONTEXTS and known aggregate values in AGGVALS and
redirect all edges in CALLERS to it. */
{
class ipa_node_params *new_info, *info = IPA_NODE_REF (node);
vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
+ vec<ipa_adjusted_param, va_gc> *new_params = NULL;
struct ipa_agg_replacement_value *av;
struct cgraph_node *new_node;
int i, count = ipa_get_param_count (info);
- bitmap args_to_skip;
-
+ ipa_param_adjustments *old_adjustments = node->clone.param_adjustments;
+ ipa_param_adjustments *new_adjustments;
gcc_assert (!info->ipcp_orig_node);
+ gcc_assert (node->local.can_change_signature
+ || !old_adjustments);
- if (node->local.can_change_signature)
+ if (old_adjustments)
{
- args_to_skip = BITMAP_GGC_ALLOC ();
- for (i = 0; i < count; i++)
+ /* At the moment all IPA optimizations should use the number of
+ parameters of the prevailing decl as the m_always_copy_start.
+ Handling any other value would complicate the code below, so for the
+ time being let's only assert it is so. */
+ gcc_assert (old_adjustments->m_always_copy_start == count
+ || old_adjustments->m_always_copy_start < 0);
+ int old_adj_count = vec_safe_length (old_adjustments->m_adj_params);
+ for (i = 0; i < old_adj_count; i++)
{
- tree t = known_csts[i];
+ ipa_adjusted_param *old_adj = &(*old_adjustments->m_adj_params)[i];
+ if (!node->local.can_change_signature
+ || old_adj->op != IPA_PARAM_OP_COPY
+ || (!known_csts[old_adj->base_index]
+ && ipa_is_param_used (info, old_adj->base_index)))
+ {
+ ipa_adjusted_param new_adj = *old_adj;
- if (t || !ipa_is_param_used (info, i))
- bitmap_set_bit (args_to_skip, i);
+ new_adj.prev_clone_adjustment = true;
+ new_adj.prev_clone_index = i;
+ vec_safe_push (new_params, new_adj);
+ }
}
+ bool skip_return = old_adjustments->m_skip_return;
+ new_adjustments = (new (ggc_alloc <ipa_param_adjustments> ())
+ ipa_param_adjustments (new_params, count,
+ skip_return));
}
- else
+ else if (node->local.can_change_signature
+ && want_remove_some_param_p (node, known_csts))
{
- args_to_skip = NULL;
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " cannot change function signature\n");
+ ipa_adjusted_param adj;
+ memset (&adj, 0, sizeof (adj));
+ adj.op = IPA_PARAM_OP_COPY;
+ for (i = 0; i < count; i++)
+ if (!known_csts[i] && ipa_is_param_used (info, i))
+ {
+ adj.base_index = i;
+ adj.prev_clone_index = i;
+ vec_safe_push (new_params, adj);
+ }
+ new_adjustments = (new (ggc_alloc <ipa_param_adjustments> ())
+ ipa_param_adjustments (new_params, count, false));
}
+ else
+ new_adjustments = NULL;
+ replace_trees = vec_safe_copy (node->clone.tree_map);
for (i = 0; i < count; i++)
{
tree t = known_csts[i];
IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (
node->decl)));
new_node = node->create_virtual_clone (callers, replace_trees,
- args_to_skip, "constprop",
+ new_adjustments, "constprop",
suffix_counter);
suffix_counter++;
for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
{
- if (((!r->old_tree && r->parm_num == i)
- || (r->old_tree && r->old_tree == ipa_get_param (parms_info, i)))
- && r->replace_p && !r->ref_p)
+ if (r->parm_num == i)
{
known_vals[i] = r->new_tree;
break;
/* Copy the OLD_VERSION_NODE function tree to the new version. */
tree_function_versioning (node->decl, first_clone->decl,
- NULL, true, NULL, false,
- NULL, NULL);
+ NULL, NULL, true, NULL, NULL);
/* The function will be short lived and removed after we inline all the clones,
but make it internal so we won't confuse ourself. */
#include "system.h"
#include "coretypes.h"
#include "backend.h"
-#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
+#include "tree-eh.h"
#include "stor-layout.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
+#include "tree-cfg.h"
#include "tree-dfa.h"
#include "ipa-param-manipulation.h"
#include "print-tree.h"
#include "gimple-pretty-print.h"
#include "builtins.h"
+#include "tree-ssa.h"
+#include "tree-inline.h"
-/* Return a heap allocated vector containing formal parameters of FNDECL. */
-vec<tree>
-ipa_get_vector_of_formal_parms (tree fndecl)
+/* Actual prefixes of different newly synthesized parameters. Keep in sync
+ with IPA_PARAM_PREFIX_* defines. */
+
+static const char *ipa_param_prefixes[IPA_PARAM_PREFIX_COUNT]
+ = {"SYNTH",
+ "ISRA",
+ "simd",
+ "mask"};
+
+/* Names of parameters for dumping. Keep in sync with enum ipa_parm_op. */
+
+static const char *ipa_param_op_names[IPA_PARAM_PREFIX_COUNT]
+ = {"IPA_PARAM_OP_UNDEFINED",
+ "IPA_PARAM_OP_COPY",
+ "IPA_PARAM_OP_NEW",
+ "IPA_PARAM_OP_SPLIT"};
+
+/* Fill an empty vector ARGS with PARM_DECLs representing formal parameters of
+ FNDECL. The function should not be called during LTO WPA phase except for
+ thunks (or functions with bodies streamed in). */
+
+void
+push_function_arg_decls (vec<tree> *args, tree fndecl)
{
- vec<tree> args;
int count;
tree parm;
- gcc_assert (!flag_wpa);
+ /* Safety check that we do not attempt to use the function in WPA, except
+ when the function is a thunk and then we have DECL_ARGUMENTS or when we
+ have already explicitly loaded its body. */
+ gcc_assert (!flag_wpa
+ || DECL_ARGUMENTS (fndecl)
+ || gimple_has_body_p (fndecl));
count = 0;
for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
count++;
- args.create (count);
+ args->reserve_exact (count);
for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
- args.quick_push (parm);
-
- return args;
+ args->quick_push (parm);
}
-/* Return a heap allocated vector containing types of formal parameters of
+/* Fill an empty vector TYPES with trees representing formal parameters of
function type FNTYPE. */
-vec<tree>
-ipa_get_vector_of_formal_parm_types (tree fntype)
+void
+push_function_arg_types (vec<tree> *types, tree fntype)
{
- vec<tree> types;
int count = 0;
tree t;
for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
count++;
- types.create (count);
+ types->reserve_exact (count);
for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- types.quick_push (TREE_VALUE (t));
-
- return types;
+ types->quick_push (TREE_VALUE (t));
}
-/* Modify the function declaration FNDECL and its type according to the plan in
- ADJUSTMENTS. It also sets base fields of individual adjustments structures
- to reflect the actual parameters being modified which are determined by the
- base_index field. */
+/* Dump the adjustments in the vector ADJUSTMENTS to dump_file in a human
+ friendly way, assuming they are meant to be applied to FNDECL. */
void
-ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
-{
- vec<tree> oparms = ipa_get_vector_of_formal_parms (fndecl);
- tree orig_type = TREE_TYPE (fndecl);
- tree old_arg_types = TYPE_ARG_TYPES (orig_type);
-
- /* The following test is an ugly hack, some functions simply don't have any
- arguments in their type. This is probably a bug but well... */
- bool care_for_types = (old_arg_types != NULL_TREE);
- bool last_parm_void;
- vec<tree> otypes;
- if (care_for_types)
- {
- last_parm_void = (TREE_VALUE (tree_last (old_arg_types))
- == void_type_node);
- otypes = ipa_get_vector_of_formal_parm_types (orig_type);
- if (last_parm_void)
- gcc_assert (oparms.length () + 1 == otypes.length ());
- else
- gcc_assert (oparms.length () == otypes.length ());
- }
- else
- {
- last_parm_void = false;
- otypes.create (0);
- }
+ipa_dump_adjusted_parameters (FILE *f,
+ vec<ipa_adjusted_param, va_gc> *adj_params)
+{
+ unsigned i, len = vec_safe_length (adj_params);
+ bool first = true;
- int len = adjustments.length ();
- tree *link = &DECL_ARGUMENTS (fndecl);
- tree new_arg_types = NULL;
- for (int i = 0; i < len; i++)
+ fprintf (f, " IPA adjusted parameters: ");
+ for (i = 0; i < len; i++)
{
- struct ipa_parm_adjustment *adj;
- gcc_assert (link);
+ struct ipa_adjusted_param *apm;
+ apm = &(*adj_params)[i];
- adj = &adjustments[i];
- tree parm;
- if (adj->op == IPA_PARM_OP_NEW)
- parm = NULL;
+ if (!first)
+ fprintf (f, " ");
else
- parm = oparms[adj->base_index];
- adj->base = parm;
+ first = false;
- if (adj->op == IPA_PARM_OP_COPY)
- {
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, otypes[adj->base_index],
- new_arg_types);
- *link = parm;
- link = &DECL_CHAIN (parm);
- }
- else if (adj->op != IPA_PARM_OP_REMOVE)
+ fprintf (f, "%i. %s %s", i, ipa_param_op_names[apm->op],
+ apm->prev_clone_adjustment ? "prev_clone_adjustment " : "");
+ switch (apm->op)
{
- tree new_parm;
- tree ptype;
-
- if (adj->by_ref)
- ptype = build_pointer_type (adj->type);
- else
- {
- ptype = adj->type;
- if (is_gimple_reg_type (ptype)
- && TYPE_MODE (ptype) != BLKmode)
- {
- unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ptype));
- if (TYPE_ALIGN (ptype) != malign)
- ptype = build_aligned_type (ptype, malign);
- }
- }
+ case IPA_PARAM_OP_UNDEFINED:
+ break;
- if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
+ case IPA_PARAM_OP_COPY:
+ fprintf (f, ", base_index: %u", apm->base_index);
+ fprintf (f, ", prev_clone_index: %u", apm->prev_clone_index);
+ break;
- new_parm = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL_TREE,
- ptype);
- const char *prefix = adj->arg_prefix ? adj->arg_prefix : "SYNTH";
- DECL_NAME (new_parm) = create_tmp_var_name (prefix);
- DECL_ARTIFICIAL (new_parm) = 1;
- DECL_ARG_TYPE (new_parm) = ptype;
- DECL_CONTEXT (new_parm) = fndecl;
- TREE_USED (new_parm) = 1;
- DECL_IGNORED_P (new_parm) = 1;
- layout_decl (new_parm, 0);
+ case IPA_PARAM_OP_SPLIT:
+ fprintf (f, ", offset: %u", apm->unit_offset);
+ /* fall-through */
+ case IPA_PARAM_OP_NEW:
+ fprintf (f, ", base_index: %u", apm->base_index);
+ fprintf (f, ", prev_clone_index: %u", apm->prev_clone_index);
+ print_node_brief (f, ", type: ", apm->type, 0);
+ print_node_brief (f, ", alias type: ", apm->alias_ptr_type, 0);
+ fprintf (f, " prefix: %s",
+ ipa_param_prefixes[apm->param_prefix_index]);
+ if (apm->reverse)
+ fprintf (f, ", reverse-sso");
+ break;
+ }
+ fprintf (f, "\n");
+ }
+}
- if (adj->op == IPA_PARM_OP_NEW)
- adj->base = NULL;
- else
- adj->base = parm;
- adj->new_decl = new_parm;
+/* Fill NEW_TYPES with types of a function after its current OTYPES have been
+ modified as described in ADJ_PARAMS. When USE_PREV_INDICES is true, use
+ prev_clone_index from ADJ_PARAMS as opposed to base_index, which is used
+ when USE_PREV_INDICES is false. */
- *link = new_parm;
- link = &DECL_CHAIN (new_parm);
+static void
+fill_vector_of_new_param_types (vec<tree> *new_types, vec<tree> *otypes,
+ vec<ipa_adjusted_param, va_gc> *adj_params,
+ bool use_prev_indices)
+{
+ unsigned adj_len = vec_safe_length (adj_params);
+ new_types->reserve_exact (adj_len);
+ for (unsigned i = 0; i < adj_len ; i++)
+ {
+ ipa_adjusted_param *apm = &(*adj_params)[i];
+ if (apm->op == IPA_PARAM_OP_COPY)
+ {
+ unsigned index
+ = use_prev_indices ? apm->prev_clone_index : apm->base_index;
+ /* The following needs to be handled gracefully because of type
+ mismatches. This happens with LTO but apparently also in Fortran
+ with -fcoarray=lib -O2 -lcaf_single -latomic. */
+ if (index >= otypes->length ())
+ continue;
+ new_types->quick_push ((*otypes)[index]);
}
+ else if (apm->op == IPA_PARAM_OP_NEW
+ || apm->op == IPA_PARAM_OP_SPLIT)
+ {
+ tree ntype = apm->type;
+ if (is_gimple_reg_type (ntype)
+ && TYPE_MODE (ntype) != BLKmode)
+ {
+ unsigned malign = GET_MODE_ALIGNMENT (TYPE_MODE (ntype));
+ if (TYPE_ALIGN (ntype) != malign)
+ ntype = build_aligned_type (ntype, malign);
+ }
+ new_types->quick_push (ntype);
+ }
+ else
+ gcc_unreachable ();
}
+}
- *link = NULL_TREE;
+/* Build and return a function type just like ORIG_TYPE but with parameter
+ types given in NEW_PARAM_TYPES - which can be NULL if, but only if,
+ ORIG_TYPE itself has NULL TYPE_ARG_TYPES. If METHOD2FUNC is true, also make
+ it a FUNCTION_TYPE instead of METHOD_TYPE. */
- tree new_reversed = NULL;
- if (care_for_types)
+static tree
+build_adjusted_function_type (tree orig_type, vec<tree> *new_param_types,
+ bool method2func, bool skip_return)
+{
+ tree new_arg_types = NULL;
+ if (TYPE_ARG_TYPES (orig_type))
{
- new_reversed = nreverse (new_arg_types);
+ gcc_checking_assert (new_param_types);
+ bool last_parm_void = (TREE_VALUE (tree_last (TYPE_ARG_TYPES (orig_type)))
+ == void_type_node);
+ unsigned len = new_param_types->length ();
+ for (unsigned i = 0; i < len; i++)
+ new_arg_types = tree_cons (NULL_TREE, (*new_param_types)[i],
+ new_arg_types);
+
+ tree new_reversed = nreverse (new_arg_types);
if (last_parm_void)
{
if (new_reversed)
else
new_reversed = void_list_node;
}
+ new_arg_types = new_reversed;
}
- /* Use copy_node to preserve as much as possible from original type
- (debug info, attribute lists etc.)
- Exception is METHOD_TYPEs must have THIS argument.
- When we are asked to remove it, we need to build new FUNCTION_TYPE
- instead. */
+ /* Use build_distinct_type_copy to preserve as much as possible from original
+ type (debug info, attribute lists etc.). The one exception is
+ METHOD_TYPEs which must have THIS argument and when we are asked to remove
+ it, we need to build new FUNCTION_TYPE instead. */
tree new_type = NULL;
- if (TREE_CODE (orig_type) != METHOD_TYPE
- || (adjustments[0].op == IPA_PARM_OP_COPY
- && adjustments[0].base_index == 0))
+ if (method2func)
+ {
+ tree ret_type;
+ if (skip_return)
+ ret_type = void_type_node;
+ else
+ ret_type = TREE_TYPE (orig_type);
+
+ new_type
+ = build_distinct_type_copy (build_function_type (ret_type,
+ new_arg_types));
+ TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
+ }
+ else
{
new_type = build_distinct_type_copy (orig_type);
- TYPE_ARG_TYPES (new_type) = new_reversed;
+ TYPE_ARG_TYPES (new_type) = new_arg_types;
+ if (skip_return)
+ TREE_TYPE (new_type) = void_type_node;
+ }
+
+ return new_type;
+}
+
+/* Return the maximum index in any IPA_PARAM_OP_COPY adjustment or -1 if there
+ is none. */
+
+int
+ipa_param_adjustments::get_max_base_index ()
+{
+ unsigned adj_len = vec_safe_length (m_adj_params);
+ int max_index = -1;
+ for (unsigned i = 0; i < adj_len ; i++)
+ {
+ ipa_adjusted_param *apm = &(*m_adj_params)[i];
+ if (apm->op == IPA_PARAM_OP_COPY
+ && max_index < apm->base_index)
+ max_index = apm->base_index;
+ }
+ return max_index;
+}
+
+
+/* Fill SURVIVING_PARAMS with an array of bools where each one says whether a
+ parameter that originally was at that position still survives in the given
+ clone or is removed/replaced. If the final array is smaller than an index
+ of an original parameter, that parameter also did not survive. That a
+ parameter survives does not mean it has the same index as before. */
+
+void
+ipa_param_adjustments::get_surviving_params (vec<bool> *surviving_params)
+{
+ unsigned adj_len = vec_safe_length (m_adj_params);
+ int max_index = get_max_base_index ();
+
+ if (max_index < 0)
+ return;
+ surviving_params->reserve_exact (max_index + 1);
+ surviving_params->quick_grow_cleared (max_index + 1);
+ for (unsigned i = 0; i < adj_len ; i++)
+ {
+ ipa_adjusted_param *apm = &(*m_adj_params)[i];
+ if (apm->op == IPA_PARAM_OP_COPY)
+ (*surviving_params)[apm->base_index] = true;
+ }
+}
+
+/* Fill NEW_INDICES with new indices of each surviving parameter or -1 for
+ those which do not survive. Any parameter outside of the length of the vector
+ does not survive. There is currently no support for a parameter to be
+ copied to two distinct new parameters. */
+
+void
+ipa_param_adjustments::get_updated_indices (vec<int> *new_indices)
+{
+ unsigned adj_len = vec_safe_length (m_adj_params);
+ int max_index = get_max_base_index ();
+
+ if (max_index < 0)
+ return;
+ unsigned res_len = max_index + 1;
+ new_indices->reserve_exact (res_len);
+ for (unsigned i = 0; i < res_len ; i++)
+ new_indices->quick_push (-1);
+ for (unsigned i = 0; i < adj_len ; i++)
+ {
+ ipa_adjusted_param *apm = &(*m_adj_params)[i];
+ if (apm->op == IPA_PARAM_OP_COPY)
+ (*new_indices)[apm->base_index] = i;
+ }
+}
+
+/* Return true if the first parameter (assuming there was one) survives the
+ transformation intact and remains the first one. */
+
+bool
+ipa_param_adjustments::first_param_intact_p ()
+{
+ return (!vec_safe_is_empty (m_adj_params)
+ && (*m_adj_params)[0].op == IPA_PARAM_OP_COPY
+ && (*m_adj_params)[0].base_index == 0);
+}
+
+/* Return true if we have to change what has formerly been a method into a
+ function. */
+
+bool
+ipa_param_adjustments::method2func_p (tree orig_type)
+{
+ return ((TREE_CODE (orig_type) == METHOD_TYPE) && !first_param_intact_p ());
+}
+
+/* Given function type OLD_TYPE, return a new type derived from it after
+ performing all stored modifications. TYPE_ORIGINAL_P should be true when
+ OLD_TYPE refers to the type before any IPA transformations, as opposed to a
+ type that can be an intermediate one in between various IPA
+ transformations. */
+
+tree
+ipa_param_adjustments::build_new_function_type (tree old_type,
+ bool type_original_p)
+{
+ auto_vec<tree,16> new_param_types, *new_param_types_p;
+ if (prototype_p (old_type))
+ {
+ auto_vec<tree, 16> otypes;
+ push_function_arg_types (&otypes, old_type);
+ fill_vector_of_new_param_types (&new_param_types, &otypes, m_adj_params,
+ !type_original_p);
+ new_param_types_p = &new_param_types;
}
else
+ new_param_types_p = NULL;
+
+ return build_adjusted_function_type (old_type, new_param_types_p,
+ method2func_p (old_type), m_skip_return);
+}
+
+/* Build variant of function decl ORIG_DECL which has no return value if
+ M_SKIP_RETURN is true and, if ORIG_DECL's type or parameters are known, has
+ this type adjusted as indicated in M_ADJ_PARAMS. Arguments from
+ DECL_ARGUMENTS list are not processed now, since they are linked by
+ TREE_CHAIN directly and not accessible in LTO during WPA. The caller is
+ responsible for eliminating them when clones are properly materialized. */
+
+tree
+ipa_param_adjustments::adjust_decl (tree orig_decl)
+{
+ tree new_decl = copy_node (orig_decl);
+ tree orig_type = TREE_TYPE (orig_decl);
+ if (prototype_p (orig_type)
+ || (m_skip_return && !VOID_TYPE_P (TREE_TYPE (orig_type))))
{
- new_type
- = build_distinct_type_copy (build_function_type (TREE_TYPE (orig_type),
- new_reversed));
- TYPE_CONTEXT (new_type) = TYPE_CONTEXT (orig_type);
- DECL_VINDEX (fndecl) = NULL_TREE;
+ tree new_type = build_new_function_type (orig_type, false);
+ TREE_TYPE (new_decl) = new_type;
}
+ if (method2func_p (orig_type))
+ DECL_VINDEX (new_decl) = NULL_TREE;
/* When signature changes, we need to clear builtin info. */
- if (fndecl_built_in_p (fndecl))
- set_decl_built_in_function (fndecl, NOT_BUILT_IN, 0);
+ if (fndecl_built_in_p (new_decl))
+ set_decl_built_in_function (new_decl, NOT_BUILT_IN, 0);
+
+ DECL_VIRTUAL_P (new_decl) = 0;
+ DECL_LANG_SPECIFIC (new_decl) = NULL;
- TREE_TYPE (fndecl) = new_type;
- DECL_VIRTUAL_P (fndecl) = 0;
- DECL_LANG_SPECIFIC (fndecl) = NULL;
- otypes.release ();
- oparms.release ();
+ return new_decl;
}
-/* Modify actual arguments of a function call CS as indicated in ADJUSTMENTS.
- If this is a directly recursive call, CS must be NULL. Otherwise it must
- contain the corresponding call graph edge. */
+/* Wrapper around get_base_ref_and_offset for cases interesting for IPA-SRA
+ transformations. Return true if EXPR has an interesting form and fill in
+ *BASE_P and *UNIT_OFFSET_P with the appropriate info. */
-void
-ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt,
- ipa_parm_adjustment_vec adjustments)
-{
- struct cgraph_node *current_node = cgraph_node::get (current_function_decl);
- vec<tree> vargs;
- vec<tree, va_gc> **debug_args = NULL;
- gcall *new_stmt;
- gimple_stmt_iterator gsi, prev_gsi;
- tree callee_decl;
- int i, len;
+static bool
+isra_get_ref_base_and_offset (tree expr, tree *base_p, unsigned *unit_offset_p)
+{
+ HOST_WIDE_INT offset, size;
+ bool reverse;
+ tree base
+ = get_ref_base_and_extent_hwi (expr, &offset, &size, &reverse);
+ if (!base || size < 0)
+ return false;
- len = adjustments.length ();
- vargs.create (len);
- callee_decl = !cs ? gimple_call_fndecl (stmt) : cs->callee->decl;
- current_node->remove_stmt_references (stmt);
+ if ((offset % BITS_PER_UNIT) != 0)
+ return false;
- gsi = gsi_for_stmt (stmt);
- prev_gsi = gsi;
- gsi_prev (&prev_gsi);
- for (i = 0; i < len; i++)
+ if (TREE_CODE (base) == MEM_REF)
{
- struct ipa_parm_adjustment *adj;
+ poly_int64 plmoff = mem_ref_offset (base).force_shwi ();
+ HOST_WIDE_INT moff;
+ bool is_cst = plmoff.is_constant (&moff);
+ if (!is_cst)
+ return false;
+ offset += moff * BITS_PER_UNIT;
+ base = TREE_OPERAND (base, 0);
+ }
- adj = &adjustments[i];
+ if (offset < 0 || (offset / BITS_PER_UNIT) > UINT_MAX)
+ return false;
- if (adj->op == IPA_PARM_OP_COPY)
- {
- tree arg = gimple_call_arg (stmt, adj->base_index);
+ *base_p = base;
+ *unit_offset_p = offset / BITS_PER_UNIT;
+ return true;
+}
- vargs.quick_push (arg);
+/* Return true if EXPR describes a transitive split (i.e. one that happened for
+ both the caller and the callee) as recorded in PERFORMED_SPLITS. In that
+ case, store index of the respective record in PERFORMED_SPLITS into
+ *SM_IDX_P and the unit offset from all handled components in EXPR into
+ *UNIT_OFFSET_P. */
+
+static bool
+transitive_split_p (vec<ipa_param_performed_split, va_gc> *performed_splits,
+ tree expr, unsigned *sm_idx_p, unsigned *unit_offset_p)
+{
+ tree base;
+ if (!isra_get_ref_base_and_offset (expr, &base, unit_offset_p))
+ return false;
+
+ if (TREE_CODE (base) == SSA_NAME)
+ {
+ base = SSA_NAME_VAR (base);
+ if (!base)
+ return false;
+ }
+
+ unsigned len = vec_safe_length (performed_splits);
+ for (unsigned i = 0 ; i < len; i++)
+ {
+ ipa_param_performed_split *sm = &(*performed_splits)[i];
+ if (sm->dummy_decl == base)
+ {
+ *sm_idx_p = i;
+ return true;
}
- else if (adj->op != IPA_PARM_OP_REMOVE)
+ }
+ return false;
+}
+
+/* Structure to hold declarations representing transitive IPA-SRA splits. In
+ essence, if we need to pass UNIT_OFFSET of a parameter which originally has
+ number BASE_INDEX, we should pass down REPL. */
+
+struct transitive_split_map
+{
+ tree repl;
+ unsigned base_index;
+ unsigned unit_offset;
+};
+
+/* If call STMT contains any parameters representing transitive splits as
+ described by PERFORMED_SPLITS, return the number of extra parameters that
+ were added during clone materialization and fill in INDEX_MAP with adjusted
+ indices of corresponding original parameters and TRANS_MAP with description
+ of all transitive replacement descriptions. Otherwise return zero. */
+
+static unsigned
+init_transitive_splits (vec<ipa_param_performed_split, va_gc> *performed_splits,
+ gcall *stmt, vec <unsigned> *index_map,
+ auto_vec <transitive_split_map> *trans_map)
+{
+ unsigned phony_arguments = 0;
+ unsigned stmt_idx = 0, base_index = 0;
+ unsigned nargs = gimple_call_num_args (stmt);
+ while (stmt_idx < nargs)
+ {
+ unsigned unit_offset_delta;
+ tree base_arg = gimple_call_arg (stmt, stmt_idx);
+
+ if (phony_arguments > 0)
+ index_map->safe_push (stmt_idx);
+
+ unsigned sm_idx;
+ stmt_idx++;
+ if (transitive_split_p (performed_splits, base_arg, &sm_idx,
+ &unit_offset_delta))
{
- tree expr, base, off;
- location_t loc;
- unsigned int deref_align = 0;
- bool deref_base = false;
-
- /* We create a new parameter out of the value of the old one, we can
- do the following kind of transformations:
-
- - A scalar passed by reference is converted to a scalar passed by
- value. (adj->by_ref is false and the type of the original
- actual argument is a pointer to a scalar).
-
- - A part of an aggregate is passed instead of the whole aggregate.
- The part can be passed either by value or by reference, this is
- determined by value of adj->by_ref. Moreover, the code below
- handles both situations when the original aggregate is passed by
- value (its type is not a pointer) and when it is passed by
- reference (it is a pointer to an aggregate).
-
- When the new argument is passed by reference (adj->by_ref is true)
- it must be a part of an aggregate and therefore we form it by
- simply taking the address of a reference inside the original
- aggregate. */
-
- poly_int64 byte_offset = exact_div (adj->offset, BITS_PER_UNIT);
- base = gimple_call_arg (stmt, adj->base_index);
- loc = gimple_location (stmt);
-
- if (TREE_CODE (base) != ADDR_EXPR
- && POINTER_TYPE_P (TREE_TYPE (base)))
- off = build_int_cst (adj->alias_ptr_type, byte_offset);
- else
+ if (phony_arguments == 0)
+ /* We have optimistically avoided constructing index_map so far but
+ now it is clear it will be necessary, so let's create the easy
+ bit we skipped until now. */
+ for (unsigned k = 0; k < stmt_idx; k++)
+ index_map->safe_push (k);
+
+ tree dummy = (*performed_splits)[sm_idx].dummy_decl;
+ for (unsigned j = sm_idx; j < performed_splits->length (); j++)
{
- poly_int64 base_offset;
- tree prev_base;
- bool addrof;
+ ipa_param_performed_split *caller_split
+ = &(*performed_splits)[j];
+ if (caller_split->dummy_decl != dummy)
+ break;
- if (TREE_CODE (base) == ADDR_EXPR)
- {
- base = TREE_OPERAND (base, 0);
- addrof = true;
- }
- else
- addrof = false;
- prev_base = base;
- base = get_addr_base_and_unit_offset (base, &base_offset);
- /* Aggregate arguments can have non-invariant addresses. */
- if (!base)
- {
- base = build_fold_addr_expr (prev_base);
- off = build_int_cst (adj->alias_ptr_type, byte_offset);
- }
- else if (TREE_CODE (base) == MEM_REF)
- {
- if (!addrof)
- {
- deref_base = true;
- deref_align = TYPE_ALIGN (TREE_TYPE (base));
- }
- off = build_int_cst (adj->alias_ptr_type,
- base_offset + byte_offset);
- off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
- off);
- base = TREE_OPERAND (base, 0);
- }
- else
+ tree arg = gimple_call_arg (stmt, stmt_idx);
+ struct transitive_split_map tsm;
+ tsm.repl = arg;
+ tsm.base_index = base_index;
+ if (caller_split->unit_offset >= unit_offset_delta)
{
- off = build_int_cst (adj->alias_ptr_type,
- base_offset + byte_offset);
- base = build_fold_addr_expr (base);
+ tsm.unit_offset
+ = (caller_split->unit_offset - unit_offset_delta);
+ trans_map->safe_push (tsm);
}
+
+ phony_arguments++;
+ stmt_idx++;
}
+ }
+ base_index++;
+ }
+ return phony_arguments;
+}
- if (!adj->by_ref)
+/* Modify actual arguments of a function call in statement STMT, assuming it
+ calls CALLEE_DECL. CALLER_ADJ must be the description of parameter
+ adjustments of the caller or NULL if there are none. Return the new
+ statement that replaced the old one. When invoked, cfun and
+ current_function_decl have to be set to the caller. */
+
+gcall *
+ipa_param_adjustments::modify_call (gcall *stmt,
+ vec<ipa_param_performed_split,
+ va_gc> *performed_splits,
+ tree callee_decl, bool update_references)
+{
+ unsigned len = vec_safe_length (m_adj_params);
+ auto_vec<tree, 16> vargs (len);
+ tree old_decl = gimple_call_fndecl (stmt);
+ unsigned old_nargs = gimple_call_num_args (stmt);
+ auto_vec<bool, 16> kept (old_nargs);
+ kept.quick_grow_cleared (old_nargs);
+
+ auto_vec <unsigned, 16> index_map;
+ auto_vec <transitive_split_map> trans_map;
+ bool transitive_remapping = false;
+
+ if (performed_splits)
+ {
+ unsigned removed = init_transitive_splits (performed_splits,
+ stmt, &index_map, &trans_map);
+ if (removed > 0)
+ {
+ transitive_remapping = true;
+ old_nargs -= removed;
+ }
+ }
+
+ cgraph_node *current_node = cgraph_node::get (current_function_decl);
+ if (update_references)
+ current_node->remove_stmt_references (stmt);
+
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gimple_stmt_iterator prev_gsi = gsi;
+ gsi_prev (&prev_gsi);
+ for (unsigned i = 0; i < len; i++)
+ {
+ ipa_adjusted_param *apm = &(*m_adj_params)[i];
+ if (apm->op == IPA_PARAM_OP_COPY)
+ {
+ unsigned index = apm->base_index;
+ if (index >= old_nargs)
+ /* Can happen if the original call has argument mismatch,
+ ignore. */
+ continue;
+ if (transitive_remapping)
+ index = index_map[apm->base_index];
+
+ tree arg = gimple_call_arg (stmt, index);
+
+ vargs.quick_push (arg);
+ kept[index] = true;
+ continue;
+ }
+
+ /* At the moment the only user of IPA_PARAM_OP_NEW modifies calls itself.
+ If we ever want to support it during WPA IPA stage, we'll need a
+ mechanism to call into the IPA passes that introduced them. Currently
+ we simply mandate that IPA infrastructure understands all argument
+ modifications. Remember, edge redirection/modification is done only
+ once, not in steps for each pass modifying the callee like clone
+ materialization. */
+ gcc_assert (apm->op == IPA_PARAM_OP_SPLIT);
+
+ /* We have to handle transitive changes differently using the maps we
+ have created before. So look into them first. */
+ tree repl = NULL_TREE;
+ for (unsigned j = 0; j < trans_map.length (); j++)
+ if (trans_map[j].base_index == apm->base_index
+ && trans_map[j].unit_offset == apm->unit_offset)
+ {
+ repl = trans_map[j].repl;
+ break;
+ }
+ if (repl)
+ {
+ vargs.quick_push (repl);
+ continue;
+ }
+
+ unsigned index = apm->base_index;
+ if (index >= old_nargs)
+ /* Can happen if the original call has argument mismatch, ignore. */
+ continue;
+ if (transitive_remapping)
+ index = index_map[apm->base_index];
+ tree base = gimple_call_arg (stmt, index);
+
+ /* We create a new parameter out of the value of the old one, we can
+ do the following kind of transformations:
+
+ - A scalar passed by reference, potentially as a part of a larger
+ aggregate, is converted to a scalar passed by value.
+
+ - A part of an aggregate is passed instead of the whole aggregate. */
+
+ location_t loc = gimple_location (stmt);
+ tree off;
+ bool deref_base = false;
+ unsigned int deref_align = 0;
+ if (TREE_CODE (base) != ADDR_EXPR
+ && POINTER_TYPE_P (TREE_TYPE (base)))
+ off = build_int_cst (apm->alias_ptr_type, apm->unit_offset);
+ else
+ {
+ bool addrof;
+ if (TREE_CODE (base) == ADDR_EXPR)
{
- tree type = adj->type;
- unsigned int align;
- unsigned HOST_WIDE_INT misalign;
+ base = TREE_OPERAND (base, 0);
+ addrof = true;
+ }
+ else
+ addrof = false;
- if (deref_base)
- {
- align = deref_align;
- misalign = 0;
- }
- else
- {
- get_pointer_alignment_1 (base, &align, &misalign);
- if (TYPE_ALIGN (type) > align)
- align = TYPE_ALIGN (type);
- }
- misalign += (offset_int::from (wi::to_wide (off),
- SIGNED).to_short_addr ()
- * BITS_PER_UNIT);
- misalign = misalign & (align - 1);
- if (misalign != 0)
- align = least_bit_hwi (misalign);
- if (align < TYPE_ALIGN (type))
- type = build_aligned_type (type, align);
- base = force_gimple_operand_gsi (&gsi, base,
- true, NULL, true, GSI_SAME_STMT);
- expr = fold_build2_loc (loc, MEM_REF, type, base, off);
- REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
- /* If expr is not a valid gimple call argument emit
- a load into a temporary. */
- if (is_gimple_reg_type (TREE_TYPE (expr)))
+ tree prev_base = base;
+ poly_int64 base_offset;
+ base = get_addr_base_and_unit_offset (base, &base_offset);
+
+ /* Aggregate arguments can have non-invariant addresses. */
+ if (!base)
+ {
+ base = build_fold_addr_expr (prev_base);
+ off = build_int_cst (apm->alias_ptr_type, apm->unit_offset);
+ }
+ else if (TREE_CODE (base) == MEM_REF)
+ {
+ if (!addrof)
{
- gimple *tem = gimple_build_assign (NULL_TREE, expr);
- if (gimple_in_ssa_p (cfun))
- {
- gimple_set_vuse (tem, gimple_vuse (stmt));
- expr = make_ssa_name (TREE_TYPE (expr), tem);
- }
- else
- expr = create_tmp_reg (TREE_TYPE (expr));
- gimple_assign_set_lhs (tem, expr);
- gimple_set_location (tem, loc);
- gsi_insert_before (&gsi, tem, GSI_SAME_STMT);
+ deref_base = true;
+ deref_align = TYPE_ALIGN (TREE_TYPE (base));
}
+ off = build_int_cst (apm->alias_ptr_type,
+ base_offset + apm->unit_offset);
+ off = int_const_binop (PLUS_EXPR, TREE_OPERAND (base, 1),
+ off);
+ base = TREE_OPERAND (base, 0);
}
else
{
- expr = fold_build2_loc (loc, MEM_REF, adj->type, base, off);
- REF_REVERSE_STORAGE_ORDER (expr) = adj->reverse;
- expr = build_fold_addr_expr (expr);
- expr = force_gimple_operand_gsi (&gsi, expr,
- true, NULL, true, GSI_SAME_STMT);
+ off = build_int_cst (apm->alias_ptr_type,
+ base_offset + apm->unit_offset);
+ base = build_fold_addr_expr (base);
}
- vargs.quick_push (expr);
}
- if (adj->op != IPA_PARM_OP_COPY && MAY_HAVE_DEBUG_BIND_STMTS)
+
+ tree type = apm->type;
+ unsigned int align;
+ unsigned HOST_WIDE_INT misalign;
+
+ if (deref_base)
{
- unsigned int ix;
- tree ddecl = NULL_TREE, origin = DECL_ORIGIN (adj->base), arg;
- gimple *def_temp;
+ align = deref_align;
+ misalign = 0;
+ }
+ else
+ {
+ get_pointer_alignment_1 (base, &align, &misalign);
+ /* All users must make sure that we can be optimistic when it
+ comes to alignment in this case (by inspecting the final users
+ of these new parameters). */
+ if (TYPE_ALIGN (type) > align)
+ align = TYPE_ALIGN (type);
+ }
+ misalign
+ += (offset_int::from (wi::to_wide (off), SIGNED).to_short_addr ()
+ * BITS_PER_UNIT);
+ misalign = misalign & (align - 1);
+ if (misalign != 0)
+ align = least_bit_hwi (misalign);
+ if (align < TYPE_ALIGN (type))
+ type = build_aligned_type (type, align);
+ base = force_gimple_operand_gsi (&gsi, base,
+ true, NULL, true, GSI_SAME_STMT);
+ tree expr = fold_build2_loc (loc, MEM_REF, type, base, off);
+ REF_REVERSE_STORAGE_ORDER (expr) = apm->reverse;
+ /* If expr is not a valid gimple call argument emit
+ a load into a temporary. */
+ if (is_gimple_reg_type (TREE_TYPE (expr)))
+ {
+ gimple *tem = gimple_build_assign (NULL_TREE, expr);
+ if (gimple_in_ssa_p (cfun))
+ {
+ gimple_set_vuse (tem, gimple_vuse (stmt));
+ expr = make_ssa_name (TREE_TYPE (expr), tem);
+ }
+ else
+ expr = create_tmp_reg (TREE_TYPE (expr));
+ gimple_assign_set_lhs (tem, expr);
+ gsi_insert_before (&gsi, tem, GSI_SAME_STMT);
+ }
+ vargs.quick_push (expr);
+ }
+
+ if (m_always_copy_start >= 0)
+ for (unsigned i = m_always_copy_start; i < old_nargs; i++)
+ vargs.safe_push (gimple_call_arg (stmt, i));
+
+ /* For optimized away parameters, add on the caller side
+ before the call
+ DEBUG D#X => parm_Y(D)
+ stmts and associate D#X with parm in decl_debug_args_lookup
+ vector to say for debug info that if parameter parm had been passed,
+ it would have value parm_Y(D). */
+ if (MAY_HAVE_DEBUG_BIND_STMTS && old_decl && callee_decl)
+ {
+ vec<tree, va_gc> **debug_args = NULL;
+ unsigned i = 0;
+ for (tree old_parm = DECL_ARGUMENTS (old_decl);
+ old_parm && i < old_nargs && ((int) i) < m_always_copy_start;
+ old_parm = DECL_CHAIN (old_parm), i++)
+ {
+ if (!is_gimple_reg (old_parm) || kept[i])
+ continue;
+ tree origin = DECL_ORIGIN (old_parm);
+ tree arg = gimple_call_arg (stmt, i);
- arg = gimple_call_arg (stmt, adj->base_index);
if (!useless_type_conversion_p (TREE_TYPE (origin), TREE_TYPE (arg)))
{
if (!fold_convertible_p (TREE_TYPE (origin), arg))
continue;
- arg = fold_convert_loc (gimple_location (stmt),
- TREE_TYPE (origin), arg);
+ tree rhs1;
+ if (TREE_CODE (arg) == SSA_NAME
+ && gimple_assign_cast_p (SSA_NAME_DEF_STMT (arg))
+ && (rhs1
+ = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (arg)))
+ && useless_type_conversion_p (TREE_TYPE (origin),
+ TREE_TYPE (rhs1)))
+ arg = rhs1;
+ else
+ arg = fold_convert_loc (gimple_location (stmt),
+ TREE_TYPE (origin), arg);
}
if (debug_args == NULL)
debug_args = decl_debug_args_insert (callee_decl);
+ unsigned int ix;
+ tree ddecl = NULL_TREE;
for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl); ix += 2)
if (ddecl == origin)
{
vec_safe_push (*debug_args, origin);
vec_safe_push (*debug_args, ddecl);
}
- def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg), stmt);
+ gimple *def_temp = gimple_build_debug_bind (ddecl,
+ unshare_expr (arg), stmt);
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
}
}
print_gimple_stmt (dump_file, gsi_stmt (gsi), 0);
}
- new_stmt = gimple_build_call_vec (callee_decl, vargs);
- vargs.release ();
- if (gimple_call_lhs (stmt))
- gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+ gcall *new_stmt = gimple_build_call_vec (callee_decl, vargs);
+
+ if (tree lhs = gimple_call_lhs (stmt))
+ {
+ if (!m_skip_return)
+ gimple_call_set_lhs (new_stmt, lhs);
+ else if (TREE_CODE (lhs) == SSA_NAME)
+ {
+ /* LHS should now be a default-def SSA. Unfortunately default-def
+ SSA_NAMEs need a backing variable (or at least some code examining
+ SSAs assumes it is non-NULL). So we either have to re-use the
+ decl we have at hand or introduce a new one. */
+ tree repl = create_tmp_var (TREE_TYPE (lhs), "removed_return");
+ repl = get_or_create_ssa_default_def (cfun, repl);
+ SSA_NAME_IS_DEFAULT_DEF (repl) = true;
+ imm_use_iterator ui;
+ use_operand_p use_p;
+ gimple *using_stmt;
+ FOR_EACH_IMM_USE_STMT (using_stmt, ui, lhs)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
+ {
+ SET_USE (use_p, repl);
+ }
+ update_stmt (using_stmt);
+ }
+ }
+ }
gimple_set_block (new_stmt, gimple_block (stmt));
if (gimple_has_location (stmt))
fprintf (dump_file, "\n");
}
gsi_replace (&gsi, new_stmt, true);
- if (cs)
- cs->set_call_stmt (new_stmt);
- do
- {
- current_node->record_stmt_references (gsi_stmt (gsi));
- gsi_prev (&gsi);
- }
- while (gsi_stmt (gsi) != gsi_stmt (prev_gsi));
+ if (update_references)
+ do
+ {
+ current_node->record_stmt_references (gsi_stmt (gsi));
+ gsi_prev (&gsi);
+ }
+ while (gsi_stmt (gsi) != gsi_stmt (prev_gsi));
+ return new_stmt;
}
-/* Return true iff BASE_INDEX is in ADJUSTMENTS more than once. */
+/* Dump information contained in the object in textual form to F. */
-static bool
-index_in_adjustments_multiple_times_p (int base_index,
- ipa_parm_adjustment_vec adjustments)
+void
+ipa_param_adjustments::dump (FILE *f)
{
- int i, len = adjustments.length ();
- bool one = false;
+ fprintf (f, " m_always_copy_start: %i\n", m_always_copy_start);
+ ipa_dump_adjusted_parameters (f, m_adj_params);
+ if (m_skip_return)
+ fprintf (f, " Will SKIP return.\n");
+}
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
+/* Dump information contained in the object in textual form to stderr. */
- if (adj->base_index == base_index)
- {
- if (one)
- return true;
- else
- one = true;
- }
- }
- return false;
+void
+ipa_param_adjustments::debug ()
+{
+ dump (stderr);
}
-/* Return adjustments that should have the same effect on function parameters
- and call arguments as if they were first changed according to adjustments in
- INNER and then by adjustments in OUTER. */
+/* Register that REPLACEMENT should replace parameter described in APM and
+ optionally as DUMMY to mark transitive splits across calls. */
-ipa_parm_adjustment_vec
-ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
- ipa_parm_adjustment_vec outer)
+void
+ipa_param_body_adjustments::register_replacement (ipa_adjusted_param *apm,
+ tree replacement,
+ tree dummy)
{
- int i, outlen = outer.length ();
- int inlen = inner.length ();
- int removals = 0;
- ipa_parm_adjustment_vec adjustments, tmp;
+ gcc_checking_assert (apm->op == IPA_PARAM_OP_SPLIT
+ || apm->op == IPA_PARAM_OP_NEW);
+ gcc_checking_assert (!apm->prev_clone_adjustment);
+ ipa_param_body_replacement psr;
+ psr.base = m_oparms[apm->prev_clone_index];
+ psr.repl = replacement;
+ psr.dummy = dummy;
+ psr.unit_offset = apm->unit_offset;
+ m_replacements.safe_push (psr);
+}
+
+/* Copy or not, as appropriate given ID, a pre-existing PARM_DECL T so that
+ it can be included in the parameters of the modified function. */
- tmp.create (inlen);
- for (i = 0; i < inlen; i++)
+static tree
+carry_over_param (tree t, struct copy_body_data *id)
+{
+ tree new_parm;
+ if (id)
{
- struct ipa_parm_adjustment *n;
- n = &inner[i];
+ new_parm = remap_decl (t, id);
+ if (TREE_CODE (new_parm) != PARM_DECL)
+ new_parm = id->copy_decl (t, id);
+ }
+ else
+ new_parm = t;
+ return new_parm;
+}
- if (n->op == IPA_PARM_OP_REMOVE)
- removals++;
- else
+/* Common initialization performed by all ipa_param_body_adjustments
+ constructors. OLD_FNDECL is the declaration we take original arguments
+ from, (it may be the same as M_FNDECL). VARS, if non-NULL, is a pointer to
+ a chained list of new local variables. TREE_MAP is the IPA-CP produced
+ mapping of trees to constants.
+
+ The function is rather long but it really only initializes all data members
+ of the class. It creates new param DECLs and finds their new types. */
+
+void
+ipa_param_body_adjustments::common_initialization (tree old_fndecl,
+ tree *vars,
+ vec<ipa_replace_map *,
+ va_gc> *tree_map)
+{
+ push_function_arg_decls (&m_oparms, old_fndecl);
+ auto_vec<tree,16> otypes;
+ if (TYPE_ARG_TYPES (TREE_TYPE (old_fndecl)) != NULL_TREE)
+ push_function_arg_types (&otypes, TREE_TYPE (old_fndecl));
+ else
+ {
+ auto_vec<tree,16> oparms;
+ push_function_arg_decls (&oparms, old_fndecl);
+ unsigned ocount = oparms.length ();
+ otypes.reserve_exact (ocount);
+ for (unsigned i = 0; i < ocount; i++)
+ otypes.quick_push (TREE_TYPE (oparms[i]));
+ }
+ fill_vector_of_new_param_types (&m_new_types, &otypes, m_adj_params, true);
+
+ auto_vec<bool, 16> kept;
+ kept.reserve_exact (m_oparms.length ());
+ kept.quick_grow_cleared (m_oparms.length ());
+ auto_vec<tree, 16> isra_dummy_decls;
+ isra_dummy_decls.reserve_exact (m_oparms.length ());
+ isra_dummy_decls.quick_grow_cleared (m_oparms.length ());
+
+ unsigned adj_len = vec_safe_length (m_adj_params);
+ m_method2func = ((TREE_CODE (TREE_TYPE (m_fndecl)) == METHOD_TYPE)
+ && (adj_len == 0
+ || (*m_adj_params)[0].op != IPA_PARAM_OP_COPY
+ || (*m_adj_params)[0].base_index != 0));
+
+ /* The main job of this function is to go over the vector of adjusted
+ parameters and create declarations or find corresponding old ones and push
+ them to m_new_decls. For IPA-SRA replacements it also creates
+ corresponding m_id->dst_node->clone.performed_splits entries. */
+
+ m_new_decls.reserve_exact (adj_len);
+ for (unsigned i = 0; i < adj_len ; i++)
+ {
+ ipa_adjusted_param *apm = &(*m_adj_params)[i];
+ unsigned prev_index = apm->prev_clone_index;
+ tree new_parm;
+ if (apm->op == IPA_PARAM_OP_COPY
+ || apm->prev_clone_adjustment)
{
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (n->op != IPA_PARM_OP_NEW);
- tmp.quick_push (*n);
+ kept[prev_index] = true;
+ new_parm = carry_over_param (m_oparms[prev_index], m_id);
+ m_new_decls.quick_push (new_parm);
}
+ else if (apm->op == IPA_PARAM_OP_NEW
+ || apm->op == IPA_PARAM_OP_SPLIT)
+ {
+ tree new_type = m_new_types[i];
+ gcc_checking_assert (new_type);
+ new_parm = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL_TREE,
+ new_type);
+ const char *prefix = ipa_param_prefixes[apm->param_prefix_index];
+ DECL_NAME (new_parm) = create_tmp_var_name (prefix);
+ DECL_ARTIFICIAL (new_parm) = 1;
+ DECL_ARG_TYPE (new_parm) = new_type;
+ DECL_CONTEXT (new_parm) = m_fndecl;
+ TREE_USED (new_parm) = 1;
+ DECL_IGNORED_P (new_parm) = 1;
+ /* We assume all newly created arguments are not addressable. */
+ if (TREE_CODE (new_type) == COMPLEX_TYPE
+ || TREE_CODE (new_type) == VECTOR_TYPE)
+ DECL_GIMPLE_REG_P (new_parm) = 1;
+ layout_decl (new_parm, 0);
+ m_new_decls.quick_push (new_parm);
+
+ if (apm->op == IPA_PARAM_OP_SPLIT)
+ {
+ m_split_modifications_p = true;
+
+ if (m_id)
+ {
+ tree dummy_decl;
+ if (!isra_dummy_decls[prev_index])
+ {
+ dummy_decl = copy_decl_to_var (m_oparms[prev_index],
+ m_id);
+ /* Any attempt to remap this dummy in this particular
+ instance of clone materialization should yield
+ itself. */
+ insert_decl_map (m_id, dummy_decl, dummy_decl);
+
+ DECL_CHAIN (dummy_decl) = *vars;
+ *vars = dummy_decl;
+ isra_dummy_decls[prev_index] = dummy_decl;
+ }
+ else
+ dummy_decl = isra_dummy_decls[prev_index];
+
+ register_replacement (apm, new_parm, dummy_decl);
+ ipa_param_performed_split ps;
+ ps.dummy_decl = dummy_decl;
+ ps.unit_offset = apm->unit_offset;
+ vec_safe_push (m_id->dst_node->clone.performed_splits, ps);
+ }
+ else
+ register_replacement (apm, new_parm);
+ }
+ }
+ else
+ gcc_unreachable ();
}
- adjustments.create (outlen + removals);
- for (i = 0; i < outlen; i++)
+
+ /* As part of body modifications, we will also have to replace remaining
+ uses of removed PARM_DECLs (which do not however use the
+ initial value) with their VAR_DECL copies.
+
+ We do this differently with and without m_id. With m_id, we rely on its
+ mapping and create a replacement straight away. Without it, we have our
+ own mechanism for which we have to populate m_removed_decls vector. Just
+ don't mix them, that is why you should not call
+ replace_removed_params_ssa_names or perform_cfun_body_modifications when
+ you construct with ID not equal to NULL. */
+
+ unsigned op_len = m_oparms.length ();
+ for (unsigned i = 0; i < op_len; i++)
+ if (!kept[i])
+ {
+ if (m_id)
+ {
+ if (!m_id->decl_map->get (m_oparms[i]))
+ {
+ /* TODO: Perhaps at least aggregate-type params could re-use
+ their isra_dummy_decl here? */
+ tree var = copy_decl_to_var (m_oparms[i], m_id);
+ insert_decl_map (m_id, m_oparms[i], var);
+ /* Declare this new variable. */
+ DECL_CHAIN (var) = *vars;
+ *vars = var;
+ }
+ }
+ else
+ {
+ m_removed_decls.safe_push (m_oparms[i]);
+ m_removed_map.put (m_oparms[i], m_removed_decls.length () - 1);
+ }
+ }
+
+ if (!MAY_HAVE_DEBUG_STMTS)
+ return;
+
+ /* Finally, when generating debug info, we fill vector m_reset_debug_decls
+ with removed parameters declarations. We do this in order to re-map their
+ debug bind statements and create debug decls for them. */
+
+ if (tree_map)
{
- struct ipa_parm_adjustment r;
- struct ipa_parm_adjustment *out = &outer[i];
- struct ipa_parm_adjustment *in = &tmp[out->base_index];
+ /* Do not output debuginfo for parameter declarations as if they vanished
+ when they were in fact replaced by a constant. */
+ auto_vec <int, 16> index_mapping;
+ bool need_remap = false;
- memset (&r, 0, sizeof (r));
- gcc_assert (in->op != IPA_PARM_OP_REMOVE);
- if (out->op == IPA_PARM_OP_REMOVE)
+ if (m_id && m_id->src_node->clone.param_adjustments)
{
- if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
- {
- r.op = IPA_PARM_OP_REMOVE;
- adjustments.quick_push (r);
- }
- continue;
+ ipa_param_adjustments *prev_adjustments
+ = m_id->src_node->clone.param_adjustments;
+ prev_adjustments->get_updated_indices (&index_mapping);
+ need_remap = true;
}
- else
+
+ for (unsigned i = 0; i < tree_map->length (); i++)
{
- /* FIXME: Handling of new arguments are not implemented yet. */
- gcc_assert (out->op != IPA_PARM_OP_NEW);
+ int parm_num = (*tree_map)[i]->parm_num;
+ gcc_assert (parm_num >= 0);
+ if (need_remap)
+ parm_num = index_mapping[parm_num];
+ kept[parm_num] = true;
}
+ }
+
+ for (unsigned i = 0; i < op_len; i++)
+ if (!kept[i] && is_gimple_reg (m_oparms[i]))
+ m_reset_debug_decls.safe_push (m_oparms[i]);
+}
- r.base_index = in->base_index;
- r.type = out->type;
+/* Constructor of ipa_param_body_adjustments from a simple list of
+ modifications to parameters listed in ADJ_PARAMS which will prepare ground
+ for modification of parameters of fndecl. Return value of the function will
+ not be removed and the object will assume it does not run as a part of
+ tree-function_versioning. */
+
+ipa_param_body_adjustments
+::ipa_param_body_adjustments (vec<ipa_adjusted_param, va_gc> *adj_params,
+ tree fndecl)
+ : m_adj_params (adj_params), m_adjustments (NULL), m_reset_debug_decls (),
+ m_split_modifications_p (false), m_fndecl (fndecl), m_id (NULL),
+ m_oparms (), m_new_decls (), m_new_types (), m_replacements (),
+ m_removed_decls (), m_removed_map (), m_method2func (false)
+{
+ common_initialization (fndecl, NULL, NULL);
+}
- /* FIXME: Create nonlocal value too. */
+/* Constructor of ipa_param_body_adjustments from ipa_param_adjustments in
+ ADJUSTMENTS which will prepare ground for modification of parameters of
+ fndecl. The object will assume it does not run as a part of
+ tree-function_versioning. */
+
+ipa_param_body_adjustments
+::ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl)
+ : m_adj_params (adjustments->m_adj_params), m_adjustments (adjustments),
+ m_reset_debug_decls (), m_split_modifications_p (false), m_fndecl (fndecl),
+ m_id (NULL), m_oparms (), m_new_decls (), m_new_types (),
+ m_replacements (), m_removed_decls (), m_removed_map (),
+ m_method2func (false)
+{
+ common_initialization (fndecl, NULL, NULL);
+}
- if (in->op == IPA_PARM_OP_COPY && out->op == IPA_PARM_OP_COPY)
- r.op = IPA_PARM_OP_COPY;
- else if (in->op == IPA_PARM_OP_COPY)
- r.offset = out->offset;
- else if (out->op == IPA_PARM_OP_COPY)
- r.offset = in->offset;
- else
- r.offset = in->offset + out->offset;
- adjustments.quick_push (r);
+/* Constructor of ipa_param_body_adjustments which sets it up as a part of
+ running tree_function_versioning. Planned modifications to the function are
+ in ADJUSTMENTS. FNDECL designates the new function clone which is being
+ modified. OLD_FNDECL is the function of which FNDECL is a clone (and which
+ at the time of invocation still share DECL_ARGUMENTS). ID is the
+ copy_body_data structure driving the whole body copying process. VARS is a
+ pointer to the head of the list of new local variables, TREE_MAP is the map
+ that drives tree substitution in the cloning process. */
+
+ipa_param_body_adjustments
+::ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl, tree old_fndecl,
+ copy_body_data *id, tree *vars,
+ vec<ipa_replace_map *, va_gc> *tree_map)
+ : m_adj_params (adjustments->m_adj_params), m_adjustments (adjustments),
+ m_reset_debug_decls (), m_split_modifications_p (false), m_fndecl (fndecl),
+ m_id (id), m_oparms (), m_new_decls (), m_new_types (), m_replacements (),
+ m_removed_decls (), m_removed_map (), m_method2func (false)
+{
+ common_initialization (old_fndecl, vars, tree_map);
+}
+
+/* Chain new param decls up and return them. */
+
+tree
+ipa_param_body_adjustments::get_new_param_chain ()
+{
+ tree result;
+ tree *link = &result;
+
+ unsigned len = vec_safe_length (m_adj_params);
+ for (unsigned i = 0; i < len; i++)
+ {
+ tree new_decl = m_new_decls[i];
+ *link = new_decl;
+ link = &DECL_CHAIN (new_decl);
}
+ *link = NULL_TREE;
+ return result;
+}
+
+/* Modify the function parameters FNDECL and its type according to the plan in
+ ADJUSTMENTS. This function needs to be called when the decl has not already
+ been processed with ipa_param_adjustments::adjust_decl, otherwise just
+ setting DECL_ARGUMENTS to whatever get_new_param_chain will do is enough. */
+
+void
+ipa_param_body_adjustments::modify_formal_parameters ()
+{
+ tree orig_type = TREE_TYPE (m_fndecl);
+ DECL_ARGUMENTS (m_fndecl) = get_new_param_chain ();
+
+ /* When signature changes, we need to clear builtin info. */
+ if (fndecl_built_in_p (m_fndecl))
+ set_decl_built_in_function (m_fndecl, NOT_BUILT_IN, 0);
+
+ /* At this point, removing return value is only implemented when going
+ through tree_function_versioning, not when modifying function body
+ directly. */
+ gcc_assert (!m_adjustments || !m_adjustments->m_skip_return);
+ tree new_type = build_adjusted_function_type (orig_type, &m_new_types,
+ m_method2func, false);
+
+ TREE_TYPE (m_fndecl) = new_type;
+ DECL_VIRTUAL_P (m_fndecl) = 0;
+ DECL_LANG_SPECIFIC (m_fndecl) = NULL;
+ if (m_method2func)
+ DECL_VINDEX (m_fndecl) = NULL_TREE;
+}
- for (i = 0; i < inlen; i++)
+/* Given BASE and UNIT_OFFSET, find the corresponding record among replacement
+ structures. */
+
+ipa_param_body_replacement *
+ipa_param_body_adjustments::lookup_replacement_1 (tree base,
+ unsigned unit_offset)
+{
+ unsigned int len = m_replacements.length ();
+ for (unsigned i = 0; i < len; i++)
{
- struct ipa_parm_adjustment *n = &inner[i];
+ ipa_param_body_replacement *pbr = &m_replacements[i];
- if (n->op == IPA_PARM_OP_REMOVE)
- adjustments.quick_push (*n);
+ if (pbr->base == base
+ && (pbr->unit_offset == unit_offset))
+ return pbr;
}
+ return NULL;
+}
- tmp.release ();
- return adjustments;
+/* Given BASE and UNIT_OFFSET, find the corresponding replacement expression
+ and return it, assuming it is known it does not hold value by reference or
+ in reverse storage order. */
+
+tree
+ipa_param_body_adjustments::lookup_replacement (tree base, unsigned unit_offset)
+{
+ ipa_param_body_replacement *pbr = lookup_replacement_1 (base, unit_offset);
+ if (!pbr)
+ return NULL;
+ return pbr->repl;
}
/* If T is an SSA_NAME, return NULL if it is not a default def or
return t;
}
-/* Given an expression, return an adjustment entry specifying the
- transformation to be done on EXPR. If no suitable adjustment entry
- was found, returns NULL.
+/* Given an expression, return the structure describing how it should be
+ replaced if it accesses a part of a split parameter or NULL otherwise.
- If IGNORE_DEFAULT_DEF is set, consider SSA_NAMEs which are not a
- default def, otherwise bail on them.
+ Do not free the result, it will be deallocated when the object is destroyed.
- If CONVERT is non-NULL, this function will set *CONVERT if the
- expression provided is a component reference. ADJUSTMENTS is the
- adjustments vector. */
+ If IGNORE_DEFAULT_DEF is cleared, consider only SSA_NAMEs of PARM_DECLs
+ which are default definitions, if set, consider all SSA_NAMEs of
+ PARM_DECLs. */
-ipa_parm_adjustment *
-ipa_get_adjustment_candidate (tree **expr, bool *convert,
- ipa_parm_adjustment_vec adjustments,
- bool ignore_default_def)
+ipa_param_body_replacement *
+ipa_param_body_adjustments::get_expr_replacement (tree expr,
+ bool ignore_default_def)
{
- if (TREE_CODE (**expr) == BIT_FIELD_REF
- || TREE_CODE (**expr) == IMAGPART_EXPR
- || TREE_CODE (**expr) == REALPART_EXPR)
- {
- *expr = &TREE_OPERAND (**expr, 0);
- if (convert)
- *convert = true;
- }
+ tree base;
+ unsigned unit_offset;
- poly_int64 offset, size, max_size;
- bool reverse;
- tree base
- = get_ref_base_and_extent (**expr, &offset, &size, &max_size, &reverse);
- if (!base || !known_size_p (size) || !known_size_p (max_size))
+ if (!isra_get_ref_base_and_offset (expr, &base, &unit_offset))
return NULL;
- if (TREE_CODE (base) == MEM_REF)
- {
- offset += mem_ref_offset (base).force_shwi () * BITS_PER_UNIT;
- base = TREE_OPERAND (base, 0);
- }
-
base = get_ssa_base_param (base, ignore_default_def);
if (!base || TREE_CODE (base) != PARM_DECL)
return NULL;
+ return lookup_replacement_1 (base, unit_offset);
+}
- struct ipa_parm_adjustment *cand = NULL;
- unsigned int len = adjustments.length ();
- for (unsigned i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj = &adjustments[i];
+/* Given OLD_DECL, which is a PARM_DECL of a parameter that is being removed
+ (which includes it being split or replaced), return a new variable that
+ should be used for any SSA names that will remain in the function that
+ previously belonged to OLD_DECL. */
- if (adj->base == base
- && (known_eq (adj->offset, offset) || adj->op == IPA_PARM_OP_REMOVE))
- {
- cand = adj;
- break;
- }
+tree
+ipa_param_body_adjustments::get_replacement_ssa_base (tree old_decl)
+{
+ unsigned *idx = m_removed_map.get (old_decl);
+ if (!idx)
+ return NULL;
+
+ tree repl;
+ if (TREE_CODE (m_removed_decls[*idx]) == PARM_DECL)
+ {
+ gcc_assert (m_removed_decls[*idx] == old_decl);
+ repl = copy_var_decl (old_decl, DECL_NAME (old_decl),
+ TREE_TYPE (old_decl));
+ m_removed_decls[*idx] = repl;
}
+ else
+ repl = m_removed_decls[*idx];
+ return repl;
+}
+
+/* If OLD_NAME, which is being defined by statement STMT, is an SSA_NAME of a
+ parameter which is to be removed because its value is not used, create a new
+ SSA_NAME relating to a replacement VAR_DECL, replace all uses of the
+ original with it and return it. If there is no need to re-map, return NULL.
+ ADJUSTMENTS is a pointer to a vector of IPA-SRA adjustments. */
+
+tree
+ipa_param_body_adjustments::replace_removed_params_ssa_names (tree old_name,
+ gimple *stmt)
+{
+ gcc_assert (!m_id);
+ if (TREE_CODE (old_name) != SSA_NAME)
+ return NULL;
+
+ tree decl = SSA_NAME_VAR (old_name);
+ if (decl == NULL_TREE
+ || TREE_CODE (decl) != PARM_DECL)
+ return NULL;
- if (!cand || cand->op == IPA_PARM_OP_COPY || cand->op == IPA_PARM_OP_REMOVE)
+ tree repl = get_replacement_ssa_base (decl);
+ if (!repl)
return NULL;
- return cand;
+
+ tree new_name = make_ssa_name (repl, stmt);
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_name)
+ = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (old_name);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "replacing an SSA name of a removed param ");
+ print_generic_expr (dump_file, old_name);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, new_name);
+ fprintf (dump_file, "\n");
+ }
+
+ replace_uses_by (old_name, new_name);
+ return new_name;
}
-/* If the expression *EXPR should be replaced by a reduction of a parameter, do
- so. ADJUSTMENTS is a pointer to a vector of adjustments. CONVERT
- specifies whether the function should care about type incompatibility the
- current and new expressions. If it is false, the function will leave
- incompatibility issues to the caller. Return true iff the expression
- was modified. */
+/* If the expression *EXPR_P should be replaced, do so. CONVERT specifies
+ whether the function should care about type incompatibility of the current
+ and new expressions. If it is false, the function will leave
+ incompatibility issues to the caller - note that when the function
+ encounters a BIT_FIELD_REF, IMAGPART_EXPR or REALPART_EXPR, it will modify
+ their bases instead of the expressions themselves and then also performs any
+ necessary conversions. */
bool
-ipa_modify_expr (tree *expr, bool convert,
- ipa_parm_adjustment_vec adjustments)
+ipa_param_body_adjustments::modify_expression (tree *expr_p, bool convert)
{
- struct ipa_parm_adjustment *cand
- = ipa_get_adjustment_candidate (&expr, &convert, adjustments, false);
- if (!cand)
- return false;
+ tree expr = *expr_p;
- tree src;
- if (cand->by_ref)
+ if (TREE_CODE (expr) == BIT_FIELD_REF
+ || TREE_CODE (expr) == IMAGPART_EXPR
+ || TREE_CODE (expr) == REALPART_EXPR)
{
- src = build_simple_mem_ref (cand->new_decl);
- REF_REVERSE_STORAGE_ORDER (src) = cand->reverse;
+ expr_p = &TREE_OPERAND (expr, 0);
+ expr = *expr_p;
+ convert = true;
}
- else
- src = cand->new_decl;
+ ipa_param_body_replacement *pbr = get_expr_replacement (expr, false);
+ if (!pbr)
+ return false;
+
+ tree repl = pbr->repl;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "About to replace expr ");
- print_generic_expr (dump_file, *expr);
+ print_generic_expr (dump_file, expr);
fprintf (dump_file, " with ");
- print_generic_expr (dump_file, src);
+ print_generic_expr (dump_file, repl);
fprintf (dump_file, "\n");
}
- if (convert && !useless_type_conversion_p (TREE_TYPE (*expr), cand->type))
+ if (convert && !useless_type_conversion_p (TREE_TYPE (expr),
+ TREE_TYPE (repl)))
{
- tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*expr), src);
- *expr = vce;
+ tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (expr), repl);
+ *expr_p = vce;
}
else
- *expr = src;
+ *expr_p = repl;
return true;
}
-/* Dump the adjustments in the vector ADJUSTMENTS to dump_file in a human
- friendly way, assuming they are meant to be applied to FNDECL. */
+/* If the assignment statement STMT contains any expressions that need to be
+ replaced with a different one as noted by ADJUSTMENTS, do so. Handle any
+ potential type incompatibilities. If any conversion statements have to be
+ pre-pended to STMT, they will be added to EXTRA_STMTS. Return true iff the
+ statement was modified. */
-void
-ipa_dump_param_adjustments (FILE *file, ipa_parm_adjustment_vec adjustments,
- tree fndecl)
+bool
+ipa_param_body_adjustments::modify_assignment (gimple *stmt,
+ gimple_seq *extra_stmts)
{
- int i, len = adjustments.length ();
- bool first = true;
- vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
+ tree *lhs_p, *rhs_p;
+ bool any;
- fprintf (file, "IPA param adjustments: ");
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- adj = &adjustments[i];
+ if (!gimple_assign_single_p (stmt))
+ return false;
- if (!first)
- fprintf (file, " ");
+ rhs_p = gimple_assign_rhs1_ptr (stmt);
+ lhs_p = gimple_assign_lhs_ptr (stmt);
+
+ any = modify_expression (lhs_p, false);
+ any |= modify_expression (rhs_p, false);
+ if (any
+ && !useless_type_conversion_p (TREE_TYPE (*lhs_p), TREE_TYPE (*rhs_p)))
+ {
+ if (TREE_CODE (*rhs_p) == CONSTRUCTOR)
+ {
+ /* V_C_Es of constructors can cause trouble (PR 42714). */
+ if (is_gimple_reg_type (TREE_TYPE (*lhs_p)))
+ *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p));
+ else
+ *rhs_p = build_constructor (TREE_TYPE (*lhs_p),
+ NULL);
+ }
else
- first = false;
+ {
+ tree new_rhs = fold_build1_loc (gimple_location (stmt),
+ VIEW_CONVERT_EXPR, TREE_TYPE (*lhs_p),
+ *rhs_p);
+ tree tmp = force_gimple_operand (new_rhs, extra_stmts, true,
+ NULL_TREE);
+ gimple_assign_set_rhs1 (stmt, tmp);
+ }
+ return true;
+ }
+
+ return any;
+}
+
+/* Data passed to remap_split_decl_to_dummy through walk_tree. */
+
+struct simple_tree_swap_info
+{
+ /* Change FROM to TO. */
+ tree from, to;
+ /* And set DONE to true when doing so. */
+ bool done;
+};
+
+/* Simple remapper to remap a split parameter to the same expression based on a
+ special dummy decl so that edge redirections can detect transitive splitting
+ and finish them. */
- fprintf (file, "%i. base_index: %i - ", i, adj->base_index);
- print_generic_expr (file, parms[adj->base_index]);
- if (adj->base)
+static tree
+remap_split_decl_to_dummy (tree *tp, int *walk_subtrees, void *data)
+{
+ tree t = *tp;
+
+ if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
+ {
+ struct simple_tree_swap_info *swapinfo
+ = (struct simple_tree_swap_info *) data;
+ if (t == swapinfo->from
+ || (TREE_CODE (t) == SSA_NAME
+ && SSA_NAME_VAR (t) == swapinfo->from))
{
- fprintf (file, ", base: ");
- print_generic_expr (file, adj->base);
+ *tp = swapinfo->to;
+ swapinfo->done = true;
+ }
+ *walk_subtrees = 0;
+ }
+ else if (TYPE_P (t))
+ *walk_subtrees = 0;
+ else
+ *walk_subtrees = 1;
+ return NULL_TREE;
+}
+
+
+/* If the call statement pointed at by STMT_P contains any expressions that
+ need to be replaced with a different one as noted by ADJUSTMENTS, do so. If
+ the statement needs to be rebuilt, do so. Return true if any modifications
+ have been performed.
+ been performed.
+
+ If the method is invoked as a part of IPA clone materialization and if any
+ parameter split is transitive, i.e. it applies to the function that is being
+ modified and also to the callee of the statement, replace the parameter
+ passed to old callee with an equivalent expression based on a dummy decl
+ followed by PARM_DECLs representing the actual replacements. The actual
+ replacements will be then converted into SSA_NAMEs and then
+ ipa_param_adjustments::modify_call will find the appropriate ones and leave
+ only those in the call. */
+
+bool
+ipa_param_body_adjustments::modify_call_stmt (gcall **stmt_p)
+{
+ gcall *stmt = *stmt_p;
+ auto_vec <unsigned, 4> pass_through_args;
+ auto_vec <unsigned, 4> pass_through_pbr_indices;
+
+ if (m_split_modifications_p && m_id)
+ {
+ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ tree t = gimple_call_arg (stmt, i);
+ gcc_assert (TREE_CODE (t) != BIT_FIELD_REF
+ && TREE_CODE (t) != IMAGPART_EXPR
+ && TREE_CODE (t) != REALPART_EXPR);
+
+ tree base;
+ unsigned unit_offset;
+ if (!isra_get_ref_base_and_offset (t, &base, &unit_offset))
+ continue;
+
+ bool by_ref = false;
+ if (TREE_CODE (base) == SSA_NAME)
+ {
+ if (!SSA_NAME_IS_DEFAULT_DEF (base))
+ continue;
+ base = SSA_NAME_VAR (base);
+ gcc_checking_assert (base);
+ by_ref = true;
+ }
+ if (TREE_CODE (base) != PARM_DECL)
+ continue;
+
+ bool base_among_replacements = false;
+ unsigned j, repl_list_len = m_replacements.length ();
+ for (j = 0; j < repl_list_len; j++)
+ {
+ ipa_param_body_replacement *pbr = &m_replacements[j];
+ if (pbr->base == base)
+ {
+ base_among_replacements = true;
+ break;
+ }
+ }
+ if (!base_among_replacements)
+ continue;
+
+ /* We still have to distinguish between an end-use that we have to
+ transform now and a pass-through, which happens in the following
+ two cases. */
+
+ /* TODO: After we adjust ptr_parm_has_nonarg_uses to also consider
+ &MEM_REF[ssa_name + offset], we will also have to detect that case
+ here. */
+
+ if (TREE_CODE (t) == SSA_NAME
+ && SSA_NAME_IS_DEFAULT_DEF (t)
+ && SSA_NAME_VAR (t)
+ && TREE_CODE (SSA_NAME_VAR (t)) == PARM_DECL)
+ {
+ /* This must be a by_reference pass-through. */
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (t)));
+ pass_through_args.safe_push (i);
+ pass_through_pbr_indices.safe_push (j);
+ }
+ else if (!by_ref && AGGREGATE_TYPE_P (TREE_TYPE (t)))
+ {
+ /* Currently IPA-SRA guarantees the aggregate access type
+ exactly matches in this case. So if it does not match, it is
+ a pass-through argument that will be sorted out at edge
+ redirection time. */
+ ipa_param_body_replacement *pbr
+ = lookup_replacement_1 (base, unit_offset);
+
+ if (!pbr
+ || (TYPE_MAIN_VARIANT (TREE_TYPE (t))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (pbr->repl))))
+ {
+ pass_through_args.safe_push (i);
+ pass_through_pbr_indices.safe_push (j);
+ }
+ }
}
- if (adj->new_decl)
+ }
+
+ unsigned nargs = gimple_call_num_args (stmt);
+ if (!pass_through_args.is_empty ())
+ {
+ auto_vec<tree, 16> vargs;
+ unsigned pt_idx = 0;
+ for (unsigned i = 0; i < nargs; i++)
{
- fprintf (file, ", new_decl: ");
- print_generic_expr (file, adj->new_decl);
+ if (pt_idx < pass_through_args.length ()
+ && i == pass_through_args[pt_idx])
+ {
+ unsigned j = pass_through_pbr_indices[pt_idx];
+ pt_idx++;
+ tree base = m_replacements[j].base;
+
+ /* Map base will get mapped to the special transitive-isra marker
+ dummy decl. */
+ struct simple_tree_swap_info swapinfo;
+ swapinfo.from = base;
+ swapinfo.to = m_replacements[j].dummy;
+ swapinfo.done = false;
+ tree arg = gimple_call_arg (stmt, i);
+ walk_tree (&arg, remap_split_decl_to_dummy, &swapinfo, NULL);
+ gcc_assert (swapinfo.done);
+ vargs.safe_push (arg);
+ /* Now let's push all replacements pertaining to this parameter
+ so that all gimple register ones get correct SSA_NAMES. Edge
+ redirection will weed out the dummy argument as well as all
+ unused replacements later. */
+ unsigned int repl_list_len = m_replacements.length ();
+ for (; j < repl_list_len; j++)
+ {
+ if (m_replacements[j].base != base)
+ break;
+ vargs.safe_push (m_replacements[j].repl);
+ }
+ }
+ else
+ {
+ tree t = gimple_call_arg (stmt, i);
+ modify_expression (&t, true);
+ vargs.safe_push (t);
+ }
}
- if (adj->new_ssa_base)
+ gcall *new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
+ gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
+ gimple_call_copy_flags (new_stmt, stmt);
+ if (tree lhs = gimple_call_lhs (stmt))
{
- fprintf (file, ", new_ssa_base: ");
- print_generic_expr (file, adj->new_ssa_base);
+ modify_expression (&lhs, false);
+ gimple_call_set_lhs (new_stmt, lhs);
}
+ *stmt_p = new_stmt;
+ return true;
+ }
- if (adj->op == IPA_PARM_OP_COPY)
- fprintf (file, ", copy_param");
- else if (adj->op == IPA_PARM_OP_REMOVE)
- fprintf (file, ", remove_param");
- else
+ /* Otherwise, no need to rebuild the statement, let's just modify arguments
+ and the LHS if/as appropriate. */
+ bool modified = false;
+ for (unsigned i = 0; i < nargs; i++)
+ {
+ tree *t = gimple_call_arg_ptr (stmt, i);
+ modified |= modify_expression (t, true);
+ }
+
+ if (gimple_call_lhs (stmt))
+ {
+ tree *t = gimple_call_lhs_ptr (stmt);
+ modified |= modify_expression (t, false);
+ }
+
+ return modified;
+}
+
+/* If the statement STMT contains any expressions that need to be replaced
+ with a different one as noted by ADJUSTMENTS, do so. Handle any potential
+ type incompatibilities. If any conversion statements have to be pre-pended to
+ STMT, they will be added to EXTRA_STMTS. Return true iff the statement was
+ modified. */
+
+bool
+ipa_param_body_adjustments::modify_gimple_stmt (gimple **stmt,
+ gimple_seq *extra_stmts)
+{
+ bool modified = false;
+ tree *t;
+
+ switch (gimple_code (*stmt))
+ {
+ case GIMPLE_RETURN:
+ t = gimple_return_retval_ptr (as_a <greturn *> (*stmt));
+ if (m_adjustments && m_adjustments->m_skip_return)
+ *t = NULL_TREE;
+ else if (*t != NULL_TREE)
+ modified |= modify_expression (t, true);
+ break;
+
+ case GIMPLE_ASSIGN:
+ modified |= modify_assignment (*stmt, extra_stmts);
+ break;
+
+ case GIMPLE_CALL:
+ modified |= modify_call_stmt ((gcall **) stmt);
+ break;
+
+ case GIMPLE_ASM:
+ {
+ gasm *asm_stmt = as_a <gasm *> (*stmt);
+ for (unsigned i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
+ {
+ t = &TREE_VALUE (gimple_asm_input_op (asm_stmt, i));
+ modified |= modify_expression (t, true);
+ }
+ for (unsigned i = 0; i < gimple_asm_noutputs (asm_stmt); i++)
+ {
+ t = &TREE_VALUE (gimple_asm_output_op (asm_stmt, i));
+ modified |= modify_expression (t, false);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ return modified;
+}
+
+
+/* Traverse body of the current function and perform the requested adjustments
+ on its statements. Return true iff the CFG has been changed. */
+
+bool
+ipa_param_body_adjustments::modify_cfun_body ()
+{
+ bool cfg_changed = false;
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
+ tree new_lhs, old_lhs = gimple_phi_result (phi);
+ new_lhs = replace_removed_params_ssa_names (old_lhs, phi);
+ if (new_lhs)
+ {
+ gimple_phi_set_result (phi, new_lhs);
+ release_ssa_name (old_lhs);
+ }
+ }
+
+ gsi = gsi_start_bb (bb);
+ while (!gsi_end_p (gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ gimple *stmt_copy = stmt;
+ gimple_seq extra_stmts = NULL;
+ bool modified = modify_gimple_stmt (&stmt, &extra_stmts);
+ if (stmt != stmt_copy)
+ {
+ gcc_checking_assert (modified);
+ gsi_replace (&gsi, stmt, false);
+ }
+ if (!gimple_seq_empty_p (extra_stmts))
+ gsi_insert_seq_before (&gsi, extra_stmts, GSI_SAME_STMT);
+
+ def_operand_p defp;
+ ssa_op_iter iter;
+ FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_DEF)
+ {
+ tree old_def = DEF_FROM_PTR (defp);
+ if (tree new_def = replace_removed_params_ssa_names (old_def,
+ stmt))
+ {
+ SET_DEF (defp, new_def);
+ release_ssa_name (old_def);
+ modified = true;
+ }
+ }
+
+ if (modified)
+ {
+ update_stmt (stmt);
+ if (maybe_clean_eh_stmt (stmt)
+ && gimple_purge_dead_eh_edges (gimple_bb (stmt)))
+ cfg_changed = true;
+ }
+ gsi_next (&gsi);
+ }
+ }
+
+ return cfg_changed;
+}
+
+/* Call gimple_debug_bind_reset_value on all debug statements describing
+ gimple register parameters that are being removed or replaced. */
+
+void
+ipa_param_body_adjustments::reset_debug_stmts ()
+{
+ int i, len;
+ gimple_stmt_iterator *gsip = NULL, gsi;
+
+ if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
+ {
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ gsip = &gsi;
+ }
+ len = m_reset_debug_decls.length ();
+ for (i = 0; i < len; i++)
+ {
+ imm_use_iterator ui;
+ gimple *stmt;
+ gdebug *def_temp;
+ tree name, vexpr, copy = NULL_TREE;
+ use_operand_p use_p;
+ tree decl = m_reset_debug_decls[i];
+
+ gcc_checking_assert (is_gimple_reg (decl));
+ name = ssa_default_def (cfun, decl);
+ vexpr = NULL;
+ if (name)
+ FOR_EACH_IMM_USE_STMT (stmt, ui, name)
+ {
+ if (gimple_clobber_p (stmt))
+ {
+ gimple_stmt_iterator cgsi = gsi_for_stmt (stmt);
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&cgsi, true);
+ release_defs (stmt);
+ continue;
+ }
+ /* All other users must have been removed by function body
+ modification. */
+ gcc_assert (is_gimple_debug (stmt));
+ if (vexpr == NULL && gsip != NULL)
+ {
+ vexpr = make_node (DEBUG_EXPR_DECL);
+ def_temp = gimple_build_debug_source_bind (vexpr, decl, NULL);
+ DECL_ARTIFICIAL (vexpr) = 1;
+ TREE_TYPE (vexpr) = TREE_TYPE (name);
+ SET_DECL_MODE (vexpr, DECL_MODE (decl));
+ gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
+ }
+ if (vexpr)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
+ SET_USE (use_p, vexpr);
+ }
+ else
+ gimple_debug_bind_reset_value (stmt);
+ update_stmt (stmt);
+ }
+ /* Create a VAR_DECL for debug info purposes. */
+ if (!DECL_IGNORED_P (decl))
{
- fprintf (file, ", offset ");
- print_dec (adj->offset, file);
+ copy = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
+ VAR_DECL, DECL_NAME (decl),
+ TREE_TYPE (decl));
+ if (DECL_PT_UID_SET_P (decl))
+ SET_DECL_PT_UID (copy, DECL_PT_UID (decl));
+ TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl);
+ TREE_READONLY (copy) = TREE_READONLY (decl);
+ TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (decl);
+ DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (decl);
+ DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (decl);
+ DECL_IGNORED_P (copy) = DECL_IGNORED_P (decl);
+ DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (decl);
+ DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
+ SET_DECL_RTL (copy, 0);
+ TREE_USED (copy) = 1;
+ DECL_CONTEXT (copy) = current_function_decl;
+ add_local_decl (cfun, copy);
+ DECL_CHAIN (copy)
+ = BLOCK_VARS (DECL_INITIAL (current_function_decl));
+ BLOCK_VARS (DECL_INITIAL (current_function_decl)) = copy;
+ }
+ if (gsip != NULL && copy && target_for_debug_bind (decl))
+ {
+ gcc_assert (TREE_CODE (decl) == PARM_DECL);
+ if (vexpr)
+ def_temp = gimple_build_debug_bind (copy, vexpr, NULL);
+ else
+ def_temp = gimple_build_debug_source_bind (copy, decl,
+ NULL);
+ gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
}
- if (adj->by_ref)
- fprintf (file, ", by_ref");
- print_node_brief (file, ", type: ", adj->type, 0);
- fprintf (file, "\n");
}
- parms.release ();
+}
+
+/* Perform all necessary body changes to change signature, body and debug info
+ of fun according to adjustments passed at construction. Return true if CFG
+ was changed in any way. The main entry point for modification of standalone
+ functions that is not part of IPA clone materialization. */
+
+bool
+ipa_param_body_adjustments::perform_cfun_body_modifications ()
+{
+ bool cfg_changed;
+ modify_formal_parameters ();
+ cfg_changed = modify_cfun_body ();
+ reset_debug_stmts ();
+
+ return cfg_changed;
}
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
+<http://www.gnu.org/licenses/>.
+
+
+
+This file defines classes and other data structures that are used to manipulate
+the prototype of a function, especially to create, remove or split its formal
+parameters, but also to remove its return value, and also its call statements
+correspondingly.
+
+The most basic one is a vector of structures ipa_adjusted_param. It is simply
+a description of how the new parameters should look after the transformation
+and in what way they relate to the previous ones (if any). Such relation to an
+old parameter can be an outright copy or an IPA-SRA replacement. If an old
+parameter is not listed or otherwise mentioned, it is removed as unused or at
+least unnecessary. Note that this most basic structure does not work for
+modifying calls of functions with variable number of arguments.
+
+Class ipa_param_adjustments is only a little more than a thin encapsulation of
+a vector of ipa_adjusted_param. Along with this vector it contains an index
+of the first potential vararg argument and a boolean flag whether the return
+value should be removed or not. Moreover, the class contains method
+modify_call which can transform a call statement so that it correctly calls a
+modified function. These two data structures were designed to have a small
+memory footprint because they are allocated for each clone of a call graph node
+that has its prototype changed and live until the end of IPA clone
+materialization and call redirection phase.
+
+On the other hand, class ipa_param_body_adjustments can afford to allocate more
+data because its life span is much smaller, it is allocated and destroyed in
+the course of materialization of each single clone that needs it or only when a
+particular pass needs to change a function it is operating on. This class has
+various methods required to change function declaration and the body of the
+function according to instructions given either by class ipa_param_adjustments
+or only a vector of ipa_adjusted_params.
+
+When these classes are used in the context of call graph clone materialization
+and subsequent call statement redirection - which is the point at which we
+modify arguments in call statements - they need to cooperate with each other in
+order to handle what we refer to as transitive (IPA-SRA) splits. These are
+situations when a formal parameter of one function is split into several
+smaller ones and some of them are then passed on in a call to another function
+because the formal parameter of this callee has also been split.
+
+Consider a simple example:
+
+struct S {int a, b, c;};
+struct Z {int x; S s;};
+
+foo (S s)
+{
+ use (s.b);
+}
+
+bar (Z z)
+{
+ use (z.s.a);
+ foo (z.s);
+}
+
+baz ()
+{
+ bar (*global);
+}
+
+Both bar and foo would have their parameter split. Foo would receive one
+replacement representing s.b. Function bar would see its parameter split into
+one replacement representing z.s.a and another representing z.s.b which would
+be passed on to foo. It would be a so called transitive split IPA-SRA
+replacement, one which is passed in a call as an actual argument to another
+IPA-SRA replacement in another function.
+
+Note that the call chain in the example can be arbitrarily long and recursive and
+that any function in it can be cloned by another IPA pass and any number of
+adjacent functions in the call chain can be inlined into each other. Call
+redirection takes place only after bodies of the function have been modified by
+all of the above.
+
+Call redirection has to be able to find the right decl or SSA_NAME that
+corresponds to the transitive split in the caller. The SSA names are assigned
+right after clone materialization/modification and cannot be "added"
+afterwards. Moreover, if the caller has been inlined the SSA_NAMEs in question
+no longer belong to PARM_DECLs but to VAR_DECLs, indistinguishable from any
+others.
+
+Therefore, when clone materialization finds a call statement which it knows is
+a part of a transitive split, it will modify it into:
+
+ foo (DUMMY_Z_VAR.s, repl_for_a, repl_for_b, <rest of original arguments>);
+
+It will also store {DUMMY_Z_VAR, 32} and {DUMMY_Z_VAR, 64} representing offsets
+of z.s.a and z.s.b (assuming a 32-bit int) into bar's cgraph node
+clone->performed_splits vector (which is storing structures of type
+ipa_param_performed_split also defined in this header file).
+
+Call redirection will identify that expression DUMMY_Z_VAR.s is based on a
+variable stored in performed_splits vector and learn that the following
+arguments, already in SSA form, represent offsets 32 and 64 in a split original
+parameter. It subtracts offset of DUMMY_Z_VAR.s from 32 and 64 and arrives at
+offsets 0 and 32 within callee's original parameter. At this point it also
+knows from the call graph that only the bit with offset 32 is needed and so
+changes the call statement into final:
+
+foo (repl_for_b, <rest of original arguments>); */
#ifndef IPA_PARAM_MANIPULATION_H
#define IPA_PARAM_MANIPULATION_H
+/* Indices into ipa_param_prefixes to identify a human-readable prefix for newly
+ synthesized parameters. Keep in sync with the array. */
+enum ipa_param_name_prefix_indices
+ {
+ IPA_PARAM_PREFIX_SYNTH,
+ IPA_PARAM_PREFIX_ISRA,
+ IPA_PARAM_PREFIX_SIMD,
+ IPA_PARAM_PREFIX_MASK,
+ IPA_PARAM_PREFIX_COUNT
+};
+
+/* We do not support manipulating functions with more than
+ 1<<IPA_PARAM_MAX_INDEX_BITS parameters. */
+#define IPA_PARAM_MAX_INDEX_BITS 16
+
/* Operation to be performed for the parameter in ipa_parm_adjustment
below. */
-enum ipa_parm_op {
- IPA_PARM_OP_NONE,
-
- /* This describes a brand new parameter.
- The field `type' should be set to the new type, `arg_prefix'
- should be set to the string prefix for the new DECL_NAME, and
- `new_decl' will ultimately hold the newly created argument. */
- IPA_PARM_OP_NEW,
+enum ipa_parm_op
+{
+ /* Do not use or you will trigger an assert. */
+ IPA_PARAM_OP_UNDEFINED,
/* This new parameter is an unmodified parameter at index base_index. */
- IPA_PARM_OP_COPY,
+ IPA_PARAM_OP_COPY,
+
+ /* This describes a brand new parameter. If it somehow relates to any
+ original parameters, the user needs to manage the transition itself. */
+ IPA_PARAM_OP_NEW,
- /* This adjustment describes a parameter that is about to be removed
- completely. Most users will probably need to book keep those so that they
- don't leave behinfd any non default def ssa names belonging to them. */
- IPA_PARM_OP_REMOVE
+ /* Split parameter as indicated by fields base_index, offset and type. */
+ IPA_PARAM_OP_SPLIT
};
-/* Structure to describe transformations of formal parameters and actual
- arguments. Each instance describes one new parameter and they are meant to
- be stored in a vector. Additionally, most users will probably want to store
- adjustments about parameters that are being removed altogether so that SSA
- names belonging to them can be replaced by SSA names of an artificial
- variable. */
-struct ipa_parm_adjustment
-{
- /* The original PARM_DECL itself, helpful for processing of the body of the
- function itself. Intended for traversing function bodies.
- ipa_modify_formal_parameters, ipa_modify_call_arguments and
- ipa_combine_adjustments ignore this and use base_index.
- ipa_modify_formal_parameters actually sets this. */
- tree base;
+/* Structure that describes one parameter of a function after transformation.
+ Omitted parameters will be removed. */
- /* Type of the new parameter. However, if by_ref is true, the real type will
- be a pointer to this type. */
+struct GTY(()) ipa_adjusted_param
+{
+ /* Type of the new parameter. Required for all operations except
+ IPA_PARAM_OP_COPY when the original type will be preserved. */
tree type;
- /* Alias refrerence type to be used in MEM_REFs when adjusting caller
- arguments. */
+ /* Alias reference type to be used in MEM_REFs when adjusting caller
+ arguments. Required for IPA_PARAM_OP_SPLIT operation. */
tree alias_ptr_type;
- /* The new declaration when creating/replacing a parameter. Created
- by ipa_modify_formal_parameters, useful for functions modifying
- the body accordingly. For brand new arguments, this is the newly
- created argument. */
- tree new_decl;
+ /* Offset into the original parameter (for the cases when the new parameter
+ is a component of an original one). Required for IPA_PARAM_OP_SPLIT
+ operation. */
+ unsigned unit_offset;
- /* New declaration of a substitute variable that we may use to replace all
- non-default-def ssa names when a parm decl is going away. */
- tree new_ssa_base;
+ /* Zero based index of the original parameter this one is based on. Required
+ for IPA_PARAM_OP_COPY and IPA_PARAM_OP_SPLIT, users of IPA_PARAM_OP_NEW
+ only need to specify it if they use replacement lookup provided by
+ ipa_param_body_adjustments. */
+ unsigned base_index : IPA_PARAM_MAX_INDEX_BITS;
- /* This holds the prefix to be used for the new DECL_NAME. */
- const char *arg_prefix;
+ /* Zero based index of the parameter this one is based on in the previous
+ clone. If there is no previous clone, it must be equal to base_index. */
+ unsigned prev_clone_index : IPA_PARAM_MAX_INDEX_BITS;
- /* Offset into the original parameter (for the cases when the new parameter
- is a component of an original one). */
- poly_int64_pod offset;
+ /* Specify the operation, if any, to be performed on the parameter. */
+ enum ipa_parm_op op : 2;
- /* Zero based index of the original parameter this one is based on. */
- int base_index;
+ /* If set, this structure describes a parameter copied over from a previous
+ IPA clone, any transformations are thus not to be re-done. */
+ unsigned prev_clone_adjustment : 1;
- /* Whether this parameter is a new parameter, a copy of an old one,
- or one about to be removed. */
- enum ipa_parm_op op;
+ /* Index into ipa_param_prefixes specifying a prefix to be used with
+ DECL_NAMEs of newly synthesized parameters. */
+ unsigned param_prefix_index : 2;
/* Storage order of the original parameter (for the cases when the new
parameter is a component of an original one). */
unsigned reverse : 1;
- /* The parameter is to be passed by reference. */
- unsigned by_ref : 1;
+ /* A bit free for the user. */
+ unsigned user_flag : 1;
+};
+
+void ipa_dump_adjusted_parameters (FILE *f,
+ vec<ipa_adjusted_param, va_gc> *adj_params);
+
+/* Structure to remember the split performed on a node so that edge redirection
+ (i.e. splitting arguments of call statements) know how split formal
+ parameters of the caller are represented. */
+
+struct GTY(()) ipa_param_performed_split
+{
+ /* The dummy VAR_DECL that was created instead of the split parameter that
+ sits in the call in the meantime between clone materialization and call
+ redirection. All entries in a vector of performed splits that correspond
+ to the same dummy decl must be grouped together. */
+ tree dummy_decl;
+ /* Offset into the original parameter. */
+ unsigned unit_offset;
+};
+
+/* Class used to record planned modifications to parameters of a function and
+ also to perform necessary modifications at the caller side at the gimple
+ level. Used to describe all cgraph node clones that have their parameters
+ changed, therefore the class should only have a small memory footprint. */
+
+class GTY(()) ipa_param_adjustments
+{
+public:
+ /* Constructor from NEW_PARAMS showing how new parameters should look like
+ plus copying any pre-existing actual arguments starting from argument
+ with index ALWAYS_COPY_START (if non-negative, negative means do not copy
+ anything beyond what is described in NEW_PARAMS), and SKIP_RETURN, which
+ indicates that the function should return void after transformation. */
+
+ ipa_param_adjustments (vec<ipa_adjusted_param, va_gc> *new_params,
+ int always_copy_start, bool skip_return)
+ : m_adj_params (new_params), m_always_copy_start (always_copy_start),
+ m_skip_return (skip_return)
+ {}
+
+ /* Modify a call statement arguments (and possibly remove the return value)
+ as described in the data fields of this class. */
+ gcall *modify_call (gcall *stmt,
+ vec<ipa_param_performed_split, va_gc> *performed_splits,
+ tree callee_decl, bool update_references);
+ /* Return if the first parameter is left intact. */
+ bool first_param_intact_p ();
+ /* Build a function type corresponding to the modified call. */
+ tree build_new_function_type (tree old_type, bool type_is_original_p);
+ /* Build a declaration corresponding to the target of the modified call. */
+ tree adjust_decl (tree orig_decl);
+ /* Fill a vector marking which parameters are intact by the described
+ modifications. */
+ void get_surviving_params (vec<bool> *surviving_params);
+ /* Fill a vector with new indices of surviving original parameters. */
+ void get_updated_indices (vec<int> *new_indices);
+
+ void dump (FILE *f);
+ void debug ();
+
+ /* How the known part of arguments should look like. */
+ vec<ipa_adjusted_param, va_gc> *m_adj_params;
+
+ /* If non-negative, copy any arguments starting at this offset without any
+ modifications so that functions with variable number of arguments can be
+ modified. This number should be equal to the number of original formal
+ parameters. */
+ int m_always_copy_start;
+ /* If true, make the function not return any value. */
+ bool m_skip_return;
+
+private:
+ ipa_param_adjustments () {}
+
+ void init (vec<tree> *cur_params);
+ int get_max_base_index ();
+ bool method2func_p (tree orig_type);
+};
+
+/* Structure used to map expressions accessing split or replaced parameters to
+ new PARM_DECLs. */
+
+struct ipa_param_body_replacement
+{
+ /* The old decl of the original parameter. */
+ tree base;
+ /* The new decl it should be replaced with. */
+ tree repl;
+ /* When modifying clones during IPA clone materialization, this is a dummy
+ decl used to mark calls in which we need to apply transitive splitting,
+ these dummy decls are inserted as arguments to such calls and then
+ followed by all the replacements with offset info stored in
+ ipa_param_performed_split.
+
+ Users of ipa_param_body_adjustments that modify standalone functions
+ outside of IPA clone materialization can use this field for their internal
+ purposes. */
+ tree dummy;
+ /* The offset within BASE that REPL represents. */
+ unsigned unit_offset;
+};
+
+struct ipa_replace_map;
+
+/* Class used when actually performing adjustments to formal parameters of a
+ function to map accesses that need to be replaced to replacements. The
+ class attempts to work in two very different sets of circumstances: as a
+ part of tree-inline.c's tree_function_versioning machinery to clone functions
+ (when M_ID is not NULL) and in a standalone fashion, modifying an existing
+ function in place (when M_ID is NULL). While a lot of stuff is handled in a
+ unified way in both modes, there are many aspects of the process that
+ require distinct paths. */
+
+class ipa_param_body_adjustments
+{
+public:
+ /* Constructor to use from within tree-inline. */
+ ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl, tree old_fndecl,
+ struct copy_body_data *id, tree *vars,
+ vec<ipa_replace_map *, va_gc> *tree_map);
+ /* Constructor to use for modifying a function outside of tree-inline from an
+ instance of ipa_param_adjustments. */
+ ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl);
+ /* Constructor to use for modifying a function outside of tree-inline from a
+ simple vector of desired parameter modification. */
+ ipa_param_body_adjustments (vec<ipa_adjusted_param, va_gc> *adj_params,
+ tree fndecl);
+
+ /* The do-it-all function for modifying a function outside of
+ tree-inline. */
+ bool perform_cfun_body_modifications ();
+
+ /* Change the PARM_DECLs. */
+ void modify_formal_parameters ();
+ /* Register a replacement decl for the transformation done in APM. */
+ void register_replacement (ipa_adjusted_param *apm, tree replacement,
+ tree dummy = NULL_TREE);
+ /* Lookup a replacement for a given offset within a given parameter. */
+ tree lookup_replacement (tree base, unsigned unit_offset);
+ /* Lookup a replacement for an expression, if there is one. */
+ ipa_param_body_replacement *get_expr_replacement (tree expr,
+ bool ignore_default_def);
+ /* Lookup the new base for surviving names previously belonging to a
+ parameter. */
+ tree get_replacement_ssa_base (tree old_decl);
+ /* Modify a statement. */
+ bool modify_gimple_stmt (gimple **stmt, gimple_seq *extra_stmts);
+ /* Return the new chain of parameters. */
+ tree get_new_param_chain ();
+
+ /* Pointers to data structures defining how the function should be
+ modified. */
+ vec<ipa_adjusted_param, va_gc> *m_adj_params;
+ ipa_param_adjustments *m_adjustments;
+
+ /* Vector of old parameter declarations that must have their debug bind
+ statements re-mapped and debug decls created. */
+
+ auto_vec<tree, 16> m_reset_debug_decls;
+
+ /* Set to true if there are any IPA_PARAM_OP_SPLIT adjustments among stored
+ adjustments. */
+ bool m_split_modifications_p;
+private:
+ void common_initialization (tree old_fndecl, tree *vars,
+ vec<ipa_replace_map *, va_gc> *tree_map);
+ unsigned get_base_index (ipa_adjusted_param *apm);
+ ipa_param_body_replacement *lookup_replacement_1 (tree base,
+ unsigned unit_offset);
+ tree replace_removed_params_ssa_names (tree old_name, gimple *stmt);
+ bool modify_expression (tree *expr_p, bool convert);
+ bool modify_assignment (gimple *stmt, gimple_seq *extra_stmts);
+ bool modify_call_stmt (gcall **stmt_p);
+ bool modify_cfun_body ();
+ void reset_debug_stmts ();
+
+ /* Declaration of the function that is being transformed. */
+
+ tree m_fndecl;
+
+ /* If non-NULL, the tree-inline master data structure guiding materialization
+ of the current clone. */
+ struct copy_body_data *m_id;
+
+ /* Vector of old parameter declarations (before changing them). */
+
+ auto_vec<tree, 16> m_oparms;
+
+ /* Vector of parameter declarations the function will have after
+ transformation. */
+
+ auto_vec<tree, 16> m_new_decls;
+
+ /* If the function type has non-NULL TYPE_ARG_TYPES, this is the vector of
+ these types after transformation, otherwise an empty one. */
+
+ auto_vec<tree, 16> m_new_types;
+
+ /* Vector of structures telling how to replace old parameters in the
+ function body. TODO: Even though there will usually be only a few, should
+ we use a hash? */
+
+ auto_vec<ipa_param_body_replacement, 16> m_replacements;
+
+ /* Vector for remapping SSA_BASES from old parameter declarations that are
+ being removed as a part of the transformation. Before a new VAR_DECL is
+ created, it holds the old PARM_DECL, once the variable is built it is
+ stored here. */
+
+ auto_vec<tree> m_removed_decls;
+
+ /* Hash to quickly lookup the item in m_removed_decls given the old decl. */
+
+ hash_map<tree, unsigned> m_removed_map;
+
+ /* True iff the transformed function is a class method that is about to lose
+ its this pointer and must be converted to a normal function. */
+
+ bool m_method2func;
};
-typedef vec<ipa_parm_adjustment> ipa_parm_adjustment_vec;
-
-vec<tree> ipa_get_vector_of_formal_parms (tree fndecl);
-vec<tree> ipa_get_vector_of_formal_parm_types (tree fntype);
-void ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec);
-void ipa_modify_call_arguments (struct cgraph_edge *, gcall *,
- ipa_parm_adjustment_vec);
-ipa_parm_adjustment_vec ipa_combine_adjustments (ipa_parm_adjustment_vec,
- ipa_parm_adjustment_vec);
-void ipa_dump_param_adjustments (FILE *, ipa_parm_adjustment_vec, tree);
-
-bool ipa_modify_expr (tree *, bool, ipa_parm_adjustment_vec);
-ipa_parm_adjustment *ipa_get_adjustment_candidate (tree **, bool *,
- ipa_parm_adjustment_vec,
- bool);
+void push_function_arg_decls (vec<tree> *args, tree fndecl);
+void push_function_arg_types (vec<tree> *types, tree fntype);
#endif /* IPA_PARAM_MANIPULATION_H */
struct ipa_agg_replacement_value *aggval)
{
struct ipa_agg_replacement_value *v;
- int i, c = 0, d = 0, *adj;
- if (!node->clone.combined_args_to_skip)
+ if (!node->clone.param_adjustments)
return;
+ auto_vec<int, 16> new_indices;
+ node->clone.param_adjustments->get_updated_indices (&new_indices);
for (v = aggval; v; v = v->next)
{
- gcc_assert (v->index >= 0);
- if (c < v->index)
- c = v->index;
- }
- c++;
-
- adj = XALLOCAVEC (int, c);
- for (i = 0; i < c; i++)
- if (bitmap_bit_p (node->clone.combined_args_to_skip, i))
- {
- adj[i] = -1;
- d++;
- }
- else
- adj[i] = i - d;
+ gcc_checking_assert (v->index >= 0);
- for (v = aggval; v; v = v->next)
- v->index = adj[v->index];
+ if ((unsigned) v->index < new_indices.length ())
+ v->index = new_indices[v->index];
+ else
+ /* This can happen if we know about a constant passed by reference by
+ an argument which is never actually used for anything, let alone
+ loading that constant. */
+ v->index = -1;
+ }
}
/* Dominator walker driving the ipcp modification phase. */
static void
ipcp_update_bits (struct cgraph_node *node)
{
- tree parm = DECL_ARGUMENTS (node->decl);
- tree next_parm = parm;
ipcp_transformation *ts = ipcp_get_transformation_summary (node);
if (!ts || vec_safe_length (ts->bits) == 0)
return;
-
vec<ipa_bits *, va_gc> &bits = *ts->bits;
unsigned count = bits.length ();
+ if (!count)
+ return;
- for (unsigned i = 0; i < count; ++i, parm = next_parm)
+ auto_vec<int, 16> new_indices;
+ bool need_remapping = false;
+ if (node->clone.param_adjustments)
{
- if (node->clone.combined_args_to_skip
- && bitmap_bit_p (node->clone.combined_args_to_skip, i))
- continue;
+ node->clone.param_adjustments->get_updated_indices (&new_indices);
+ need_remapping = true;
+ }
+ auto_vec <tree, 16> parm_decls;
+ push_function_arg_decls (&parm_decls, node->decl);
+ for (unsigned i = 0; i < count; ++i)
+ {
+ tree parm;
+ if (need_remapping)
+ {
+ if (i >= new_indices.length ())
+ continue;
+ int idx = new_indices[i];
+ if (idx < 0)
+ continue;
+ parm = parm_decls[idx];
+ }
+ else
+ parm = parm_decls[i];
gcc_checking_assert (parm);
- next_parm = DECL_CHAIN (parm);
+
if (!bits[i]
|| !(INTEGRAL_TYPE_P (TREE_TYPE (parm))
static void
ipcp_update_vr (struct cgraph_node *node)
{
- tree fndecl = node->decl;
- tree parm = DECL_ARGUMENTS (fndecl);
- tree next_parm = parm;
ipcp_transformation *ts = ipcp_get_transformation_summary (node);
if (!ts || vec_safe_length (ts->m_vr) == 0)
return;
const vec<ipa_vr, va_gc> &vr = *ts->m_vr;
unsigned count = vr.length ();
+ if (!count)
+ return;
- for (unsigned i = 0; i < count; ++i, parm = next_parm)
+ auto_vec<int, 16> new_indices;
+ bool need_remapping = false;
+ if (node->clone.param_adjustments)
{
- if (node->clone.combined_args_to_skip
- && bitmap_bit_p (node->clone.combined_args_to_skip, i))
- continue;
+ node->clone.param_adjustments->get_updated_indices (&new_indices);
+ need_remapping = true;
+ }
+ auto_vec <tree, 16> parm_decls;
+ push_function_arg_decls (&parm_decls, node->decl);
+
+ for (unsigned i = 0; i < count; ++i)
+ {
+ tree parm;
+ int remapped_idx;
+ if (need_remapping)
+ {
+ if (i >= new_indices.length ())
+ continue;
+ remapped_idx = new_indices[i];
+ if (remapped_idx < 0)
+ continue;
+ }
+ else
+ remapped_idx = i;
+
+ parm = parm_decls[remapped_idx];
+
gcc_checking_assert (parm);
- next_parm = DECL_CHAIN (parm);
tree ddef = ssa_default_def (DECL_STRUCT_FUNCTION (node->decl), parm);
if (!ddef || !is_gimple_reg (parm))
{
if (dump_file)
{
- fprintf (dump_file, "Setting value range of param %u ", i);
+ fprintf (dump_file, "Setting value range of param %u "
+ "(now %i) ", i, remapped_idx);
fprintf (dump_file, "%s[",
(vr[i].type == VR_ANTI_RANGE) ? "~" : "");
print_decs (vr[i].min, dump_file);
}
}
+ ipa_param_adjustments *adjustments;
+ bool skip_return = (!split_part_return_p
+ || !split_point->split_part_set_retval);
+ /* TODO: Perhaps get rid of args_to_skip entirely, after we make sure the
+ debug info generation and discrepancy avoiding works well too. */
+ if ((args_to_skip && !bitmap_empty_p (args_to_skip))
+ || skip_return)
+ {
+ vec<ipa_adjusted_param, va_gc> *new_params = NULL;
+ unsigned j;
+ for (parm = DECL_ARGUMENTS (current_function_decl), j = 0;
+ parm; parm = DECL_CHAIN (parm), j++)
+ if (!args_to_skip || !bitmap_bit_p (args_to_skip, j))
+ {
+ ipa_adjusted_param adj;
+ memset (&adj, 0, sizeof (adj));
+ adj.op = IPA_PARAM_OP_COPY;
+ adj.base_index = j;
+ adj.prev_clone_index = j;
+ vec_safe_push (new_params, adj);
+ }
+ adjustments = new ipa_param_adjustments (new_params, j, skip_return);
+ }
+ else
+ adjustments = NULL;
+
/* Now create the actual clone. */
cgraph_edge::rebuild_edges ();
node = cur_node->create_version_clone_with_body
- (vNULL, NULL, args_to_skip,
- !split_part_return_p || !split_point->split_part_set_retval,
+ (vNULL, NULL, adjustments,
split_point->split_bbs, split_point->entry_bb, "part");
-
+ delete adjustments;
node->split_part = true;
if (cur_node->same_comdat_group)
= gimple_build_debug_bind (ddecl, unshare_expr (arg), call);
gsi_insert_after (&gsi, def_temp, GSI_NEW_STMT);
}
+ BITMAP_FREE (args_to_skip);
}
/* We avoid address being taken on any variable used by split part,
--- /dev/null
+/* Interprocedural scalar replacement of aggregates
+ Copyright (C) 2008-2019 Free Software Foundation, Inc.
+
+ Contributed by Martin Jambor <mjambor@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* IPA-SRA is an interprocedural pass that removes unused function return
+ values (turning functions returning a value which is never used into void
+ functions), removes unused function parameters. It can also replace an
+ aggregate parameter by a set of other parameters representing part of the
+ original, turning those passed by reference into new ones which pass the
+ value directly.
+
+ The pass is a true IPA one, which means that it works in three stages in
+ order to be able to take advantage of LTO. First, summaries about functions
+ and each call are generated. Function summaries (often called call graph
+ node summaries) contain mainly information about which parameters are
+ potential transformation candidates and which bits of candidates are
+ accessed. We differentiate between accesses done as a part of a call
+ statement (which might be not necessary if the callee is also transformed)
+ and others (which are mandatory). Call summaries (often called call graph
+ edge summaries) contain information about which function formal parameters
+ feed into which actual call arguments so that if two parameters are only
+ used in a sum which is then passed to another function which then however
+ does not use this parameter, all three parameters of the two functions can
+ be eliminated. Edge summaries also have flags whether the return value is
+ used or if it is only returned in the caller too. In LTO mode these
+ summaries are then streamed to the object file in the compilation phase and
+ streamed back in in the WPA analysis stage.
+
+ The interprocedural analysis phase traverses the graph in topological order
+ in two sweeps, one in each direction. First, from callees to callers for
+ parameter removal and splitting. Each strongly-connected component is
+ processed iteratively until the situation in it stabilizes. The pass from
+ callers to callees is then carried out to remove unused return values in a
+ very similar fashion.
+
+ Because parameter manipulation has big implications for call redirection
+ which is done only after all call graph nodes materialize, the
+ transformation phase is not part of this pass but is carried out by the
+ clone materialization and edge redirection itself, see comments in
+ ipa-param-manipulation.h for more details. */
+
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "gimple.h"
+#include "predict.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "cgraph.h"
+#include "gimple-pretty-print.h"
+#include "alias.h"
+#include "tree-eh.h"
+#include "gimple-iterator.h"
+#include "gimple-walk.h"
+#include "tree-dfa.h"
+#include "tree-sra.h"
+#include "symbol-summary.h"
+#include "params.h"
+#include "dbgcnt.h"
+#include "tree-inline.h"
+#include "ipa-utils.h"
+#include "builtins.h"
+#include "cfganal.h"
+#include "tree-streamer.h"
+
+
+/* Bits used to track size of an aggregate in bytes interprocedurally. */
+#define ISRA_ARG_SIZE_LIMIT_BITS 16
+#define ISRA_ARG_SIZE_LIMIT (1 << ISRA_ARG_SIZE_LIMIT_BITS)
+/* How many parameters can feed into a call actual argument and still be
+ tracked. */
+#define IPA_SRA_MAX_PARAM_FLOW_LEN 7
+
+/* Structure describing accesses to a specific portion of an aggregate
+ parameter, as given by the offset and size. Any smaller accesses that occur
+ within a function that fall within another access form a tree. The pass
+ cannot analyze parameters with only partially overlapping accesses. */
+
+struct GTY(()) param_access
+{
+ /* Type that a potential replacement should have. This field only has
+ meaning in the summary building and transformation phases, when it is
+ reconstructed from the body. Must not be touched in IPA analysis
+ stage. */
+ tree type;
+
+ /* Alias reference type to be used in MEM_REFs when adjusting caller
+ arguments. */
+ tree alias_ptr_type;
+
+ /* Values returned by get_ref_base_and_extent but converted to bytes and
+ stored as unsigned ints. */
+ unsigned unit_offset;
+ unsigned unit_size : ISRA_ARG_SIZE_LIMIT_BITS;
+
+ /* Set once we are sure that the access will really end up in a potentially
+ transformed function - initially not set for portions of formal parameters
+ that are only used as actual function arguments passed to callees. */
+ unsigned certain : 1;
+ /* Set if the access has a reversed scalar storage order. */
+ unsigned reverse : 1;
+};
+
+/* This structure has the same purpose as the one above and additionally it
+ contains some fields that are only necessary in the summary generation
+ phase. */
+
+struct gensum_param_access
+{
+ /* Values returned by get_ref_base_and_extent. */
+ HOST_WIDE_INT offset;
+ HOST_WIDE_INT size;
+
+ /* if this access has any children (in terms of the definition above), this
+ points to the first one. */
+ struct gensum_param_access *first_child;
+ /* In intraprocedural SRA, pointer to the next sibling in the access tree as
+ described above. */
+ struct gensum_param_access *next_sibling;
+
+ /* Type that a potential replacement should have. This field only has
+ meaning in the summary building and transformation phases, when it is
+ reconstructed from the body. Must not be touched in IPA analysis
+ stage. */
+ tree type;
+ /* Alias reference type to be used in MEM_REFs when adjusting caller
+ arguments. */
+ tree alias_ptr_type;
+
+ /* Have there been writes to or reads from this exact location except for as
+ arguments to a function call that can be tracked. */
+ bool nonarg;
+
+ /* Set if the access has a reversed scalar storage order. */
+ bool reverse;
+};
+
+/* Summary describing a parameter in the IPA stages. */
+
+struct GTY(()) isra_param_desc
+{
+ /* List of access representatives to the parameters, sorted according to
+ their offset. */
+ vec <param_access *, va_gc> *accesses;
+
+ /* Unit size limit of total size of all replacements. */
+ unsigned param_size_limit : ISRA_ARG_SIZE_LIMIT_BITS;
+ /* Sum of unit sizes of all certain replacements. */
+ unsigned size_reached : ISRA_ARG_SIZE_LIMIT_BITS;
+
+ /* A parameter that is used only in call arguments and can be removed if all
+ concerned actual arguments are removed. */
+ unsigned locally_unused : 1;
+ /* An aggregate that is a candidate for breaking up or complete removal. */
+ unsigned split_candidate : 1;
+ /* Is this a parameter passing stuff by reference? */
+ unsigned by_ref : 1;
+};
+
+/* Structure used when generating summaries that describes a parameter. */
+
+struct gensum_param_desc
+{
+ /* Roots of param_accesses. */
+ gensum_param_access *accesses;
+ /* Number of accesses in the access tree rooted in field accesses. */
+ unsigned access_count;
+
+ /* If the below is non-zero, this is the number of uses as actual arguments. */
+ int call_uses;
+ /* Number of times this parameter has been directly passed to. */
+ unsigned ptr_pt_count;
+
+ /* Size limit of total size of all replacements. */
+ unsigned param_size_limit;
+ /* Sum of sizes of nonarg accesses. */
+ unsigned nonarg_acc_size;
+
+ /* A parameter that is used only in call arguments and can be removed if all
+ concerned actual arguments are removed. */
+ bool locally_unused;
+ /* An aggregate that is a candidate for breaking up or a pointer passing data
+ by reference that is a candidate for being converted to a set of
+ parameters passing those data by value. */
+ bool split_candidate;
+ /* Is this a parameter passing stuff by reference? */
+ bool by_ref;
+
+ /* The number of this parameter as they are ordered in function decl. */
+ int param_number;
+ /* For parameters passing data by reference, this is parameter index to
+ compute indices to bb_dereferences. */
+ int deref_index;
+};
+
+/* Properly deallocate accesses of DESC. TODO: Since this data structure is
+ not in GC memory, this is not necessary and we can consider removing the
+ function. */
+
+static void
+free_param_decl_accesses (isra_param_desc *desc)
+{
+ unsigned len = vec_safe_length (desc->accesses);
+ for (unsigned i = 0; i < len; ++i)
+ ggc_free ((*desc->accesses)[i]);
+ vec_free (desc->accesses);
+}
+
+/* Class used to convey information about functions from the
+ intra-procedural analysis stage to inter-procedural one. */
+
+class GTY((for_user)) isra_func_summary
+{
+public:
+ /* initialize the object. */
+
+ isra_func_summary ()
+ : m_parameters (NULL), m_candidate (false), m_returns_value (false),
+ m_return_ignored (false), m_queued (false)
+ {}
+
+ /* Destroy m_parameters. */
+
+ ~isra_func_summary ();
+
+ /* Mark the function as not a candidate for any IPA-SRA transformation.
+ Return true if it was a candidate until now. */
+
+ bool zap ();
+
+ /* Vector of parameter descriptors corresponding to the function being
+ analyzed. */
+ vec<isra_param_desc, va_gc> *m_parameters;
+
+ /* Whether the node is even a candidate for any IPA-SRA transformation at
+ all. */
+ unsigned m_candidate : 1;
+
+ /* Whether the original function returns any value. */
+ unsigned m_returns_value : 1;
+
+ /* Set to true if all call statements do not actually use the returned
+ value. */
+
+ unsigned m_return_ignored : 1;
+
+ /* Whether the node is already queued in IPA SRA stack during processing of
+ call graphs SCCs. */
+
+ unsigned m_queued : 1;
+};
+
+/* Clean up and deallocate what isra_func_summary points to. TODO: Since this
+ data structure is not in GC memory, this is not necessary and we can consider
+ removing the destructor. */
+
+isra_func_summary::~isra_func_summary ()
+{
+ unsigned len = vec_safe_length (m_parameters);
+ for (unsigned i = 0; i < len; ++i)
+ free_param_decl_accesses (&(*m_parameters)[i]);
+ vec_free (m_parameters);
+}
+
+
+/* Mark the function as not a candidate for any IPA-SRA transformation. Return
+ true if it was a candidate until now. */
+
+bool
+isra_func_summary::zap ()
+{
+ bool ret = m_candidate;
+ m_candidate = false;
+
+ unsigned len = vec_safe_length (m_parameters);
+ for (unsigned i = 0; i < len; ++i)
+ free_param_decl_accesses (&(*m_parameters)[i]);
+ vec_free (m_parameters);
+
+ return ret;
+}
+
+/* Structure to describe which formal parameters feed into a particular actual
+ arguments. */
+
+struct isra_param_flow
+{
+ /* Number of elements in array inputs that contain valid data. */
+ char length;
+ /* Indices of formal parameters that feed into the described actual argument.
+ If aggregate_pass_through or pointer_pass_through below are true, it must
+ contain exactly one element which is passed through from a formal
+ parameter of the given number. Otherwise, the array contains indices of
+ callee's formal parameters which are used to calculate value of this
+ actual argument. */
+ unsigned char inputs[IPA_SRA_MAX_PARAM_FLOW_LEN];
+
+ /* Offset within the formal parameter. */
+ unsigned unit_offset;
+ /* Size of the portion of the formal parameter that is being passed. */
+ unsigned unit_size : ISRA_ARG_SIZE_LIMIT_BITS;
+
+ /* True when the value of this actual copy is a portion of a formal
+ parameter. */
+ unsigned aggregate_pass_through : 1;
+ /* True when the value of this actual copy is a verbatim pass through of an
+ obtained pointer. */
+ unsigned pointer_pass_through : 1;
+ /* True when it is safe to copy access candidates here from the callee, which
+ would mean introducing dereferences into callers of the caller. */
+ unsigned safe_to_import_accesses : 1;
+};
+
+/* Structure used to convey information about calls from the intra-procedural
+ analysis stage to inter-procedural one. */
+
+class isra_call_summary
+{
+public:
+ isra_call_summary ()
+ : m_arg_flow (), m_return_ignored (false), m_return_returned (false),
+ m_bit_aligned_arg (false)
+ {}
+
+ void init_inputs (unsigned arg_count);
+ void dump (FILE *f);
+
+ /* Information about what formal parameters of the caller are used to compute
+ individual actual arguments of this call. */
+ auto_vec <isra_param_flow> m_arg_flow;
+
+ /* Set to true if the call statement does not have a LHS. */
+ unsigned m_return_ignored : 1;
+
+ /* Set to true if the LHS of call statement is only used to construct the
+ return value of the caller. */
+ unsigned m_return_returned : 1;
+
+ /* Set when any of the call arguments are not byte-aligned. */
+ unsigned m_bit_aligned_arg : 1;
+};
+
+/* Class to manage function summaries. */
+
+class GTY((user)) ipa_sra_function_summaries
+ : public function_summary <isra_func_summary *>
+{
+public:
+ ipa_sra_function_summaries (symbol_table *table, bool ggc):
+ function_summary<isra_func_summary *> (table, ggc) { }
+
+ virtual void duplicate (cgraph_node *, cgraph_node *,
+ isra_func_summary *old_sum,
+ isra_func_summary *new_sum);
+};
+
+/* Hook that is called by summary when a node is duplicated. */
+
+void
+ipa_sra_function_summaries::duplicate (cgraph_node *, cgraph_node *,
+ isra_func_summary *old_sum,
+ isra_func_summary *new_sum)
+{
+ /* TODO: Somehow stop copying when ISRA is doing the cloning, it is
+ useless. */
+ new_sum->m_candidate = old_sum->m_candidate;
+ new_sum->m_returns_value = old_sum->m_returns_value;
+ new_sum->m_return_ignored = old_sum->m_return_ignored;
+ gcc_assert (!old_sum->m_queued);
+ new_sum->m_queued = false;
+
+ unsigned param_count = vec_safe_length (old_sum->m_parameters);
+ if (!param_count)
+ return;
+ vec_safe_reserve_exact (new_sum->m_parameters, param_count);
+ new_sum->m_parameters->quick_grow_cleared (param_count);
+ for (unsigned i = 0; i < param_count; i++)
+ {
+ isra_param_desc *s = &(*old_sum->m_parameters)[i];
+ isra_param_desc *d = &(*new_sum->m_parameters)[i];
+
+ d->param_size_limit = s->param_size_limit;
+ d->size_reached = s->size_reached;
+ d->locally_unused = s->locally_unused;
+ d->split_candidate = s->split_candidate;
+ d->by_ref = s->by_ref;
+
+ unsigned acc_count = vec_safe_length (s->accesses);
+ vec_safe_reserve_exact (d->accesses, acc_count);
+ for (unsigned j = 0; j < acc_count; j++)
+ {
+ param_access *from = (*s->accesses)[j];
+ param_access *to = ggc_cleared_alloc<param_access> ();
+ to->type = from->type;
+ to->alias_ptr_type = from->alias_ptr_type;
+ to->unit_offset = from->unit_offset;
+ to->unit_size = from->unit_size;
+ to->certain = from->certain;
+ d->accesses->quick_push (to);
+ }
+ }
+}
+
+/* Pointer to the pass function summary holder. */
+
+static GTY(()) ipa_sra_function_summaries *func_sums;
+
+/* Class to manage call summaries. */
+
+class ipa_sra_call_summaries: public call_summary <isra_call_summary *>
+{
+public:
+ ipa_sra_call_summaries (symbol_table *table):
+ call_summary<isra_call_summary *> (table) { }
+
+ /* Duplicate info when an edge is cloned. */
+ virtual void duplicate (cgraph_edge *, cgraph_edge *,
+ isra_call_summary *old_sum,
+ isra_call_summary *new_sum);
+};
+
+static ipa_sra_call_summaries *call_sums;
+
+
+/* Initialize m_arg_flow of a particular instance of isra_call_summary.
+ ARG_COUNT is the number of actual arguments passed. */
+
+void
+isra_call_summary::init_inputs (unsigned arg_count)
+{
+ if (arg_count == 0)
+ {
+ gcc_checking_assert (m_arg_flow.length () == 0);
+ return;
+ }
+ if (m_arg_flow.length () == 0)
+ {
+ m_arg_flow.reserve_exact (arg_count);
+ m_arg_flow.quick_grow_cleared (arg_count);
+ }
+ else
+ gcc_checking_assert (arg_count == m_arg_flow.length ());
+}
+
+/* Dump all information in call summary to F. */
+
+void
+isra_call_summary::dump (FILE *f)
+{
+ if (m_return_ignored)
+ fprintf (f, " return value ignored\n");
+ if (m_return_returned)
+ fprintf (f, " return value used only to compute caller return value\n");
+ for (unsigned i = 0; i < m_arg_flow.length (); i++)
+ {
+ fprintf (f, " Parameter %u:\n", i);
+ isra_param_flow *ipf = &m_arg_flow[i];
+
+ if (ipf->length)
+ {
+ bool first = true;
+ fprintf (f, " Scalar param sources: ");
+ for (int j = 0; j < ipf->length; j++)
+ {
+ if (!first)
+ fprintf (f, ", ");
+ else
+ first = false;
+ fprintf (f, "%i", (int) ipf->inputs[j]);
+ }
+ fprintf (f, "\n");
+ }
+ if (ipf->aggregate_pass_through)
+ fprintf (f, " Aggregate pass through from the param given above, "
+ "unit offset: %u , unit size: %u\n",
+ ipf->unit_offset, ipf->unit_size);
+ if (ipf->pointer_pass_through)
+ fprintf (f, " Pointer pass through from the param given above, "
+ "safe_to_import_accesses: %u\n", ipf->safe_to_import_accesses);
+ }
+}
+
+/* Duplicate edge summary when an edge is cloned. */
+
+void
+ipa_sra_call_summaries::duplicate (cgraph_edge *, cgraph_edge *,
+ isra_call_summary *old_sum,
+ isra_call_summary *new_sum)
+{
+ unsigned arg_count = old_sum->m_arg_flow.length ();
+ new_sum->init_inputs (arg_count);
+ for (unsigned i = 0; i < arg_count; i++)
+ new_sum->m_arg_flow[i] = old_sum->m_arg_flow[i];
+
+ new_sum->m_return_ignored = old_sum->m_return_ignored;
+ new_sum->m_return_returned = old_sum->m_return_returned;
+ new_sum->m_bit_aligned_arg = old_sum->m_bit_aligned_arg;
+}
+
+
+/* With all GTY stuff done, we can move to anonymous namespace. */
+namespace {
+/* Quick mapping from a decl to its param descriptor. */
+
+hash_map<tree, gensum_param_desc *> *decl2desc;
+
+/* Countdown of allowed alias analysis steps during summary building. */
+
+int aa_walking_limit;
+
+/* This is a table in which for each basic block and parameter there is a
+ distance (offset + size) in that parameter which is dereferenced and
+ accessed in that BB. */
+HOST_WIDE_INT *bb_dereferences = NULL;
+/* How many by-reference parameters there are in the current function. */
+int by_ref_count;
+
+/* Bitmap of BBs that can cause the function to "stop" progressing by
+ returning, throwing externally, looping infinitely or calling a function
+ which might abort etc.. */
+bitmap final_bbs;
+
+/* Obstack to allocate various small structures required only when generating
+ summary for a function. */
+struct obstack gensum_obstack;
+
+/* Return false if the function is apparently unsuitable for IPA-SRA based on its
+ attributes, return true otherwise. NODE is the cgraph node of the current
+ function. */
+
+static bool
+ipa_sra_preliminary_function_checks (cgraph_node *node)
+{
+ if (!node->local.can_change_signature)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function cannot change signature.\n");
+ return false;
+ }
+
+ if (!tree_versionable_function_p (node->decl))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function is not versionable.\n");
+ return false;
+ }
+
+ if (!opt_for_fn (node->decl, optimize)
+ || !opt_for_fn (node->decl, flag_ipa_sra))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Not optimizing or IPA-SRA turned off for this "
+ "function.\n");
+ return false;
+ }
+
+ if (DECL_VIRTUAL_P (node->decl))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function is a virtual method.\n");
+ return false;
+ }
+
+ struct function *fun = DECL_STRUCT_FUNCTION (node->decl);
+ if (fun->stdarg)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function uses stdarg. \n");
+ return false;
+ }
+
+ if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function type has attributes. \n");
+ return false;
+ }
+
+ if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Always inline function will be inlined "
+ "anyway. \n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Print access tree starting at ACCESS to F. */
+
+static void
+dump_gensum_access (FILE *f, gensum_param_access *access, unsigned indent)
+{
+ fprintf (f, " ");
+ for (unsigned i = 0; i < indent; i++)
+ fprintf (f, " ");
+ fprintf (f, " * Access to offset: " HOST_WIDE_INT_PRINT_DEC,
+ access->offset);
+ fprintf (f, ", size: " HOST_WIDE_INT_PRINT_DEC, access->size);
+ fprintf (f, ", type: ");
+ print_generic_expr (f, access->type);
+ fprintf (f, ", alias_ptr_type: ");
+ print_generic_expr (f, access->alias_ptr_type);
+ fprintf (f, ", nonarg: %u, reverse: %u\n", access->nonarg, access->reverse);
+ for (gensum_param_access *ch = access->first_child;
+ ch;
+ ch = ch->next_sibling)
+ dump_gensum_access (f, ch, indent + 2);
+}
+
+
+/* Print access tree starting at ACCESS to F. */
+
+static void
+dump_isra_access (FILE *f, param_access *access)
+{
+ fprintf (f, " * Access to unit offset: %u", access->unit_offset);
+ fprintf (f, ", unit size: %u", access->unit_size);
+ fprintf (f, ", type: ");
+ print_generic_expr (f, access->type);
+ fprintf (f, ", alias_ptr_type: ");
+ print_generic_expr (f, access->alias_ptr_type);
+ if (access->certain)
+ fprintf (f, ", certain");
+ else
+ fprintf (f, ", not-certain");
+ if (access->reverse)
+ fprintf (f, ", reverse");
+ fprintf (f, "\n");
+}
+
+/* Dump access tree starting at ACCESS to stderr. */
+
+DEBUG_FUNCTION void
+debug_isra_access (param_access *access)
+{
+ dump_isra_access (stderr, access);
+}
+
+/* Dump DESC to F. */
+
+static void
+dump_gensum_param_descriptor (FILE *f, gensum_param_desc *desc)
+{
+ if (desc->locally_unused)
+ fprintf (f, " unused with %i call_uses\n", desc->call_uses);
+ if (!desc->split_candidate)
+ {
+ fprintf (f, " not a candidate\n");
+ return;
+ }
+ if (desc->by_ref)
+ fprintf (f, " by_ref with %u pass throughs\n", desc->ptr_pt_count);
+
+ for (gensum_param_access *acc = desc->accesses; acc; acc = acc->next_sibling)
+ dump_gensum_access (f, acc, 2);
+}
+
+/* Dump all parameter descriptors in IFS, assuming it describes FNDECl, to
+ F. */
+
+static void
+dump_gensum_param_descriptors (FILE *f, tree fndecl,
+ vec<gensum_param_desc> *param_descriptions)
+{
+ tree parm = DECL_ARGUMENTS (fndecl);
+ for (unsigned i = 0;
+ i < param_descriptions->length ();
+ ++i, parm = DECL_CHAIN (parm))
+ {
+ fprintf (f, " Descriptor for parameter %i ", i);
+ print_generic_expr (f, parm, TDF_UID);
+ fprintf (f, "\n");
+ dump_gensum_param_descriptor (f, &(*param_descriptions)[i]);
+ }
+}
+
+
+/* Dump DESC to F. */
+
+static void
+dump_isra_param_descriptor (FILE *f, isra_param_desc *desc)
+{
+ if (desc->locally_unused)
+ {
+ fprintf (f, " (locally) unused\n");
+ }
+ if (!desc->split_candidate)
+ {
+ fprintf (f, " not a candidate for splitting\n");
+ return;
+ }
+ fprintf (f, " param_size_limit: %u, size_reached: %u%s\n",
+ desc->param_size_limit, desc->size_reached,
+ desc->by_ref ? ", by_ref" : "");
+
+ for (unsigned i = 0; i < vec_safe_length (desc->accesses); ++i)
+ {
+ param_access *access = (*desc->accesses)[i];
+ dump_isra_access (f, access);
+ }
+}
+
+/* Dump all parameter descriptors in IFS, assuming it describes FNDECl, to
+ F. */
+
+static void
+dump_isra_param_descriptors (FILE *f, tree fndecl,
+ isra_func_summary *ifs)
+{
+ tree parm = DECL_ARGUMENTS (fndecl);
+ if (!ifs->m_parameters)
+ {
+ fprintf (f, " parameter descriptors not available\n");
+ return;
+ }
+
+ for (unsigned i = 0;
+ i < ifs->m_parameters->length ();
+ ++i, parm = DECL_CHAIN (parm))
+ {
+ fprintf (f, " Descriptor for parameter %i ", i);
+ print_generic_expr (f, parm, TDF_UID);
+ fprintf (f, "\n");
+ dump_isra_param_descriptor (f, &(*ifs->m_parameters)[i]);
+ }
+}
+
+/* Add SRC to inputs of PARAM_FLOW, unless it would exceed storage. If the
+ function fails return false, otherwise return true. SRC must fit into an
+ unsigned char. Used for purposes of transitive unused parameter
+ removal. */
+
+static bool
+add_src_to_param_flow (isra_param_flow *param_flow, int src)
+{
+ gcc_checking_assert (src >= 0 && src <= UCHAR_MAX);
+ if (param_flow->length == IPA_SRA_MAX_PARAM_FLOW_LEN)
+ return false;
+
+ param_flow->inputs[(int) param_flow->length] = src;
+ param_flow->length++;
+ return true;
+}
+
+/* Add a SRC to the inputs of PARAM_FLOW unless it is already there and assert
+ it is the only input. Used for purposes of transitive parameter
+ splitting. */
+
+static void
+set_single_param_flow_source (isra_param_flow *param_flow, int src)
+{
+ gcc_checking_assert (src >= 0 && src <= UCHAR_MAX);
+ if (param_flow->length == 0)
+ {
+ param_flow->inputs[0] = src;
+ param_flow->length = 1;
+ }
+ else if (param_flow->length == 1)
+ gcc_assert (param_flow->inputs[0] == src);
+ else
+ gcc_unreachable ();
+}
+
+/* Assert that there is only a single value in PARAM_FLOW's inputs and return
+ it. */
+
+static unsigned
+get_single_param_flow_source (const isra_param_flow *param_flow)
+{
+ gcc_assert (param_flow->length == 1);
+ return param_flow->inputs[0];
+}
+
+/* Inspect all uses of NAME and simple arithmetic calculations involving NAME
+ in NODE and return a negative number if any of them is used for something
+ else than either an actual call argument, simple arithmetic operation or
+ debug statement. If there are no such uses, return the number of actual
+ arguments that this parameter eventually feeds to (or zero if there is none).
+ For any such parameter, mark PARM_NUM as one of its sources. ANALYZED is a
+ bitmap that tracks which SSA names we have already started
+ investigating. */
+
+static int
+isra_track_scalar_value_uses (cgraph_node *node, tree name, int parm_num,
+ bitmap analyzed)
+{
+ int res = 0;
+ imm_use_iterator imm_iter;
+ gimple *stmt;
+
+ FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
+ {
+ if (is_gimple_debug (stmt))
+ continue;
+
+ /* TODO: We could handle at least const builtin functions like arithmetic
+ operations below. */
+ if (is_gimple_call (stmt))
+ {
+ int all_uses = 0;
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
+ all_uses++;
+
+ gcall *call = as_a <gcall *> (stmt);
+ unsigned arg_count;
+ if (gimple_call_internal_p (call)
+ || (arg_count = gimple_call_num_args (call)) == 0)
+ {
+ res = -1;
+ BREAK_FROM_IMM_USE_STMT (imm_iter);
+ }
+
+ cgraph_edge *cs = node->get_edge (stmt);
+ gcc_checking_assert (cs);
+ isra_call_summary *csum = call_sums->get_create (cs);
+ csum->init_inputs (arg_count);
+
+ int simple_uses = 0;
+ for (unsigned i = 0; i < arg_count; i++)
+ if (gimple_call_arg (call, i) == name)
+ {
+ if (!add_src_to_param_flow (&csum->m_arg_flow[i], parm_num))
+ {
+ simple_uses = -1;
+ break;
+ }
+ simple_uses++;
+ }
+
+ if (simple_uses < 0
+ || all_uses != simple_uses)
+ {
+ res = -1;
+ BREAK_FROM_IMM_USE_STMT (imm_iter);
+ }
+ res += all_uses;
+ }
+ else if ((is_gimple_assign (stmt) && !gimple_has_volatile_ops (stmt))
+ || gimple_code (stmt) == GIMPLE_PHI)
+ {
+ tree lhs;
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ lhs = gimple_phi_result (stmt);
+ else
+ lhs = gimple_assign_lhs (stmt);
+
+ if (TREE_CODE (lhs) != SSA_NAME)
+ {
+ res = -1;
+ BREAK_FROM_IMM_USE_STMT (imm_iter);
+ }
+ gcc_assert (!gimple_vdef (stmt));
+ if (bitmap_set_bit (analyzed, SSA_NAME_VERSION (lhs)))
+ {
+ int tmp = isra_track_scalar_value_uses (node, lhs, parm_num,
+ analyzed);
+ if (tmp < 0)
+ {
+ res = tmp;
+ BREAK_FROM_IMM_USE_STMT (imm_iter);
+ }
+ res += tmp;
+ }
+ }
+ else
+ {
+ res = -1;
+ BREAK_FROM_IMM_USE_STMT (imm_iter);
+ }
+ }
+ return res;
+}
+
+/* Inspect all uses of PARM, which must be a gimple register, in FUN (which is
+ also described by NODE) and simple arithmetic calculations involving PARM
+ and return false if any of them is used for something else than either an
+ actual call argument, simple arithmetic operation or debug statement. If
+ there are no such uses, return true and store the number of actual arguments
+ that this parameter eventually feeds to (or zero if there is none) to
+ *CALL_USES_P. For any such parameter, mark PARM_NUM as one of its
+ sources.
+
+ This function is similar to ptr_parm_has_nonarg_uses but its results are
+ meant for unused parameter removal, as opposed to splitting of parameters
+ passed by reference or converting them to passed by value.
+ */
+
+static bool
+isra_track_scalar_param_local_uses (function *fun, cgraph_node *node, tree parm,
+ int parm_num, int *call_uses_p)
+{
+ gcc_checking_assert (is_gimple_reg (parm));
+
+ tree name = ssa_default_def (fun, parm);
+ if (!name || has_zero_uses (name))
+ {
+ *call_uses_p = 0;
+ return false;
+ }
+
+ /* Edge summaries can only handle callers with fewer than 256 parameters. */
+ if (parm_num > UCHAR_MAX)
+ return true;
+
+ bitmap analyzed = BITMAP_ALLOC (NULL);
+ int call_uses = isra_track_scalar_value_uses (node, name, parm_num, analyzed);
+ BITMAP_FREE (analyzed);
+ if (call_uses < 0)
+ return true;
+ *call_uses_p = call_uses;
+ return false;
+}
+
+/* Scan immediate uses of a default definition SSA name of a parameter PARM and
+   examine whether there are any nonarg uses that are not actual arguments or
+   otherwise infeasible uses.  If so, return true, otherwise return false.
+   Create pass-through IPA flow records for any direct uses as argument calls
+   and if returning false, store their number into *PT_COUNT_P.  NODE and FUN
+   must represent the function that is currently analyzed, PARM_NUM must be the
+   index of the analyzed parameter.
+
+   This function is similar to isra_track_scalar_param_local_uses but its
+   results are meant for splitting of parameters passed by reference or turning
+   them into bits passed by value, as opposed to generic unused parameter
+   removal.
+   */
+
+static bool
+ptr_parm_has_nonarg_uses (cgraph_node *node, function *fun, tree parm,
+                          int parm_num, unsigned *pt_count_p)
+{
+  imm_use_iterator ui;
+  gimple *stmt;
+  tree name = ssa_default_def (fun, parm);
+  bool ret = false;
+  unsigned pt_count = 0;
+
+  if (!name || has_zero_uses (name))
+    return false;
+
+  /* Edge summaries can only handle callers with fewer than 256 parameters. */
+  if (parm_num > UCHAR_MAX)
+    return true;
+
+  FOR_EACH_IMM_USE_STMT (stmt, ui, name)
+    {
+      unsigned uses_ok = 0;
+      use_operand_p use_p;
+
+      if (is_gimple_debug (stmt))
+        continue;
+
+      if (gimple_assign_single_p (stmt))
+        {
+          /* A load through the pointer is fine if it is a direct,
+             non-volatile dereference of a compatible type at offset zero.  */
+          tree rhs = gimple_assign_rhs1 (stmt);
+          while (handled_component_p (rhs))
+            rhs = TREE_OPERAND (rhs, 0);
+          if (TREE_CODE (rhs) == MEM_REF
+              && TREE_OPERAND (rhs, 0) == name
+              && integer_zerop (TREE_OPERAND (rhs, 1))
+              && types_compatible_p (TREE_TYPE (rhs),
+                                     TREE_TYPE (TREE_TYPE (name)))
+              && !TREE_THIS_VOLATILE (rhs))
+            uses_ok++;
+        }
+      else if (is_gimple_call (stmt))
+        {
+          gcall *call = as_a <gcall *> (stmt);
+          unsigned arg_count;
+          if (gimple_call_internal_p (call)
+              || (arg_count = gimple_call_num_args (call)) == 0)
+            {
+              /* Internal functions and zero-argument calls cannot consume
+                 the pointer as an argument, give up.  Note that this exits
+                 the whole FOR_EACH_IMM_USE_STMT loop.  */
+              ret = true;
+              BREAK_FROM_IMM_USE_STMT (ui);
+            }
+
+          cgraph_edge *cs = node->get_edge (stmt);
+          gcc_checking_assert (cs);
+          isra_call_summary *csum = call_sums->get_create (cs);
+          csum->init_inputs (arg_count);
+
+          for (unsigned i = 0; i < arg_count; ++i)
+            {
+              tree arg = gimple_call_arg (stmt, i);
+
+              if (arg == name)
+                {
+                  /* TODO: Allow &MEM_REF[name + offset] here,
+                     ipa_param_body_adjustments::modify_call_stmt has to be
+                     adjusted too. */
+                  csum->m_arg_flow[i].pointer_pass_through = true;
+                  set_single_param_flow_source (&csum->m_arg_flow[i], parm_num);
+                  pt_count++;
+                  uses_ok++;
+                  continue;
+                }
+
+              /* A dereference passed by value: same shape as the load case
+                 above.  */
+              while (handled_component_p (arg))
+                arg = TREE_OPERAND (arg, 0);
+              if (TREE_CODE (arg) == MEM_REF
+                  && TREE_OPERAND (arg, 0) == name
+                  && integer_zerop (TREE_OPERAND (arg, 1))
+                  && types_compatible_p (TREE_TYPE (arg),
+                                         TREE_TYPE (TREE_TYPE (name)))
+                  && !TREE_THIS_VOLATILE (arg))
+                uses_ok++;
+            }
+        }
+
+      /* If the number of valid uses does not match the number of
+         uses in this stmt there is an unhandled use. */
+      unsigned all_uses = 0;
+      FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
+        all_uses++;
+
+      gcc_checking_assert (uses_ok <= all_uses);
+      if (uses_ok != all_uses)
+        {
+          ret = true;
+          BREAK_FROM_IMM_USE_STMT (ui);
+        }
+    }
+
+  *pt_count_p = pt_count;
+  return ret;
+}
+
+/* Initialize vector of parameter descriptors of NODE.  Return true if there
+   are any candidates for splitting or unused aggregate parameter removal (the
+   function may return false if there are candidates for removal of register
+   parameters) and function body must be scanned.  */
+
+static bool
+create_parameter_descriptors (cgraph_node *node,
+                              vec<gensum_param_desc> *param_descriptions)
+{
+  function *fun = DECL_STRUCT_FUNCTION (node->decl);
+  bool ret = false;
+
+  int num = 0;
+  for (tree parm = DECL_ARGUMENTS (node->decl);
+       parm;
+       parm = DECL_CHAIN (parm), num++)
+    {
+      const char *msg;
+      gensum_param_desc *desc = &(*param_descriptions)[num];
+      /* param_descriptions vector is grown cleared in the caller. */
+      desc->param_number = num;
+      decl2desc->put (parm, desc);
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        print_generic_expr (dump_file, parm, TDF_UID);
+
+      int scalar_call_uses;
+      tree type = TREE_TYPE (parm);
+      if (TREE_THIS_VOLATILE (parm))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, is volatile\n");
+          continue;
+        }
+      if (!is_gimple_reg_type (type) && is_va_list_type (type))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, is a va_list type\n");
+          continue;
+        }
+      if (TREE_ADDRESSABLE (parm))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, is addressable\n");
+          continue;
+        }
+      if (TREE_ADDRESSABLE (type))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, type cannot be split\n");
+          continue;
+        }
+
+      /* Scalar parameters may be removable even when the aggregate checks
+         below disqualify them from splitting.  */
+      if (is_gimple_reg (parm)
+          && !isra_track_scalar_param_local_uses (fun, node, parm, num,
+                                                  &scalar_call_uses))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " is a scalar with only %i call uses\n",
+                     scalar_call_uses);
+
+          desc->locally_unused = true;
+          desc->call_uses = scalar_call_uses;
+        }
+
+      if (POINTER_TYPE_P (type))
+        {
+          type = TREE_TYPE (type);
+
+          if (TREE_CODE (type) == FUNCTION_TYPE
+              || TREE_CODE (type) == METHOD_TYPE)
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, reference to "
+                         "a function\n");
+              continue;
+            }
+          if (TYPE_VOLATILE (type))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, reference to "
+                         "a volatile type\n");
+              continue;
+            }
+          if (TREE_CODE (type) == ARRAY_TYPE
+              && TYPE_NONALIASED_COMPONENT (type))
+            {
+              /* Fixed: the two concatenated string pieces previously
+                 produced "...reference to anonaliased component array".  */
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, reference to a "
+                         "nonaliased component array\n");
+              continue;
+            }
+          if (!is_gimple_reg (parm))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, a reference which is "
+                         "not a gimple register (probably addressable)\n");
+              continue;
+            }
+          if (is_va_list_type (type))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, reference to "
+                         "a va list\n");
+              continue;
+            }
+          if (ptr_parm_has_nonarg_uses (node, fun, parm, num,
+                                        &desc->ptr_pt_count))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, " not a candidate, reference has "
+                         "nonarg uses\n");
+              continue;
+            }
+          desc->by_ref = true;
+        }
+      else if (!AGGREGATE_TYPE_P (type))
+        {
+          /* This is in an else branch because scalars passed by reference are
+             still candidates to be passed by value. */
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, not an aggregate\n");
+          continue;
+        }
+
+      if (!COMPLETE_TYPE_P (type))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, not a complete type\n");
+          continue;
+        }
+      if (!tree_fits_uhwi_p (TYPE_SIZE (type)))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, size not representable\n");
+          continue;
+        }
+      unsigned HOST_WIDE_INT type_size
+        = tree_to_uhwi (TYPE_SIZE (type)) / BITS_PER_UNIT;
+      if (type_size == 0
+          || type_size >= ISRA_ARG_SIZE_LIMIT)
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, has zero or huge size\n");
+          continue;
+        }
+      if (type_internals_preclude_sra_p (type, &msg))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, " not a candidate, %s\n", msg);
+          continue;
+        }
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        fprintf (dump_file, " is a candidate\n");
+
+      ret = true;
+      desc->split_candidate = true;
+      if (desc->by_ref)
+        desc->deref_index = by_ref_count++;
+    }
+  return ret;
+}
+
+/* Return pointer to descriptor of parameter DECL or NULL if it cannot be
+   found, which happens if DECL is for a static chain.  */
+
+static gensum_param_desc *
+get_gensum_param_desc (tree decl)
+{
+  gcc_checking_assert (TREE_CODE (decl) == PARM_DECL);
+  gensum_param_desc **entry = decl2desc->get (decl);
+  if (entry)
+    {
+      gcc_checking_assert (*entry);
+      return *entry;
+    }
+  /* Static chains are not handled so far and have no descriptors.  */
+  return NULL;
+}
+
+
+/* Remove parameter described by DESC from candidates for IPA-SRA splitting and
+   write REASON to the dump file if there is one.  */
+
+static void
+disqualify_split_candidate (gensum_param_desc *desc, const char *reason)
+{
+  if (!desc->split_candidate)
+    return;
+
+  desc->split_candidate = false;
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "! Disqualifying parameter number %i - %s\n",
+             desc->param_number, reason);
+}
+
+/* Remove DECL from candidates for IPA-SRA and write REASON to the dump file if
+   there is one.  */
+
+static void
+disqualify_split_candidate (tree decl, const char *reason)
+{
+  if (gensum_param_desc *desc = get_gensum_param_desc (decl))
+    disqualify_split_candidate (desc, reason);
+}
+
+/* Allocate a new access to DESC and fill it in with OFFSET and SIZE.  But
+   first, check that there are not too many of them already.  If so, do not
+   allocate anything and return NULL.  */
+
+static gensum_param_access *
+allocate_access (gensum_param_desc *desc,
+                 HOST_WIDE_INT offset, HOST_WIDE_INT size)
+{
+  unsigned limit = (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS);
+  if (desc->access_count == limit)
+    {
+      disqualify_split_candidate (desc, "Too many replacement candidates");
+      return NULL;
+    }
+
+  /* Accesses live on an obstack; zero-fill and set only the extent.  */
+  gensum_param_access *acc
+    = (gensum_param_access *) obstack_alloc (&gensum_obstack,
+                                             sizeof (gensum_param_access));
+  memset (acc, 0, sizeof (*acc));
+  acc->offset = offset;
+  acc->size = size;
+  return acc;
+}
+
+/* In what context scan_expr_access has been called, whether it deals with a
+   load, a function call argument, or a store.  */
+
+enum isra_scan_context {ISRA_CTX_LOAD, ISRA_CTX_ARG, ISRA_CTX_STORE};
+
+/* Return an access describing memory access to the variable described by DESC
+   at OFFSET with SIZE in context CTX, starting at pointer to the linked list
+   at a certain tree level FIRST.  Attempt to create it and put into the
+   appropriate place in the access tree if does not exist, but fail and return
+   NULL if there are already too many accesses, if it would create a partially
+   overlapping access or if an access would end up within a pre-existing
+   non-call access.
+   */
+
+static gensum_param_access *
+get_access_1 (gensum_param_desc *desc, gensum_param_access **first,
+              HOST_WIDE_INT offset, HOST_WIDE_INT size, isra_scan_context ctx)
+{
+  gensum_param_access *access = *first, **ptr = first;
+
+  if (!access)
+    {
+      /* No pre-existing access at this level, just create it. */
+      gensum_param_access *a = allocate_access (desc, offset, size);
+      if (!a)
+        return NULL;
+      *first = a;
+      return *first;
+    }
+
+  if (access->offset >= offset + size)
+    {
+      /* We want to squeeze it in front of the very first access, just do
+         it. */
+      gensum_param_access *r = allocate_access (desc, offset, size);
+      if (!r)
+        return NULL;
+      r->next_sibling = access;
+      *first = r;
+      return r;
+    }
+
+  /* Skip all accesses that have to come before us until the next sibling is
+     already too far. */
+  while (offset >= access->offset + access->size
+         && access->next_sibling
+         && access->next_sibling->offset < offset + size)
+    {
+      ptr = &access->next_sibling;
+      access = access->next_sibling;
+    }
+
+  /* At this point we know we do not belong before access. */
+  gcc_assert (access->offset < offset + size);
+
+  if (access->offset == offset && access->size == size)
+    /* We found what we were looking for. */
+    return access;
+
+  if (access->offset <= offset
+      && access->offset + access->size >= offset + size)
+    {
+      /* We fit into access which is larger than us.  We need to find/create
+         something below access.  But we only allow nesting in call
+         arguments. */
+      if (access->nonarg)
+        return NULL;
+
+      return get_access_1 (desc, &access->first_child, offset, size, ctx);
+    }
+
+  if (offset <= access->offset
+      && offset + size >= access->offset + access->size)
+    /* We are actually bigger than access, which fully fits into us, take its
+       place and make all accesses fitting into it its children. */
+    {
+      /* But first, we only allow nesting in call arguments so check if that is
+         what we are trying to represent. */
+      if (ctx != ISRA_CTX_ARG)
+        return NULL;
+
+      gensum_param_access *r = allocate_access (desc, offset, size);
+      if (!r)
+        return NULL;
+      r->first_child = access;
+
+      /* Collect all existing siblings covered by the new access as its
+         children; anything sticking out signals a partial overlap.  */
+      while (access->next_sibling
+             && access->next_sibling->offset < offset + size)
+        access = access->next_sibling;
+      if (access->offset + access->size > offset + size)
+        {
+          /* This must be a different access, which are sorted, so the
+             following must be true and this signals a partial overlap. */
+          gcc_assert (access->offset > offset);
+          return NULL;
+        }
+
+      r->next_sibling = access->next_sibling;
+      access->next_sibling = NULL;
+      *ptr = r;
+      return r;
+    }
+
+  if (offset >= access->offset + access->size)
+    {
+      /* We belong after access. */
+      gensum_param_access *r = allocate_access (desc, offset, size);
+      if (!r)
+        return NULL;
+      r->next_sibling = access->next_sibling;
+      access->next_sibling = r;
+      return r;
+    }
+
+  if (offset < access->offset)
+    {
+      /* We know the following, otherwise we would have created a
+         super-access. */
+      gcc_checking_assert (offset + size < access->offset + access->size);
+      return NULL;
+    }
+
+  if (offset + size > access->offset + access->size)
+    {
+      /* Likewise. */
+      gcc_checking_assert (offset > access->offset);
+      return NULL;
+    }
+
+  gcc_unreachable ();
+}
+
+/* Return an access describing memory access to the variable described by DESC
+   at OFFSET with SIZE in context CTX, mark it as used in context CTX.  Attempt
+   to create if it does not exist, but fail and return NULL if there are
+   already too many accesses, if it would create a partially overlapping access
+   or if an access would end up in a non-call access.  */
+
+static gensum_param_access *
+get_access (gensum_param_desc *desc, HOST_WIDE_INT offset, HOST_WIDE_INT size,
+            isra_scan_context ctx)
+{
+  gcc_checking_assert (desc->split_candidate);
+
+  gensum_param_access *access = get_access_1 (desc, &desc->accesses, offset,
+                                              size, ctx);
+  if (!access)
+    {
+      disqualify_split_candidate (desc,
+                                  "Bad access overlap or too many accesses");
+      return NULL;
+    }
+
+  switch (ctx)
+    {
+    case ISRA_CTX_STORE:
+      /* Stores are only expected for aggregates passed by value.  */
+      gcc_assert (!desc->by_ref);
+      /* Fall-through */
+    case ISRA_CTX_LOAD:
+      access->nonarg = true;
+      break;
+    case ISRA_CTX_ARG:
+      break;
+    }
+
+  return access;
+}
+
+/* Verify that parameter access tree starting with ACCESS is in good shape.
+   PARENT_OFFSET and PARENT_SIZE are the corresponding fields of parent of
+   ACCESS or zero if there is none.  Return true if a problem was found.  */
+
+static bool
+verify_access_tree_1 (gensum_param_access *access, HOST_WIDE_INT parent_offset,
+                      HOST_WIDE_INT parent_size)
+{
+  while (access)
+    {
+      gcc_assert (access->offset >= 0 && access->size > 0);
+
+      if (parent_size != 0)
+        {
+          if (access->offset < parent_offset)
+            {
+              error ("Access offset before parent offset");
+              return true;
+            }
+          if (access->size >= parent_size)
+            {
+              error ("Access size greater or equal to its parent size");
+              return true;
+            }
+          if (access->offset + access->size > parent_offset + parent_size)
+            {
+              error ("Access terminates outside of its parent");
+              return true;
+            }
+        }
+
+      /* Recurse into children, then check ordering among siblings.  */
+      if (verify_access_tree_1 (access->first_child, access->offset,
+                                access->size))
+        return true;
+
+      if (access->next_sibling
+          && (access->next_sibling->offset < access->offset + access->size))
+        {
+          error ("Access overlaps with its sibling");
+          return true;
+        }
+
+      access = access->next_sibling;
+    }
+  return false;
+}
+
+/* Verify that parameter access tree starting with ACCESS is in good shape,
+   halt compilation and dump the tree to stderr if not.  */
+
+DEBUG_FUNCTION void
+isra_verify_access_tree (gensum_param_access *access)
+{
+  if (!verify_access_tree_1 (access, 0, 0))
+    return;
+
+  for (gensum_param_access *a = access; a; a = a->next_sibling)
+    dump_gensum_access (stderr, a, 2);
+  internal_error ("IPA-SRA access verification failed");
+}
+
+
+/* Callback of walk_stmt_load_store_addr_ops visit_addr used to determine
+   GIMPLE_ASM operands with memory constrains which cannot be scalarized.  */
+
+static bool
+asm_visit_addr (gimple *, tree op, tree, void *)
+{
+  tree base = get_base_address (op);
+  if (base && TREE_CODE (base) == PARM_DECL)
+    disqualify_split_candidate (base, "Non-scalarizable GIMPLE_ASM operand.");
+
+  return false;
+}
+
+/* Mark a dereference of parameter identified by DESC of distance DIST in a
+   basic block BB, unless the BB has already been marked as a potentially
+   final one.  */
+
+static void
+mark_param_dereference (gensum_param_desc *desc, HOST_WIDE_INT dist,
+                        basic_block bb)
+{
+  gcc_assert (desc->by_ref);
+  gcc_checking_assert (desc->split_candidate);
+
+  if (bitmap_bit_p (final_bbs, bb->index))
+    return;
+
+  /* Keep the maximum dereference distance seen in this BB.  */
+  HOST_WIDE_INT *slot
+    = &bb_dereferences[bb->index * by_ref_count + desc->deref_index];
+  if (*slot < dist)
+    *slot = dist;
+}
+
+/* Return true, if any potential replacements should use NEW_TYPE as opposed to
+   previously recorded OLD_TYPE.  The preference rules below are applied in
+   order; the first one that distinguishes the two types decides.  */
+
+static bool
+type_prevails_p (tree old_type, tree new_type)
+{
+  if (old_type == new_type)
+    return false;
+
+  /* Non-aggregates are always better. */
+  if (!is_gimple_reg_type (old_type)
+      && is_gimple_reg_type (new_type))
+    return true;
+  if (is_gimple_reg_type (old_type)
+      && !is_gimple_reg_type (new_type))
+    return false;
+
+  /* Prefer any complex or vector type over any other scalar type. */
+  if (TREE_CODE (old_type) != COMPLEX_TYPE
+      && TREE_CODE (old_type) != VECTOR_TYPE
+      && (TREE_CODE (new_type) == COMPLEX_TYPE
+          || TREE_CODE (new_type) == VECTOR_TYPE))
+    return true;
+  if ((TREE_CODE (old_type) == COMPLEX_TYPE
+       || TREE_CODE (old_type) == VECTOR_TYPE)
+      && TREE_CODE (new_type) != COMPLEX_TYPE
+      && TREE_CODE (new_type) != VECTOR_TYPE)
+    return false;
+
+  /* Use the integral type with the bigger precision. */
+  if (INTEGRAL_TYPE_P (old_type)
+      && INTEGRAL_TYPE_P (new_type))
+    return (TYPE_PRECISION (new_type) > TYPE_PRECISION (old_type));
+
+  /* Attempt to disregard any integral type with non-full precision. */
+  if (INTEGRAL_TYPE_P (old_type)
+      && (TREE_INT_CST_LOW (TYPE_SIZE (old_type))
+          != TYPE_PRECISION (old_type)))
+    return true;
+  if (INTEGRAL_TYPE_P (new_type)
+      && (TREE_INT_CST_LOW (TYPE_SIZE (new_type))
+          != TYPE_PRECISION (new_type)))
+    return false;
+  /* Stabilize the selection. */
+  return TYPE_UID (old_type) < TYPE_UID (new_type);
+}
+
+/* When scanning an expression which is a call argument, this structure
+   specifies the call and the position of the argument.  */
+
+struct scan_call_info
+{
+  /* Call graph edge representing the call. */
+  cgraph_edge *cs;
+  /* Total number of arguments in the call. */
+  unsigned argument_count;
+  /* Number of the actual argument being scanned. */
+  unsigned arg_idx;
+};
+
+/* Record a use of an aggregate piece of the parameter described by DESC as a
+   call argument described by CALL_INFO, at UNIT_OFFSET with UNIT_SIZE.  */
+
+static void
+record_nonregister_call_use (gensum_param_desc *desc,
+                             scan_call_info *call_info,
+                             unsigned unit_offset, unsigned unit_size)
+{
+  isra_call_summary *csum = call_sums->get_create (call_info->cs);
+  csum->init_inputs (call_info->argument_count);
+
+  isra_param_flow *pf = &csum->m_arg_flow[call_info->arg_idx];
+  set_single_param_flow_source (pf, desc->param_number);
+  pf->aggregate_pass_through = true;
+  pf->unit_offset = unit_offset;
+  pf->unit_size = unit_size;
+  desc->call_uses++;
+}
+
+/* Callback of walk_aliased_vdefs, just mark that there was a possible
+   modification.  DATA points to the flag to set.  */
+
+static bool
+mark_maybe_modified (ao_ref *, tree, void *data)
+{
+  *(bool *) data = true;
+  return true;
+}
+
+/* Analyze expression EXPR from GIMPLE for accesses to parameters.  CTX
+   specifies whether EXPR is used in a load, store or as an argument of a
+   call.  BB must be the basic block in which expr resides.  If CTX specifies
+   call argument context, CALL_INFO must describe the call and argument
+   position, otherwise it is ignored.  */
+
+static void
+scan_expr_access (tree expr, gimple *stmt, isra_scan_context ctx,
+                  basic_block bb, scan_call_info *call_info = NULL)
+{
+  poly_int64 poffset, psize, pmax_size;
+  HOST_WIDE_INT offset, size, max_size;
+  tree base;
+  bool deref = false;
+  bool reverse;
+
+  /* Look through wrappers that do not change which bytes are accessed.  */
+  if (TREE_CODE (expr) == BIT_FIELD_REF
+      || TREE_CODE (expr) == IMAGPART_EXPR
+      || TREE_CODE (expr) == REALPART_EXPR)
+    expr = TREE_OPERAND (expr, 0);
+
+  base = get_ref_base_and_extent (expr, &poffset, &psize, &pmax_size, &reverse);
+
+  if (TREE_CODE (base) == MEM_REF)
+    {
+      /* Only dereferences of default-definition SSA names of parameters are
+         interesting here.  */
+      tree op = TREE_OPERAND (base, 0);
+      if (TREE_CODE (op) != SSA_NAME
+          || !SSA_NAME_IS_DEFAULT_DEF (op))
+        return;
+      base = SSA_NAME_VAR (op);
+      if (!base)
+        return;
+      deref = true;
+    }
+  if (TREE_CODE (base) != PARM_DECL)
+    return;
+
+  gensum_param_desc *desc = get_gensum_param_desc (base);
+  if (!desc || !desc->split_candidate)
+    return;
+
+  if (!poffset.is_constant (&offset)
+      || !psize.is_constant (&size)
+      || !pmax_size.is_constant (&max_size))
+    {
+      disqualify_split_candidate (desc, "Encountered a polynomial-sized "
+                                  "access.");
+      return;
+    }
+  if (size < 0 || size != max_size)
+    {
+      disqualify_split_candidate (desc, "Encountered a variable sized access.");
+      return;
+    }
+  if (TREE_CODE (expr) == COMPONENT_REF
+      && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
+    {
+      disqualify_split_candidate (desc, "Encountered a bit-field access.");
+      return;
+    }
+  gcc_assert (offset >= 0);
+  gcc_assert ((offset % BITS_PER_UNIT) == 0);
+  gcc_assert ((size % BITS_PER_UNIT) == 0);
+  if ((offset / BITS_PER_UNIT) >= (UINT_MAX - ISRA_ARG_SIZE_LIMIT)
+      || (size / BITS_PER_UNIT) >= ISRA_ARG_SIZE_LIMIT)
+    {
+      disqualify_split_candidate (desc, "Encountered an access with too big "
+                                  "offset or size");
+      return;
+    }
+
+  tree type = TREE_TYPE (expr);
+  unsigned int exp_align = get_object_alignment (expr);
+
+  if (exp_align < TYPE_ALIGN (type))
+    {
+      disqualify_split_candidate (desc, "Underaligned access.");
+      return;
+    }
+
+  if (deref)
+    {
+      if (!desc->by_ref)
+        {
+          disqualify_split_candidate (desc, "Dereferencing a non-reference.");
+          return;
+        }
+      else if (ctx == ISRA_CTX_STORE)
+        {
+          disqualify_split_candidate (desc, "Storing to data passed by "
+                                      "reference.");
+          return;
+        }
+
+      if (!aa_walking_limit)
+        {
+          disqualify_split_candidate (desc, "Out of alias analysis step "
+                                      "limit.");
+          return;
+        }
+
+      /* Walk aliased vdefs to make sure nobody can modify the pointed-to
+         data between function entry and this dereference; aa_walking_limit
+         caps the total amount of walking done per function.  */
+      gcc_checking_assert (gimple_vuse (stmt));
+      bool maybe_modified = false;
+      ao_ref ar;
+
+      ao_ref_init (&ar, expr);
+      bitmap visited = BITMAP_ALLOC (NULL);
+      int walked = walk_aliased_vdefs (&ar, gimple_vuse (stmt),
+                                       mark_maybe_modified, &maybe_modified,
+                                       &visited, NULL, aa_walking_limit);
+      BITMAP_FREE (visited);
+      if (walked > 0)
+        {
+          gcc_assert (aa_walking_limit > walked);
+          aa_walking_limit = aa_walking_limit - walked;
+        }
+      if (walked < 0)
+        aa_walking_limit = 0;
+      if (maybe_modified || walked < 0)
+        {
+          disqualify_split_candidate (desc, "Data passed by reference possibly "
+                                      "modified through an alias.");
+          return;
+        }
+      else
+        mark_param_dereference (desc, offset + size, bb);
+    }
+  else
+    /* Pointer parameters with direct uses should have been ruled out by
+       analyzing SSA default def when looking at the parameters.  */
+    gcc_assert (!desc->by_ref);
+
+  gensum_param_access *access = get_access (desc, offset, size, ctx);
+  if (!access)
+    return;
+
+  if (ctx == ISRA_CTX_ARG)
+    {
+      gcc_checking_assert (call_info);
+
+      if (!deref)
+        record_nonregister_call_use (desc, call_info, offset / BITS_PER_UNIT,
+                                     size / BITS_PER_UNIT);
+      else
+        /* This is not a pass-through of a pointer, this is a use like any
+           other. */
+        access->nonarg = true;
+    }
+
+  if (!access->type)
+    {
+      access->type = type;
+      access->alias_ptr_type = reference_alias_ptr_type (expr);
+      access->reverse = reverse;
+    }
+  else
+    {
+      if (exp_align < TYPE_ALIGN (access->type))
+        {
+          disqualify_split_candidate (desc, "Reference has lower alignment "
+                                      "than a previous one.");
+          return;
+        }
+      if (access->alias_ptr_type != reference_alias_ptr_type (expr))
+        {
+          disqualify_split_candidate (desc, "Multiple alias pointer types.");
+          return;
+        }
+      if (access->reverse != reverse)
+        {
+          disqualify_split_candidate (desc, "Both normal and reverse "
+                                      "scalar storage order.");
+          return;
+        }
+      if (!deref
+          && (AGGREGATE_TYPE_P (type) || AGGREGATE_TYPE_P (access->type))
+          && (TYPE_MAIN_VARIANT (access->type) != TYPE_MAIN_VARIANT (type)))
+        {
+          /* We need the same aggregate type on all accesses to be able to
+             distinguish transformation spots from pass-through arguments in
+             the transformation phase.  */
+          disqualify_split_candidate (desc, "We do not support aggegate "
+                                      "type punning.");
+          return;
+        }
+
+      if (type_prevails_p (access->type, type))
+        access->type = type;
+    }
+}
+
+/* Scan body of the function described by NODE and FUN and create access trees
+   for parameters.  Also mark BBs that can terminate the function (returns,
+   statements that can throw externally, non-const/pure calls and asms) in
+   final_bbs.  */
+
+static void
+scan_function (cgraph_node *node, struct function *fun)
+{
+  basic_block bb;
+
+  FOR_EACH_BB_FN (bb, fun)
+    {
+      gimple_stmt_iterator gsi;
+      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+        {
+          gimple *stmt = gsi_stmt (gsi);
+
+          if (stmt_can_throw_external (fun, stmt))
+            bitmap_set_bit (final_bbs, bb->index);
+          switch (gimple_code (stmt))
+            {
+            case GIMPLE_RETURN:
+              {
+                tree t = gimple_return_retval (as_a <greturn *> (stmt));
+                if (t != NULL_TREE)
+                  scan_expr_access (t, stmt, ISRA_CTX_LOAD, bb);
+                bitmap_set_bit (final_bbs, bb->index);
+              }
+              break;
+
+            case GIMPLE_ASSIGN:
+              if (gimple_assign_single_p (stmt)
+                  && !gimple_clobber_p (stmt))
+                {
+                  tree rhs = gimple_assign_rhs1 (stmt);
+                  scan_expr_access (rhs, stmt, ISRA_CTX_LOAD, bb);
+                  tree lhs = gimple_assign_lhs (stmt);
+                  scan_expr_access (lhs, stmt, ISRA_CTX_STORE, bb);
+                }
+              break;
+
+            case GIMPLE_CALL:
+              {
+                unsigned argument_count = gimple_call_num_args (stmt);
+                scan_call_info call_info;
+                call_info.cs = node->get_edge (stmt);
+                call_info.argument_count = argument_count;
+
+                for (unsigned i = 0; i < argument_count; i++)
+                  {
+                    call_info.arg_idx = i;
+                    scan_expr_access (gimple_call_arg (stmt, i), stmt,
+                                      ISRA_CTX_ARG, bb, &call_info);
+                  }
+
+                tree lhs = gimple_call_lhs (stmt);
+                if (lhs)
+                  scan_expr_access (lhs, stmt, ISRA_CTX_STORE, bb);
+                int flags = gimple_call_flags (stmt);
+                /* Calls that may have side effects can terminate or expose
+                   the parameter, so treat their BBs as final.  */
+                if ((flags & (ECF_CONST | ECF_PURE)) == 0)
+                  bitmap_set_bit (final_bbs, bb->index);
+              }
+              break;
+
+            case GIMPLE_ASM:
+              {
+                gasm *asm_stmt = as_a <gasm *> (stmt);
+                walk_stmt_load_store_addr_ops (asm_stmt, NULL, NULL, NULL,
+                                               asm_visit_addr);
+                bitmap_set_bit (final_bbs, bb->index);
+
+                for (unsigned i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
+                  {
+                    tree t = TREE_VALUE (gimple_asm_input_op (asm_stmt, i));
+                    scan_expr_access (t, stmt, ISRA_CTX_LOAD, bb);
+                  }
+                for (unsigned i = 0; i < gimple_asm_noutputs (asm_stmt); i++)
+                  {
+                    tree t = TREE_VALUE (gimple_asm_output_op (asm_stmt, i));
+                    scan_expr_access (t, stmt, ISRA_CTX_STORE, bb);
+                  }
+              }
+              break;
+
+            default:
+              break;
+            }
+        }
+    }
+}
+
+/* Return true if SSA_NAME NAME is only used in return statements, or if
+   results of any operations it is involved in are only used in return
+   statements.  ANALYZED is a bitmap that tracks which SSA names we have
+   already started investigating.  */
+
+static bool
+ssa_name_only_returned_p (tree name, bitmap analyzed)
+{
+  bool res = true;
+  imm_use_iterator imm_iter;
+  gimple *stmt;
+
+  FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
+    {
+      if (is_gimple_debug (stmt))
+        continue;
+
+      if (gimple_code (stmt) == GIMPLE_RETURN)
+        {
+          /* Returning NAME itself is fine, returning anything else means the
+             value was used to compute something other than the retval.  */
+          tree t = gimple_return_retval (as_a <greturn *> (stmt));
+          if (t != name)
+            {
+              res = false;
+              BREAK_FROM_IMM_USE_STMT (imm_iter);
+            }
+        }
+      else if ((is_gimple_assign (stmt) && !gimple_has_volatile_ops (stmt))
+               || gimple_code (stmt) == GIMPLE_PHI)
+        {
+          /* TODO: And perhaps for const function calls too? */
+          tree lhs;
+          if (gimple_code (stmt) == GIMPLE_PHI)
+            lhs = gimple_phi_result (stmt);
+          else
+            lhs = gimple_assign_lhs (stmt);
+
+          if (TREE_CODE (lhs) != SSA_NAME)
+            {
+              res = false;
+              BREAK_FROM_IMM_USE_STMT (imm_iter);
+            }
+          gcc_assert (!gimple_vdef (stmt));
+          /* Recurse into the result; bitmap_set_bit returning true means we
+             have not visited LHS yet.  */
+          if (bitmap_set_bit (analyzed, SSA_NAME_VERSION (lhs))
+              && !ssa_name_only_returned_p (lhs, analyzed))
+            {
+              res = false;
+              BREAK_FROM_IMM_USE_STMT (imm_iter);
+            }
+        }
+      else
+        {
+          res = false;
+          BREAK_FROM_IMM_USE_STMT (imm_iter);
+        }
+    }
+  return res;
+}
+
+/* Inspect the uses of the return value of the call associated with CS, and if
+   it is not used or if it is only used to construct the return value of the
+   caller, mark it as such in call or caller summary.  Also check for
+   misaligned arguments.  */
+
+static void
+isra_analyze_call (cgraph_edge *cs)
+{
+  gcall *call_stmt = cs->call_stmt;
+  unsigned count = gimple_call_num_args (call_stmt);
+  isra_call_summary *csum = call_sums->get_create (cs);
+
+  for (unsigned i = 0; i < count; i++)
+    {
+      tree arg = gimple_call_arg (call_stmt, i);
+      if (is_gimple_reg (arg))
+        continue;
+
+      /* An aggregate argument whose bit position is not a multiple of
+         BITS_PER_UNIT cannot be split, note that in the summary.  */
+      tree offset;
+      poly_int64 bitsize, bitpos;
+      machine_mode mode;
+      int unsignedp, reversep, volatilep = 0;
+      get_inner_reference (arg, &bitsize, &bitpos, &offset, &mode,
+                           &unsignedp, &reversep, &volatilep);
+      if (!multiple_p (bitpos, BITS_PER_UNIT))
+        {
+          csum->m_bit_aligned_arg = true;
+          break;
+        }
+    }
+
+  tree lhs = gimple_call_lhs (call_stmt);
+  if (lhs)
+    {
+      /* TODO: Also detect aggregates on a LHS of a call that are only returned
+         from this function (without being read anywhere). */
+      if (TREE_CODE (lhs) == SSA_NAME)
+        {
+          bitmap analyzed = BITMAP_ALLOC (NULL);
+          if (ssa_name_only_returned_p (lhs, analyzed))
+            csum->m_return_returned = true;
+          BITMAP_FREE (analyzed);
+        }
+    }
+  else
+    csum->m_return_ignored = true;
+}
+
+/* Look at all calls going out of NODE and perform all analyses necessary for
+   IPA-SRA that are not done at body scan time or done even when body is not
+   scanned because the function is not a candidate.  */
+
+static void
+isra_analyze_all_outgoing_calls (cgraph_node *node)
+{
+  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+    isra_analyze_call (e);
+  for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+    isra_analyze_call (e);
+}
+
+/* Dump a dereferences table with heading STR to file F.  */
+
+static void
+dump_dereferences_table (FILE *f, struct function *fun, const char *str)
+{
+  basic_block bb;
+
+  /* Print consistently to F; previously the heading and trailing newline
+     went to dump_file even though all other output went to F.  */
+  fprintf (f, "%s", str);
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun),
+                  EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
+    {
+      fprintf (f, "%4i %i ", bb->index, bitmap_bit_p (final_bbs, bb->index));
+      if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
+        {
+          int i;
+          for (i = 0; i < by_ref_count; i++)
+            {
+              int idx = bb->index * by_ref_count + i;
+              fprintf (f, " %4" HOST_WIDE_INT_PRINT "d", bb_dereferences[idx]);
+            }
+        }
+      fprintf (f, "\n");
+    }
+  fprintf (f, "\n");
+}
+
+/* Propagate distances in bb_dereferences in the opposite direction than the
+   control flow edges, in each step storing the maximum of the current value
+   and the minimum of all successors.  These steps are repeated until the table
+   stabilizes.  Note that BBs which might terminate the functions (according to
+   final_bbs bitmap) are never updated in this way.  */
+
+static void
+propagate_dereference_distances (struct function *fun)
+{
+  basic_block bb;
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    dump_dereferences_table (dump_file, fun,
+                             "Dereference table before propagation:\n");
+
+  /* Worklist of BBs to process; bb->aux is non-NULL while a BB is queued so
+     that it is not pushed twice.  */
+  auto_vec<basic_block> queue (last_basic_block_for_fn (fun));
+  queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (fun));
+  FOR_EACH_BB_FN (bb, fun)
+    {
+      queue.quick_push (bb);
+      bb->aux = bb;
+    }
+
+  while (!queue.is_empty ())
+    {
+      edge_iterator ei;
+      edge e;
+      bool change = false;
+      int i;
+
+      bb = queue.pop ();
+      bb->aux = NULL;
+
+      if (bitmap_bit_p (final_bbs, bb->index))
+        continue;
+
+      for (i = 0; i < by_ref_count; i++)
+        {
+          int idx = bb->index * by_ref_count + i;
+          bool first = true;
+          HOST_WIDE_INT inh = 0;
+
+          /* Compute the minimum guaranteed dereference distance over all
+             successors (the exit block does not constrain it).  */
+          FOR_EACH_EDGE (e, ei, bb->succs)
+            {
+              int succ_idx = e->dest->index * by_ref_count + i;
+
+              if (e->dest == EXIT_BLOCK_PTR_FOR_FN (fun))
+                continue;
+
+              if (first)
+                {
+                  first = false;
+                  inh = bb_dereferences [succ_idx];
+                }
+              else if (bb_dereferences [succ_idx] < inh)
+                inh = bb_dereferences [succ_idx];
+            }
+
+          if (!first && bb_dereferences[idx] < inh)
+            {
+              bb_dereferences[idx] = inh;
+              change = true;
+            }
+        }
+
+      /* If this BB changed, its predecessors may need updating too.  */
+      if (change)
+        FOR_EACH_EDGE (e, ei, bb->preds)
+          {
+            if (e->src->aux)
+              continue;
+
+            e->src->aux = e->src;
+            queue.quick_push (e->src);
+          }
+    }
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    dump_dereferences_table (dump_file, fun,
+                             "Dereference table after propagation:\n");
+}
+
+/* Perform basic checks on ACCESS to PARM described by DESC and all its
+ children, return true if the parameter cannot be split, otherwise return
+ false and update *NONARG_ACC_SIZE and *ONLY_CALLS. ENTRY_BB_INDEX must be
+ the index of the entry BB in the function of PARM. */
+
+static bool
+check_gensum_access (tree parm, gensum_param_desc *desc,
+ gensum_param_access *access,
+ HOST_WIDE_INT *nonarg_acc_size, bool *only_calls,
+ int entry_bb_index)
+{
+ if (access->nonarg)
+ {
+ *only_calls = false;
+ *nonarg_acc_size += access->size;
+
+ /* A non-call access with children means the same bits are used both
+ whole and in parts, which we do not handle. */
+ if (access->first_child)
+ {
+ disqualify_split_candidate (desc, "Overlapping non-call uses.");
+ return true;
+ }
+ }
+ /* Do not decompose a non-BLKmode param in a way that would create
+ BLKmode params. Especially for by-reference passing (thus,
+ pointer-type param) this is hardly worthwhile. */
+ if (DECL_MODE (parm) != BLKmode
+ && TYPE_MODE (access->type) == BLKmode)
+ {
+ disqualify_split_candidate (desc, "Would convert a non-BLK to a BLK.");
+ return true;
+ }
+
+ if (desc->by_ref)
+ {
+ /* For by-reference candidates the dereference must already be
+ unconditionally performed on all paths (see bb_dereferences, filled
+ in by propagate_dereference_distances), otherwise hoisting the load
+ into callers could introduce a trap. */
+ int idx = (entry_bb_index * by_ref_count + desc->deref_index);
+ if ((access->offset + access->size) > bb_dereferences[idx])
+ {
+ disqualify_split_candidate (desc, "Would create a possibly "
+ "illegal dereference in a caller.");
+ return true;
+ }
+ }
+
+ /* Recurse into children; any failure disqualifies the whole parameter. */
+ for (gensum_param_access *ch = access->first_child;
+ ch;
+ ch = ch->next_sibling)
+ if (check_gensum_access (parm, desc, ch, nonarg_acc_size, only_calls,
+ entry_bb_index))
+ return true;
+
+ return false;
+}
+
+/* Copy data from FROM and all of its children to a vector of accesses in IPA
+ descriptor DESC. Offsets and sizes are converted from bits to bytes; the
+ tree is flattened in preorder. */
+
+static void
+copy_accesses_to_ipa_desc (gensum_param_access *from, isra_param_desc *desc)
+{
+ /* The IPA copy is GC-allocated because summaries live across passes. */
+ param_access *to = ggc_cleared_alloc<param_access> ();
+ gcc_checking_assert ((from->offset % BITS_PER_UNIT) == 0);
+ gcc_checking_assert ((from->size % BITS_PER_UNIT) == 0);
+ to->unit_offset = from->offset / BITS_PER_UNIT;
+ to->unit_size = from->size / BITS_PER_UNIT;
+ to->type = from->type;
+ to->alias_ptr_type = from->alias_ptr_type;
+ to->certain = from->nonarg;
+ to->reverse = from->reverse;
+ vec_safe_push (desc->accesses, to);
+
+ for (gensum_param_access *ch = from->first_child;
+ ch;
+ ch = ch->next_sibling)
+ copy_accesses_to_ipa_desc (ch, desc);
+}
+
+/* Analyze function body scan results stored in PARAM_DESCRIPTIONS, detect
+   possible transformations and store information about them in the function
+   summary.  NODE, FUN and IFS are all various structures describing the
+   currently analyzed function.  */
+
+static void
+process_scan_results (cgraph_node *node, struct function *fun,
+		      isra_func_summary *ifs,
+		      vec<gensum_param_desc> *param_descriptions)
+{
+  bool check_pass_throughs = false;
+  bool dereferences_propagated = false;
+  tree parm = DECL_ARGUMENTS (node->decl);
+  unsigned param_count = param_descriptions->length();
+
+  for (unsigned desc_index = 0;
+       desc_index < param_count;
+       desc_index++, parm = DECL_CHAIN (parm))
+    {
+      gensum_param_desc *desc = &(*param_descriptions)[desc_index];
+      if (!desc->locally_unused && !desc->split_candidate)
+	continue;
+
+      if (flag_checking)
+	isra_verify_access_tree (desc->accesses);
+
+      /* The dereference-distance table is only needed once we see a by-ref
+	 split candidate with accesses; compute it lazily and only once.  */
+      if (!dereferences_propagated
+	  && desc->by_ref
+	  && desc->accesses)
+	{
+	  propagate_dereference_distances (fun);
+	  dereferences_propagated = true;
+	}
+
+      HOST_WIDE_INT nonarg_acc_size = 0;
+      bool only_calls = true;
+      bool check_failed = false;
+
+      int entry_bb_index = ENTRY_BLOCK_PTR_FOR_FN (fun)->index;
+      for (gensum_param_access *acc = desc->accesses;
+	   acc;
+	   acc = acc->next_sibling)
+	if (check_gensum_access (parm, desc, acc, &nonarg_acc_size, &only_calls,
+				 entry_bb_index))
+	  {
+	    check_failed = true;
+	    break;
+	  }
+      if (check_failed)
+	continue;
+
+      /* A parameter used only as a call argument is removable provided the
+	 uses propagate away interprocedurally.  */
+      if (only_calls)
+	desc->locally_unused = true;
+
+      HOST_WIDE_INT cur_param_size
+	= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm)));
+      HOST_WIDE_INT param_size_limit;
+      if (!desc->by_ref || optimize_function_for_size_p (fun))
+	param_size_limit = cur_param_size;
+      else
+	param_size_limit = (PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR)
+			    * cur_param_size);
+      if (nonarg_acc_size > param_size_limit
+	  || (!desc->by_ref && nonarg_acc_size == param_size_limit))
+	{
+	  disqualify_split_candidate (desc, "Would result into a too big set "
+				      "of replacements.");
+	}
+      else
+	{
+	  /* create_parameter_descriptors makes sure unit sizes of all
+	     candidate parameters fit unsigned integers restricted to
+	     ISRA_ARG_SIZE_LIMIT.  */
+	  desc->param_size_limit = param_size_limit / BITS_PER_UNIT;
+	  desc->nonarg_acc_size = nonarg_acc_size / BITS_PER_UNIT;
+	  if (desc->split_candidate && desc->ptr_pt_count)
+	    {
+	      gcc_assert (desc->by_ref);
+	      check_pass_throughs = true;
+	    }
+	}
+    }
+
+  /* When a pointer parameter is passed-through to a callee, in which it is
+     only used to read only one or a few items, we can attempt to transform it
+     to obtaining and passing through the items instead of the pointer.  But we
+     must take extra care that 1) we do not introduce any segfault by moving
+     dereferences above control flow and that 2) the data is not modified
+     through an alias in this function.  The IPA analysis must not introduce
+     any accesses candidates unless it can prove both.
+
+     The current solution is very crude as it consists of ensuring that the
+     call postdominates entry BB and that the definition of VUSE of the call is
+     default definition.  TODO: For non-recursive callees in the same
+     compilation unit we could do better by doing analysis in topological order
+     and looking into access candidates of callees, using their alias_ptr_types
+     to attempt real AA.  We could also use the maximum known dereferenced
+     offset in this function at IPA level.
+
+     TODO: Measure the overhead and the effect of just being pessimistic.
+     Maybe this is only -O3 material?  */
+  bool pdoms_calculated = false;
+  if (check_pass_throughs)
+    for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
+      {
+	gcall *call_stmt = cs->call_stmt;
+	tree vuse = gimple_vuse (call_stmt);
+
+	/* If the callee is a const function, we don't get a VUSE.  In such
+	   case there will be no memory accesses in the called function (or the
+	   const attribute is wrong) and then we just don't care.  */
+	bool uses_memory_as_obtained = vuse && SSA_NAME_IS_DEFAULT_DEF (vuse);
+
+	unsigned count = gimple_call_num_args (call_stmt);
+	isra_call_summary *csum = call_sums->get_create (cs);
+	csum->init_inputs (count);
+	for (unsigned argidx = 0; argidx < count; argidx++)
+	  {
+	    if (!csum->m_arg_flow[argidx].pointer_pass_through)
+	      continue;
+	    unsigned pidx
+	      = get_single_param_flow_source (&csum->m_arg_flow[argidx]);
+	    gensum_param_desc *desc = &(*param_descriptions)[pidx];
+	    if (!desc->split_candidate)
+	      {
+		csum->m_arg_flow[argidx].pointer_pass_through = false;
+		continue;
+	      }
+	    if (!uses_memory_as_obtained)
+	      continue;
+
+	    /* Post-dominator check placed last, hoping that it usually won't
+	       be needed.  */
+	    if (!pdoms_calculated)
+	      {
+		gcc_checking_assert (cfun);
+		add_noreturn_fake_exit_edges ();
+		connect_infinite_loops_to_exit ();
+		calculate_dominance_info (CDI_POST_DOMINATORS);
+		pdoms_calculated = true;
+	      }
+	    if (dominated_by_p (CDI_POST_DOMINATORS,
+				gimple_bb (call_stmt),
+				single_succ (ENTRY_BLOCK_PTR_FOR_FN (fun))))
+	      csum->m_arg_flow[argidx].safe_to_import_accesses = true;
+	  }
+      }
+  if (pdoms_calculated)
+    {
+      free_dominance_info (CDI_POST_DOMINATORS);
+      remove_fake_exit_edges ();
+    }
+
+  /* TODO: Add early exit if we disqualified everything.  This also requires
+     that we either relax the restriction that
+     ipa_param_adjustments.m_always_copy_start must be the number of PARM_DECLs
+     or store the number of parameters to IPA-SRA function summary and use that
+     when just removing params.  */
+
+  /* Transfer the surviving gensum descriptors into the GC-managed function
+     summary.  */
+  vec_safe_reserve_exact (ifs->m_parameters, param_count);
+  ifs->m_parameters->quick_grow_cleared (param_count);
+  for (unsigned desc_index = 0; desc_index < param_count; desc_index++)
+    {
+      gensum_param_desc *s = &(*param_descriptions)[desc_index];
+      isra_param_desc *d = &(*ifs->m_parameters)[desc_index];
+
+      d->param_size_limit = s->param_size_limit;
+      d->size_reached = s->nonarg_acc_size;
+      d->locally_unused = s->locally_unused;
+      d->split_candidate = s->split_candidate;
+      d->by_ref = s->by_ref;
+
+      for (gensum_param_access *acc = s->accesses;
+	   acc;
+	   acc = acc->next_sibling)
+	copy_accesses_to_ipa_desc (acc, d);
+    }
+
+  if (dump_file)
+    dump_isra_param_descriptors (dump_file, node->decl, ifs);
+}
+
+/* Return true if there are any overlaps among certain accesses of DESC. If
+ non-NULL, set *CERTAIN_ACCESS_PRESENT_P upon encountering a certain access
+ too. DESC is assumed to be a split candidate that is not locally
+ unused. */
+
+static bool
+overlapping_certain_accesses_p (isra_param_desc *desc,
+ bool *certain_access_present_p)
+{
+ unsigned pclen = vec_safe_length (desc->accesses);
+ for (unsigned i = 0; i < pclen; i++)
+ {
+ param_access *a1 = (*desc->accesses)[i];
+
+ if (!a1->certain)
+ continue;
+ /* Flag must be set before any early return below so callers learn
+ about certain accesses encountered so far. */
+ if (certain_access_present_p)
+ *certain_access_present_p = true;
+ for (unsigned j = i + 1; j < pclen; j++)
+ {
+ param_access *a2 = (*desc->accesses)[j];
+ /* Standard interval-intersection test on [offset, offset+size). */
+ if (a2->certain
+ && a1->unit_offset < a2->unit_offset + a2->unit_size
+ && a1->unit_offset + a1->unit_size > a2->unit_offset)
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Check for any overlaps of certain param accesses among splitting candidates
+ and signal an ICE if there are any. If CERTAIN_MUST_EXIST is set, also
+ check that used splitting candidates have at least one certain access. */
+
+static void
+verify_splitting_accesses (cgraph_node *node, bool certain_must_exist)
+{
+ isra_func_summary *ifs = func_sums->get (node);
+ /* Nothing to verify for functions without a summary or which are not
+ IPA-SRA candidates. */
+ if (!ifs || !ifs->m_candidate)
+ return;
+ unsigned param_count = vec_safe_length (ifs->m_parameters);
+ for (unsigned pidx = 0; pidx < param_count; pidx++)
+ {
+ isra_param_desc *desc = &(*ifs->m_parameters)[pidx];
+ if (!desc->split_candidate || desc->locally_unused)
+ continue;
+
+ /* When certain accesses need not exist, pre-set the flag so the second
+ check below trivially passes. */
+ bool certain_access_present = !certain_must_exist;
+ if (overlapping_certain_accesses_p (desc, &certain_access_present))
+ internal_error ("Function %s, parameter %u, has IPA_SRA accesses "
+ "which overlap", node->dump_name (), pidx);
+ if (!certain_access_present)
+ internal_error ("Function %s, parameter %u, is used but does not "
+ "have any certain IPA-SRA access",
+ node->dump_name (), pidx);
+ }
+}
+
+/* Intraprocedural part of IPA-SRA analysis.  Scan function body of NODE and
+   create a summary structure describing IPA-SRA opportunities and constraints
+   in it.  */
+
+static void
+ipa_sra_summarize_function (cgraph_node *node)
+{
+  if (dump_file)
+    fprintf (dump_file, "Creating summary for %s/%i:\n", node->name (),
+	     node->order);
+  if (!ipa_sra_preliminary_function_checks (node))
+    return;
+  /* gensum_obstack and decl2desc hold only the intermediate (gensum) data,
+     whose lifetime ends with this function.  */
+  gcc_obstack_init (&gensum_obstack);
+  isra_func_summary *ifs = func_sums->get_create (node);
+  ifs->m_candidate = true;
+  tree ret = TREE_TYPE (TREE_TYPE (node->decl));
+  ifs->m_returns_value = (TREE_CODE (ret) != VOID_TYPE);
+
+  decl2desc = new hash_map<tree, gensum_param_desc *>;
+  unsigned count = 0;
+  for (tree parm = DECL_ARGUMENTS (node->decl); parm; parm = DECL_CHAIN (parm))
+    count++;
+
+  /* Only functions with at least one parameter need the body scan.  */
+  if (count > 0)
+    {
+      auto_vec<gensum_param_desc, 16> param_descriptions (count);
+      param_descriptions.reserve_exact (count);
+      param_descriptions.quick_grow_cleared (count);
+
+      bool cfun_pushed = false;
+      struct function *fun = DECL_STRUCT_FUNCTION (node->decl);
+      if (create_parameter_descriptors (node, &param_descriptions))
+	{
+	  push_cfun (fun);
+	  cfun_pushed = true;
+	  final_bbs = BITMAP_ALLOC (NULL);
+	  bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
+				      by_ref_count
+				      * last_basic_block_for_fn (fun));
+	  aa_walking_limit = PARAM_VALUE (PARAM_IPA_MAX_AA_STEPS);
+	  scan_function (node, fun);
+
+	  if (dump_file)
+	    {
+	      dump_gensum_param_descriptors (dump_file, node->decl,
+					     &param_descriptions);
+	      fprintf (dump_file, "----------------------------------------\n");
+	    }
+	}
+      process_scan_results (node, fun, ifs, &param_descriptions);
+
+      if (cfun_pushed)
+	pop_cfun ();
+      if (bb_dereferences)
+	{
+	  free (bb_dereferences);
+	  bb_dereferences = NULL;
+	  BITMAP_FREE (final_bbs);
+	  final_bbs = NULL;
+	}
+    }
+  /* Edge summaries are created even for parameter-less functions so that
+     their callees can be analyzed.  */
+  isra_analyze_all_outgoing_calls (node);
+
+  delete decl2desc;
+  decl2desc = NULL;
+  obstack_free (&gensum_obstack, NULL);
+  if (dump_file)
+    fprintf (dump_file, "\n\n");
+  if (flag_checking)
+    verify_splitting_accesses (node, false);
+}
+
+/* Intraprocedural part of IPA-SRA analysis.  Scan bodies of all functions in
+   this compilation unit and create summary structures describing IPA-SRA
+   opportunities and constraints in them.  */
+
+static void
+ipa_sra_generate_summary (void)
+{
+  cgraph_node *fn;
+
+  /* The summaries must not exist yet; this pass creates them.  */
+  gcc_checking_assert (!func_sums);
+  gcc_checking_assert (!call_sums);
+
+  /* Function summaries are GC-allocated because they contain trees.  */
+  func_sums
+    = (new (ggc_cleared_alloc <ipa_sra_function_summaries> ())
+       ipa_sra_function_summaries (symtab, true));
+  call_sums = new ipa_sra_call_summaries (symtab);
+
+  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (fn)
+    ipa_sra_summarize_function (fn);
+}
+
+/* Write intraprocedural analysis information about call represented by edge E
+ into a stream for LTO WPA. The write order must be mirrored exactly by
+ isra_read_edge_summary. */
+
+static void
+isra_write_edge_summary (output_block *ob, cgraph_edge *e)
+{
+ isra_call_summary *csum = call_sums->get (e);
+ unsigned input_count = csum->m_arg_flow.length ();
+ streamer_write_uhwi (ob, input_count);
+ for (unsigned i = 0; i < input_count; i++)
+ {
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+ streamer_write_hwi (ob, ipf->length);
+ bitpack_d bp = bitpack_create (ob->main_stream);
+ /* Each input index is packed in 8 bits. */
+ for (int j = 0; j < ipf->length; j++)
+ bp_pack_value (&bp, ipf->inputs[j], 8);
+ bp_pack_value (&bp, ipf->aggregate_pass_through, 1);
+ bp_pack_value (&bp, ipf->pointer_pass_through, 1);
+ bp_pack_value (&bp, ipf->safe_to_import_accesses, 1);
+ streamer_write_bitpack (&bp);
+ streamer_write_uhwi (ob, ipf->unit_offset);
+ streamer_write_uhwi (ob, ipf->unit_size);
+ }
+ bitpack_d bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, csum->m_return_ignored, 1);
+ bp_pack_value (&bp, csum->m_return_returned, 1);
+ bp_pack_value (&bp, csum->m_bit_aligned_arg, 1);
+ streamer_write_bitpack (&bp);
+}
+
+/* Write intraprocedural analysis information about NODE and all of its
+ outgoing edges into a stream for LTO WPA. The write order must be mirrored
+ exactly by isra_read_node_info. */
+
+static void
+isra_write_node_summary (output_block *ob, cgraph_node *node)
+{
+ isra_func_summary *ifs = func_sums->get (node);
+ lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
+ int node_ref = lto_symtab_encoder_encode (encoder, node);
+ streamer_write_uhwi (ob, node_ref);
+
+ unsigned param_desc_count = vec_safe_length (ifs->m_parameters);
+ streamer_write_uhwi (ob, param_desc_count);
+ for (unsigned i = 0; i < param_desc_count; i++)
+ {
+ isra_param_desc *desc = &(*ifs->m_parameters)[i];
+ unsigned access_count = vec_safe_length (desc->accesses);
+ streamer_write_uhwi (ob, access_count);
+ for (unsigned j = 0; j < access_count; j++)
+ {
+ param_access *acc = (*desc->accesses)[j];
+ stream_write_tree (ob, acc->type, true);
+ stream_write_tree (ob, acc->alias_ptr_type, true);
+ streamer_write_uhwi (ob, acc->unit_offset);
+ streamer_write_uhwi (ob, acc->unit_size);
+ bitpack_d bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, acc->certain, 1);
+ streamer_write_bitpack (&bp);
+ }
+ streamer_write_uhwi (ob, desc->param_size_limit);
+ streamer_write_uhwi (ob, desc->size_reached);
+ bitpack_d bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, desc->locally_unused, 1);
+ bp_pack_value (&bp, desc->split_candidate, 1);
+ bp_pack_value (&bp, desc->by_ref, 1);
+ streamer_write_bitpack (&bp);
+ }
+ bitpack_d bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, ifs->m_candidate, 1);
+ bp_pack_value (&bp, ifs->m_returns_value, 1);
+ bp_pack_value (&bp, ifs->m_return_ignored, 1);
+ /* m_queued is a transient worklist flag and is deliberately not
+ streamed. */
+ gcc_assert (!ifs->m_queued);
+ streamer_write_bitpack (&bp);
+
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ isra_write_edge_summary (ob, e);
+ for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+ isra_write_edge_summary (ob, e);
+}
+
+/* Write intraprocedural analysis information into a stream for LTO WPA. */
+
+static void
+ipa_sra_write_summary (void)
+{
+ if (!func_sums || !call_sums)
+ return;
+
+ struct output_block *ob = create_output_block (LTO_section_ipa_sra);
+ lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
+ ob->symbol = NULL;
+
+ /* First pass: count the nodes that will be streamed, so the reader knows
+ how many records to expect. */
+ unsigned int count = 0;
+ lto_symtab_encoder_iterator lsei;
+ for (lsei = lsei_start_function_in_partition (encoder);
+ !lsei_end_p (lsei);
+ lsei_next_function_in_partition (&lsei))
+ {
+ cgraph_node *node = lsei_cgraph_node (lsei);
+ if (node->has_gimple_body_p ()
+ && func_sums->get (node) != NULL)
+ count++;
+ }
+ streamer_write_uhwi (ob, count);
+
+ /* Process all of the functions. The filter here must match the counting
+ loop above exactly. */
+ for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei);
+ lsei_next_function_in_partition (&lsei))
+ {
+ cgraph_node *node = lsei_cgraph_node (lsei);
+ if (node->has_gimple_body_p ()
+ && func_sums->get (node) != NULL)
+ isra_write_node_summary (ob, node);
+ }
+ streamer_write_char_stream (ob->main_stream, 0);
+ produce_asm (ob, NULL);
+ destroy_output_block (ob);
+}
+
+/* Read intraprocedural analysis information about the call represented by
+ edge CS from stream IB. Must mirror isra_write_edge_summary exactly. */
+
+static void
+isra_read_edge_summary (struct lto_input_block *ib, cgraph_edge *cs)
+{
+ isra_call_summary *csum = call_sums->get_create (cs);
+ unsigned input_count = streamer_read_uhwi (ib);
+ csum->init_inputs (input_count);
+ for (unsigned i = 0; i < input_count; i++)
+ {
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+ ipf->length = streamer_read_hwi (ib);
+ bitpack_d bp = streamer_read_bitpack (ib);
+ /* Input indices were packed in 8 bits each by the writer. */
+ for (int j = 0; j < ipf->length; j++)
+ ipf->inputs[j] = bp_unpack_value (&bp, 8);
+ ipf->aggregate_pass_through = bp_unpack_value (&bp, 1);
+ ipf->pointer_pass_through = bp_unpack_value (&bp, 1);
+ ipf->safe_to_import_accesses = bp_unpack_value (&bp, 1);
+ ipf->unit_offset = streamer_read_uhwi (ib);
+ ipf->unit_size = streamer_read_uhwi (ib);
+ }
+ bitpack_d bp = streamer_read_bitpack (ib);
+ csum->m_return_ignored = bp_unpack_value (&bp, 1);
+ csum->m_return_returned = bp_unpack_value (&bp, 1);
+ csum->m_bit_aligned_arg = bp_unpack_value (&bp, 1);
+}
+
+/* Read intraprocedural analysis information about NODE and all of its
+ outgoing edges from stream IB. Must mirror isra_write_node_summary
+ exactly. */
+
+static void
+isra_read_node_info (struct lto_input_block *ib, cgraph_node *node,
+ struct data_in *data_in)
+{
+ isra_func_summary *ifs = func_sums->get_create (node);
+ unsigned param_desc_count = streamer_read_uhwi (ib);
+ if (param_desc_count > 0)
+ {
+ vec_safe_reserve_exact (ifs->m_parameters, param_desc_count);
+ ifs->m_parameters->quick_grow_cleared (param_desc_count);
+ }
+ for (unsigned i = 0; i < param_desc_count; i++)
+ {
+ isra_param_desc *desc = &(*ifs->m_parameters)[i];
+ unsigned access_count = streamer_read_uhwi (ib);
+ for (unsigned j = 0; j < access_count; j++)
+ {
+ /* GC-allocated because accesses hang off a GTY summary. */
+ param_access *acc = ggc_cleared_alloc<param_access> ();
+ acc->type = stream_read_tree (ib, data_in);
+ acc->alias_ptr_type = stream_read_tree (ib, data_in);
+ acc->unit_offset = streamer_read_uhwi (ib);
+ acc->unit_size = streamer_read_uhwi (ib);
+ bitpack_d bp = streamer_read_bitpack (ib);
+ acc->certain = bp_unpack_value (&bp, 1);
+ vec_safe_push (desc->accesses, acc);
+ }
+ desc->param_size_limit = streamer_read_uhwi (ib);
+ desc->size_reached = streamer_read_uhwi (ib);
+ bitpack_d bp = streamer_read_bitpack (ib);
+ desc->locally_unused = bp_unpack_value (&bp, 1);
+ desc->split_candidate = bp_unpack_value (&bp, 1);
+ desc->by_ref = bp_unpack_value (&bp, 1);
+ }
+ bitpack_d bp = streamer_read_bitpack (ib);
+ ifs->m_candidate = bp_unpack_value (&bp, 1);
+ ifs->m_returns_value = bp_unpack_value (&bp, 1);
+ ifs->m_return_ignored = bp_unpack_value (&bp, 1);
+ /* m_queued is not streamed; reset the transient worklist flag. */
+ ifs->m_queued = 0;
+
+ for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+ isra_read_edge_summary (ib, e);
+ for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+ isra_read_edge_summary (ib, e);
+}
+
+/* Read IPA-SRA summaries from a section in file FILE_DATA of length LEN with
+ data DATA. TODO: This function was copied almost verbatim from ipa-prop.c,
+ it should be possible to unify them somehow. */
+
+static void
+isra_read_summary_section (struct lto_file_decl_data *file_data,
+ const char *data, size_t len)
+{
+ /* The section layout is: header, cfg, main stream, string table. */
+ const struct lto_function_header *header =
+ (const struct lto_function_header *) data;
+ const int cfg_offset = sizeof (struct lto_function_header);
+ const int main_offset = cfg_offset + header->cfg_size;
+ const int string_offset = main_offset + header->main_size;
+ struct data_in *data_in;
+ unsigned int i;
+ unsigned int count;
+
+ lto_input_block ib_main ((const char *) data + main_offset,
+ header->main_size, file_data->mode_table);
+
+ data_in =
+ lto_data_in_create (file_data, (const char *) data + string_offset,
+ header->string_size, vNULL);
+ /* The writer streamed the number of node records first. */
+ count = streamer_read_uhwi (&ib_main);
+
+ for (i = 0; i < count; i++)
+ {
+ unsigned int index;
+ struct cgraph_node *node;
+ lto_symtab_encoder_t encoder;
+
+ index = streamer_read_uhwi (&ib_main);
+ encoder = file_data->symtab_node_encoder;
+ node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
+ index));
+ gcc_assert (node->definition);
+ isra_read_node_info (&ib_main, node, data_in);
+ }
+ lto_free_section_data (file_data, LTO_section_ipa_sra, NULL, data,
+ len);
+ lto_data_in_delete (data_in);
+}
+
+/* Read intraprocedural analysis information from the IPA-SRA sections of all
+ LTO files for WPA. */
+
+static void
+ipa_sra_read_summary (void)
+{
+ struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
+ struct lto_file_decl_data *file_data;
+ unsigned int j = 0;
+
+ /* The summaries must not exist yet; they are created here and filled in
+ from the streamed data. */
+ gcc_checking_assert (!func_sums);
+ gcc_checking_assert (!call_sums);
+ func_sums
+ = (new (ggc_cleared_alloc <ipa_sra_function_summaries> ())
+ ipa_sra_function_summaries (symtab, true));
+ call_sums = new ipa_sra_call_summaries (symtab);
+
+ while ((file_data = file_data_vec[j++]))
+ {
+ size_t len;
+ const char *data = lto_get_section_data (file_data, LTO_section_ipa_sra,
+ NULL, &len);
+ /* A file may not contain an IPA-SRA section at all. */
+ if (data)
+ isra_read_summary_section (file_data, data, len);
+ }
+}
+
+/* Dump all IPA-SRA summary data for all cgraph nodes and edges to file F. */
+
+static void
+ipa_sra_dump_all_summaries (FILE *f)
+{
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ fprintf (f, "\nSummary for node %s:\n", node->dump_name ());
+
+ isra_func_summary *ifs = func_sums->get (node);
+ if (!ifs)
+ {
+ fprintf (f, " Function does not have any associated IPA-SRA "
+ "summary\n");
+ continue;
+ }
+ if (!ifs->m_candidate)
+ {
+ fprintf (f, " Not a candidate function\n");
+ continue;
+ }
+ if (ifs->m_returns_value)
+ fprintf (f, " Returns value\n");
+ if (vec_safe_is_empty (ifs->m_parameters))
+ fprintf (f, " No parameter information. \n");
+ else
+ for (unsigned i = 0; i < ifs->m_parameters->length (); ++i)
+ {
+ fprintf (f, " Descriptor for parameter %i:\n", i);
+ dump_isra_param_descriptor (f, &(*ifs->m_parameters)[i]);
+ }
+ fprintf (f, "\n");
+
+ /* A missing call summary indicates an edge that was never analyzed,
+ which is itself worth flagging in the dump. */
+ struct cgraph_edge *cs;
+ for (cs = node->callees; cs; cs = cs->next_callee)
+ {
+ fprintf (f, " Summary for edge %s->%s:\n", cs->caller->dump_name (),
+ cs->callee->dump_name ());
+ isra_call_summary *csum = call_sums->get (cs);
+ if (csum)
+ csum->dump (f);
+ else
+ fprintf (f, " Call summary is MISSING!\n");
+ }
+
+ }
+ fprintf (f, "\n\n");
+}
+
+/* Perform function-scope viability tests that can be only made at IPA level
+ and return false if the function is deemed unsuitable for IPA-SRA. */
+
+static bool
+ipa_sra_ipa_function_checks (cgraph_node *node)
+{
+ /* Changing the signature requires that all callers are known, i.e. that
+ the function can be made local. */
+ if (!node->can_be_local_p ())
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function %s disqualified because it cannot be "
+ "made local.\n", node->dump_name ());
+ return false;
+ }
+ if (!node->local.can_change_signature)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Function can not change signature.\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Issues found out by check_callers_for_issues. All fields are flags that
+ start out false (the structure is zero-initialized by its user). */
+
+struct caller_issues
+{
+ /* There is a thunk among callers. */
+ bool thunk;
+ /* Call site with no available information. */
+ bool unknown_callsite;
+ /* There is a bit-aligned load into one of non-gimple-typed arguments. */
+ bool bit_aligned_aggregate_argument;
+};
+
+/* Worker for call_for_symbol_and_aliases, set any flags of passed caller_issues
+ that apply. Returning true stops the traversal early; this is done as soon
+ as a fatal issue (thunk or unknown call site) is found. */
+
+static bool
+check_for_caller_issues (struct cgraph_node *node, void *data)
+{
+ struct caller_issues *issues = (struct caller_issues *) data;
+
+ for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
+ {
+ if (cs->caller->thunk.thunk_p)
+ {
+ issues->thunk = true;
+ /* TODO: We should be able to process at least some types of
+ thunks. */
+ return true;
+ }
+
+ isra_call_summary *csum = call_sums->get (cs);
+ if (!csum)
+ {
+ issues->unknown_callsite = true;
+ return true;
+ }
+
+ /* Non-fatal: only disables splitting, so keep scanning. */
+ if (csum->m_bit_aligned_arg)
+ issues->bit_aligned_aggregate_argument = true;
+ }
+ return false;
+}
+
+/* Look at all incoming edges to NODE, including aliases and thunks and look
+   for problems.  Return true if NODE type should not be modified at all.  */
+
+static bool
+check_all_callers_for_issues (cgraph_node *node)
+{
+  struct caller_issues issues;
+  memset (&issues, 0, sizeof (issues));
+
+  node->call_for_symbol_and_aliases (check_for_caller_issues, &issues, true);
+  if (issues.unknown_callsite)
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "A call of %s has not been analyzed. Disabling "
+		 "all modifications.\n", node->dump_name ());
+      return true;
+    }
+  /* TODO: We should be able to process at least some types of thunks. */
+  if (issues.thunk)
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "A call of %s is through thunk, which are not"
+		 " handled yet. Disabling all modifications.\n",
+		 node->dump_name ());
+      return true;
+    }
+
+  if (issues.bit_aligned_aggregate_argument)
+    {
+      /* Let's only remove parameters/return values from such functions.
+	 TODO: We could only prevent splitting the problematic parameters if
+	 anybody thinks it is worth it.  */
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "A call of %s has bit-aligned aggregate argument,"
+		 " disabling parameter splitting.\n", node->dump_name ());
+
+      isra_func_summary *ifs = func_sums->get (node);
+      gcc_checking_assert (ifs);
+      unsigned param_count = vec_safe_length (ifs->m_parameters);
+      for (unsigned i = 0; i < param_count; i++)
+	(*ifs->m_parameters)[i].split_candidate = false;
+    }
+  return false;
+}
+
+/* Find the access with corresponding OFFSET and SIZE among accesses in
+   PARAM_DESC and return it or NULL if such an access is not there.  */
+
+static param_access *
+find_param_access (isra_param_desc *param_desc, unsigned offset, unsigned size)
+{
+  unsigned count = vec_safe_length (param_desc->accesses);
+
+  /* The search is linear but the number of stored accesses is bound by
+     PARAM_IPA_SRA_MAX_REPLACEMENTS, so most probably 8.  */
+  for (unsigned i = 0; i < count; i++)
+    {
+      param_access *pacc = (*param_desc->accesses)[i];
+      if (pacc->unit_offset == offset && pacc->unit_size == size)
+	return pacc;
+    }
+
+  return NULL;
+}
+
+/* Return true iff the total size of definite replacements SIZE would violate
+   the limit set for it in DESC.  By-value parameters must shrink strictly,
+   so reaching the limit exactly already counts as a violation for them.  */
+
+static bool
+size_would_violate_limit_p (isra_param_desc *desc, unsigned size)
+{
+  unsigned limit = desc->param_size_limit;
+  return size > limit || (size == limit && !desc->by_ref);
+}
+
+/* Increase reached size of DESC by SIZE or disqualify it if it would violate
+   the set limit.  IDX is the parameter number which is dumped when
+   disqualifying.  */
+
+static void
+bump_reached_size (isra_param_desc *desc, unsigned size, unsigned idx)
+{
+  unsigned new_size = desc->size_reached + size;
+  if (!size_would_violate_limit_p (desc, new_size))
+    {
+      desc->size_reached = new_size;
+      return;
+    }
+
+  /* Over the limit: the parameter can no longer be split.  */
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, " ...size limit reached, disqualifying "
+	     "candidate parameter %u\n", idx);
+  desc->split_candidate = false;
+}
+
+/* Take all actions required to deal with an edge CS that represents a call to
+ an unknown or un-analyzed function, for both parameter removal and
+ splitting. */
+
+static void
+process_edge_to_unknown_caller (cgraph_edge *cs)
+{
+ isra_func_summary *from_ifs = func_sums->get (cs->caller);
+ gcc_checking_assert (from_ifs);
+ isra_call_summary *csum = call_sums->get (cs);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Processing an edge to an unknown caller from %s:\n",
+ cs->caller->dump_name ());
+
+ unsigned args_count = csum->m_arg_flow.length ();
+ for (unsigned i = 0; i < args_count; i++)
+ {
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+
+ /* A pointer passed whole to an unknown function kills both removal
+ and splitting of the corresponding caller parameter. */
+ if (ipf->pointer_pass_through)
+ {
+ isra_param_desc *param_desc
+ = &(*from_ifs->m_parameters)[get_single_param_flow_source (ipf)];
+ param_desc->locally_unused = false;
+ param_desc->split_candidate = false;
+ continue;
+ }
+ if (ipf->aggregate_pass_through)
+ {
+ unsigned idx = get_single_param_flow_source (ipf);
+ isra_param_desc *param_desc = &(*from_ifs->m_parameters)[idx];
+
+ param_desc->locally_unused = false;
+ if (!param_desc->split_candidate)
+ continue;
+ gcc_assert (!param_desc->by_ref);
+ /* The passed piece becomes a certain access; this may create
+ overlaps or exceed the size limit, either of which disqualifies
+ splitting. */
+ param_access *pacc = find_param_access (param_desc, ipf->unit_offset,
+ ipf->unit_size);
+ gcc_checking_assert (pacc);
+ pacc->certain = true;
+ if (overlapping_certain_accesses_p (param_desc, NULL))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " ...leading to overlap, "
+ " disqualifying candidate parameter %u\n",
+ idx);
+ param_desc->split_candidate = false;
+ }
+ else
+ bump_reached_size (param_desc, pacc->unit_size, idx);
+ ipf->aggregate_pass_through = false;
+ continue;
+ }
+
+ /* Scalar inputs that flow into the unknown call are simply used. */
+ for (int j = 0; j < ipf->length; j++)
+ {
+ int input_idx = ipf->inputs[j];
+ (*from_ifs->m_parameters)[input_idx].locally_unused = false;
+ }
+ }
+}
+
+/* Propagate parameter removal information through cross-SCC edge CS,
+ i.e. decrease the use count in the caller parameter descriptor for each use
+ in this call. */
+
+static void
+param_removal_cross_scc_edge (cgraph_edge *cs)
+{
+ enum availability availability;
+ cgraph_node *callee = cs->callee->function_symbol (&availability);
+ isra_func_summary *to_ifs = func_sums->get (callee);
+ /* Without a usable callee summary the call must be treated exactly like a
+ call to an unknown function. */
+ if (!to_ifs || !to_ifs->m_candidate
+ || (availability < AVAIL_AVAILABLE)
+ || vec_safe_is_empty (to_ifs->m_parameters))
+ {
+ process_edge_to_unknown_caller (cs);
+ return;
+ }
+ isra_func_summary *from_ifs = func_sums->get (cs->caller);
+ gcc_checking_assert (from_ifs);
+
+ isra_call_summary *csum = call_sums->get (cs);
+ unsigned args_count = csum->m_arg_flow.length ();
+ unsigned param_count = vec_safe_length (to_ifs->m_parameters);
+
+ for (unsigned i = 0; i < args_count; i++)
+ {
+ /* Arguments beyond the callee's parameter list (e.g. to a variadic or
+ mismatched callee) are conservatively treated as used. */
+ bool unused_in_callee;
+ if (i < param_count)
+ unused_in_callee = (*to_ifs->m_parameters)[i].locally_unused;
+ else
+ unused_in_callee = false;
+
+ if (!unused_in_callee)
+ {
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+ for (int j = 0; j < ipf->length; j++)
+ {
+ int input_idx = ipf->inputs[j];
+ (*from_ifs->m_parameters)[input_idx].locally_unused = false;
+ }
+ }
+ }
+}
+
+/* Unless it is already there, push NODE which is also described by IFS to
+   STACK.  The m_queued flag in IFS tracks stack membership.  */
+
+static void
+isra_push_node_to_stack (cgraph_node *node, isra_func_summary *ifs,
+			 vec<cgraph_node *> *stack)
+{
+  if (ifs->m_queued)
+    return;
+
+  ifs->m_queued = true;
+  stack->safe_push (node);
+}
+
+/* If parameter with index INPUT_IDX is marked as locally unused, mark it as
+   used and push CALLER on STACK so that it gets re-processed.  */
+
+static void
+isra_mark_caller_param_used (isra_func_summary *from_ifs, int input_idx,
+			     cgraph_node *caller, vec<cgraph_node *> *stack)
+{
+  isra_param_desc *desc = &(*from_ifs->m_parameters)[input_idx];
+  if (!desc->locally_unused)
+    return;
+
+  desc->locally_unused = false;
+  isra_push_node_to_stack (caller, from_ifs, stack);
+}
+
+
+/* Propagate information that any parameter is not used only locally within a
+ SCC across CS to the caller, which must be in the same SCC as the
+ callee. Push any callers that need to be re-processed to STACK. */
+
+static void
+propagate_used_across_scc_edge (cgraph_edge *cs, vec<cgraph_node *> *stack)
+{
+ isra_func_summary *from_ifs = func_sums->get (cs->caller);
+ if (!from_ifs || vec_safe_is_empty (from_ifs->m_parameters))
+ return;
+
+ isra_call_summary *csum = call_sums->get (cs);
+ gcc_checking_assert (csum);
+ unsigned args_count = csum->m_arg_flow.length ();
+ enum availability availability;
+ cgraph_node *callee = cs->callee->function_symbol (&availability);
+ isra_func_summary *to_ifs = func_sums->get (callee);
+
+ /* If the callee is unavailable or unanalyzed, treat every argument as
+ used by letting param_count be zero. */
+ unsigned param_count
+ = (to_ifs && (availability >= AVAIL_AVAILABLE))
+ ? vec_safe_length (to_ifs->m_parameters) : 0;
+ for (unsigned i = 0; i < args_count; i++)
+ {
+ if (i < param_count
+ && (*to_ifs->m_parameters)[i].locally_unused)
+ continue;
+
+ /* The argument is needed in the callee, so we must mark the parameter
+ as used also in the caller and its callers within this SCC. */
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+ for (int j = 0; j < ipf->length; j++)
+ {
+ int input_idx = ipf->inputs[j];
+ isra_mark_caller_param_used (from_ifs, input_idx, cs->caller, stack);
+ }
+ }
+}
+
+/* Propagate information that any parameter is not used only locally within a
+ SCC (i.e. is used also elsewhere) to all callers of NODE that are in the
+ same SCC. Push any callers that need to be re-processed to STACK.
+ Worker for call_for_symbol_thunks_and_aliases; DATA is the work STACK. */
+
+static bool
+propagate_used_to_scc_callers (cgraph_node *node, void *data)
+{
+ vec<cgraph_node *> *stack = (vec<cgraph_node *> *) data;
+ cgraph_edge *cs;
+ for (cs = node->callers; cs; cs = cs->next_caller)
+ if (ipa_edge_within_scc (cs))
+ propagate_used_across_scc_edge (cs, stack);
+ /* Returning false makes the traversal continue over all aliases. */
+ return false;
+}
+
+/* Return true iff all certain accesses in ARG_DESC are also present as
+ certain accesses in PARAM_DESC. */
+
+static bool
+all_callee_accesses_present_p (isra_param_desc *param_desc,
+ isra_param_desc *arg_desc)
+{
+ unsigned aclen = vec_safe_length (arg_desc->accesses);
+ for (unsigned j = 0; j < aclen; j++)
+ {
+ param_access *argacc = (*arg_desc->accesses)[j];
+ if (!argacc->certain)
+ continue;
+ /* A caller access with exactly the same offset and size must exist
+ and itself be certain. */
+ param_access *pacc = find_param_access (param_desc, argacc->unit_offset,
+ argacc->unit_size);
+ if (!pacc || !pacc->certain)
+ return false;
+ }
+ return true;
+}
+
+/* Type internal to function pull_accesses_from_callee. Unfortunately gcc 4.8
+ does not allow instantiating an auto_vec with a type defined within a
+ function so it is a global type. ACC_PROP_DONT means do not propagate the
+ access, ACC_PROP_COPY means copy it to the caller as a new access and
+ ACC_PROP_CERTAIN means mark a matching existing caller access certain. */
+enum acc_prop_kind {ACC_PROP_DONT, ACC_PROP_COPY, ACC_PROP_CERTAIN};
+
+
+/* Attempt to propagate all definite accesses from ARG_DESC to PARAM_DESC, if
+ they would not violate some constraint there. If successful, return NULL,
+ otherwise return the string reason for failure (which can be written to the
+ dump file). DELTA_OFFSET is the known offset of the actual argument within
+ the formal parameter (so of ARG_DESCS within PARAM_DESCS), ARG_SIZE is the
+ size of the actual argument or zero, if not known. In case of success, set
+ *CHANGE_P to true if propagation actually changed anything. */
+
+static const char *
+pull_accesses_from_callee (isra_param_desc *param_desc,
+ isra_param_desc *arg_desc,
+ unsigned delta_offset, unsigned arg_size,
+ bool *change_p)
+{
+ unsigned pclen = vec_safe_length (param_desc->accesses);
+ unsigned aclen = vec_safe_length (arg_desc->accesses);
+ unsigned prop_count = 0;
+ unsigned prop_size = 0;
+ bool change = false;
+
+ /* First pass: decide for each callee access whether and how it can be
+ propagated, without modifying anything yet. */
+ auto_vec <enum acc_prop_kind, 8> prop_kinds (aclen);
+ for (unsigned j = 0; j < aclen; j++)
+ {
+ param_access *argacc = (*arg_desc->accesses)[j];
+ prop_kinds.safe_push (ACC_PROP_DONT);
+
+ if (arg_size > 0
+ && argacc->unit_offset + argacc->unit_size > arg_size)
+ return "callee access outside size boundary";
+
+ if (!argacc->certain)
+ continue;
+
+ unsigned offset = argacc->unit_offset + delta_offset;
+ /* Given that accesses are initially stored according to increasing
+ offset and decreasing size in case of equal offsets, the following
+ searches could be written more efficiently if we kept the ordering
+ when copying. But the number of accesses is capped at
+ PARAM_IPA_SRA_MAX_REPLACEMENTS (so most likely 8) and the code gets
+ messy quickly, so let's improve on that only if necessary. */
+
+ bool exact_match = false;
+ for (unsigned i = 0; i < pclen; i++)
+ {
+ /* Check for overlaps. */
+ param_access *pacc = (*param_desc->accesses)[i];
+ if (pacc->unit_offset == offset
+ && pacc->unit_size == argacc->unit_size)
+ {
+ if (argacc->alias_ptr_type != pacc->alias_ptr_type
+ || !types_compatible_p (argacc->type, pacc->type))
+ return "propagated access types would not match existing ones";
+
+ /* An exactly matching caller access already exists, at most its
+ certain flag needs to be set. */
+ exact_match = true;
+ if (!pacc->certain)
+ {
+ prop_kinds[j] = ACC_PROP_CERTAIN;
+ prop_size += argacc->unit_size;
+ change = true;
+ }
+ continue;
+ }
+
+ if (offset < pacc->unit_offset + pacc->unit_size
+ && offset + argacc->unit_size > pacc->unit_offset)
+ {
+ /* None permissible with load accesses, possible to fit into
+ argument ones. */
+ if (pacc->certain
+ || offset < pacc->unit_offset
+ || (offset + argacc->unit_size
+ > pacc->unit_offset + pacc->unit_size))
+ return "a propagated access would conflict in caller";
+ }
+ }
+
+ if (!exact_match)
+ {
+ prop_kinds[j] = ACC_PROP_COPY;
+ prop_count++;
+ prop_size += argacc->unit_size;
+ change = true;
+ }
+ }
+
+ if (!change)
+ return NULL;
+
+ /* Refuse the propagation if it would exceed the limits on the number or
+ total size of replacement accesses. */
+ if ((prop_count + pclen
+ > (unsigned) PARAM_VALUE (PARAM_IPA_SRA_MAX_REPLACEMENTS))
+ || size_would_violate_limit_p (param_desc,
+ param_desc->size_reached + prop_size))
+ return "propagating accesses would violate the count or size limit";
+
+ /* Second pass: all checks have passed, apply the recorded decisions. */
+ *change_p = true;
+ for (unsigned j = 0; j < aclen; j++)
+ {
+ if (prop_kinds[j] == ACC_PROP_COPY)
+ {
+ param_access *argacc = (*arg_desc->accesses)[j];
+
+ param_access *copy = ggc_cleared_alloc<param_access> ();
+ copy->unit_offset = argacc->unit_offset + delta_offset;
+ copy->unit_size = argacc->unit_size;
+ copy->type = argacc->type;
+ copy->alias_ptr_type = argacc->alias_ptr_type;
+ copy->certain = true;
+ vec_safe_push (param_desc->accesses, copy);
+ }
+ else if (prop_kinds[j] == ACC_PROP_CERTAIN)
+ {
+ param_access *argacc = (*arg_desc->accesses)[j];
+ param_access *csp
+ = find_param_access (param_desc, argacc->unit_offset + delta_offset,
+ argacc->unit_size);
+ csp->certain = true;
+ }
+ }
+
+ param_desc->size_reached += prop_size;
+
+ return NULL;
+}
+
+/* Propagate parameter splitting information through call graph edge CS.
+ Return true if any changes that might need to be propagated within SCCs have
+ been made. The function also clears the aggregate_pass_through and
+ pointer_pass_through in call summaries which do not need to be processed
+ again if this CS is revisited when iterating while changes are propagated
+ within an SCC. */
+
+static bool
+param_splitting_across_edge (cgraph_edge *cs)
+{
+ bool res = false;
+ bool cross_scc = !ipa_edge_within_scc (cs);
+ enum availability availability;
+ cgraph_node *callee = cs->callee->function_symbol (&availability);
+ isra_func_summary *from_ifs = func_sums->get (cs->caller);
+ gcc_checking_assert (from_ifs && from_ifs->m_parameters);
+
+ isra_call_summary *csum = call_sums->get (cs);
+ gcc_checking_assert (csum);
+ unsigned args_count = csum->m_arg_flow.length ();
+ isra_func_summary *to_ifs = func_sums->get (callee);
+ /* Callee parameter information is only usable if the callee is an
+ analyzed candidate whose body cannot be replaced at link time. */
+ unsigned param_count
+ = ((to_ifs && to_ifs->m_candidate && (availability >= AVAIL_AVAILABLE))
+ ? vec_safe_length (to_ifs->m_parameters)
+ : 0);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Splitting across %s->%s:\n",
+ cs->caller->dump_name (), callee->dump_name ());
+
+ unsigned i;
+ for (i = 0; (i < args_count) && (i < param_count); i++)
+ {
+ isra_param_desc *arg_desc = &(*to_ifs->m_parameters)[i];
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+
+ if (arg_desc->locally_unused)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " ->%u: unused in callee\n", i);
+ ipf->pointer_pass_through = false;
+ continue;
+ }
+
+ if (ipf->pointer_pass_through)
+ {
+ /* A pointer parameter is passed through to the callee. */
+ int idx = get_single_param_flow_source (ipf);
+ isra_param_desc *param_desc = &(*from_ifs->m_parameters)[idx];
+ if (!param_desc->split_candidate)
+ continue;
+ gcc_assert (param_desc->by_ref);
+
+ if (!arg_desc->split_candidate || !arg_desc->by_ref)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: not candidate or not by "
+ "reference in callee\n", idx, i);
+ param_desc->split_candidate = false;
+ ipf->pointer_pass_through = false;
+ res = true;
+ }
+ else if (!ipf->safe_to_import_accesses)
+ {
+ if (!all_callee_accesses_present_p (param_desc, arg_desc))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: cannot import accesses.\n",
+ idx, i);
+ param_desc->split_candidate = false;
+ ipf->pointer_pass_through = false;
+ res = true;
+
+ }
+ else
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: verified callee accesses "
+ "present.\n", idx, i);
+ if (cross_scc)
+ ipf->pointer_pass_through = false;
+ }
+ }
+ else
+ {
+ const char *pull_failure
+ = pull_accesses_from_callee (param_desc, arg_desc, 0, 0, &res);
+ if (pull_failure)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: by_ref access pull "
+ "failed: %s.\n", idx, i, pull_failure);
+ param_desc->split_candidate = false;
+ ipf->pointer_pass_through = false;
+ res = true;
+ }
+ else
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: by_ref access pull "
+ "succeeded.\n", idx, i);
+ if (cross_scc)
+ ipf->pointer_pass_through = false;
+ }
+ }
+ }
+ else if (ipf->aggregate_pass_through)
+ {
+ /* A part of an aggregate parameter is passed to the callee. */
+ int idx = get_single_param_flow_source (ipf);
+ isra_param_desc *param_desc = &(*from_ifs->m_parameters)[idx];
+ if (!param_desc->split_candidate)
+ continue;
+ gcc_assert (!param_desc->by_ref);
+ param_access *pacc = find_param_access (param_desc, ipf->unit_offset,
+ ipf->unit_size);
+ gcc_checking_assert (pacc);
+
+ if (pacc->certain)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: already certain\n", idx, i);
+ ipf->aggregate_pass_through = false;
+ }
+ else if (!arg_desc->split_candidate || arg_desc->by_ref)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: not candidate or by "
+ "reference in callee\n", idx, i);
+
+ pacc->certain = true;
+ if (overlapping_certain_accesses_p (param_desc, NULL))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " ...leading to overlap, "
+ " disqualifying candidate parameter %u\n",
+ idx);
+ param_desc->split_candidate = false;
+ }
+ else
+ bump_reached_size (param_desc, pacc->unit_size, idx);
+
+ ipf->aggregate_pass_through = false;
+ res = true;
+ }
+ else
+ {
+ const char *pull_failure
+ = pull_accesses_from_callee (param_desc, arg_desc,
+ ipf->unit_offset,
+ ipf->unit_size, &res);
+ if (pull_failure)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: arg access pull "
+ "failed: %s.\n", idx, i, pull_failure);
+
+ ipf->aggregate_pass_through = false;
+ pacc->certain = true;
+
+ if (overlapping_certain_accesses_p (param_desc, NULL))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " ...leading to overlap, "
+ " disqualifying candidate parameter %u\n",
+ idx);
+ param_desc->split_candidate = false;
+ }
+ else
+ bump_reached_size (param_desc, pacc->unit_size, idx);
+
+ res = true;
+ }
+ else
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: arg access pull "
+ "succeeded.\n", idx, i);
+ if (cross_scc)
+ ipf->aggregate_pass_through = false;
+ }
+ }
+ }
+ }
+
+ /* Handle argument-parameter count mismatches. */
+ for (; (i < args_count); i++)
+ {
+ isra_param_flow *ipf = &csum->m_arg_flow[i];
+
+ if (ipf->pointer_pass_through || ipf->aggregate_pass_through)
+ {
+ int idx = get_single_param_flow_source (ipf);
+ ipf->pointer_pass_through = false;
+ ipf->aggregate_pass_through = false;
+ isra_param_desc *param_desc = &(*from_ifs->m_parameters)[idx];
+ if (!param_desc->split_candidate)
+ continue;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " %u->%u: no corresponding formal parameter\n",
+ idx, i);
+ param_desc->split_candidate = false;
+ res = true;
+ }
+ }
+ return res;
+}
+
+/* Worker for call_for_symbol_and_aliases, look at all callers and if all their
+ callers ignore the return value, or come from the same SCC and use the
+ return value only to compute their return value, return false, otherwise
+ return true. */
+
+static bool
+retval_used_p (cgraph_node *node, void *)
+{
+ for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
+ {
+ isra_call_summary *csum = call_sums->get (cs);
+ gcc_checking_assert (csum);
+ if (csum->m_return_ignored)
+ continue;
+ if (!csum->m_return_returned)
+ return true;
+
+ isra_func_summary *from_ifs = func_sums->get (cs->caller);
+ if (!from_ifs || !from_ifs->m_candidate)
+ return true;
+
+ /* A caller outside of the SCC that merely returns the value still
+ counts as a use unless its own return value is known ignored. */
+ if (!ipa_edge_within_scc (cs)
+ && !from_ifs->m_return_ignored)
+ return true;
+ }
+
+ return false;
+}
+
+/* Push into NEW_PARAMS all required parameter adjustment entries to copy or
+ modify the parameter which originally had index BASE_INDEX, which in the
+ adjustment vector of the parent clone (if any) had index PREV_CLONE_INDEX
+ and was described by PREV_ADJUSTMENT. If the parent clone is the original
+ function, PREV_ADJUSTMENT is NULL and PREV_CLONE_INDEX is equal to
+ BASE_INDEX. */
+
+
+static void
+push_param_adjustments_for_index (isra_func_summary *ifs, unsigned base_index,
+ unsigned prev_clone_index,
+ ipa_adjusted_param *prev_adjustment,
+ vec<ipa_adjusted_param, va_gc> **new_params)
+{
+ isra_param_desc *desc = &(*ifs->m_parameters)[base_index];
+ /* An unused parameter is removed simply by not pushing any entry. */
+ if (desc->locally_unused)
+ {
+ if (dump_file)
+ fprintf (dump_file, " Will remove parameter %u\n", base_index);
+ return;
+ }
+
+ if (!desc->split_candidate)
+ {
+ /* Parameter is kept as it is; either re-use the parent clone's
+ adjustment or create a plain copy operation. */
+ ipa_adjusted_param adj;
+ if (prev_adjustment)
+ {
+ adj = *prev_adjustment;
+ adj.prev_clone_adjustment = true;
+ adj.prev_clone_index = prev_clone_index;
+ }
+ else
+ {
+ memset (&adj, 0, sizeof (adj));
+ adj.op = IPA_PARAM_OP_COPY;
+ adj.base_index = base_index;
+ adj.prev_clone_index = prev_clone_index;
+ }
+ vec_safe_push ((*new_params), adj);
+ return;
+ }
+
+ if (dump_file)
+ fprintf (dump_file, " Will split parameter %u\n", base_index);
+
+ gcc_assert (!prev_adjustment || prev_adjustment->op == IPA_PARAM_OP_COPY);
+ unsigned aclen = vec_safe_length (desc->accesses);
+ /* Create one replacement parameter for each certain component access. */
+ for (unsigned j = 0; j < aclen; j++)
+ {
+ param_access *pa = (*desc->accesses)[j];
+ if (!pa->certain)
+ continue;
+ if (dump_file)
+ fprintf (dump_file, " - component at byte offset %u, "
+ "size %u\n", pa->unit_offset, pa->unit_size);
+
+ ipa_adjusted_param adj;
+ memset (&adj, 0, sizeof (adj));
+ adj.op = IPA_PARAM_OP_SPLIT;
+ adj.base_index = base_index;
+ adj.prev_clone_index = prev_clone_index;
+ adj.param_prefix_index = IPA_PARAM_PREFIX_ISRA;
+ adj.reverse = pa->reverse;
+ adj.type = pa->type;
+ adj.alias_ptr_type = pa->alias_ptr_type;
+ adj.unit_offset = pa->unit_offset;
+ vec_safe_push ((*new_params), adj);
+ }
+}
+
+
+/* Do final processing of results of IPA propagation regarding NODE, clone it
+ if appropriate. */
+
+static void
+process_isra_node_results (cgraph_node *node,
+ hash_map<const char *, unsigned> *clone_num_suffixes)
+{
+ isra_func_summary *ifs = func_sums->get (node);
+ if (!ifs || !ifs->m_candidate)
+ return;
+
+ auto_vec<bool, 16> surviving_params;
+ bool check_surviving = false;
+ if (node->clone.param_adjustments)
+ {
+ check_surviving = true;
+ node->clone.param_adjustments->get_surviving_params (&surviving_params);
+ }
+
+ /* Decide whether cloning would change anything at all. */
+ unsigned param_count = vec_safe_length (ifs->m_parameters);
+ bool will_change_function = false;
+ if (ifs->m_returns_value && ifs->m_return_ignored)
+ will_change_function = true;
+ else
+ for (unsigned i = 0; i < param_count; i++)
+ {
+ isra_param_desc *desc = &(*ifs->m_parameters)[i];
+ if ((desc->locally_unused || desc->split_candidate)
+ /* Make sure we do not clone just to attempt to remove an already
+ removed unused argument. */
+ && (!check_surviving
+ || (i < surviving_params.length ()
+ && surviving_params[i])))
+ {
+ will_change_function = true;
+ break;
+ }
+ }
+ if (!will_change_function)
+ return;
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "\nEvaluating analysis results for %s\n",
+ node->dump_name ());
+ if (ifs->m_returns_value && ifs->m_return_ignored)
+ fprintf (dump_file, " Will remove return value.\n");
+ }
+
+ /* Build the new adjustment vector, chaining onto adjustments made by any
+ previous IPA pass for this node. */
+ vec<ipa_adjusted_param, va_gc> *new_params = NULL;
+ if (ipa_param_adjustments *old_adjustments = node->clone.param_adjustments)
+ {
+ unsigned old_adj_len = vec_safe_length (old_adjustments->m_adj_params);
+ for (unsigned i = 0; i < old_adj_len; i++)
+ {
+ ipa_adjusted_param *old_adj = &(*old_adjustments->m_adj_params)[i];
+ push_param_adjustments_for_index (ifs, old_adj->base_index, i,
+ old_adj, &new_params);
+ }
+ }
+ else
+ for (unsigned i = 0; i < param_count; i++)
+ push_param_adjustments_for_index (ifs, i, i, NULL, &new_params);
+
+ ipa_param_adjustments *new_adjustments
+ = (new (ggc_alloc <ipa_param_adjustments> ())
+ ipa_param_adjustments (new_params, param_count,
+ ifs->m_returns_value && ifs->m_return_ignored));
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\n Created adjustments:\n");
+ new_adjustments->dump (dump_file);
+ }
+
+ /* Number clones of the same function consecutively. */
+ unsigned &suffix_counter = clone_num_suffixes->get_or_insert (
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (
+ node->decl)));
+ vec<cgraph_edge *> callers = node->collect_callers ();
+ cgraph_node *new_node
+ = node->create_virtual_clone (callers, NULL, new_adjustments, "isra",
+ suffix_counter);
+ suffix_counter++;
+
+ if (dump_file)
+ fprintf (dump_file, " Created new node %s\n", new_node->dump_name ());
+ callers.release ();
+}
+
+/* Check which parameters of NODE described by IFS have survived until IPA-SRA
+ and disable transformations for those which have not or which should not be
+ transformed because the associated debug counter reached its limit. Return
+ true if none survived or if there were no candidates to begin with. */
+
+static bool
+disable_unavailable_parameters (cgraph_node *node, isra_func_summary *ifs)
+{
+ bool ret = true;
+ unsigned len = vec_safe_length (ifs->m_parameters);
+ if (!len)
+ return true;
+
+ auto_vec<bool, 16> surviving_params;
+ bool check_surviving = false;
+ if (node->clone.param_adjustments)
+ {
+ check_surviving = true;
+ node->clone.param_adjustments->get_surviving_params (&surviving_params);
+ }
+ bool dumped_first = false;
+ for (unsigned i = 0; i < len; i++)
+ {
+ isra_param_desc *desc = &(*ifs->m_parameters)[i];
+ if (!dbg_cnt (ipa_sra_params))
+ {
+ /* The debug counter limit was reached, disable the transform. */
+ desc->locally_unused = false;
+ desc->split_candidate = false;
+ }
+ else if (check_surviving
+ && (i >= surviving_params.length ()
+ || !surviving_params[i]))
+ {
+ /* Even if the parameter was removed by a previous IPA pass, we do
+ not clear locally_unused because if it really is unused, this
+ information might be useful in callers. */
+ desc->split_candidate = false;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ if (!dumped_first)
+ {
+ fprintf (dump_file,
+ "The following parameters of %s are dead on "
+ "arrival:", node->dump_name ());
+ dumped_first = true;
+ }
+ fprintf (dump_file, " %u", i);
+ }
+ }
+ else if (desc->locally_unused || desc->split_candidate)
+ ret = false;
+ }
+
+ if (dumped_first)
+ fprintf (dump_file, "\n");
+
+ return ret;
+}
+
+
+/* Run the interprocedural part of IPA-SRA. */
+
+static unsigned int
+ipa_sra_analysis (void)
+{
+ if (dump_file)
+ {
+ fprintf (dump_file, "\n========== IPA-SRA IPA stage ==========\n");
+ ipa_sra_dump_all_summaries (dump_file);
+ }
+
+ gcc_checking_assert (func_sums);
+ gcc_checking_assert (call_sums);
+ cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
+ auto_vec <cgraph_node *, 16> stack;
+ int node_scc_count = ipa_reduced_postorder (order, true, NULL);
+
+ /* One sweep from callees to callers for parameter removal and splitting. */
+ for (int i = 0; i < node_scc_count; i++)
+ {
+ cgraph_node *scc_rep = order[i];
+ vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (scc_rep);
+ unsigned j;
+
+ /* Preliminary IPA function level checks and first step of parameter
+ removal. */
+ cgraph_node *v;
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ {
+ isra_func_summary *ifs = func_sums->get (v);
+ if (!ifs || !ifs->m_candidate)
+ continue;
+ if (!ipa_sra_ipa_function_checks (v)
+ || check_all_callers_for_issues (v))
+ {
+ ifs->zap ();
+ continue;
+ }
+ if (disable_unavailable_parameters (v, ifs))
+ continue;
+ for (cgraph_edge *cs = v->indirect_calls; cs; cs = cs->next_callee)
+ process_edge_to_unknown_caller (cs);
+ for (cgraph_edge *cs = v->callees; cs; cs = cs->next_callee)
+ if (!ipa_edge_within_scc (cs))
+ param_removal_cross_scc_edge (cs);
+ }
+
+ /* Look at edges within the current SCC and propagate used-ness across
+ them, pushing onto the stack all nodes which might need to be
+ revisited. */
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ v->call_for_symbol_thunks_and_aliases (propagate_used_to_scc_callers,
+ &stack, true);
+
+ /* Keep revisiting and pushing until nothing changes. */
+ while (!stack.is_empty ())
+ {
+ cgraph_node *v = stack.pop ();
+ isra_func_summary *ifs = func_sums->get (v);
+ gcc_checking_assert (ifs && ifs->m_queued);
+ ifs->m_queued = false;
+
+ v->call_for_symbol_thunks_and_aliases (propagate_used_to_scc_callers,
+ &stack, true);
+ }
+
+ /* Parameter splitting. Iterate until a fixed point is reached within the
+ SCC because splitting decisions can depend on one another. */
+ bool repeat_scc_access_propagation;
+ do
+ {
+ repeat_scc_access_propagation = false;
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ {
+ isra_func_summary *ifs = func_sums->get (v);
+ if (!ifs
+ || !ifs->m_candidate
+ || vec_safe_is_empty (ifs->m_parameters))
+ continue;
+ for (cgraph_edge *cs = v->callees; cs; cs = cs->next_callee)
+ if (param_splitting_across_edge (cs))
+ repeat_scc_access_propagation = true;
+ }
+ }
+ while (repeat_scc_access_propagation);
+
+ if (flag_checking)
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ verify_splitting_accesses (v, true);
+
+ cycle_nodes.release ();
+ }
+
+ /* One sweep from caller to callees for result removal. */
+ for (int i = node_scc_count - 1; i >= 0 ; i--)
+ {
+ cgraph_node *scc_rep = order[i];
+ vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (scc_rep);
+ unsigned j;
+
+ /* Determine which functions' return values are actually used. */
+ cgraph_node *v;
+ FOR_EACH_VEC_ELT (cycle_nodes, j, v)
+ {
+ isra_func_summary *ifs = func_sums->get (v);
+ if (!ifs || !ifs->m_candidate)
+ continue;
+
+ bool return_needed
+ = (ifs->m_returns_value
+ && (!dbg_cnt (ipa_sra_retvalues)
+ || v->call_for_symbol_and_aliases (retval_used_p,
+ NULL, true)))
+ ifs->m_return_ignored = !return_needed;
+ if (return_needed)
+ isra_push_node_to_stack (v, ifs, &stack);
+ }
+
+ /* Propagate the used return values through edges within the SCC. */
+ while (!stack.is_empty ())
+ {
+ cgraph_node *node = stack.pop ();
+ isra_func_summary *ifs = func_sums->get (node);
+ gcc_checking_assert (ifs && ifs->m_queued);
+ ifs->m_queued = false;
+
+ for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
+ if (ipa_edge_within_scc (cs)
+ && call_sums->get (cs)->m_return_returned)
+ {
+ enum availability av;
+ cgraph_node *callee = cs->callee->function_symbol (&av);
+ isra_func_summary *to_ifs = func_sums->get (callee);
+ if (to_ifs && to_ifs->m_return_ignored)
+ {
+ to_ifs->m_return_ignored = false;
+ isra_push_node_to_stack (callee, to_ifs, &stack);
+ }
+ }
+ }
+ cycle_nodes.release ();
+ }
+
+ ipa_free_postorder_info ();
+ free (order);
+
+ if (dump_file)
+ {
+ if (dump_flags & TDF_DETAILS)
+ {
+ fprintf (dump_file, "\n========== IPA-SRA propagation final state "
+ " ==========\n");
+ ipa_sra_dump_all_summaries (dump_file);
+ }
+ fprintf (dump_file, "\n========== IPA-SRA decisions ==========\n");
+ }
+
+ /* Apply the gathered results: clone functions where profitable. */
+ hash_map<const char *, unsigned> *clone_num_suffixes
+ = new hash_map<const char *, unsigned>;
+
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ process_isra_node_results (node, clone_num_suffixes);
+
+ delete clone_num_suffixes;
+ func_sums->release ();
+ func_sums = NULL;
+ call_sums->release ();
+ call_sums = NULL;
+
+ if (dump_file)
+ fprintf (dump_file, "\n========== IPA SRA IPA analysis done "
+ "==========\n\n");
+ return 0;
+}
+
+
+/* Pass metadata for the interprocedural IPA-SRA pass. */
+
+const pass_data pass_data_ipa_sra =
+{
+ IPA_PASS, /* type */
+ "sra", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_IPA_SRA, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ ( TODO_dump_symtab | TODO_remove_functions ), /* todo_flags_finish */
+};
+
+/* Pass class for the interprocedural part of IPA-SRA; summary generation
+ and streaming are done by the registered hooks, the IPA analysis itself
+ by the execute method. */
+
+class pass_ipa_sra : public ipa_opt_pass_d
+{
+public:
+ pass_ipa_sra (gcc::context *ctxt)
+ : ipa_opt_pass_d (pass_data_ipa_sra, ctxt,
+ ipa_sra_generate_summary, /* generate_summary */
+ ipa_sra_write_summary, /* write_summary */
+ ipa_sra_read_summary, /* read_summary */
+ NULL , /* write_optimization_summary */
+ NULL, /* read_optimization_summary */
+ NULL, /* stmt_fixup */
+ 0, /* function_transform_todo_flags_start */
+ NULL, /* function_transform */
+ NULL) /* variable_transform */
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ /* TODO: We should remove the optimize check after we ensure we never run
+ IPA passes when not optimizing. */
+ return (flag_ipa_sra && optimize);
+ }
+
+ virtual unsigned int execute (function *) { return ipa_sra_analysis (); }
+
+}; // class pass_ipa_sra
+
+} // anon namespace
+
+/* Create an instance of the IPA-SRA pass for the pass manager. */
+
+ipa_opt_pass_d *
+make_pass_ipa_sra (gcc::context *ctxt)
+{
+ return new pass_ipa_sra (ctxt);
+}
+
+
+#include "gt-ipa-sra.h"
{
return ((node->clone_of || node->former_clone_of)
&& (node->clone.tree_map
- || node->clone.args_to_skip
- || node->clone.combined_args_to_skip));
+ || node->clone.param_adjustments));
}
/* Output optimization summary for EDGE to OB. */
struct cgraph_node *node,
lto_symtab_encoder_t encoder)
{
- unsigned int index;
- bitmap_iterator bi;
struct ipa_replace_map *map;
- struct bitpack_d bp;
int i;
struct cgraph_edge *e;
- if (node->clone.args_to_skip)
- {
- streamer_write_uhwi (ob, bitmap_count_bits (node->clone.args_to_skip));
- EXECUTE_IF_SET_IN_BITMAP (node->clone.args_to_skip, 0, index, bi)
- streamer_write_uhwi (ob, index);
- }
- else
- streamer_write_uhwi (ob, 0);
- if (node->clone.combined_args_to_skip)
+ /* TODO: Should this code be moved to ipa-param-manipulation? */
+ struct bitpack_d bp;
+ bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, (node->clone.param_adjustments != NULL), 1);
+ streamer_write_bitpack (&bp);
+ if (ipa_param_adjustments *adjustments = node->clone.param_adjustments)
{
- streamer_write_uhwi (ob, bitmap_count_bits (node->clone.combined_args_to_skip));
- EXECUTE_IF_SET_IN_BITMAP (node->clone.combined_args_to_skip, 0, index, bi)
- streamer_write_uhwi (ob, index);
+ streamer_write_uhwi (ob, vec_safe_length (adjustments->m_adj_params));
+ ipa_adjusted_param *adj;
+ FOR_EACH_VEC_SAFE_ELT (adjustments->m_adj_params, i, adj)
+ {
+ bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, adj->base_index, IPA_PARAM_MAX_INDEX_BITS);
+ bp_pack_value (&bp, adj->prev_clone_index, IPA_PARAM_MAX_INDEX_BITS);
+ bp_pack_value (&bp, adj->op, 2);
+ bp_pack_value (&bp, adj->param_prefix_index, 2);
+ bp_pack_value (&bp, adj->prev_clone_adjustment, 1);
+ bp_pack_value (&bp, adj->reverse, 1);
+ bp_pack_value (&bp, adj->user_flag, 1);
+ streamer_write_bitpack (&bp);
+ if (adj->op == IPA_PARAM_OP_SPLIT
+ || adj->op == IPA_PARAM_OP_NEW)
+ {
+ stream_write_tree (ob, adj->type, true);
+ if (adj->op == IPA_PARAM_OP_SPLIT)
+ {
+ stream_write_tree (ob, adj->alias_ptr_type, true);
+ streamer_write_uhwi (ob, adj->unit_offset);
+ }
+ }
+ }
+ streamer_write_hwi (ob, adjustments->m_always_copy_start);
+ bp = bitpack_create (ob->main_stream);
+ bp_pack_value (&bp, node->clone.param_adjustments->m_skip_return, 1);
+ streamer_write_bitpack (&bp);
}
- else
- streamer_write_uhwi (ob, 0);
+
streamer_write_uhwi (ob, vec_safe_length (node->clone.tree_map));
FOR_EACH_VEC_SAFE_ELT (node->clone.tree_map, i, map)
{
- /* At the moment we assume all old trees to be PARM_DECLs, because we have no
- mechanism to store function local declarations into summaries. */
- gcc_assert (!map->old_tree);
streamer_write_uhwi (ob, map->parm_num);
gcc_assert (EXPR_LOCATION (map->new_tree) == UNKNOWN_LOCATION);
stream_write_tree (ob, map->new_tree, true);
- bp = bitpack_create (ob->main_stream);
- bp_pack_value (&bp, map->replace_p, 1);
- bp_pack_value (&bp, map->ref_p, 1);
- streamer_write_bitpack (&bp);
}
if (lto_symtab_encoder_in_partition_p (encoder, node))
{
int i;
int count;
- int bit;
- struct bitpack_d bp;
struct cgraph_edge *e;
- count = streamer_read_uhwi (ib_main);
- if (count)
- node->clone.args_to_skip = BITMAP_GGC_ALLOC ();
- for (i = 0; i < count; i++)
- {
- bit = streamer_read_uhwi (ib_main);
- bitmap_set_bit (node->clone.args_to_skip, bit);
- }
- count = streamer_read_uhwi (ib_main);
- if (count)
- node->clone.combined_args_to_skip = BITMAP_GGC_ALLOC ();
- for (i = 0; i < count; i++)
+ /* TODO: Should this code be moved to ipa-param-manipulation? */
+ struct bitpack_d bp;
+ bp = streamer_read_bitpack (ib_main);
+ bool have_adjustments = bp_unpack_value (&bp, 1);
+ if (have_adjustments)
{
- bit = streamer_read_uhwi (ib_main);
- bitmap_set_bit (node->clone.combined_args_to_skip, bit);
+ count = streamer_read_uhwi (ib_main);
+ vec<ipa_adjusted_param, va_gc> *new_params = NULL;
+ for (i = 0; i < count; i++)
+ {
+ ipa_adjusted_param adj;
+ memset (&adj, 0, sizeof (adj));
+ bp = streamer_read_bitpack (ib_main);
+ adj.base_index = bp_unpack_value (&bp, IPA_PARAM_MAX_INDEX_BITS);
+ adj.prev_clone_index
+ = bp_unpack_value (&bp, IPA_PARAM_MAX_INDEX_BITS);
+ adj.op = (enum ipa_parm_op) bp_unpack_value (&bp, 2);
+ adj.param_prefix_index = bp_unpack_value (&bp, 2);
+ adj.prev_clone_adjustment = bp_unpack_value (&bp, 1);
+ adj.reverse = bp_unpack_value (&bp, 1);
+ adj.user_flag = bp_unpack_value (&bp, 1);
+ if (adj.op == IPA_PARAM_OP_SPLIT
+ || adj.op == IPA_PARAM_OP_NEW)
+ {
+ adj.type = stream_read_tree (ib_main, data_in);
+ if (adj.op == IPA_PARAM_OP_SPLIT)
+ {
+ adj.alias_ptr_type = stream_read_tree (ib_main, data_in);
+ adj.unit_offset = streamer_read_uhwi (ib_main);
+ }
+ }
+ vec_safe_push (new_params, adj);
+ }
+ int always_copy_start = streamer_read_hwi (ib_main);
+ bp = streamer_read_bitpack (ib_main);
+ bool skip_return = bp_unpack_value (&bp, 1);
+ node->clone.param_adjustments
+ = (new (ggc_alloc <ipa_param_adjustments> ())
+ ipa_param_adjustments (new_params, always_copy_start, skip_return));
}
+
count = streamer_read_uhwi (ib_main);
for (i = 0; i < count; i++)
{
vec_safe_push (node->clone.tree_map, map);
map->parm_num = streamer_read_uhwi (ib_main);
- map->old_tree = NULL;
map->new_tree = stream_read_tree (ib_main, data_in);
- bp = streamer_read_bitpack (ib_main);
- map->replace_p = bp_unpack_value (&bp, 1);
- map->ref_p = bp_unpack_value (&bp, 1);
}
for (e = node->callees; e; e = e->next_callee)
input_edge_opt_summary (e, ib_main);
"offload_table",
"mode_table",
"hsa",
- "lto"
+ "lto",
+ "ipa-sra"
};
/* Hooks so that the ipa passes can call into the lto front end to get
LTO_section_mode_table,
LTO_section_ipa_hsa,
LTO_section_lto,
+ LTO_section_ipa_sra,
LTO_N_SECTION_TYPES /* Must be last. */
};
if (definition)
{
new_node = node->create_version_clone_with_body (vNULL, NULL,
- NULL, false,
- NULL, NULL,
- name, attributes);
+ NULL, NULL,
+ NULL, name, attributes);
if (new_node == NULL)
return NULL;
new_node->force_output = true;
* sizeof (struct cgraph_simd_clone_arg))));
}
-/* Return vector of parameter types of function FNDECL. This uses
- TYPE_ARG_TYPES if available, otherwise falls back to types of
+/* Fill an empty vector ARGS with parameter types of function FNDECL. This
+ uses TYPE_ARG_TYPES if available, otherwise falls back to types of
DECL_ARGUMENTS types. */
-static vec<tree>
-simd_clone_vector_of_formal_parm_types (tree fndecl)
+static void
+simd_clone_vector_of_formal_parm_types (vec<tree> *args, tree fndecl)
{
if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
- return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
- vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
+ {
+ push_function_arg_types (args, TREE_TYPE (fndecl));
+ return;
+ }
+ push_function_arg_decls (args, fndecl);
unsigned int i;
tree arg;
- FOR_EACH_VEC_ELT (args, i, arg)
- args[i] = TREE_TYPE (args[i]);
- return args;
+ FOR_EACH_VEC_ELT (*args, i, arg)
+ (*args)[i] = TREE_TYPE ((*args)[i]);
}
/* Given a simd function in NODE, extract the simd specific
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
bool *inbranch_specified)
{
- vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
+ auto_vec<tree> args;
+ simd_clone_vector_of_formal_parm_types (&args, node->decl);
tree t;
int n;
*inbranch_specified = false;
{
warning_at (OMP_CLAUSE_LOCATION (t), 0,
"ignoring large linear step");
- args.release ();
return NULL;
}
else if (integer_zerop (step))
{
warning_at (OMP_CLAUSE_LOCATION (t), 0,
"ignoring zero linear step");
- args.release ();
return NULL;
}
else
warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
"ignoring %<#pragma omp declare simd%> on function "
"with %<_Atomic%> qualified return type");
- args.release ();
return NULL;
}
return NULL;
}
- args.release ();
return clone_info;
}
such parameter. */
else
{
- vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
+ auto_vec<tree> map;
+ simd_clone_vector_of_formal_parm_types (&map, fndecl);
for (unsigned int i = 0; i < clone_info->nargs; ++i)
if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
{
type = map[i];
break;
}
- map.release ();
}
/* c) If the characteristic data type determined by a) or b) above
return NULL;
old_node->get_body ();
new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
- false, NULL, NULL,
+ NULL, NULL,
"simdclone");
}
else
NODE is the function whose arguments are to be adjusted.
- Returns an adjustment vector that will be filled describing how the
- argument types will be adjusted. */
+ If NODE does not represent function definition, returns NULL. Otherwise
+ returns an adjustment class that will be filled describing how the argument
+ declarations will be remapped. New arguments which are not to be remapped
+ are marked with USER_FLAG. */
-static ipa_parm_adjustment_vec
+static ipa_param_body_adjustments *
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
- vec<tree> args;
- ipa_parm_adjustment_vec adjustments;
+ auto_vec<tree> args;
if (node->definition)
- args = ipa_get_vector_of_formal_parms (node->decl);
+ push_function_arg_decls (&args, node->decl);
else
- args = simd_clone_vector_of_formal_parm_types (node->decl);
- adjustments.create (args.length ());
- unsigned i, j, veclen;
- struct ipa_parm_adjustment adj;
+ simd_clone_vector_of_formal_parm_types (&args, node->decl);
struct cgraph_simd_clone *sc = node->simdclone;
+ vec<ipa_adjusted_param, va_gc> *new_params = NULL;
+ vec_safe_reserve (new_params, sc->nargs);
+ unsigned i, j, veclen;
for (i = 0; i < sc->nargs; ++i)
{
+ ipa_adjusted_param adj;
memset (&adj, 0, sizeof (adj));
tree parm = args[i];
tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
adj.base_index = i;
- adj.base = parm;
+ adj.prev_clone_index = i;
sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
sc->args[i].orig_type = parm_type;
{
default:
/* No adjustment necessary for scalar arguments. */
- adj.op = IPA_PARM_OP_COPY;
+ adj.op = IPA_PARAM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
= create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
TREE_TYPE (parm_type),
sc->simdlen);
- adj.op = IPA_PARM_OP_COPY;
+ adj.op = IPA_PARAM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
- adj.arg_prefix = "simd";
+ adj.op = IPA_PARAM_OP_NEW;
+ adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
if (POINTER_TYPE_P (parm_type))
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
sc->args[i].vector_type = adj.type;
for (j = veclen; j < sc->simdlen; j += veclen)
{
- adjustments.safe_push (adj);
+ vec_safe_push (new_params, adj);
if (j == veclen)
{
memset (&adj, 0, sizeof (adj));
- adj.op = IPA_PARM_OP_NEW;
- adj.arg_prefix = "simd";
+ adj.op = IPA_PARAM_OP_NEW;
+ adj.user_flag = 1;
+ adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
adj.base_index = i;
+ adj.prev_clone_index = i;
adj.type = sc->args[i].vector_type;
}
}
? IDENTIFIER_POINTER (DECL_NAME (parm))
: NULL, parm_type, sc->simdlen);
}
- adjustments.safe_push (adj);
+ vec_safe_push (new_params, adj);
}
if (sc->inbranch)
{
tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
-
+ ipa_adjusted_param adj;
memset (&adj, 0, sizeof (adj));
- adj.op = IPA_PARM_OP_NEW;
- adj.arg_prefix = "mask";
+ adj.op = IPA_PARAM_OP_NEW;
+ adj.user_flag = 1;
+ adj.param_prefix_index = IPA_PARAM_PREFIX_MASK;
adj.base_index = i;
+ adj.prev_clone_index = i;
if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
veclen = sc->vecsize_int;
else
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
adj.type = build_vector_type (base_type, veclen);
- adjustments.safe_push (adj);
+ vec_safe_push (new_params, adj);
for (j = veclen; j < sc->simdlen; j += veclen)
- adjustments.safe_push (adj);
+ vec_safe_push (new_params, adj);
/* We have previously allocated one extra entry for the mask. Use
it and fill it. */
}
if (node->definition)
- ipa_modify_formal_parameters (node->decl, adjustments);
+ {
+ ipa_param_body_adjustments *adjustments
+ = new ipa_param_body_adjustments (new_params, node->decl);
+
+ adjustments->modify_formal_parameters ();
+ return adjustments;
+ }
else
{
tree new_arg_types = NULL_TREE, new_reversed;
last_parm_void = true;
gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
- j = adjustments.length ();
+ j = vec_safe_length (new_params);
for (i = 0; i < j; i++)
{
- struct ipa_parm_adjustment *adj = &adjustments[i];
+ struct ipa_adjusted_param *adj = &(*new_params)[i];
tree ptype;
- if (adj->op == IPA_PARM_OP_COPY)
+ if (adj->op == IPA_PARAM_OP_COPY)
ptype = args[adj->base_index];
else
ptype = adj->type;
new_reversed = void_list_node;
}
TYPE_ARG_TYPES (TREE_TYPE (node->decl)) = new_reversed;
- adjustments.release ();
+ return NULL;
}
- args.release ();
- return adjustments;
}
/* Initialize and copy the function arguments in NODE to their
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
- ipa_parm_adjustment_vec adjustments)
+ ipa_param_body_adjustments *adjustments)
{
gimple_seq seq = NULL;
unsigned i = 0, j = 0, k;
arg;
arg = DECL_CHAIN (arg), i++, j++)
{
- if (adjustments[j].op == IPA_PARM_OP_COPY
+ if ((*adjustments->m_adj_params)[j].op == IPA_PARAM_OP_COPY
|| POINTER_TYPE_P (TREE_TYPE (arg)))
continue;
/* Callback info for ipa_simd_modify_stmt_ops below. */
struct modify_stmt_info {
- ipa_parm_adjustment_vec adjustments;
+ ipa_param_body_adjustments *adjustments;
gimple *stmt;
/* True if the parent statement was modified by
ipa_simd_modify_stmt_ops. */
tree *orig_tp = tp;
if (TREE_CODE (*tp) == ADDR_EXPR)
tp = &TREE_OPERAND (*tp, 0);
- struct ipa_parm_adjustment *cand = NULL;
+
+ if (TREE_CODE (*tp) == BIT_FIELD_REF
+ || TREE_CODE (*tp) == IMAGPART_EXPR
+ || TREE_CODE (*tp) == REALPART_EXPR)
+ tp = &TREE_OPERAND (*tp, 0);
+
+ tree repl = NULL_TREE;
+ ipa_param_body_replacement *pbr = NULL;
+
if (TREE_CODE (*tp) == PARM_DECL)
- cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
+ {
+ pbr = info->adjustments->get_expr_replacement (*tp, true);
+ if (pbr)
+ repl = pbr->repl;
+ }
else if (TYPE_P (*tp))
*walk_subtrees = 0;
- tree repl = NULL_TREE;
- if (cand)
- repl = unshare_expr (cand->new_decl);
+ if (repl)
+ repl = unshare_expr (repl);
else
{
if (tp != orig_tp)
if (tp != orig_tp)
{
if (gimple_code (info->stmt) == GIMPLE_PHI
- && cand
+ && pbr
&& TREE_CODE (*orig_tp) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL
- && cand->alias_ptr_type)
+ && pbr->dummy)
{
- gcc_assert (TREE_CODE (cand->alias_ptr_type) == SSA_NAME);
- *orig_tp = cand->alias_ptr_type;
+ gcc_assert (TREE_CODE (pbr->dummy) == SSA_NAME);
+ *orig_tp = pbr->dummy;
info->modified = true;
return NULL_TREE;
}
{
gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
/* Cache SSA_NAME for next time. */
- if (cand
+ if (pbr
&& TREE_CODE (*orig_tp) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL)
- cand->alias_ptr_type = repl;
+ {
+ gcc_assert (!pbr->dummy);
+ pbr->dummy = repl;
+ }
}
else
gsi = gsi_for_stmt (info->stmt);
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
- ipa_parm_adjustment_vec adjustments,
+ ipa_param_body_adjustments *adjustments,
tree retval_array, tree iter)
{
basic_block bb;
- unsigned int i, j, l;
+ unsigned int i, j;
- /* Re-use the adjustments array, but this time use it to replace
- every function argument use to an offset into the corresponding
- simd_array. */
+
+ /* Register replacements for every function argument use to an offset into
+ the corresponding simd_array. */
for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
{
- if (!node->simdclone->args[i].vector_arg)
+ if (!node->simdclone->args[i].vector_arg
+ || (*adjustments->m_adj_params)[j].user_flag)
continue;
tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
- adjustments[j].new_decl
- = build4 (ARRAY_REF,
- basetype,
- node->simdclone->args[i].simd_array,
- iter,
- NULL_TREE, NULL_TREE);
- if (adjustments[j].op == IPA_PARM_OP_NONE
- && simd_clone_subparts (vectype) < node->simdclone->simdlen)
+ tree r = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array,
+ iter, NULL_TREE, NULL_TREE);
+ adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r);
+
+ if (simd_clone_subparts (vectype) < node->simdclone->simdlen)
j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
}
- l = adjustments.length ();
tree name;
-
FOR_EACH_SSA_NAME (i, name, cfun)
{
+ tree base_var;
if (SSA_NAME_VAR (name)
- && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
+ && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
+ && (base_var
+ = adjustments->get_replacement_ssa_base (SSA_NAME_VAR (name))))
{
- for (j = 0; j < l; j++)
- if (SSA_NAME_VAR (name) == adjustments[j].base
- && adjustments[j].new_decl)
- {
- tree base_var;
- if (adjustments[j].new_ssa_base == NULL_TREE)
- {
- base_var
- = copy_var_decl (adjustments[j].base,
- DECL_NAME (adjustments[j].base),
- TREE_TYPE (adjustments[j].base));
- adjustments[j].new_ssa_base = base_var;
- }
- else
- base_var = adjustments[j].new_ssa_base;
- if (SSA_NAME_IS_DEFAULT_DEF (name))
- {
- bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- gimple_stmt_iterator gsi = gsi_after_labels (bb);
- tree new_decl = unshare_expr (adjustments[j].new_decl);
- set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
- SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
- SSA_NAME_IS_DEFAULT_DEF (name) = 0;
- gimple *stmt = gimple_build_assign (name, new_decl);
- gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
- }
- else
- SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
- }
+ if (SSA_NAME_IS_DEFAULT_DEF (name))
+ {
+ tree old_decl = SSA_NAME_VAR (name);
+ bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ gimple_stmt_iterator gsi = gsi_after_labels (bb);
+ tree repl = adjustments->lookup_replacement (old_decl, 0);
+ gcc_checking_assert (repl);
+ repl = unshare_expr (repl);
+ set_ssa_default_def (cfun, old_decl, NULL_TREE);
+ SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
+ SSA_NAME_IS_DEFAULT_DEF (name) = 0;
+ gimple *stmt = gimple_build_assign (name, repl);
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
+ }
+ else
+ SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
}
}
targetm.simd_clone.adjust (node);
tree retval = simd_clone_adjust_return_type (node);
- ipa_parm_adjustment_vec adjustments
+ ipa_param_body_adjustments *adjustments
= simd_clone_adjust_argument_types (node);
+ gcc_assert (adjustments);
push_gimplify_context ();
tree iter1 = make_ssa_name (iter);
tree iter2 = NULL_TREE;
ipa_simd_modify_function_body (node, adjustments, retval, iter1);
- adjustments.release ();
+ delete adjustments;
/* Initialize the iteration variable. */
basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
"that ipa-sra replaces a pointer to an aggregate with.",
2, 0, 0)
+DEFPARAM (PARAM_IPA_SRA_MAX_REPLACEMENTS,
+ "ipa-sra-max-replacements",
+ "Maximum pieces that IPA-SRA tracks per formal parameter, as "
+ "a consequence, also the maximum number of replacements of a formal "
+ "parameter.",
+ 8, 0, 16)
+
DEFPARAM (PARAM_TM_MAX_AGGREGATE_SIZE,
"tm-max-aggregate-size",
"Size in bytes after which thread-local aggregates should be "
NEXT_PASS (pass_dse);
NEXT_PASS (pass_cd_dce);
NEXT_PASS (pass_phiopt, true /* early_p */);
- NEXT_PASS (pass_early_ipa_sra);
NEXT_PASS (pass_tail_recursion);
NEXT_PASS (pass_convert_switch);
NEXT_PASS (pass_cleanup_eh);
NEXT_PASS (pass_ipa_icf);
NEXT_PASS (pass_ipa_devirt);
NEXT_PASS (pass_ipa_cp);
+ NEXT_PASS (pass_ipa_sra);
NEXT_PASS (pass_ipa_cdtor_merge);
NEXT_PASS (pass_ipa_hsa);
NEXT_PASS (pass_ipa_fn_summary);
+2019-09-20 Martin Jambor <mjambor@suse.cz>
+
+ * g++.dg/ipa/pr81248.C: Adjust dg-options and dump-scan.
+ * gcc.dg/ipa/ipa-sra-1.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-10.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-11.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-3.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-4.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-5.c: Likewise.
+ * gcc.dg/ipa/ipacost-2.c: Disable ipa-sra.
+ * gcc.dg/ipa/ipcp-agg-9.c: Likewise.
+ * gcc.dg/ipa/pr78121.c: Adjust scan pattern.
+ * gcc.dg/ipa/vrp1.c: Likewise.
+ * gcc.dg/ipa/vrp2.c: Likewise.
+ * gcc.dg/ipa/vrp3.c: Likewise.
+ * gcc.dg/ipa/vrp7.c: Likewise.
+ * gcc.dg/ipa/vrp8.c: Likewise.
+ * gcc.dg/noreorder.c: Use noipa attribute instead of noinline.
+ * gcc.dg/ipa/20040703-wpa.c: New test.
+ * gcc.dg/ipa/ipa-sra-12.c: New test.
+ * gcc.dg/ipa/ipa-sra-13.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-14.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-15.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-16.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-17.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-18.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-19.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-20.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-21.c: Likewise.
+ * gcc.dg/ipa/ipa-sra-22.c: Likewise.
+ * gcc.dg/sso/ipa-sra-1.c: Likewise.
+ * g++.dg/ipa/ipa-sra-2.C: Likewise.
+ * g++.dg/ipa/ipa-sra-3.C: Likewise.
+ * gcc.dg/tree-ssa/ipa-cp-1.c: Make return value used.
+ * g++.dg/ipa/devirt-19.C: Add missing return, add -fipa-cp-clone
+ option.
+ * g++.dg/lto/devirt-19_0.C: Add -fipa-cp-clone option.
+ * gcc.dg/ipa/ipa-sra-2.c: Removed.
+ * gcc.dg/ipa/ipa-sra-6.c: Likewise.
+
2019-09-19 Martin Sebor <msebor@redhat.com>
PR middle-end/91631
Previously we were failing by considering CLOBBER statement to be
a type change. */
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-ipa-cp" } */
+/* { dg-options "-O2 -fdump-ipa-cp -fipa-cp-clone" } */
/* { dg-additional-options "-Wno-return-type" } */
struct A {
C<int, int> b;
template <typename T, typename M> const M &C<T, M>::m_fn2(const T &) {
+
A a = _map.m_fn2();
a == _map.m_fn1();
m_fn1();
+ static M m;
+ return m;
}
void fn1() { b.m_fn2(0); }
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fipa-sra" } */
+
+void fn1(int *, int *, double *, int *, double *);
+int a, c, d, e, f, g, k;
+double *b;
+double h, i;
+/* Reduced regression input for IPA-SRA: fn2 exercises analysis of pointer
+   parameters (p1 is read, p2 written through, p3 offset and indexed).
+   Several locals (j, s, u, q, r) are used uninitialized, which is harmless
+   in a compile-only test.  */
+void fn2(int *p1, int *p2, double *p3) {
+ int l = 0, j, q, r;
+ double m, n, o, p, s, t, u;
+ --p3;
+ for (; a;) {
+ if (c) {
+ ++*p2;
+ goto L170;
+ }
+ m = n = b[c];
+ p = t = m;
+ for (; j; ++j) {
+ u = 1.;
+ if (k) {
+ s = o;
+ u = -1.;
+ }
+ }
+ i = u * p;
+ L60:
+ p3[1] = s;
+ for (; d;)
+ goto L60;
+ fn1(&f, &g, &h, &l, &p3[1]);
+ o = p3[1];
+ L100:
+ o *= i;
+ if (e)
+ goto L100;
+ L170:;
+ }
+ if (*p1)
+ for (;;) {
+ if (r)
+ q = *p2;
+ d = q - j;
+ r = j;
+ }
+}
--- /dev/null
+/* { dg-do compile { target c++11 } } */
+/* { dg-options "-O2 -fipa-sra" } */
+
+/* Reduced C++ testcase: a::b copies field d into a local before passing
+   *this by value to the externally-visible e(), then keeps using the
+   copy in a loop.  */
+class a {
+ void b();
+ char16_t c;
+ char16_t d;
+};
+void e(a);
+void g();
+void a::b() {
+ char16_t f = d;
+ e(*this);
+ for (;;) {
+ g();
+ if (f)
+ break;
+ }
+}
--- /dev/null
+/* { dg-options "-O2 -fipa-sra -fno-inline -fno-ipa-cp" } */
+
+
+/* d calls the local static b but discards its result; b only forwards the
+   pointer obtained from the malloc-attributed a.  */
+char *a() __attribute__((__malloc__));
+static char *b() {
+ char *c = a();
+ return c;
+}
+int d() { b(); return 4; }
// { dg-do compile { target c++17 } }
-// { dg-options "-O2 -fdump-tree-eipa_sra" }
+// { dg-options "-O2 -fdump-ipa-sra" }
#include <type_traits>
f(n2);
}
-// { dg-final { scan-tree-dump-times "Adjusting call" 2 "eipa_sra" } }
+// { dg-final { scan-ipa-dump "Will split parameter 0" "sra" } }
/* { dg-lto-do link } */
-/* { dg-lto-options { "-O2 -fdump-ipa-cp -Wno-return-type -flto -r -nostdlib" } } */
+/* { dg-lto-options { "-O2 -fdump-ipa-cp -fipa-cp-clone -Wno-return-type -flto -r -nostdlib" } } */
/* { dg-extra-ld-options "-flinker-output=nolto-rel -flto=auto" } */
#include "../ipa/devirt-19.C"
/* { dg-final { scan-wpa-ipa-dump-times "Discovered a virtual call to a known target" 1 "cp" } } */
--- /dev/null
+/* With -fwhole-program this is an excellent testcase for inlining IPA-SRAed
+ functions into each other. */
+/* { dg-do run } */
+/* { dg-options "-O2 -w -fno-ipa-cp -fwhole-program" } */
+/* { dg-require-effective-target int32plus } */
+
+#define PART_PRECISION (sizeof (cpp_num_part) * 8)
+
+typedef unsigned int cpp_num_part;
+typedef struct cpp_num cpp_num;
+struct cpp_num
+{
+ cpp_num_part high;
+ cpp_num_part low;
+ int unsignedp; /* True if value should be treated as unsigned. */
+ int overflow; /* True if the most recent calculation overflowed. */
+};
+
+/* Return nonzero iff NUM, taken as a PRECISION-bit value, has its sign
+   bit (bit PRECISION - 1) clear, i.e. would be non-negative if signed.  */
+static int
+num_positive (cpp_num num, unsigned int precision)
+{
+ if (precision > PART_PRECISION)
+ {
+ precision -= PART_PRECISION;
+ return (num.high & (cpp_num_part) 1 << (precision - 1)) == 0;
+ }
+
+ return (num.low & (cpp_num_part) 1 << (precision - 1)) == 0;
+}
+
+/* Truncate NUM so that only its low PRECISION bits are kept; all bits
+   above PRECISION are cleared.  */
+static cpp_num
+num_trim (cpp_num num, unsigned int precision)
+{
+ if (precision > PART_PRECISION)
+ {
+ precision -= PART_PRECISION;
+ if (precision < PART_PRECISION)
+ num.high &= ((cpp_num_part) 1 << precision) - 1;
+ }
+ else
+ {
+ if (precision < PART_PRECISION)
+ num.low &= ((cpp_num_part) 1 << precision) - 1;
+ num.high = 0;
+ }
+
+ return num;
+}
+
+/* Shift NUM, of width PRECISION, right by N bits. */
+static cpp_num
+num_rshift (cpp_num num, unsigned int precision, unsigned int n)
+{
+ cpp_num_part sign_mask;
+ int x = num_positive (num, precision);
+
+ /* sign_mask is all-ones only for negative signed values; it supplies
+    the bits shifted in from the top (arithmetic shift).  */
+ if (num.unsignedp || x)
+ sign_mask = 0;
+ else
+ sign_mask = ~(cpp_num_part) 0;
+
+ if (n >= precision)
+ num.high = num.low = sign_mask;
+ else
+ {
+ /* Sign-extend. */
+ if (precision < PART_PRECISION)
+ num.high = sign_mask, num.low |= sign_mask << precision;
+ else if (precision < 2 * PART_PRECISION)
+ num.high |= sign_mask << (precision - PART_PRECISION);
+
+ /* Shift by whole parts first, then by the remaining bit count.  */
+ if (n >= PART_PRECISION)
+ {
+ n -= PART_PRECISION;
+ num.low = num.high;
+ num.high = sign_mask;
+ }
+
+ if (n)
+ {
+ num.low = (num.low >> n) | (num.high << (PART_PRECISION - n));
+ num.high = (num.high >> n) | (sign_mask << (PART_PRECISION - n));
+ }
+ }
+
+ num = num_trim (num, precision);
+ num.overflow = 0;
+ return num;
+}
+ #define num_zerop(num) ((num.low | num.high) == 0)
+#define num_eq(num1, num2) (num1.low == num2.low && num1.high == num2.high)
+
+/* Shift NUM, of width PRECISION, left by N bits.  For signed values,
+   overflow is detected by checking whether an equal right shift recovers
+   the original value.  */
+cpp_num
+num_lshift (cpp_num num, unsigned int precision, unsigned int n)
+{
+ if (n >= precision)
+ {
+ num.overflow = !num.unsignedp && !num_zerop (num);
+ num.high = num.low = 0;
+ }
+ else
+ {
+ cpp_num orig;
+ unsigned int m = n;
+
+ orig = num;
+ /* Shift by whole parts first, then by the remaining bit count.  */
+ if (m >= PART_PRECISION)
+ {
+ m -= PART_PRECISION;
+ num.high = num.low;
+ num.low = 0;
+ }
+ if (m)
+ {
+ num.high = (num.high << m) | (num.low >> (PART_PRECISION - m));
+ num.low <<= m;
+ }
+ num = num_trim (num, precision);
+
+ if (num.unsignedp)
+ num.overflow = 0;
+ else
+ {
+ cpp_num maybe_orig = num_rshift (num, precision, n);
+ num.overflow = !num_eq (orig, maybe_orig);
+ }
+ }
+
+ return num;
+}
+
+unsigned int precision = 64;
+unsigned int n = 16;
+
+cpp_num num = { 0, 3, 0, 0 };
+
+/* With num = {0, 3, ...} and n = 16, num_lshift must compute 3 << 16 =
+   0x30000 with no carry into high and no overflow, even after the
+   whole-program IPA transformations rework the helpers above.  */
+int main()
+{
+ cpp_num res = num_lshift (num, 64, n);
+
+ if (res.low != 0x30000)
+ abort ();
+
+ if (res.high != 0)
+ abort ();
+
+ if (res.overflow != 0)
+ abort ();
+
+ exit (0);
+}
/* { dg-do run } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra-details" } */
struct bovid
{
return 0;
}
-/* { dg-final { scan-tree-dump-times "About to replace expr" 2 "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump "Will split parameter" "sra" } } */
/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-options "-O2 -fno-ipa-cp -fipa-sra -fdump-ipa-sra" } */
extern void consume (int);
extern int glob, glob1, glob2;
return 0;
}
-/* { dg-final { scan-tree-dump-times "replacing an SSA name of a removed param" 4 "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump "Will remove parameter 0" "sra" } } */
-/* { dg-do run } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra-details" } */
struct bovid
{
return 0;
}
-/* { dg-final { scan-tree-dump-not "About to replace expr" "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump-not "Will split parameter" "sra" } } */
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra" } */
+
+/* Check of a simple and transitive structure split. */
+
+struct S
+{
+ float red;
+ void *blue;
+ int green;
+};
+
+
+/* noipa keeps check opaque to IPA, so it observes the values actually
+   passed at run time after the callers' parameters were transformed.  */
+void __attribute__((noipa))
+check (float r, int g, int g2)
+{
+ if (r < 7.39 || r > 7.41
+ || g != 6 || g2 != 6)
+ __builtin_abort ();
+}
+
+/* Only the red and green fields of s are read.  */
+static void
+__attribute__((noinline))
+foo (struct S s)
+{
+ check (s.red, s.green, s.green);
+}
+
+/* bar merely forwards s to foo, making the transformation transitive.  */
+static void
+__attribute__((noinline))
+bar (struct S s)
+{
+ foo (s);
+}
+
+int
+main (int argc, char *argv[])
+{
+ struct S s;
+
+ s.red = 7.4;
+ s.green = 6;
+ s.blue = &s;
+
+ bar (s);
+ return 0;
+}
+
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 2 "sra" } } */
+/* { dg-final { scan-ipa-dump-times "component at byte offset" 4 "sra" } } */
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra" } */
+
+/* Check of a by-reference structure split. */
+
+struct S
+{
+ float red;
+ void *blue;
+ int green;
+};
+
+/* noipa keeps check opaque to IPA, so it observes the values actually
+   passed at run time after the callers' parameters were transformed.  */
+void __attribute__((noipa))
+check (float r, int g, int g2)
+{
+ if (r < 7.39 || r > 7.41
+ || g != 6 || g2 != 6)
+ __builtin_abort ();
+}
+
+/* Only red and green are read through the pointer parameter.  */
+static void
+__attribute__((noinline))
+foo (struct S *s)
+{
+ check (s->red, s->green, s->green);
+}
+
+/* bar merely forwards the pointer, making the transformation transitive.  */
+static void
+__attribute__((noinline))
+bar (struct S *s)
+{
+ foo (s);
+}
+
+int
+main (int argc, char *argv[])
+{
+ struct S s;
+
+ s.red = 7.4;
+ s.green = 6;
+ s.blue = &s;
+
+ bar (&s);
+ return 0;
+}
+
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 2 "sra" } } */
+/* { dg-final { scan-ipa-dump-times "component at byte offset" 4 "sra" } } */
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra" } */
+
+/* Check of a transitive recursive structure split. */
+
+struct S
+{
+ float red;
+ void *blue;
+ int green;
+};
+
+
+static int done = 0;
+
+/* noipa keeps check opaque to IPA, so it observes the values actually
+   passed at run time after the callers' parameters were transformed.  */
+void __attribute__((noipa))
+check (float r, int g, int g2)
+{
+ if (r < 7.39 || r > 7.41
+ || g != 6 || g2 != 6)
+ __builtin_abort ();
+}
+
+static void __attribute__((noinline)) bar (struct S s);
+
+/* foo re-enters the foo/bar cycle exactly once (guarded by done) and only
+   reads the red and green fields of s.  */
+static void
+__attribute__((noinline))
+foo (struct S s)
+{
+ if (!done)
+ {
+ done = 1;
+ bar (s);
+ }
+ check (s.red, s.green, s.green);
+}
+
+static void
+__attribute__((noinline))
+bar (struct S s)
+{
+ foo (s);
+}
+
+int
+main (int argc, char *argv[])
+{
+ struct S s;
+
+ s.red = 7.4;
+ s.green = 6;
+ s.blue = &s;
+
+ bar (s);
+ return 0;
+}
+
+
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 2 "sra" } } */
+/* { dg-final { scan-ipa-dump-times "component at byte offset" 4 "sra" } } */
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra" } */
+
+/* Check of a recursive by-reference structure split. The recursive functions
+ have to be pure right from the start, otherwise the current AA would detect
+ possible modification of data. */
+
+struct S
+{
+ float red;
+ void *blue;
+ int green;
+};
+
+/* noipa keeps check opaque to IPA, so it observes the values actually
+   passed at run time after the callers' parameters were transformed.  */
+void __attribute__((noipa))
+check (float r, int g, int g2)
+{
+ if (r < 7.39 || r > 7.41
+ || g != 6 || g2 != 6)
+ __builtin_abort ();
+ return;
+}
+
+static int __attribute__((noinline, pure)) bar (struct S *s, int rec);
+
+/* foo re-enters the foo/bar cycle at most once (rec flag) and only reads
+   red and green through the pointer; both functions are declared pure per
+   the file's header comment above.  */
+static int
+__attribute__((noinline, pure))
+foo (struct S *s , int rec)
+{
+ int t = 0;
+ if (rec)
+ t = bar (s, 0);
+ check (s->red, s->green, s->green);
+ return t;
+}
+
+static int
+__attribute__((noinline, pure))
+bar (struct S *s, int rec)
+{
+ int t = foo (s, rec);
+ return t + t;
+}
+
+volatile int g;
+
+int
+main (int argc, char *argv[])
+{
+ struct S s;
+
+ s.red = 7.4;
+ s.green = 6;
+ s.blue = &s;
+
+ g = bar (&s, 1);
+ return 0;
+}
+
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 2 "sra" } } */
+/* { dg-final { scan-ipa-dump-times "component at byte offset" 4 "sra" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra -fdump-tree-optimized" } */
+
+/* Testing removal of unused parameters in recursive calls. */
+
+extern int work_1 (int);
+extern int work_2 (int);
+
+static int __attribute__((noinline))
+foo (int l, int w1, int w2, int useless, int useless2);
+
+
+static int __attribute__((noinline))
+bar_1 (int l, int w1, int w2, int useless, int useless2)
+{
+ return work_1 (w1) + foo (l, w1, w2, useless2, useless);
+}
+
+static int __attribute__((noinline))
+baz_1 (int useless, int useless2, int l, int w1, int w2)
+{
+ return bar_1 (l, w1, w2, useless, useless2);
+}
+
+static int __attribute__((noinline))
+bax_1 (int l, int w1, int w2, int useless, int useless2)
+{
+ return baz_1 (useless, useless2, l, w1, w2);
+}
+
+
+
+static int __attribute__((noinline))
+bar_2 (int l, int w1, int w2, int useless, int useless2)
+{
+ return foo (l, w1, w2, useless2 + 5, useless);
+}
+
+static int __attribute__((noinline))
+baz_2 (int useless, int useless2, int l, int w1, int w2)
+{
+ return bar_2 (l, w1, w2, useless, useless2);
+}
+
+
+static int __attribute__((noinline))
+bax_2 (int l, int w1, int w2, int useless, int useless2)
+{
+ return work_2 (w2) + baz_2 (useless, useless2, l, w1, w2);
+}
+
+
+static int __attribute__((noinline))
+ foo (int l, int w1, int w2, int useless, int useless2)
+{
+ int r = 0;
+ if (!l)
+ return r;
+ if (l % 2)
+ r = bax_1 (l - 1, w1, w2, useless, useless2);
+ else
+ r = bax_2 (l - 1, w1, w2, useless, useless2);
+
+ return r;
+}
+
+int
+entry (int l, int w1, int w2, int noneed, int noneed2)
+{
+ return foo (l, w2, w2, noneed2, noneed2 + 4);
+}
+
+/* { dg-final { scan-ipa-dump-times "Will remove parameter" 14 "sra" } } */
+/* { dg-final { scan-tree-dump-not "useless" "optimized"} } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-ipa-sra -fdump-tree-optimized" } */
+
+#define DOIT
+#define DONT
+
+
+extern int extern_leaf (int);
+
+/* ----- 1 ----- */
+#ifdef DOIT
+static int __attribute__((noinline))
+whee_1 (int i, int j)
+{
+ return extern_leaf (i * j) + 1;
+}
+
+static int foo_1 (int i, int j);
+
+static int __attribute__((noinline))
+baz_1 (int i, int j)
+{
+ int a = 5;
+ if (j)
+ a = foo_1 (i, j - 1);
+ return whee_1 (i, j) + a + 1;
+}
+
+static int __attribute__((noinline))
+bar_1 (int i, int j)
+{
+ return baz_1 (i, j) + 1;
+}
+
+static int __attribute__((noinline))
+foo_1 (int i, int j)
+{
+ return bar_1 (i, j) + 1;
+}
+
+static int __attribute__((noinline))
+inter_1 (int i, int j)
+{
+ return foo_1 (i, j) + 1;
+}
+#endif
+
+/* ----- 2 ----- */
+#ifdef DONT
+static int __attribute__((noinline))
+whee_2 (int i, int j)
+{
+ return extern_leaf (i * j) + 2;
+}
+
+static int foo_2 (int i, int j);
+
+static int __attribute__((noinline))
+baz_2 (int i, int j)
+{
+ int a = 6;
+ if (j)
+ a = foo_2 (i, j - 1);
+ return whee_2 (i, j) + a + 2;
+}
+
+static int __attribute__((noinline))
+bar_2 (int i, int j)
+{
+ return baz_2 (i, j) + 2;
+}
+
+static int __attribute__((noinline))
+foo_2 (int i, int j)
+{
+ return bar_2 (i, j) + 2;
+}
+#endif
+
+/* ----- entries ----- */
+#ifdef DOIT
+int
+entry_1 (int i, int j)
+{
+ inter_1 (i, j);
+ return i + j + 1;
+}
+#endif
+
+#ifdef DONT
+int
+entry_2 (int i, int j)
+{
+#ifdef DOIT
+ inter_1 (i, j);
+#endif
+ return i + j + bar_2 (i, j);
+}
+#endif
+
+/* { dg-final { scan-ipa-dump-times "Will remove return value" 5 "sra" } } */
+/* { dg-final { scan-tree-dump-times "return;" 5 "optimized"} } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-ipa-sra" } */
+
+struct S
+{
+ long a, b;
+};
+
+extern void leaf_a (int );
+extern void leaf_b (int, int);
+extern void leaf_c (int, int);
+
+extern void leaf_sa (struct S);
+
+static void baz (int i, int j, int k, int l, struct S a, struct S b);
+
+extern int gi;
+
+static void __attribute__((noinline))
+foo (int i, int j, int k, int l, struct S a, struct S b)
+{
+ gi += l;
+ baz (i, j, k, l, a, b);
+}
+
+static void __attribute__((noinline))
+bar (int i, int j, int k, int l, struct S a, struct S b)
+{
+ foo (i, j, k, l, a, b);
+ leaf_sa (b);
+}
+
+
+static void __attribute__((noinline))
+baz (int i, int j, int k, int l, struct S a, struct S b)
+{
+ if (--k)
+ bar (i, j, k, l, a, b);
+ leaf_b (i, k);
+}
+
+void
+entry (int i, int j, int k, int l, struct S a, struct S b)
+{
+ foo (i, j, k, l, a, b);
+}
+
+/* { dg-final { scan-ipa-dump-times "Will remove parameter 1" 3 "sra" } } */
+/* { dg-final { scan-ipa-dump-times "Will remove parameter 4" 3 "sra" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef int __attribute__((__vector_size__(16))) vectype;
+
+vectype dk();
+vectype k();
+
+int b;
+vectype *j;
+inline int c(vectype *d) {
+ vectype e;
+ vectype f;
+ vectype g = *d;
+ vectype h = g;
+ vectype i = h;
+ f = i == dk();
+ e = f == b;
+ k(e);
+}
+
+static void m(vectype *d) {
+ int l = c(d);
+ if (l)
+ c(j);
+}
+
+void o(void) {
+ vectype n;
+ m(&n);
+}
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
-
-struct bovid
-{
- float red;
- int green;
- void *blue;
-};
-
-static int
-__attribute__((noinline))
-ox (struct bovid *cow)
-{
- cow->red = cow->red + cow->green + cow->green;
- return 0;
-}
-
-int something;
-
-static int
-__attribute__((noinline))
-ox_improved (struct bovid *calf)
-{
- if (something > 0)
- calf->red = calf->red + calf->green;
- else
- calf->red = calf->green + 87;
- something = 77;
- return 0;
-}
-
-
-int main (int argc, char *argv[])
-{
- struct bovid cow;
-
- cow.red = 7.4;
- cow.green = 6;
- cow.blue = &cow;
-
- ox (&cow);
-
- ox_improved (&cow);
- return 0;
-}
-
-/* { dg-final { scan-tree-dump "About to replace expr cow_.*D.->red with \\*ISRA" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump "About to replace expr cow_.*D.->green with ISRA" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump "About to replace expr calf_.*D.->red with \\*ISRA" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump "About to replace expr calf_.*D.->green with ISRA" "eipa_sra" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O1 -fipa-sra" } */
+
+typedef struct {
+ int a;
+} b;
+typedef struct {
+ double c;
+ double a;
+} d;
+typedef struct {
+ d e;
+ d f;
+} g;
+g h;
+b i, m;
+int j, k, l, n, o;
+static b q(d s) {
+ int r = s.c ?: 0;
+ if (r)
+ if (j)
+ l = j - 2;
+ o = k;
+ n = l;
+ i = m;
+ return m;
+}
+static void t(g s) {
+ {
+ d p = s.e;
+ int r = p.c ?: 0;
+ if (r) {
+ l = j - 2;
+ }
+ }
+ b f = q(s.f);
+}
+void main() { t(h); }
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef int a;
+typedef int b;
+int c, e;
+void i();
+void n(d, ab, f, ae, af, action, ag, ah, ai, g, h, aj, ak, al, j, k, am, an, ao,
+ l, m) int action,
+ ag;
+int f, ae, af;
+int ah, ai;
+int j, k;
+int l, m;
+a aj, am;
+int ak, al, an, ao, g, h;
+char d, ab;
+{
+ if (c)
+ i(e);
+}
+void o(d, ab, action, at, ag, g, h, aj, ak, al, au, av, am, an, ao, aw, ax, ay,
+ az, ba, bb, ai) int action,
+ ag;
+int at, ai;
+int au, av, aw, ax;
+b ay, ba;
+int az, bb;
+int g, h;
+int ak, al, an, ao;
+a aj, am;
+char d, ab;
+{ n(); }
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fipa-sra" } */
+
+struct W
+{
+ int a, b;
+};
+
+union U
+{
+ struct W w;
+ long l;
+};
+
+struct Z
+{
+ int k;
+ union U u;
+};
+
+struct S
+{
+ int i, j;
+ struct Z z;
+ char buf[64];
+};
+
+struct W gw;
+
+
+/* foo reads the union through its long member...  */
+static long
+__attribute__((noinline))
+foo (struct Z z)
+{
+ return z.u.l;
+}
+
+/* ...while bar reads the same bytes through the struct W member on one
+   path and forwards the whole of z on the other, giving IPA-SRA
+   overlapping accesses of different types to the same aggregate.  */
+static long
+__attribute__((noinline))
+bar (struct S s)
+{
+ if (s.i > 100)
+ return s.z.u.w.a;
+ else
+ return foo (s.z);
+}
+
+volatile long g;
+
+long
+entry (struct S *p)
+{
+ struct S s = *p;
+
+ return bar (s) | 2;
+}
/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-options "-O2 -fno-ipa-cp -fipa-sra -fdump-ipa-sra" } */
struct bovid
{
return;
}
-/* { dg-final { scan-tree-dump "base: z, remove_param" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump "base: calf, remove_param" "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump "Will split parameter 0" "sra" } } */
+/* { dg-final { scan-ipa-dump "Will remove parameter 1" "sra" } } */
+/* { dg-final { scan-ipa-dump "Will remove parameter 2" "sra" } } */
/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-options "-O2 -fipa-sra -fno-ipa-pure-const -fdump-ipa-sra" } */
static int
__attribute__((noinline))
return;
}
-/* { dg-final { scan-tree-dump "About to replace expr \\*i_.*D. with ISRA" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump "About to replace expr \\*l_.*D. with ISRA" "eipa_sra" } } */
-/* { dg-final { scan-tree-dump-times "About to replace expr \*j_.*D. with ISRA" 0 "eipa_sra" } } */
-/* { dg-final { scan-tree-dump-times "About to replace expr \*k_.*D. with ISRA" 0 "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 2 "sra" } } */
+
/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-details" } */
+/* { dg-options "-O2 -fipa-sra -fdump-ipa-sra" } */
static int *
__attribute__((noinline,used))
return ox (&a, &b);
}
-/* { dg-final { scan-tree-dump-times "base: j, remove_param" 0 "eipa_sra" } } */
+/* { dg-final { scan-ipa-dump-times "Will split parameter" 0 "sra" } } */
+++ /dev/null
-/* { dg-do compile } */
-/* { dg-options "-O2 -fipa-sra -fdump-tree-eipa_sra-slim" } */
-/* { dg-require-effective-target non_strict_align } */
-
-struct bovid
-{
- float a;
- int b;
- struct bovid *next;
-};
-
-static int
-__attribute__((noinline))
-foo (struct bovid *cow, int i)
-{
- i++;
- if (cow->next)
- foo (cow->next, i);
- return i;
-}
-
-int main (int argc, char *argv[])
-{
- struct bovid cow;
-
- cow.a = 7.4;
- cow.b = 6;
- cow.next = (struct bovid *) 0;
-
- return foo (&cow, 0);
-}
-
-/* { dg-final { scan-tree-dump-times "foo " 1 "eipa_sra" } } */
/* { dg-do compile } */
-/* { dg-options "-O3 -fipa-cp -fipa-cp-clone -fdump-ipa-cp -fno-early-inlining -fdump-tree-optimized -fno-ipa-icf" } */
+/* { dg-options "-O3 -fipa-cp -fipa-cp-clone -fdump-ipa-cp -fno-early-inlining -fno-ipa-sra -fdump-tree-optimized -fno-ipa-icf" } */
/* { dg-add-options bind_pic_locally } */
int array[100];
}
/* { dg-final { scan-ipa-dump-times "Creating a specialized node of i_can_be_propagated_fully2" 1 "cp" } } */
-/* { dg-final { scan-ipa-dump-times "Creating a specialized node of i_can_be_propagated_fully/" 1 "cp" } } */
+/* { dg-final { scan-ipa-dump-times "Creating a specialized node of i_can_be_propagated_fully\[./\]" 1 "cp" } } */
/* { dg-final { scan-ipa-dump-not "Creating a specialized node of i_can_not_be_propagated_fully2" "cp" } } */
/* { dg-final { scan-ipa-dump-not "Creating a specialized node of i_can_not_be_propagated_fully/" "cp" } } */
/* { dg-final { scan-tree-dump-not "i_can_be_propagated_fully \\(" "optimized" } } */
/* Verify that IPA-CP can make edges direct based on aggregate contents. */
/* { dg-do compile } */
-/* { dg-options "-O3 -fno-early-inlining -fdump-ipa-cp -fdump-ipa-inline" } */
+/* { dg-options "-O3 -fno-early-inlining -fno-ipa-sra -fdump-ipa-cp -fdump-ipa-inline" } */
struct S
{
void fn3() { fn1 (267); }
-/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\\[11, 35\\\]" 1 "cp" } } */
+/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\(now 0\\) \\\[11, 35\\\]" "cp" } } */
return 0;
}
-/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\\[6," "cp" } } */
-/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\\[0, 999\\\]" "cp" } } */
+/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\(now 0\\) \\\[6," "cp" } } */
+/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\(now 0\\) \\\[0, 999\\\]" "cp" } } */
return 0;
}
-/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\\[4," "cp" } } */
-/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\\[0, 11\\\]" "cp" } } */
+/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\(now 0\\) \\\[4," "cp" } } */
+/* { dg-final { scan-ipa-dump "Setting value range of param 0 \\(now 0\\) \\\[0, 11\\\]" "cp" } } */
return 0;
}
-/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\\[0, 9\\\]" 2 "cp" } } */
+/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\(now 0\\) \\\[0, 9\\\]" 2 "cp" } } */
return 0;
}
-/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\\[-10, 9\\\]" 1 "cp" } } */
+/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\(now 0\\) \\\[-10, 9\\\]" 1 "cp" } } */
return 0;
}
-/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\\[-10, 9\\\]" 1 "cp" } } */
+/* { dg-final { scan-ipa-dump-times "Setting value range of param 0 \\(now 0\\) \\\[-10, 9\\\]" 1 "cp" } } */
asm("firstasm");
-NOREORDER __attribute__((noinline)) int bozo(void)
+NOREORDER __attribute__((noipa)) int bozo(void)
{
f2(3);
func2();
asm("jukjuk");
-NOREORDER __attribute__((noinline)) static int func1(void)
+NOREORDER __attribute__((noipa)) static int func1(void)
{
f2(1);
}
asm("barbar");
-NOREORDER __attribute__((noinline)) static int func2(void)
+NOREORDER __attribute__((noipa)) static int func2(void)
{
func1();
}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fipa-sra" } */
+
+
+struct __attribute__((scalar_storage_order("little-endian"))) LE
+{
+ int i;
+ int j;
+};
+
+struct __attribute__((scalar_storage_order("big-endian"))) BE
+{
+ int i;
+ int j;
+};
+
+struct LE gle;
+struct BE gbe;
+
+#define VAL 0x12345678
+
+void __attribute__((noipa))
+fill (void)
+{
+ gle.i = VAL;
+ gle.j = 0xdeadbeef;
+ gbe.i = VAL;
+ gbe.j = 0x11223344;
+}
+
+static int __attribute__((noinline))
+readLE (struct LE p)
+{
+ return p.i;
+}
+
+static int __attribute__((noinline))
+readBE (struct BE p)
+{
+ return p.i;
+}
+
+int
+main (int argc, char *argv[])
+{
+ int r;
+ fill ();
+
+ r = readLE (gle);
+ if (r != VAL)
+ __builtin_abort ();
+ r = readBE (gbe);
+ if (r != VAL)
+ __builtin_abort ();
+
+ return 0;
+}
int
blah ()
{
- very_long_function (1);
+ return very_long_function (1);
}
/* One appearance for dump, one self recursive call and one call from main. */
/* { dg-final { scan-tree-dump-times "very_long_function.constprop \\(\\)" 3 "optimized"} } */
}
tree_function_versioning (old_decl, new_decl,
- NULL, false, NULL,
- false, NULL, NULL);
+ NULL, NULL, false, NULL, NULL);
}
record_tm_clone_pair (old_decl, new_decl);
static void declare_inline_vars (tree, tree);
static void remap_save_expr (tree *, hash_map<tree, tree> *, int *);
static void prepend_lexical_block (tree current_block, tree new_block);
-static tree copy_decl_to_var (tree, copy_body_data *);
static tree copy_result_decl_to_var (tree, copy_body_data *);
static tree copy_decl_maybe_to_var (tree, copy_body_data *);
static gimple_seq remap_gimple_stmt (gimple *, copy_body_data *);
n = id->decl_map->get (name);
if (n)
- return unshare_expr (*n);
+ {
+ /* When we perform edge redirection as part of CFG copy, IPA-SRA can
+ remove an unused LHS from a call statement. Such LHS can however
+ still appear in debug statements, but their value is lost in this
+ function and we do not want to map them. */
+ if (id->killed_new_ssa_names
+ && id->killed_new_ssa_names->contains (*n))
+ {
+ gcc_assert (processing_debug_stmt);
+ processing_debug_stmt = -1;
+ return name;
+ }
+
+ return unshare_expr (*n);
+ }
if (processing_debug_stmt)
{
gcc_assert (n);
gimple_set_block (copy, *n);
}
+ if (id->param_body_adjs)
+ {
+ gimple_seq extra_stmts = NULL;
+ id->param_body_adjs->modify_gimple_stmt (&copy, &extra_stmts);
+ if (!gimple_seq_empty_p (extra_stmts))
+ {
+ memset (&wi, 0, sizeof (wi));
+ wi.info = id;
+ for (gimple_stmt_iterator egsi = gsi_start (extra_stmts);
+ !gsi_end_p (egsi);
+ gsi_next (&egsi))
+ walk_gimple_op (gsi_stmt (egsi), remap_gimple_op_r, &wi);
+ gimple_seq_add_seq (&stmts, extra_stmts);
+ }
+ }
if (id->reset_location)
gimple_set_location (copy, input_location);
gimple *stmt = gsi_stmt (si);
if (is_gimple_call (stmt))
{
+ tree old_lhs = gimple_call_lhs (stmt);
struct cgraph_edge *edge = id->dst_node->get_edge (stmt);
if (edge)
{
- edge->redirect_call_stmt_to_callee ();
+ gimple *new_stmt = edge->redirect_call_stmt_to_callee ();
+ /* If IPA-SRA transformation, run as part of edge redirection,
+ removed the LHS because it is unused, save it to
+ killed_new_ssa_names so that we can prune it from debug
+ statements. */
+ if (old_lhs
+ && TREE_CODE (old_lhs) == SSA_NAME
+ && !gimple_call_lhs (new_stmt))
+ {
+ if (!id->killed_new_ssa_names)
+ id->killed_new_ssa_names = new hash_set<tree> (16);
+ id->killed_new_ssa_names->add (old_lhs);
+ }
+
if (stmt == last && id->call_stmt && maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (bb);
}
body = copy_cfg_body (id, entry_block_map, exit_block_map,
new_entry);
copy_debug_stmts (id);
+ delete id->killed_new_ssa_names;
+ id->killed_new_ssa_names = NULL;
return body;
}
/* Add local vars in this inlined callee to caller. */
add_local_variables (id->src_cfun, cfun, id);
+ if (id->src_node->clone.performed_splits)
+ {
+ /* Any calls from the inlined function will be turned into calls from the
+ function we inline into. We must preserve notes about how to split
+ parameters, so that such calls can be redirected/updated. */
+ unsigned len = vec_safe_length (id->src_node->clone.performed_splits);
+ for (unsigned i = 0; i < len; i++)
+ {
+ ipa_param_performed_split ps
+ = (*id->src_node->clone.performed_splits)[i];
+ ps.dummy_decl = remap_decl (ps.dummy_decl, id);
+ vec_safe_push (id->dst_node->clone.performed_splits, ps);
+ }
+
+ if (flag_checking)
+ {
+ len = vec_safe_length (id->dst_node->clone.performed_splits);
+ for (unsigned i = 0; i < len; i++)
+ {
+ ipa_param_performed_split *ps1
+ = &(*id->dst_node->clone.performed_splits)[i];
+ for (unsigned j = i + 1; j < len; j++)
+ {
+ ipa_param_performed_split *ps2
+ = &(*id->dst_node->clone.performed_splits)[j];
+ gcc_assert (ps1->dummy_decl != ps2->dummy_decl
+ || ps1->unit_offset != ps2->unit_offset);
+ }
+ }
+ }
+ }
+
if (dump_enabled_p ())
{
char buf[128];
return copy;
}
-static tree
+/* Create a new VAR_DECL that is identical in all respects to DECL except that
+ DECL can be either a VAR_DECL, a PARM_DECL or RESULT_DECL. The original
+ DECL must come from ID->src_fn and the copy will be part of ID->dst_fn. */
+
+tree
copy_decl_to_var (tree decl, copy_body_data *id)
{
tree copy, type;
return copy_decl_no_change (decl, id);
}
-/* Return a copy of the function's argument tree. */
+/* Return a copy of the function's argument tree without any modifications. */
+
static tree
-copy_arguments_for_versioning (tree orig_parm, copy_body_data * id,
- bitmap args_to_skip, tree *vars)
+copy_arguments_nochange (tree orig_parm, copy_body_data * id)
{
tree arg, *parg;
tree new_parm = NULL;
- int i = 0;
parg = &new_parm;
-
- for (arg = orig_parm; arg; arg = DECL_CHAIN (arg), i++)
- if (!args_to_skip || !bitmap_bit_p (args_to_skip, i))
- {
- tree new_tree = remap_decl (arg, id);
- if (TREE_CODE (new_tree) != PARM_DECL)
- new_tree = id->copy_decl (arg, id);
- lang_hooks.dup_lang_specific_decl (new_tree);
- *parg = new_tree;
- parg = &DECL_CHAIN (new_tree);
- }
- else if (!id->decl_map->get (arg))
- {
- /* Make an equivalent VAR_DECL. If the argument was used
- as temporary variable later in function, the uses will be
- replaced by local variable. */
- tree var = copy_decl_to_var (arg, id);
- insert_decl_map (id, arg, var);
- /* Declare this new variable. */
- DECL_CHAIN (var) = *vars;
- *vars = var;
- }
+ for (arg = orig_parm; arg; arg = DECL_CHAIN (arg))
+ {
+ tree new_tree = remap_decl (arg, id);
+ if (TREE_CODE (new_tree) != PARM_DECL)
+ new_tree = id->copy_decl (arg, id);
+ lang_hooks.dup_lang_specific_decl (new_tree);
+ *parg = new_tree;
+ parg = &DECL_CHAIN (new_tree);
+ }
return new_parm;
}
static void
update_clone_info (copy_body_data * id)
{
+ vec<ipa_param_performed_split, va_gc> *cur_performed_splits
+ = id->dst_node->clone.performed_splits;
+ if (cur_performed_splits)
+ {
+ unsigned len = cur_performed_splits->length ();
+ for (unsigned i = 0; i < len; i++)
+ {
+ ipa_param_performed_split *ps = &(*cur_performed_splits)[i];
+ ps->dummy_decl = remap_decl (ps->dummy_decl, id);
+ }
+ }
+
struct cgraph_node *node;
if (!id->dst_node->clones)
return;
{
struct ipa_replace_map *replace_info;
replace_info = (*node->clone.tree_map)[i];
- walk_tree (&replace_info->old_tree, copy_tree_body_r, id, NULL);
walk_tree (&replace_info->new_tree, copy_tree_body_r, id, NULL);
}
}
+ if (node->clone.performed_splits)
+ {
+ unsigned len = vec_safe_length (node->clone.performed_splits);
+ for (unsigned i = 0; i < len; i++)
+ {
+ ipa_param_performed_split *ps
+ = &(*node->clone.performed_splits)[i];
+ ps->dummy_decl = remap_decl (ps->dummy_decl, id);
+ }
+ }
+ if (unsigned len = vec_safe_length (cur_performed_splits))
+ {
+ /* We do not want to add current performed splits when we are saving
+ a copy of function body for later during inlining, that would just
+ duplicate all entries. So let's check whether anything
+ referring to the first dummy_decl is present. */
+ unsigned dst_len = vec_safe_length (node->clone.performed_splits);
+ ipa_param_performed_split *first = &(*cur_performed_splits)[0];
+ for (unsigned i = 0; i < dst_len; i++)
+ if ((*node->clone.performed_splits)[i].dummy_decl
+ == first->dummy_decl)
+ {
+ len = 0;
+ break;
+ }
+
+ for (unsigned i = 0; i < len; i++)
+ vec_safe_push (node->clone.performed_splits,
+ (*cur_performed_splits)[i]);
+ if (flag_checking)
+ {
+ for (unsigned i = 0; i < dst_len; i++)
+ {
+ ipa_param_performed_split *ps1
+ = &(*node->clone.performed_splits)[i];
+ for (unsigned j = i + 1; j < dst_len; j++)
+ {
+ ipa_param_performed_split *ps2
+ = &(*node->clone.performed_splits)[j];
+ gcc_assert (ps1->dummy_decl != ps2->dummy_decl
+ || ps1->unit_offset != ps2->unit_offset);
+ }
+ }
+ }
+ }
+
if (node->clones)
node = node->clones;
else if (node->next_sibling_clone)
tree with another tree while duplicating the function's
body, TREE_MAP represents the mapping between these
trees. If UPDATE_CLONES is set, the call_stmt fields
- of edges of clones of the function will be updated.
+ of edges of clones of the function will be updated.
- If non-NULL ARGS_TO_SKIP determine function parameters to remove
- from new version.
- If SKIP_RETURN is true, the new version will return void.
- If non-NULL BLOCK_TO_COPY determine what basic blocks to copy.
+ If non-NULL PARAM_ADJUSTMENTS determines how function prototype (i.e. the
+ function parameters and return value) should be modified.
+ If non-NULL BLOCKS_TO_COPY determine what basic blocks to copy.
If non_NULL NEW_ENTRY determine new entry BB of the clone.
*/
void
tree_function_versioning (tree old_decl, tree new_decl,
vec<ipa_replace_map *, va_gc> *tree_map,
- bool update_clones, bitmap args_to_skip,
- bool skip_return, bitmap blocks_to_copy,
+ ipa_param_adjustments *param_adjustments,
+ bool update_clones, bitmap blocks_to_copy,
basic_block new_entry)
{
struct cgraph_node *old_version_node;
basic_block old_entry_block, bb;
auto_vec<gimple *, 10> init_stmts;
tree vars = NULL_TREE;
- bitmap debug_args_to_skip = args_to_skip;
gcc_assert (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) == FUNCTION_DECL);
DECL_STRUCT_FUNCTION (new_decl)->static_chain_decl
= copy_static_chain (p, &id);
+ auto_vec<int, 16> new_param_indices;
+ ipa_param_adjustments *old_param_adjustments
+ = old_version_node->clone.param_adjustments;
+ if (old_param_adjustments)
+ old_param_adjustments->get_updated_indices (&new_param_indices);
+
/* If there's a tree_map, prepare for substitution. */
if (tree_map)
for (i = 0; i < tree_map->length (); i++)
{
gimple *init;
replace_info = (*tree_map)[i];
- if (replace_info->replace_p)
+
+ int p = replace_info->parm_num;
+ if (old_param_adjustments)
+ p = new_param_indices[p];
+
+ tree parm;
+ tree req_type, new_type;
+
+ for (parm = DECL_ARGUMENTS (old_decl); p;
+ parm = DECL_CHAIN (parm))
+ p--;
+ tree old_tree = parm;
+ req_type = TREE_TYPE (parm);
+ new_type = TREE_TYPE (replace_info->new_tree);
+ if (!useless_type_conversion_p (req_type, new_type))
{
- int parm_num = -1;
- if (!replace_info->old_tree)
- {
- int p = replace_info->parm_num;
- tree parm;
- tree req_type, new_type;
-
- for (parm = DECL_ARGUMENTS (old_decl); p;
- parm = DECL_CHAIN (parm))
- p--;
- replace_info->old_tree = parm;
- parm_num = replace_info->parm_num;
- req_type = TREE_TYPE (parm);
- new_type = TREE_TYPE (replace_info->new_tree);
- if (!useless_type_conversion_p (req_type, new_type))
- {
- if (fold_convertible_p (req_type, replace_info->new_tree))
- replace_info->new_tree
- = fold_build1 (NOP_EXPR, req_type,
- replace_info->new_tree);
- else if (TYPE_SIZE (req_type) == TYPE_SIZE (new_type))
- replace_info->new_tree
- = fold_build1 (VIEW_CONVERT_EXPR, req_type,
- replace_info->new_tree);
- else
- {
- if (dump_file)
- {
- fprintf (dump_file, " const ");
- print_generic_expr (dump_file,
- replace_info->new_tree);
- fprintf (dump_file,
- " can't be converted to param ");
- print_generic_expr (dump_file, parm);
- fprintf (dump_file, "\n");
- }
- replace_info->old_tree = NULL;
- }
- }
- }
+ if (fold_convertible_p (req_type, replace_info->new_tree))
+ replace_info->new_tree
+ = fold_build1 (NOP_EXPR, req_type, replace_info->new_tree);
+ else if (TYPE_SIZE (req_type) == TYPE_SIZE (new_type))
+ replace_info->new_tree
+ = fold_build1 (VIEW_CONVERT_EXPR, req_type,
+ replace_info->new_tree);
else
- gcc_assert (TREE_CODE (replace_info->old_tree) == PARM_DECL);
- if (replace_info->old_tree)
{
- init = setup_one_parameter (&id, replace_info->old_tree,
- replace_info->new_tree, id.src_fn,
- NULL,
- &vars);
- if (init)
- init_stmts.safe_push (init);
- if (MAY_HAVE_DEBUG_BIND_STMTS && args_to_skip)
+ if (dump_file)
{
- if (parm_num == -1)
- {
- tree parm;
- int p;
- for (parm = DECL_ARGUMENTS (old_decl), p = 0; parm;
- parm = DECL_CHAIN (parm), p++)
- if (parm == replace_info->old_tree)
- {
- parm_num = p;
- break;
- }
- }
- if (parm_num != -1)
- {
- if (debug_args_to_skip == args_to_skip)
- {
- debug_args_to_skip = BITMAP_ALLOC (NULL);
- bitmap_copy (debug_args_to_skip, args_to_skip);
- }
- bitmap_clear_bit (debug_args_to_skip, parm_num);
- }
+ fprintf (dump_file, " const ");
+ print_generic_expr (dump_file,
+ replace_info->new_tree);
+ fprintf (dump_file,
+ " can't be converted to param ");
+ print_generic_expr (dump_file, parm);
+ fprintf (dump_file, "\n");
}
+ old_tree = NULL;
}
}
+
+ if (old_tree)
+ {
+ init = setup_one_parameter (&id, old_tree, replace_info->new_tree,
+ id.src_fn, NULL, &vars);
+ if (init)
+ init_stmts.safe_push (init);
+ }
}
- /* Copy the function's arguments. */
- if (DECL_ARGUMENTS (old_decl) != NULL_TREE)
+
+ ipa_param_body_adjustments *param_body_adjs = NULL;
+ if (param_adjustments)
+ {
+ param_body_adjs = new ipa_param_body_adjustments (param_adjustments,
+ new_decl, old_decl,
+ &id, &vars, tree_map);
+ id.param_body_adjs = param_body_adjs;
+ DECL_ARGUMENTS (new_decl) = param_body_adjs->get_new_param_chain ();
+ }
+ else if (DECL_ARGUMENTS (old_decl) != NULL_TREE)
DECL_ARGUMENTS (new_decl)
- = copy_arguments_for_versioning (DECL_ARGUMENTS (old_decl), &id,
- args_to_skip, &vars);
+ = copy_arguments_nochange (DECL_ARGUMENTS (old_decl), &id);
DECL_INITIAL (new_decl) = remap_blocks (DECL_INITIAL (id.src_fn), &id);
BLOCK_SUPERCONTEXT (DECL_INITIAL (new_decl)) = new_decl;
if (DECL_RESULT (old_decl) == NULL_TREE)
;
- else if (skip_return && !VOID_TYPE_P (TREE_TYPE (DECL_RESULT (old_decl))))
+ else if (param_adjustments && param_adjustments->m_skip_return
+ && !VOID_TYPE_P (TREE_TYPE (DECL_RESULT (old_decl))))
{
+ tree resdecl_repl = copy_result_decl_to_var (DECL_RESULT (old_decl),
+ &id);
+ declare_inline_vars (NULL, resdecl_repl);
+ insert_decl_map (&id, DECL_RESULT (old_decl), resdecl_repl);
+
DECL_RESULT (new_decl)
= build_decl (DECL_SOURCE_LOCATION (DECL_RESULT (old_decl)),
RESULT_DECL, NULL_TREE, void_type_node);
DECL_CONTEXT (DECL_RESULT (new_decl)) = new_decl;
+ DECL_IS_MALLOC (new_decl) = false;
cfun->returns_struct = 0;
cfun->returns_pcc_struct = 0;
}
}
}
- if (debug_args_to_skip && MAY_HAVE_DEBUG_BIND_STMTS)
+ if (param_body_adjs && MAY_HAVE_DEBUG_BIND_STMTS)
{
- tree parm;
vec<tree, va_gc> **debug_args = NULL;
unsigned int len = 0;
- for (parm = DECL_ARGUMENTS (old_decl), i = 0;
- parm; parm = DECL_CHAIN (parm), i++)
- if (bitmap_bit_p (debug_args_to_skip, i) && is_gimple_reg (parm))
- {
- tree ddecl;
+ unsigned reset_len = param_body_adjs->m_reset_debug_decls.length ();
- if (debug_args == NULL)
- {
- debug_args = decl_debug_args_insert (new_decl);
- len = vec_safe_length (*debug_args);
- }
- ddecl = make_node (DEBUG_EXPR_DECL);
- DECL_ARTIFICIAL (ddecl) = 1;
- TREE_TYPE (ddecl) = TREE_TYPE (parm);
- SET_DECL_MODE (ddecl, DECL_MODE (parm));
- vec_safe_push (*debug_args, DECL_ORIGIN (parm));
- vec_safe_push (*debug_args, ddecl);
- }
+ for (i = 0; i < reset_len; i++)
+ {
+ tree parm = param_body_adjs->m_reset_debug_decls[i];
+ gcc_assert (is_gimple_reg (parm));
+ tree ddecl;
+
+ if (debug_args == NULL)
+ {
+ debug_args = decl_debug_args_insert (new_decl);
+ len = vec_safe_length (*debug_args);
+ }
+ ddecl = make_node (DEBUG_EXPR_DECL);
+ DECL_ARTIFICIAL (ddecl) = 1;
+ TREE_TYPE (ddecl) = TREE_TYPE (parm);
+ SET_DECL_MODE (ddecl, DECL_MODE (parm));
+ vec_safe_push (*debug_args, DECL_ORIGIN (parm));
+ vec_safe_push (*debug_args, ddecl);
+ }
if (debug_args != NULL)
{
/* On the callee side, add
if (var == NULL_TREE)
break;
vexpr = make_node (DEBUG_EXPR_DECL);
- parm = (**debug_args)[i];
+ tree parm = (**debug_args)[i];
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (parm);
SET_DECL_MODE (vexpr, DECL_MODE (parm));
while (i > len);
}
}
-
- if (debug_args_to_skip && debug_args_to_skip != args_to_skip)
- BITMAP_FREE (debug_args_to_skip);
+ delete param_body_adjs;
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
outside of the inlined function, this should be the number
of basic blocks in the caller before inlining. Zero otherwise. */
int add_clobbers_to_eh_landing_pads;
+
+ /* Class managing changes to function parameters and return value planned
+ during IPA stage. */
+ class ipa_param_body_adjustments *param_body_adjs;
+
+ /* Hash set of SSA names that have been killed during call graph edge
+ redirection and should not be introduced into debug statements, or NULL
+ if no SSA_NAME was deleted during redirection. */
+ hash_set <tree> *killed_new_ssa_names;
};
/* Weights of constructions for estimate_num_insns. */
extern tree copy_fn (tree, tree&, tree&);
extern const char *copy_forbidden (struct function *fun);
extern tree copy_decl_for_dup_finish (copy_body_data *id, tree decl, tree copy);
+extern tree copy_decl_to_var (tree, copy_body_data *);
/* This is in tree-inline.c since the routine uses
data structures from the inliner. */
extern gimple_opt_pass *make_pass_cleanup_eh (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_sra (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_sra_early (gcc::context *ctxt);
-extern gimple_opt_pass *make_pass_early_ipa_sra (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_tail_recursion (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_tail_calls (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_fix_loops (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_free_lang_data (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_free_fn_summary (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_sra (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_icf (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
-#include "symbol-summary.h"
-#include "ipa-param-manipulation.h"
-#include "ipa-prop.h"
#include "params.h"
#include "dbgcnt.h"
-#include "tree-inline.h"
-#include "ipa-fnsummary.h"
-#include "ipa-utils.h"
#include "builtins.h"
+#include "tree-sra.h"
/* Enumeration of all aggregate reductions we can do. */
struct access *first_child;
/* In intraprocedural SRA, pointer to the next sibling in the access tree as
- described above. In IPA-SRA this is a pointer to the next access
- belonging to the same group (having the same representative). */
+ described above. */
struct access *next_sibling;
/* Pointers to the first and last element in the linked list of assign
when grp_to_be_replaced flag is set. */
tree replacement_decl;
- /* Is this access an access to a non-addressable field? */
- unsigned non_addressable : 1;
-
/* Is this access made in reverse storage order? */
unsigned reverse : 1;
/* Should TREE_NO_WARNING of a replacement be set? */
unsigned grp_no_warning : 1;
-
- /* Is it possible that the group refers to data which might be (directly or
- otherwise) modified? */
- unsigned grp_maybe_modified : 1;
-
- /* Set when this is a representative of a pointer to scalar (i.e. by
- reference) parameter which we consider for turning into a plain scalar
- (i.e. a by value parameter). */
- unsigned grp_scalar_ptr : 1;
-
- /* Set when we discover that this pointer is not safe to dereference in the
- caller. */
- unsigned grp_not_necessarilly_dereferenced : 1;
};
typedef struct access *access_p;
propagated to their assignment counterparts. */
static struct access *work_queue_head;
-/* Number of parameters of the analyzed function when doing early ipa SRA. */
-static int func_param_count;
-
-/* scan_function sets the following to true if it encounters a call to
- __builtin_apply_args. */
-static bool encountered_apply_args;
-
-/* Set by scan_function when it finds a recursive call. */
-static bool encountered_recursive_call;
-
-/* Set by scan_function when it finds a recursive call with less actual
- arguments than formal parameters.. */
-static bool encountered_unchangable_recursive_call;
-
-/* This is a table in which for each basic block and parameter there is a
- distance (offset + size) in that parameter which is dereferenced and
- accessed in that BB. */
-static HOST_WIDE_INT *bb_dereferences;
-/* Bitmap of BBs that can cause the function to "stop" progressing by
- returning, throwing externally, looping infinitely or calling a function
- which might abort etc.. */
-static bitmap final_bbs;
-
/* Representative of no accesses at all. */
static struct access no_accesses_representant;
print_generic_expr (f, access->expr);
fprintf (f, ", type = ");
print_generic_expr (f, access->type);
- fprintf (f, ", non_addressable = %d, reverse = %d",
- access->non_addressable, access->reverse);
+ fprintf (f, ", reverse = %d", access->reverse);
if (grp)
fprintf (f, ", grp_read = %d, grp_write = %d, grp_assignment_read = %d, "
"grp_assignment_write = %d, grp_scalar_read = %d, "
"grp_hint = %d, grp_covered = %d, "
"grp_unscalarizable_region = %d, grp_unscalarized_data = %d, "
"grp_same_access_path = %d, grp_partial_lhs = %d, "
- "grp_to_be_replaced = %d, grp_to_be_debug_replaced = %d, "
- "grp_maybe_modified = %d, "
- "grp_not_necessarilly_dereferenced = %d\n",
+ "grp_to_be_replaced = %d, grp_to_be_debug_replaced = %d\n",
access->grp_read, access->grp_write, access->grp_assignment_read,
access->grp_assignment_write, access->grp_scalar_read,
access->grp_scalar_write, access->grp_total_scalarization,
access->grp_hint, access->grp_covered,
access->grp_unscalarizable_region, access->grp_unscalarized_data,
access->grp_same_access_path, access->grp_partial_lhs,
- access->grp_to_be_replaced, access->grp_to_be_debug_replaced,
- access->grp_maybe_modified,
- access->grp_not_necessarilly_dereferenced);
+ access->grp_to_be_replaced, access->grp_to_be_debug_replaced);
else
fprintf (f, ", write = %d, grp_total_scalarization = %d, "
"grp_partial_lhs = %d\n",
gcc_obstack_init (&name_obstack);
base_access_vec = new hash_map<tree, auto_vec<access_p> >;
memset (&sra_stats, 0, sizeof (sra_stats));
- encountered_apply_args = false;
- encountered_recursive_call = false;
- encountered_unchangable_recursive_call = false;
}
/* Deallocate all general structures. */
}
/* Return true iff the type contains a field or an element which does not allow
- scalarization. */
+ scalarization. Use VISITED_TYPES to avoid re-checking already checked
+ (sub-)types. */
static bool
-type_internals_preclude_sra_p (tree type, const char **msg)
+type_internals_preclude_sra_p_1 (tree type, const char **msg,
+ hash_set<tree> *visited_types)
{
tree fld;
tree et;
+ if (visited_types->contains (type))
+ return false;
+ visited_types->add (type);
+
switch (TREE_CODE (type))
{
case RECORD_TYPE:
for (fld = TYPE_FIELDS (type); fld; fld = DECL_CHAIN (fld))
if (TREE_CODE (fld) == FIELD_DECL)
{
+ if (TREE_CODE (fld) == FUNCTION_DECL)
+ continue;
tree ft = TREE_TYPE (fld);
if (TREE_THIS_VOLATILE (fld))
return true;
}
- if (AGGREGATE_TYPE_P (ft) && type_internals_preclude_sra_p (ft, msg))
+ if (AGGREGATE_TYPE_P (ft)
+ && type_internals_preclude_sra_p_1 (ft, msg, visited_types))
return true;
}
return true;
}
- if (AGGREGATE_TYPE_P (et) && type_internals_preclude_sra_p (et, msg))
+ if (AGGREGATE_TYPE_P (et)
+ && type_internals_preclude_sra_p_1 (et, msg, visited_types))
return true;
return false;
}
}
-/* If T is an SSA_NAME, return NULL if it is not a default def or return its
- base variable if it is. Return T if it is not an SSA_NAME. */
+/* Return true iff the type contains a field or an element which does not allow
+ scalarization. */
-static tree
-get_ssa_base_param (tree t)
+bool
+type_internals_preclude_sra_p (tree type, const char **msg)
{
- if (TREE_CODE (t) == SSA_NAME)
- {
- if (SSA_NAME_IS_DEFAULT_DEF (t))
- return SSA_NAME_VAR (t);
- else
- return NULL_TREE;
- }
- return t;
+ hash_set<tree> visited_types;
+ return type_internals_preclude_sra_p_1 (type, msg, &visited_types);
}
-/* Mark a dereference of BASE of distance DIST in a basic block tht STMT
- belongs to, unless the BB has already been marked as a potentially
- final. */
-
-static void
-mark_parm_dereference (tree base, HOST_WIDE_INT dist, gimple *stmt)
-{
- basic_block bb = gimple_bb (stmt);
- int idx, parm_index = 0;
- tree parm;
-
- if (bitmap_bit_p (final_bbs, bb->index))
- return;
-
- for (parm = DECL_ARGUMENTS (current_function_decl);
- parm && parm != base;
- parm = DECL_CHAIN (parm))
- parm_index++;
-
- gcc_assert (parm_index < func_param_count);
-
- idx = bb->index * func_param_count + parm_index;
- if (bb_dereferences[idx] < dist)
- bb_dereferences[idx] = dist;
-}
/* Allocate an access structure for BASE, OFFSET and SIZE, clear it, fill in
the three fields. Also add it to the vector of accesses corresponding to
poly_int64 poffset, psize, pmax_size;
HOST_WIDE_INT offset, size, max_size;
tree base = expr;
- bool reverse, ptr, unscalarizable_region = false;
+ bool reverse, unscalarizable_region = false;
base = get_ref_base_and_extent (expr, &poffset, &psize, &pmax_size,
&reverse);
return NULL;
}
- if (sra_mode == SRA_MODE_EARLY_IPA
- && TREE_CODE (base) == MEM_REF)
- {
- base = get_ssa_base_param (TREE_OPERAND (base, 0));
- if (!base)
- return NULL;
- ptr = true;
- }
- else
- ptr = false;
-
/* For constant-pool entries, check we can substitute the constant value. */
- if (constant_decl_p (base)
- && (sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA))
+ if (constant_decl_p (base))
{
gcc_assert (!bitmap_bit_p (disqualified_constants, DECL_UID (base)));
if (expr != base
if (!DECL_P (base) || !bitmap_bit_p (candidate_bitmap, DECL_UID (base)))
return NULL;
- if (sra_mode == SRA_MODE_EARLY_IPA)
+ if (size != max_size)
{
- if (size < 0 || size != max_size)
- {
- disqualify_candidate (base, "Encountered a variable sized access.");
- return NULL;
- }
- if (TREE_CODE (expr) == COMPONENT_REF
- && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
- {
- disqualify_candidate (base, "Encountered a bit-field access.");
- return NULL;
- }
- gcc_checking_assert ((offset % BITS_PER_UNIT) == 0);
-
- if (ptr)
- mark_parm_dereference (base, offset + size, stmt);
+ size = max_size;
+ unscalarizable_region = true;
}
- else
+ if (size < 0)
{
- if (size != max_size)
- {
- size = max_size;
- unscalarizable_region = true;
- }
- if (size < 0)
- {
- disqualify_candidate (base, "Encountered an unconstrained access.");
- return NULL;
- }
+ disqualify_candidate (base, "Encountered an unconstrained access.");
+ return NULL;
}
access = create_access_1 (base, offset, size);
access->stmt = stmt;
access->reverse = reverse;
- if (TREE_CODE (expr) == COMPONENT_REF
- && DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1)))
- access->non_addressable = 1;
-
return access;
}
disqualify_base_of_expr (tree t, const char *reason)
{
t = get_base_address (t);
- if (sra_mode == SRA_MODE_EARLY_IPA
- && TREE_CODE (t) == MEM_REF)
- t = get_ssa_base_param (TREE_OPERAND (t, 0));
-
if (t && DECL_P (t))
disqualify_candidate (t, reason);
}
switch (TREE_CODE (expr))
{
case MEM_REF:
- if (TREE_CODE (TREE_OPERAND (expr, 0)) != ADDR_EXPR
- && sra_mode != SRA_MODE_EARLY_IPA)
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) != ADDR_EXPR)
return NULL;
/* fall through */
case VAR_DECL:
static bool
disqualify_if_bad_bb_terminating_stmt (gimple *stmt, tree lhs, tree rhs)
{
- if ((sra_mode == SRA_MODE_EARLY_INTRA || sra_mode == SRA_MODE_INTRA)
- && stmt_ends_bb_p (stmt))
+ if (stmt_ends_bb_p (stmt))
{
if (single_non_eh_succ (gimple_bb (stmt)))
return false;
return false;
}
-/* Return true iff callsite CALL has at least as many actual arguments as there
- are formal parameters of the function currently processed by IPA-SRA and
- that their types match. */
-
-static inline bool
-callsite_arguments_match_p (gimple *call)
-{
- if (gimple_call_num_args (call) < (unsigned) func_param_count)
- return false;
-
- tree parm;
- int i;
- for (parm = DECL_ARGUMENTS (current_function_decl), i = 0;
- parm;
- parm = DECL_CHAIN (parm), i++)
- {
- tree arg = gimple_call_arg (call, i);
- if (!useless_type_conversion_p (TREE_TYPE (parm), TREE_TYPE (arg)))
- return false;
- }
- return true;
-}
-
/* Scan function and look for interesting expressions and create access
structures for them. Return true iff any access is created. */
tree t;
unsigned i;
- if (final_bbs && stmt_can_throw_external (cfun, stmt))
- bitmap_set_bit (final_bbs, bb->index);
switch (gimple_code (stmt))
{
case GIMPLE_RETURN:
t = gimple_return_retval (as_a <greturn *> (stmt));
if (t != NULL_TREE)
ret |= build_access_from_expr (t, stmt, false);
- if (final_bbs)
- bitmap_set_bit (final_bbs, bb->index);
break;
case GIMPLE_ASSIGN:
ret |= build_access_from_expr (gimple_call_arg (stmt, i),
stmt, false);
- if (sra_mode == SRA_MODE_EARLY_IPA)
- {
- tree dest = gimple_call_fndecl (stmt);
- int flags = gimple_call_flags (stmt);
-
- if (dest)
- {
- if (fndecl_built_in_p (dest, BUILT_IN_APPLY_ARGS))
- encountered_apply_args = true;
- if (recursive_call_p (current_function_decl, dest))
- {
- encountered_recursive_call = true;
- if (!callsite_arguments_match_p (stmt))
- encountered_unchangable_recursive_call = true;
- }
- }
-
- if (final_bbs
- && (flags & (ECF_CONST | ECF_PURE)) == 0)
- bitmap_set_bit (final_bbs, bb->index);
- }
-
t = gimple_call_lhs (stmt);
if (t && !disqualify_if_bad_bb_terminating_stmt (stmt, t, NULL))
ret |= build_access_from_expr (t, stmt, true);
gasm *asm_stmt = as_a <gasm *> (stmt);
walk_stmt_load_store_addr_ops (asm_stmt, NULL, NULL, NULL,
asm_visit_addr);
- if (final_bbs)
- bitmap_set_bit (final_bbs, bb->index);
-
for (i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
{
t = TREE_VALUE (gimple_asm_input_op (asm_stmt, i));
}
}
-/* Return true iff TYPE is stdarg va_list type. */
-
-static inline bool
-is_va_list_type (tree type)
-{
- return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node);
-}
-
/* Print message to dump file why a variable was rejected. */
static void
reject (var, "not aggregate");
return false;
}
- /* Allow constant-pool entries (that "need to live in memory")
- unless we are doing IPA SRA. */
- if (needs_to_live_in_memory (var)
- && (sra_mode == SRA_MODE_EARLY_IPA || !constant_decl_p (var)))
+ /* Allow constant-pool entries that "need to live in memory". */
+ if (needs_to_live_in_memory (var) && !constant_decl_p (var))
{
reject (var, "needs to live in memory");
return false;
{
return new pass_sra (ctxt);
}
-
-
-/* Return true iff PARM (which must be a parm_decl) is an unused scalar
- parameter. */
-
-static bool
-is_unused_scalar_param (tree parm)
-{
- tree name;
- return (is_gimple_reg (parm)
- && (!(name = ssa_default_def (cfun, parm))
- || has_zero_uses (name)));
-}
-
-/* Scan immediate uses of a default definition SSA name of a parameter PARM and
- examine whether there are any direct or otherwise infeasible ones. If so,
- return true, otherwise return false. PARM must be a gimple register with a
- non-NULL default definition. */
-
-static bool
-ptr_parm_has_direct_uses (tree parm)
-{
- imm_use_iterator ui;
- gimple *stmt;
- tree name = ssa_default_def (cfun, parm);
- bool ret = false;
-
- FOR_EACH_IMM_USE_STMT (stmt, ui, name)
- {
- int uses_ok = 0;
- use_operand_p use_p;
-
- if (is_gimple_debug (stmt))
- continue;
-
- /* Valid uses include dereferences on the lhs and the rhs. */
- if (gimple_has_lhs (stmt))
- {
- tree lhs = gimple_get_lhs (stmt);
- while (handled_component_p (lhs))
- lhs = TREE_OPERAND (lhs, 0);
- if (TREE_CODE (lhs) == MEM_REF
- && TREE_OPERAND (lhs, 0) == name
- && integer_zerop (TREE_OPERAND (lhs, 1))
- && types_compatible_p (TREE_TYPE (lhs),
- TREE_TYPE (TREE_TYPE (name)))
- && !TREE_THIS_VOLATILE (lhs))
- uses_ok++;
- }
- if (gimple_assign_single_p (stmt))
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- while (handled_component_p (rhs))
- rhs = TREE_OPERAND (rhs, 0);
- if (TREE_CODE (rhs) == MEM_REF
- && TREE_OPERAND (rhs, 0) == name
- && integer_zerop (TREE_OPERAND (rhs, 1))
- && types_compatible_p (TREE_TYPE (rhs),
- TREE_TYPE (TREE_TYPE (name)))
- && !TREE_THIS_VOLATILE (rhs))
- uses_ok++;
- }
- else if (is_gimple_call (stmt))
- {
- unsigned i;
- for (i = 0; i < gimple_call_num_args (stmt); ++i)
- {
- tree arg = gimple_call_arg (stmt, i);
- while (handled_component_p (arg))
- arg = TREE_OPERAND (arg, 0);
- if (TREE_CODE (arg) == MEM_REF
- && TREE_OPERAND (arg, 0) == name
- && integer_zerop (TREE_OPERAND (arg, 1))
- && types_compatible_p (TREE_TYPE (arg),
- TREE_TYPE (TREE_TYPE (name)))
- && !TREE_THIS_VOLATILE (arg))
- uses_ok++;
- }
- }
-
- /* If the number of valid uses does not match the number of
- uses in this stmt there is an unhandled use. */
- FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
- --uses_ok;
-
- if (uses_ok != 0)
- ret = true;
-
- if (ret)
- BREAK_FROM_IMM_USE_STMT (ui);
- }
-
- return ret;
-}
-
-/* Identify candidates for reduction for IPA-SRA based on their type and mark
- them in candidate_bitmap. Note that these do not necessarily include
- parameter which are unused and thus can be removed. Return true iff any
- such candidate has been found. */
-
-static bool
-find_param_candidates (void)
-{
- tree parm;
- int count = 0;
- bool ret = false;
- const char *msg;
-
- for (parm = DECL_ARGUMENTS (current_function_decl);
- parm;
- parm = DECL_CHAIN (parm))
- {
- tree type = TREE_TYPE (parm);
- tree_node **slot;
-
- count++;
-
- if (TREE_THIS_VOLATILE (parm)
- || TREE_ADDRESSABLE (parm)
- || (!is_gimple_reg_type (type) && is_va_list_type (type)))
- continue;
-
- if (is_unused_scalar_param (parm))
- {
- ret = true;
- continue;
- }
-
- if (POINTER_TYPE_P (type))
- {
- type = TREE_TYPE (type);
-
- if (TREE_CODE (type) == FUNCTION_TYPE
- || TYPE_VOLATILE (type)
- || (TREE_CODE (type) == ARRAY_TYPE
- && TYPE_NONALIASED_COMPONENT (type))
- || !is_gimple_reg (parm)
- || is_va_list_type (type)
- || ptr_parm_has_direct_uses (parm))
- continue;
- }
- else if (!AGGREGATE_TYPE_P (type))
- continue;
-
- if (!COMPLETE_TYPE_P (type)
- || !tree_fits_uhwi_p (TYPE_SIZE (type))
- || tree_to_uhwi (TYPE_SIZE (type)) == 0
- || (AGGREGATE_TYPE_P (type)
- && type_internals_preclude_sra_p (type, &msg)))
- continue;
-
- bitmap_set_bit (candidate_bitmap, DECL_UID (parm));
- slot = candidates->find_slot_with_hash (parm, DECL_UID (parm), INSERT);
- *slot = parm;
-
- ret = true;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Candidate (%d): ", DECL_UID (parm));
- print_generic_expr (dump_file, parm);
- fprintf (dump_file, "\n");
- }
- }
-
- func_param_count = count;
- return ret;
-}
-
-/* Callback of walk_aliased_vdefs, marks the access passed as DATA as
- maybe_modified. */
-
-static bool
-mark_maybe_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
- void *data)
-{
- struct access *repr = (struct access *) data;
-
- repr->grp_maybe_modified = 1;
- return true;
-}
-
-/* Analyze what representatives (in linked lists accessible from
- REPRESENTATIVES) can be modified by side effects of statements in the
- current function. */
-
-static void
-analyze_modified_params (vec<access_p> representatives)
-{
- int i;
-
- for (i = 0; i < func_param_count; i++)
- {
- struct access *repr;
-
- for (repr = representatives[i];
- repr;
- repr = repr->next_grp)
- {
- struct access *access;
- bitmap visited;
- ao_ref ar;
-
- if (no_accesses_p (repr))
- continue;
- if (!POINTER_TYPE_P (TREE_TYPE (repr->base))
- || repr->grp_maybe_modified)
- continue;
-
- ao_ref_init (&ar, repr->expr);
- visited = BITMAP_ALLOC (NULL);
- for (access = repr; access; access = access->next_sibling)
- {
- /* All accesses are read ones, otherwise grp_maybe_modified would
- be trivially set. */
- walk_aliased_vdefs (&ar, gimple_vuse (access->stmt),
- mark_maybe_modified, repr, &visited);
- if (repr->grp_maybe_modified)
- break;
- }
- BITMAP_FREE (visited);
- }
- }
-}
-
-/* Propagate distances in bb_dereferences in the opposite direction than the
- control flow edges, in each step storing the maximum of the current value
- and the minimum of all successors. These steps are repeated until the table
- stabilizes. Note that BBs which might terminate the functions (according to
- final_bbs bitmap) never updated in this way. */
-
-static void
-propagate_dereference_distances (void)
-{
- basic_block bb;
-
- auto_vec<basic_block> queue (last_basic_block_for_fn (cfun));
- queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- FOR_EACH_BB_FN (bb, cfun)
- {
- queue.quick_push (bb);
- bb->aux = bb;
- }
-
- while (!queue.is_empty ())
- {
- edge_iterator ei;
- edge e;
- bool change = false;
- int i;
-
- bb = queue.pop ();
- bb->aux = NULL;
-
- if (bitmap_bit_p (final_bbs, bb->index))
- continue;
-
- for (i = 0; i < func_param_count; i++)
- {
- int idx = bb->index * func_param_count + i;
- bool first = true;
- HOST_WIDE_INT inh = 0;
-
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- int succ_idx = e->dest->index * func_param_count + i;
-
- if (e->src == EXIT_BLOCK_PTR_FOR_FN (cfun))
- continue;
-
- if (first)
- {
- first = false;
- inh = bb_dereferences [succ_idx];
- }
- else if (bb_dereferences [succ_idx] < inh)
- inh = bb_dereferences [succ_idx];
- }
-
- if (!first && bb_dereferences[idx] < inh)
- {
- bb_dereferences[idx] = inh;
- change = true;
- }
- }
-
- if (change && !bitmap_bit_p (final_bbs, bb->index))
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- if (e->src->aux)
- continue;
-
- e->src->aux = e->src;
- queue.quick_push (e->src);
- }
- }
-}
-
-/* Dump a dereferences TABLE with heading STR to file F. */
-
-static void
-dump_dereferences_table (FILE *f, const char *str, HOST_WIDE_INT *table)
-{
- basic_block bb;
-
- fprintf (dump_file, "%s", str);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
- EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
- {
- fprintf (f, "%4i %i ", bb->index, bitmap_bit_p (final_bbs, bb->index));
- if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
- {
- int i;
- for (i = 0; i < func_param_count; i++)
- {
- int idx = bb->index * func_param_count + i;
- fprintf (f, " %4" HOST_WIDE_INT_PRINT "d", table[idx]);
- }
- }
- fprintf (f, "\n");
- }
- fprintf (dump_file, "\n");
-}
-
-/* Determine what (parts of) parameters passed by reference that are not
- assigned to are not certainly dereferenced in this function and thus the
- dereferencing cannot be safely moved to the caller without potentially
- introducing a segfault. Mark such REPRESENTATIVES as
- grp_not_necessarilly_dereferenced.
-
- The dereferenced maximum "distance," i.e. the offset + size of the accessed
- part is calculated rather than simple booleans are calculated for each
- pointer parameter to handle cases when only a fraction of the whole
- aggregate is allocated (see testsuite/gcc.c-torture/execute/ipa-sra-2.c for
- an example).
-
- The maximum dereference distances for each pointer parameter and BB are
- already stored in bb_dereference. This routine simply propagates these
- values upwards by propagate_dereference_distances and then compares the
- distances of individual parameters in the ENTRY BB to the equivalent
- distances of each representative of a (fraction of a) parameter. */
-
-static void
-analyze_caller_dereference_legality (vec<access_p> representatives)
-{
- int i;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump_dereferences_table (dump_file,
- "Dereference table before propagation:\n",
- bb_dereferences);
-
- propagate_dereference_distances ();
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump_dereferences_table (dump_file,
- "Dereference table after propagation:\n",
- bb_dereferences);
-
- for (i = 0; i < func_param_count; i++)
- {
- struct access *repr = representatives[i];
- int idx = ENTRY_BLOCK_PTR_FOR_FN (cfun)->index * func_param_count + i;
-
- if (!repr || no_accesses_p (repr))
- continue;
-
- do
- {
- if ((repr->offset + repr->size) > bb_dereferences[idx])
- repr->grp_not_necessarilly_dereferenced = 1;
- repr = repr->next_grp;
- }
- while (repr);
- }
-}
-
-/* Return the representative access for the parameter declaration PARM if it is
- a scalar passed by reference which is not written to and the pointer value
- is not used directly. Thus, if it is legal to dereference it in the caller
- and we can rule out modifications through aliases, such parameter should be
- turned into one passed by value. Return NULL otherwise. */
-
-static struct access *
-unmodified_by_ref_scalar_representative (tree parm)
-{
- int i, access_count;
- struct access *repr;
- vec<access_p> *access_vec;
-
- access_vec = get_base_access_vector (parm);
- gcc_assert (access_vec);
- repr = (*access_vec)[0];
- if (repr->write)
- return NULL;
- repr->group_representative = repr;
-
- access_count = access_vec->length ();
- for (i = 1; i < access_count; i++)
- {
- struct access *access = (*access_vec)[i];
- if (access->write)
- return NULL;
- access->group_representative = repr;
- access->next_sibling = repr->next_sibling;
- repr->next_sibling = access;
- }
-
- repr->grp_read = 1;
- repr->grp_scalar_ptr = 1;
- return repr;
-}
-
-/* Return true iff this ACCESS precludes IPA-SRA of the parameter it is
- associated with. REQ_ALIGN is the minimum required alignment. */
-
-static bool
-access_precludes_ipa_sra_p (struct access *access, unsigned int req_align)
-{
- unsigned int exp_align;
- /* Avoid issues such as the second simple testcase in PR 42025. The problem
- is incompatible assign in a call statement (and possibly even in asm
- statements). This can be relaxed by using a new temporary but only for
- non-TREE_ADDRESSABLE types and is probably not worth the complexity. (In
- intraprocedural SRA we deal with this by keeping the old aggregate around,
- something we cannot do in IPA-SRA.) */
- if (access->write
- && (is_gimple_call (access->stmt)
- || gimple_code (access->stmt) == GIMPLE_ASM))
- return true;
-
- exp_align = get_object_alignment (access->expr);
- if (exp_align < req_align)
- return true;
-
- return false;
-}
-
-
-/* Sort collected accesses for parameter PARM, identify representatives for
- each accessed region and link them together. Return NULL if there are
- different but overlapping accesses, return the special ptr value meaning
- there are no accesses for this parameter if that is the case and return the
- first representative otherwise. Set *RO_GRP if there is a group of accesses
- with only read (i.e. no write) accesses. */
-
-static struct access *
-splice_param_accesses (tree parm, bool *ro_grp)
-{
- int i, j, access_count, group_count;
- int total_size = 0;
- struct access *access, *res, **prev_acc_ptr = &res;
- vec<access_p> *access_vec;
-
- access_vec = get_base_access_vector (parm);
- if (!access_vec)
- return &no_accesses_representant;
- access_count = access_vec->length ();
-
- access_vec->qsort (compare_access_positions);
-
- i = 0;
- total_size = 0;
- group_count = 0;
- while (i < access_count)
- {
- bool modification;
- tree a1_alias_type;
- access = (*access_vec)[i];
- modification = access->write;
- if (access_precludes_ipa_sra_p (access, TYPE_ALIGN (access->type)))
- return NULL;
- a1_alias_type = reference_alias_ptr_type (access->expr);
-
- /* Access is about to become group representative unless we find some
- nasty overlap which would preclude us from breaking this parameter
- apart. */
-
- j = i + 1;
- while (j < access_count)
- {
- struct access *ac2 = (*access_vec)[j];
- if (ac2->offset != access->offset)
- {
- /* All or nothing law for parameters. */
- if (access->offset + access->size > ac2->offset)
- return NULL;
- else
- break;
- }
- else if (ac2->size != access->size)
- return NULL;
-
- if (access_precludes_ipa_sra_p (ac2, TYPE_ALIGN (access->type))
- || (ac2->type != access->type
- && (TREE_ADDRESSABLE (ac2->type)
- || TREE_ADDRESSABLE (access->type)))
- || (reference_alias_ptr_type (ac2->expr) != a1_alias_type))
- return NULL;
-
- modification |= ac2->write;
- ac2->group_representative = access;
- ac2->next_sibling = access->next_sibling;
- access->next_sibling = ac2;
- j++;
- }
-
- group_count++;
- access->grp_maybe_modified = modification;
- if (!modification)
- *ro_grp = true;
- *prev_acc_ptr = access;
- prev_acc_ptr = &access->next_grp;
- total_size += access->size;
- i = j;
- }
-
- gcc_assert (group_count > 0);
- return res;
-}
-
-/* Decide whether parameters with representative accesses given by REPR should
- be reduced into components. */
-
-static int
-decide_one_param_reduction (struct access *repr)
-{
- HOST_WIDE_INT total_size, cur_parm_size;
- bool by_ref;
- tree parm;
-
- parm = repr->base;
- cur_parm_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm)));
- gcc_assert (cur_parm_size > 0);
-
- if (POINTER_TYPE_P (TREE_TYPE (parm)))
- by_ref = true;
- else
- by_ref = false;
-
- if (dump_file)
- {
- struct access *acc;
- fprintf (dump_file, "Evaluating PARAM group sizes for ");
- print_generic_expr (dump_file, parm);
- fprintf (dump_file, " (UID: %u): \n", DECL_UID (parm));
- for (acc = repr; acc; acc = acc->next_grp)
- dump_access (dump_file, acc, true);
- }
-
- total_size = 0;
- int new_param_count = 0;
-
- for (; repr; repr = repr->next_grp)
- {
- gcc_assert (parm == repr->base);
-
- /* Taking the address of a non-addressable field is verboten. */
- if (by_ref && repr->non_addressable)
- return 0;
-
- /* Do not decompose a non-BLKmode param in a way that would
- create BLKmode params. Especially for by-reference passing
- (thus, pointer-type param) this is hardly worthwhile. */
- if (DECL_MODE (parm) != BLKmode
- && TYPE_MODE (repr->type) == BLKmode)
- return 0;
-
- if (!by_ref || (!repr->grp_maybe_modified
- && !repr->grp_not_necessarilly_dereferenced))
- total_size += repr->size;
- else
- total_size += cur_parm_size;
-
- new_param_count++;
- }
-
- gcc_assert (new_param_count > 0);
-
- if (!by_ref)
- {
- if (total_size >= cur_parm_size)
- return 0;
- }
- else
- {
- int parm_num_limit;
- if (optimize_function_for_size_p (cfun))
- parm_num_limit = 1;
- else
- parm_num_limit = PARAM_VALUE (PARAM_IPA_SRA_PTR_GROWTH_FACTOR);
-
- if (new_param_count > parm_num_limit
- || total_size > (parm_num_limit * cur_parm_size))
- return 0;
- }
-
- if (dump_file)
- fprintf (dump_file, " ....will be split into %i components\n",
- new_param_count);
- return new_param_count;
-}
-
-/* The order of the following enums is important, we need to do extra work for
- UNUSED_PARAMS, BY_VAL_ACCESSES and UNMODIF_BY_REF_ACCESSES. */
-enum ipa_splicing_result { NO_GOOD_ACCESS, UNUSED_PARAMS, BY_VAL_ACCESSES,
- MODIF_BY_REF_ACCESSES, UNMODIF_BY_REF_ACCESSES };
-
-/* Identify representatives of all accesses to all candidate parameters for
- IPA-SRA. Return result based on what representatives have been found. */
-
-static enum ipa_splicing_result
-splice_all_param_accesses (vec<access_p> &representatives)
-{
- enum ipa_splicing_result result = NO_GOOD_ACCESS;
- tree parm;
- struct access *repr;
-
- representatives.create (func_param_count);
-
- for (parm = DECL_ARGUMENTS (current_function_decl);
- parm;
- parm = DECL_CHAIN (parm))
- {
- if (is_unused_scalar_param (parm))
- {
- representatives.quick_push (&no_accesses_representant);
- if (result == NO_GOOD_ACCESS)
- result = UNUSED_PARAMS;
- }
- else if (POINTER_TYPE_P (TREE_TYPE (parm))
- && is_gimple_reg_type (TREE_TYPE (TREE_TYPE (parm)))
- && bitmap_bit_p (candidate_bitmap, DECL_UID (parm)))
- {
- repr = unmodified_by_ref_scalar_representative (parm);
- representatives.quick_push (repr);
- if (repr)
- result = UNMODIF_BY_REF_ACCESSES;
- }
- else if (bitmap_bit_p (candidate_bitmap, DECL_UID (parm)))
- {
- bool ro_grp = false;
- repr = splice_param_accesses (parm, &ro_grp);
- representatives.quick_push (repr);
-
- if (repr && !no_accesses_p (repr))
- {
- if (POINTER_TYPE_P (TREE_TYPE (parm)))
- {
- if (ro_grp)
- result = UNMODIF_BY_REF_ACCESSES;
- else if (result < MODIF_BY_REF_ACCESSES)
- result = MODIF_BY_REF_ACCESSES;
- }
- else if (result < BY_VAL_ACCESSES)
- result = BY_VAL_ACCESSES;
- }
- else if (no_accesses_p (repr) && (result == NO_GOOD_ACCESS))
- result = UNUSED_PARAMS;
- }
- else
- representatives.quick_push (NULL);
- }
-
- if (result == NO_GOOD_ACCESS)
- {
- representatives.release ();
- return NO_GOOD_ACCESS;
- }
-
- return result;
-}
-
-/* Return the index of BASE in PARMS. Abort if it is not found. */
-
-static inline int
-get_param_index (tree base, vec<tree> parms)
-{
- int i, len;
-
- len = parms.length ();
- for (i = 0; i < len; i++)
- if (parms[i] == base)
- return i;
- gcc_unreachable ();
-}
-
-/* Convert the decisions made at the representative level into compact
- parameter adjustments. REPRESENTATIVES are pointers to first
- representatives of each param accesses, ADJUSTMENTS_COUNT is the expected
- final number of adjustments. */
-
-static ipa_parm_adjustment_vec
-turn_representatives_into_adjustments (vec<access_p> representatives,
- int adjustments_count)
-{
- vec<tree> parms;
- ipa_parm_adjustment_vec adjustments;
- tree parm;
- int i;
-
- gcc_assert (adjustments_count > 0);
- parms = ipa_get_vector_of_formal_parms (current_function_decl);
- adjustments.create (adjustments_count);
- parm = DECL_ARGUMENTS (current_function_decl);
- for (i = 0; i < func_param_count; i++, parm = DECL_CHAIN (parm))
- {
- struct access *repr = representatives[i];
-
- if (!repr || no_accesses_p (repr))
- {
- struct ipa_parm_adjustment adj;
-
- memset (&adj, 0, sizeof (adj));
- adj.base_index = get_param_index (parm, parms);
- adj.base = parm;
- if (!repr)
- adj.op = IPA_PARM_OP_COPY;
- else
- adj.op = IPA_PARM_OP_REMOVE;
- adj.arg_prefix = "ISRA";
- adjustments.quick_push (adj);
- }
- else
- {
- struct ipa_parm_adjustment adj;
- int index = get_param_index (parm, parms);
-
- for (; repr; repr = repr->next_grp)
- {
- memset (&adj, 0, sizeof (adj));
- gcc_assert (repr->base == parm);
- adj.base_index = index;
- adj.base = repr->base;
- adj.type = repr->type;
- adj.alias_ptr_type = reference_alias_ptr_type (repr->expr);
- adj.offset = repr->offset;
- adj.reverse = repr->reverse;
- adj.by_ref = (POINTER_TYPE_P (TREE_TYPE (repr->base))
- && (repr->grp_maybe_modified
- || repr->grp_not_necessarilly_dereferenced));
- adj.arg_prefix = "ISRA";
- adjustments.quick_push (adj);
- }
- }
- }
- parms.release ();
- return adjustments;
-}
-
-/* Analyze the collected accesses and produce a plan what to do with the
- parameters in the form of adjustments, NULL meaning nothing. */
-
-static ipa_parm_adjustment_vec
-analyze_all_param_acesses (void)
-{
- enum ipa_splicing_result repr_state;
- bool proceed = false;
- int i, adjustments_count = 0;
- vec<access_p> representatives;
- ipa_parm_adjustment_vec adjustments;
-
- repr_state = splice_all_param_accesses (representatives);
- if (repr_state == NO_GOOD_ACCESS)
- return ipa_parm_adjustment_vec ();
-
- /* If there are any parameters passed by reference which are not modified
- directly, we need to check whether they can be modified indirectly. */
- if (repr_state == UNMODIF_BY_REF_ACCESSES)
- {
- analyze_caller_dereference_legality (representatives);
- analyze_modified_params (representatives);
- }
-
- for (i = 0; i < func_param_count; i++)
- {
- struct access *repr = representatives[i];
-
- if (repr && !no_accesses_p (repr))
- {
- if (repr->grp_scalar_ptr)
- {
- adjustments_count++;
- if (repr->grp_not_necessarilly_dereferenced
- || repr->grp_maybe_modified)
- representatives[i] = NULL;
- else
- {
- proceed = true;
- sra_stats.scalar_by_ref_to_by_val++;
- }
- }
- else
- {
- int new_components = decide_one_param_reduction (repr);
-
- if (new_components == 0)
- {
- representatives[i] = NULL;
- adjustments_count++;
- }
- else
- {
- adjustments_count += new_components;
- sra_stats.aggregate_params_reduced++;
- sra_stats.param_reductions_created += new_components;
- proceed = true;
- }
- }
- }
- else
- {
- if (no_accesses_p (repr))
- {
- proceed = true;
- sra_stats.deleted_unused_parameters++;
- }
- adjustments_count++;
- }
- }
-
- if (!proceed && dump_file)
- fprintf (dump_file, "NOT proceeding to change params.\n");
-
- if (proceed)
- adjustments = turn_representatives_into_adjustments (representatives,
- adjustments_count);
- else
- adjustments = ipa_parm_adjustment_vec ();
-
- representatives.release ();
- return adjustments;
-}
-
-/* If a parameter replacement identified by ADJ does not yet exist in the form
- of declaration, create it and record it, otherwise return the previously
- created one. */
-
-static tree
-get_replaced_param_substitute (struct ipa_parm_adjustment *adj)
-{
- tree repl;
- if (!adj->new_ssa_base)
- {
- char *pretty_name = make_fancy_name (adj->base);
-
- repl = create_tmp_reg (TREE_TYPE (adj->base), "ISR");
- DECL_NAME (repl) = get_identifier (pretty_name);
- DECL_NAMELESS (repl) = 1;
- obstack_free (&name_obstack, pretty_name);
-
- adj->new_ssa_base = repl;
- }
- else
- repl = adj->new_ssa_base;
- return repl;
-}
-
-/* Find the first adjustment for a particular parameter BASE in a vector of
- ADJUSTMENTS which is not a copy_param. Return NULL if there is no such
- adjustment. */
-
-static struct ipa_parm_adjustment *
-get_adjustment_for_base (ipa_parm_adjustment_vec adjustments, tree base)
-{
- int i, len;
-
- len = adjustments.length ();
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
-
- adj = &adjustments[i];
- if (adj->op != IPA_PARM_OP_COPY && adj->base == base)
- return adj;
- }
-
- return NULL;
-}
-
-/* If OLD_NAME, which is being defined by statement STMT, is an SSA_NAME of a
- parameter which is to be removed because its value is not used, create a new
- SSA_NAME relating to a replacement VAR_DECL, replace all uses of the
- original with it and return it. If there is no need to re-map, return NULL.
- ADJUSTMENTS is a pointer to a vector of IPA-SRA adjustments. */
-
-static tree
-replace_removed_params_ssa_names (tree old_name, gimple *stmt,
- ipa_parm_adjustment_vec adjustments)
-{
- struct ipa_parm_adjustment *adj;
- tree decl, repl, new_name;
-
- if (TREE_CODE (old_name) != SSA_NAME)
- return NULL;
-
- decl = SSA_NAME_VAR (old_name);
- if (decl == NULL_TREE
- || TREE_CODE (decl) != PARM_DECL)
- return NULL;
-
- adj = get_adjustment_for_base (adjustments, decl);
- if (!adj)
- return NULL;
-
- repl = get_replaced_param_substitute (adj);
- new_name = make_ssa_name (repl, stmt);
- SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_name)
- = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (old_name);
-
- if (dump_file)
- {
- fprintf (dump_file, "replacing an SSA name of a removed param ");
- print_generic_expr (dump_file, old_name);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, new_name);
- fprintf (dump_file, "\n");
- }
-
- replace_uses_by (old_name, new_name);
- return new_name;
-}
-
-/* If the statement STMT contains any expressions that need to replaced with a
- different one as noted by ADJUSTMENTS, do so. Handle any potential type
- incompatibilities (GSI is used to accommodate conversion statements and must
- point to the statement). Return true iff the statement was modified. */
-
-static bool
-sra_ipa_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi,
- ipa_parm_adjustment_vec adjustments)
-{
- tree *lhs_p, *rhs_p;
- bool any;
-
- if (!gimple_assign_single_p (stmt))
- return false;
-
- rhs_p = gimple_assign_rhs1_ptr (stmt);
- lhs_p = gimple_assign_lhs_ptr (stmt);
-
- any = ipa_modify_expr (rhs_p, false, adjustments);
- any |= ipa_modify_expr (lhs_p, false, adjustments);
- if (any)
- {
- tree new_rhs = NULL_TREE;
-
- if (!useless_type_conversion_p (TREE_TYPE (*lhs_p), TREE_TYPE (*rhs_p)))
- {
- if (TREE_CODE (*rhs_p) == CONSTRUCTOR)
- {
- /* V_C_Es of constructors can cause trouble (PR 42714). */
- if (is_gimple_reg_type (TREE_TYPE (*lhs_p)))
- *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p));
- else
- *rhs_p = build_constructor (TREE_TYPE (*lhs_p),
- NULL);
- }
- else
- new_rhs = fold_build1_loc (gimple_location (stmt),
- VIEW_CONVERT_EXPR, TREE_TYPE (*lhs_p),
- *rhs_p);
- }
- else if (REFERENCE_CLASS_P (*rhs_p)
- && is_gimple_reg_type (TREE_TYPE (*lhs_p))
- && !is_gimple_reg (*lhs_p))
- /* This can happen when an assignment in between two single field
- structures is turned into an assignment in between two pointers to
- scalars (PR 42237). */
- new_rhs = *rhs_p;
-
- if (new_rhs)
- {
- tree tmp = force_gimple_operand_gsi (gsi, new_rhs, true, NULL_TREE,
- true, GSI_SAME_STMT);
-
- gimple_assign_set_rhs_from_tree (gsi, tmp);
- }
-
- return true;
- }
-
- return false;
-}
-
-/* Traverse the function body and all modifications as described in
- ADJUSTMENTS. Return true iff the CFG has been changed. */
-
-bool
-ipa_sra_modify_function_body (ipa_parm_adjustment_vec adjustments)
-{
- bool cfg_changed = false;
- basic_block bb;
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple_stmt_iterator gsi;
-
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
- tree new_lhs, old_lhs = gimple_phi_result (phi);
- new_lhs = replace_removed_params_ssa_names (old_lhs, phi, adjustments);
- if (new_lhs)
- {
- gimple_phi_set_result (phi, new_lhs);
- release_ssa_name (old_lhs);
- }
- }
-
- gsi = gsi_start_bb (bb);
- while (!gsi_end_p (gsi))
- {
- gimple *stmt = gsi_stmt (gsi);
- bool modified = false;
- tree *t;
- unsigned i;
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_RETURN:
- t = gimple_return_retval_ptr (as_a <greturn *> (stmt));
- if (*t != NULL_TREE)
- modified |= ipa_modify_expr (t, true, adjustments);
- break;
-
- case GIMPLE_ASSIGN:
- modified |= sra_ipa_modify_assign (stmt, &gsi, adjustments);
- break;
-
- case GIMPLE_CALL:
- /* Operands must be processed before the lhs. */
- for (i = 0; i < gimple_call_num_args (stmt); i++)
- {
- t = gimple_call_arg_ptr (stmt, i);
- modified |= ipa_modify_expr (t, true, adjustments);
- }
-
- if (gimple_call_lhs (stmt))
- {
- t = gimple_call_lhs_ptr (stmt);
- modified |= ipa_modify_expr (t, false, adjustments);
- }
- break;
-
- case GIMPLE_ASM:
- {
- gasm *asm_stmt = as_a <gasm *> (stmt);
- for (i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
- {
- t = &TREE_VALUE (gimple_asm_input_op (asm_stmt, i));
- modified |= ipa_modify_expr (t, true, adjustments);
- }
- for (i = 0; i < gimple_asm_noutputs (asm_stmt); i++)
- {
- t = &TREE_VALUE (gimple_asm_output_op (asm_stmt, i));
- modified |= ipa_modify_expr (t, false, adjustments);
- }
- }
- break;
-
- default:
- break;
- }
-
- def_operand_p defp;
- ssa_op_iter iter;
- FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_DEF)
- {
- tree old_def = DEF_FROM_PTR (defp);
- if (tree new_def = replace_removed_params_ssa_names (old_def, stmt,
- adjustments))
- {
- SET_DEF (defp, new_def);
- release_ssa_name (old_def);
- modified = true;
- }
- }
-
- if (modified)
- {
- update_stmt (stmt);
- if (maybe_clean_eh_stmt (stmt)
- && gimple_purge_dead_eh_edges (gimple_bb (stmt)))
- cfg_changed = true;
- }
- gsi_next (&gsi);
- }
- }
-
- return cfg_changed;
-}
-
-/* Call gimple_debug_bind_reset_value on all debug statements describing
- gimple register parameters that are being removed or replaced. */
-
-static void
-sra_ipa_reset_debug_stmts (ipa_parm_adjustment_vec adjustments)
-{
- int i, len;
- gimple_stmt_iterator *gsip = NULL, gsi;
-
- if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
- {
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
- gsip = &gsi;
- }
- len = adjustments.length ();
- for (i = 0; i < len; i++)
- {
- struct ipa_parm_adjustment *adj;
- imm_use_iterator ui;
- gimple *stmt;
- gdebug *def_temp;
- tree name, vexpr, copy = NULL_TREE;
- use_operand_p use_p;
-
- adj = &adjustments[i];
- if (adj->op == IPA_PARM_OP_COPY || !is_gimple_reg (adj->base))
- continue;
- name = ssa_default_def (cfun, adj->base);
- vexpr = NULL;
- if (name)
- FOR_EACH_IMM_USE_STMT (stmt, ui, name)
- {
- if (gimple_clobber_p (stmt))
- {
- gimple_stmt_iterator cgsi = gsi_for_stmt (stmt);
- unlink_stmt_vdef (stmt);
- gsi_remove (&cgsi, true);
- release_defs (stmt);
- continue;
- }
- /* All other users must have been removed by
- ipa_sra_modify_function_body. */
- gcc_assert (is_gimple_debug (stmt));
- if (vexpr == NULL && gsip != NULL)
- {
- gcc_assert (TREE_CODE (adj->base) == PARM_DECL);
- vexpr = make_node (DEBUG_EXPR_DECL);
- def_temp = gimple_build_debug_source_bind (vexpr, adj->base,
- NULL);
- DECL_ARTIFICIAL (vexpr) = 1;
- TREE_TYPE (vexpr) = TREE_TYPE (name);
- SET_DECL_MODE (vexpr, DECL_MODE (adj->base));
- gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
- }
- if (vexpr)
- {
- FOR_EACH_IMM_USE_ON_STMT (use_p, ui)
- SET_USE (use_p, vexpr);
- }
- else
- gimple_debug_bind_reset_value (stmt);
- update_stmt (stmt);
- }
- /* Create a VAR_DECL for debug info purposes. */
- if (!DECL_IGNORED_P (adj->base))
- {
- copy = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
- VAR_DECL, DECL_NAME (adj->base),
- TREE_TYPE (adj->base));
- if (DECL_PT_UID_SET_P (adj->base))
- SET_DECL_PT_UID (copy, DECL_PT_UID (adj->base));
- TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (adj->base);
- TREE_READONLY (copy) = TREE_READONLY (adj->base);
- TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (adj->base);
- DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (adj->base);
- DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (adj->base);
- DECL_IGNORED_P (copy) = DECL_IGNORED_P (adj->base);
- DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (adj->base);
- DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
- SET_DECL_RTL (copy, 0);
- TREE_USED (copy) = 1;
- DECL_CONTEXT (copy) = current_function_decl;
- add_local_decl (cfun, copy);
- DECL_CHAIN (copy) =
- BLOCK_VARS (DECL_INITIAL (current_function_decl));
- BLOCK_VARS (DECL_INITIAL (current_function_decl)) = copy;
- }
- if (gsip != NULL && copy && target_for_debug_bind (adj->base))
- {
- gcc_assert (TREE_CODE (adj->base) == PARM_DECL);
- if (vexpr)
- def_temp = gimple_build_debug_bind (copy, vexpr, NULL);
- else
- def_temp = gimple_build_debug_source_bind (copy, adj->base,
- NULL);
- gsi_insert_before (gsip, def_temp, GSI_SAME_STMT);
- }
- }
-}
-
-/* Return false if all callers have at least as many actual arguments as there
- are formal parameters in the current function and that their types
- match. */
-
-static bool
-some_callers_have_mismatched_arguments_p (struct cgraph_node *node,
- void *data ATTRIBUTE_UNUSED)
-{
- struct cgraph_edge *cs;
- for (cs = node->callers; cs; cs = cs->next_caller)
- if (!cs->call_stmt || !callsite_arguments_match_p (cs->call_stmt))
- return true;
-
- return false;
-}
-
-/* Return false if all callers have vuse attached to a call statement. */
-
-static bool
-some_callers_have_no_vuse_p (struct cgraph_node *node,
- void *data ATTRIBUTE_UNUSED)
-{
- struct cgraph_edge *cs;
- for (cs = node->callers; cs; cs = cs->next_caller)
- if (!cs->call_stmt || !gimple_vuse (cs->call_stmt))
- return true;
-
- return false;
-}
-
-/* Convert all callers of NODE. */
-
-static bool
-convert_callers_for_node (struct cgraph_node *node,
- void *data)
-{
- ipa_parm_adjustment_vec *adjustments = (ipa_parm_adjustment_vec *) data;
- bitmap recomputed_callers = BITMAP_ALLOC (NULL);
- struct cgraph_edge *cs;
-
- for (cs = node->callers; cs; cs = cs->next_caller)
- {
- push_cfun (DECL_STRUCT_FUNCTION (cs->caller->decl));
-
- if (dump_file)
- fprintf (dump_file, "Adjusting call %s -> %s\n",
- cs->caller->dump_name (), cs->callee->dump_name ());
-
- ipa_modify_call_arguments (cs, cs->call_stmt, *adjustments);
-
- pop_cfun ();
- }
-
- for (cs = node->callers; cs; cs = cs->next_caller)
- if (bitmap_set_bit (recomputed_callers, cs->caller->get_uid ())
- && gimple_in_ssa_p (DECL_STRUCT_FUNCTION (cs->caller->decl)))
- compute_fn_summary (cs->caller, true);
- BITMAP_FREE (recomputed_callers);
-
- return true;
-}
-
-/* Convert all callers of NODE to pass parameters as given in ADJUSTMENTS. */
-
-static void
-convert_callers (struct cgraph_node *node, tree old_decl,
- ipa_parm_adjustment_vec adjustments)
-{
- basic_block this_block;
-
- node->call_for_symbol_and_aliases (convert_callers_for_node,
- &adjustments, false);
-
- if (!encountered_recursive_call)
- return;
-
- FOR_EACH_BB_FN (this_block, cfun)
- {
- gimple_stmt_iterator gsi;
-
- for (gsi = gsi_start_bb (this_block); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gcall *stmt;
- tree call_fndecl;
- stmt = dyn_cast <gcall *> (gsi_stmt (gsi));
- if (!stmt)
- continue;
- call_fndecl = gimple_call_fndecl (stmt);
- if (call_fndecl == old_decl)
- {
- if (dump_file)
- fprintf (dump_file, "Adjusting recursive call");
- gimple_call_set_fndecl (stmt, node->decl);
- ipa_modify_call_arguments (NULL, stmt, adjustments);
- }
- }
- }
-
- return;
-}
-
-/* Perform all the modification required in IPA-SRA for NODE to have parameters
- as given in ADJUSTMENTS. Return true iff the CFG has been changed. */
-
-static bool
-modify_function (struct cgraph_node *node, ipa_parm_adjustment_vec adjustments)
-{
- struct cgraph_node *new_node;
- bool cfg_changed;
-
- cgraph_edge::rebuild_edges ();
- free_dominance_info (CDI_DOMINATORS);
- pop_cfun ();
-
- /* This must be done after rebuilding cgraph edges for node above.
- Otherwise any recursive calls to node that are recorded in
- redirect_callers will be corrupted. */
- vec<cgraph_edge *> redirect_callers = node->collect_callers ();
- new_node = node->create_version_clone_with_body (redirect_callers, NULL,
- NULL, false, NULL, NULL,
- "isra");
- redirect_callers.release ();
-
- push_cfun (DECL_STRUCT_FUNCTION (new_node->decl));
- ipa_modify_formal_parameters (current_function_decl, adjustments);
- cfg_changed = ipa_sra_modify_function_body (adjustments);
- sra_ipa_reset_debug_stmts (adjustments);
- convert_callers (new_node, node->decl, adjustments);
- new_node->make_local ();
- return cfg_changed;
-}
-
-/* Means of communication between ipa_sra_check_caller and
- ipa_sra_preliminary_function_checks. */
-
-struct ipa_sra_check_caller_data
-{
- bool has_callers;
- bool bad_arg_alignment;
- bool has_thunk;
-};
-
-/* If NODE has a caller, mark that fact in DATA which is pointer to
- ipa_sra_check_caller_data. Also check all aggregate arguments in all known
- calls if they are unit aligned and if not, set the appropriate flag in DATA
- too. */
-
-static bool
-ipa_sra_check_caller (struct cgraph_node *node, void *data)
-{
- if (!node->callers)
- return false;
-
- struct ipa_sra_check_caller_data *iscc;
- iscc = (struct ipa_sra_check_caller_data *) data;
- iscc->has_callers = true;
-
- for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
- {
- if (cs->caller->thunk.thunk_p)
- {
- iscc->has_thunk = true;
- return true;
- }
- gimple *call_stmt = cs->call_stmt;
- unsigned count = gimple_call_num_args (call_stmt);
- for (unsigned i = 0; i < count; i++)
- {
- tree arg = gimple_call_arg (call_stmt, i);
- if (is_gimple_reg (arg))
- continue;
-
- tree offset;
- poly_int64 bitsize, bitpos;
- machine_mode mode;
- int unsignedp, reversep, volatilep = 0;
- get_inner_reference (arg, &bitsize, &bitpos, &offset, &mode,
- &unsignedp, &reversep, &volatilep);
- if (!multiple_p (bitpos, BITS_PER_UNIT))
- {
- iscc->bad_arg_alignment = true;
- return true;
- }
- }
- }
-
- return false;
-}
-
-/* Return false the function is apparently unsuitable for IPA-SRA based on it's
- attributes, return true otherwise. NODE is the cgraph node of the current
- function. */
-
-static bool
-ipa_sra_preliminary_function_checks (struct cgraph_node *node)
-{
- if (!node->can_be_local_p ())
- {
- if (dump_file)
- fprintf (dump_file, "Function not local to this compilation unit.\n");
- return false;
- }
-
- if (!node->local.can_change_signature)
- {
- if (dump_file)
- fprintf (dump_file, "Function cannot change signature.\n");
- return false;
- }
-
- if (!tree_versionable_function_p (node->decl))
- {
- if (dump_file)
- fprintf (dump_file, "Function is not versionable.\n");
- return false;
- }
-
- if (!opt_for_fn (node->decl, optimize)
- || !opt_for_fn (node->decl, flag_ipa_sra))
- {
- if (dump_file)
- fprintf (dump_file, "Function not optimized.\n");
- return false;
- }
-
- if (DECL_VIRTUAL_P (current_function_decl))
- {
- if (dump_file)
- fprintf (dump_file, "Function is a virtual method.\n");
- return false;
- }
-
- if ((DECL_ONE_ONLY (node->decl) || DECL_EXTERNAL (node->decl))
- && ipa_fn_summaries->get (node)
- && ipa_fn_summaries->get (node)->size >= MAX_INLINE_INSNS_AUTO)
- {
- if (dump_file)
- fprintf (dump_file, "Function too big to be made truly local.\n");
- return false;
- }
-
- if (cfun->stdarg)
- {
- if (dump_file)
- fprintf (dump_file, "Function uses stdarg. \n");
- return false;
- }
-
- if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
- return false;
-
- if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
- {
- if (dump_file)
- fprintf (dump_file, "Always inline function will be inlined "
- "anyway. \n");
- return false;
- }
-
- struct ipa_sra_check_caller_data iscc;
- memset (&iscc, 0, sizeof(iscc));
- node->call_for_symbol_and_aliases (ipa_sra_check_caller, &iscc, true);
- if (!iscc.has_callers)
- {
- if (dump_file)
- fprintf (dump_file,
- "Function has no callers in this compilation unit.\n");
- return false;
- }
-
- if (iscc.bad_arg_alignment)
- {
- if (dump_file)
- fprintf (dump_file,
- "A function call has an argument with non-unit alignment.\n");
- return false;
- }
-
- if (iscc.has_thunk)
- {
- if (dump_file)
- fprintf (dump_file,
- "A has thunk.\n");
- return false;
- }
-
- return true;
-}
-
-/* Perform early interprocedural SRA. */
-
-static unsigned int
-ipa_early_sra (void)
-{
- struct cgraph_node *node = cgraph_node::get (current_function_decl);
- ipa_parm_adjustment_vec adjustments;
- int ret = 0;
-
- if (!ipa_sra_preliminary_function_checks (node))
- return 0;
-
- sra_initialize ();
- sra_mode = SRA_MODE_EARLY_IPA;
-
- if (!find_param_candidates ())
- {
- if (dump_file)
- fprintf (dump_file, "Function has no IPA-SRA candidates.\n");
- goto simple_out;
- }
-
- if (node->call_for_symbol_and_aliases
- (some_callers_have_mismatched_arguments_p, NULL, true))
- {
- if (dump_file)
- fprintf (dump_file, "There are callers with insufficient number of "
- "arguments or arguments with type mismatches.\n");
- goto simple_out;
- }
-
- if (node->call_for_symbol_and_aliases
- (some_callers_have_no_vuse_p, NULL, true))
- {
- if (dump_file)
- fprintf (dump_file, "There are callers with no VUSE attached "
- "to a call stmt.\n");
- goto simple_out;
- }
-
- bb_dereferences = XCNEWVEC (HOST_WIDE_INT,
- func_param_count
- * last_basic_block_for_fn (cfun));
- final_bbs = BITMAP_ALLOC (NULL);
-
- scan_function ();
- if (encountered_apply_args)
- {
- if (dump_file)
- fprintf (dump_file, "Function calls __builtin_apply_args().\n");
- goto out;
- }
-
- if (encountered_unchangable_recursive_call)
- {
- if (dump_file)
- fprintf (dump_file, "Function calls itself with insufficient "
- "number of arguments.\n");
- goto out;
- }
-
- adjustments = analyze_all_param_acesses ();
- if (!adjustments.exists ())
- goto out;
- if (dump_file)
- ipa_dump_param_adjustments (dump_file, adjustments, current_function_decl);
-
- if (modify_function (node, adjustments))
- ret = TODO_update_ssa | TODO_cleanup_cfg;
- else
- ret = TODO_update_ssa;
- adjustments.release ();
-
- statistics_counter_event (cfun, "Unused parameters deleted",
- sra_stats.deleted_unused_parameters);
- statistics_counter_event (cfun, "Scalar parameters converted to by-value",
- sra_stats.scalar_by_ref_to_by_val);
- statistics_counter_event (cfun, "Aggregate parameters broken up",
- sra_stats.aggregate_params_reduced);
- statistics_counter_event (cfun, "Aggregate parameter components created",
- sra_stats.param_reductions_created);
-
- out:
- BITMAP_FREE (final_bbs);
- free (bb_dereferences);
- simple_out:
- sra_deinitialize ();
- return ret;
-}
-
-namespace {
-
-const pass_data pass_data_early_ipa_sra =
-{
- GIMPLE_PASS, /* type */
- "eipa_sra", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- TV_IPA_SRA, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_symtab, /* todo_flags_finish */
-};
-
-class pass_early_ipa_sra : public gimple_opt_pass
-{
-public:
- pass_early_ipa_sra (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_early_ipa_sra, ctxt)
- {}
-
- /* opt_pass methods: */
- virtual bool gate (function *) { return flag_ipa_sra && dbg_cnt (eipa_sra); }
- virtual unsigned int execute (function *) { return ipa_early_sra (); }
-
-}; // class pass_early_ipa_sra
-
-} // anon namespace
-
-gimple_opt_pass *
-make_pass_early_ipa_sra (gcc::context *ctxt)
-{
- return new pass_early_ipa_sra (ctxt);
-}
--- /dev/null
+/* Scalar Replacement of Aggregates (SRA) converts some structure
+ references into scalar references, exposing them to the scalar
+ optimizers.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+bool type_internals_preclude_sra_p (tree type, const char **msg);
+
+/* Return true iff TYPE is stdarg va_list type (which early SRA and IPA-SRA
+ should leave alone). */
+
+static inline bool
+is_va_list_type (tree type)
+{
+ return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node);
+}