/* Tail call optimization on trees.
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
- Free Software Foundation, Inc.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
This file is part of GCC.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
+#include "stor-layout.h"
#include "tm_p.h"
+#include "predict.h"
+#include "vec.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "machmode.h"
#include "hard-reg-set.h"
-#include "basic-block.h"
+#include "input.h"
#include "function.h"
-#include "tree-flow.h"
-#include "tree-dump.h"
-#include "diagnostic.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "gimple-ssa.h"
+#include "tree-cfg.h"
+#include "tree-phinodes.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "tree-into-ssa.h"
+#include "expr.h"
+#include "tree-dfa.h"
+#include "gimple-pretty-print.h"
#include "except.h"
#include "tree-pass.h"
#include "flags.h"
#include "langhooks.h"
#include "dbgcnt.h"
+#include "target.h"
+#include "cfgloop.h"
+#include "common/common-target.h"
+#include "hash-map.h"
+#include "plugin-api.h"
+#include "ipa-ref.h"
+#include "cgraph.h"
+#include "ipa-utils.h"
/* The file implements tail recursion elimination.  It is also used to
   analyze tail calls in general, passing the results to the rtl level
return acc;
}
- To do this, we maintain two accumulators (a_acc and m_acc) that indicate
+ To do this, we maintain two accumulators (a_acc and m_acc) that indicate
when we reach the return x statement, we should return a_acc + x * m_acc
instead.  They are initially set to 0 and 1, respectively,
so the semantics of the function are obviously preserved.  If we are
1) Just return x, where x is not in any of the remaining special shapes.
We rewrite this to a gimple equivalent of return m_acc * x + a_acc.
-
+
2) return f (...), where f is the current function, is rewritten in a
classical tail-recursion elimination way, into assignment of arguments
and jump to the start of the function. Values of the accumulators
are unchanged.
-
+
3) return a + m * f(...), where a and m do not depend on call to f.
To preserve the semantics described before we want this to be rewritten
in such a way that we finally return
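
   (An illustrative sketch, using hypothetical code not taken from the
   original comment: case 3 covers functions such as

     int fact (int n)
     {
       if (n < 2)
	 return 1;
       return n * fact (n - 1);
     }

   which the accumulator transformation turns into the equivalent of

     int fact (int n)
     {
       int m_acc = 1;

       while (n >= 2)
	 m_acc *= n--;

       return m_acc * 1;
     }

   here only the multiplicative accumulator is needed, so a_acc stays 0.)
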
static bool
suitable_for_tail_opt_p (void)
{
- referenced_var_iterator rvi;
- tree var;
-
if (cfun->stdarg)
return false;
- /* No local variable nor structure field should be call-used. We
- ignore any kind of memory tag, as these are not real variables. */
-
- FOR_EACH_REFERENCED_VAR (var, rvi)
- {
- if (!is_global_var (var)
- && !MTAG_P (var)
- && (gimple_aliases_computed_p (cfun)? is_call_used (var)
- : TREE_ADDRESSABLE (var)))
- return false;
- }
-
return true;
}
/* Returns false when the function is not suitable for tail call optimization
/* If we are using sjlj exceptions, we may need to add a call to
     _Unwind_SjLj_Unregister at exit of the function, which means
that we cannot do any sibcall transformations. */
- if (USING_SJLJ_EXCEPTIONS && current_function_has_exception_handlers ())
+ if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
+ && current_function_has_exception_handlers ())
return false;
/* Any function that calls setjmp might have longjmp called from
but not in all cases. See PR15387 and PR19616. Revisit for 4.1. */
for (param = DECL_ARGUMENTS (current_function_decl);
param;
- param = TREE_CHAIN (param))
+ param = DECL_CHAIN (param))
if (TREE_ADDRESSABLE (param))
return false;
bb->aux = &bb->aux;
while (1)
- {
+ {
at = SSA_NAME_DEF_STMT (expr);
bb = gimple_bb (at);
process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
tree *a, tree *ass_var)
{
- tree op0, op1, non_ass_var;
+ tree op0, op1 = NULL_TREE, non_ass_var = NULL_TREE;
tree dest = gimple_assign_lhs (stmt);
enum tree_code code = gimple_assign_rhs_code (stmt);
enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code);
tree src_var = gimple_assign_rhs1 (stmt);
-
+
/* See if this is a simple copy operation of an SSA name to the function
result. In that case we may have a simple tail call. Ignore type
conversions that can never produce extra code between the function
{
/* Reject a tailcall if the type conversion might need
additional code. */
- if (gimple_assign_cast_p (stmt)
- && TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
- return false;
+ if (gimple_assign_cast_p (stmt))
+ {
+ if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
+ return false;
+
+ /* Even if the type modes are the same, if the precision of the
+ type is smaller than mode's precision,
+ reduce_to_bit_field_precision would generate additional code. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
+ && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
+ > TYPE_PRECISION (TREE_TYPE (dest))))
+ return false;
+ }
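
      /* An illustrative note, not part of this change: such
	 narrow-precision integral types arise e.g. for boolean-like
	 types whose TYPE_PRECISION is 1 while their mode is QImode;
	 the value coming back from the call would first have to be
	 reduced to that precision, which needs extra code after the
	 call, so the tail call must be given up.  */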
if (src_var != *ass_var)
return false;
return true;
}
- if (rhs_class != GIMPLE_BINARY_RHS)
- return false;
+ switch (rhs_class)
+ {
+ case GIMPLE_BINARY_RHS:
+ op1 = gimple_assign_rhs2 (stmt);
+
+ /* Fall through. */
+
+ case GIMPLE_UNARY_RHS:
+ op0 = gimple_assign_rhs1 (stmt);
+ break;
+
+ default:
+ return false;
+ }
/* Accumulator optimizations will reverse the order of operations.
We can only do that for floating-point types if we're assuming
if (FLOAT_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
return false;
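
  /* An illustrative note with hypothetical numbers, not part of this
     change: for a chain like 0.1 + (0.1 + (0.1 + x)) the accumulator
     form effectively computes ((0.1 + 0.1) + 0.1) + x, and the two
     orderings may round differently, so the rewrite is only safe when
     floating-point reassociation is permitted.  */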
- /* We only handle the code like
-
- x = call ();
- y = m * x;
- z = y + a;
- return z;
-
- TODO -- Extend it for cases where the linear transformation of the output
- is expressed in a more complicated way. */
-
- op0 = gimple_assign_rhs1 (stmt);
- op1 = gimple_assign_rhs2 (stmt);
-
- if (op0 == *ass_var
- && (non_ass_var = independent_of_stmt_p (op1, stmt, call)))
+ if (rhs_class == GIMPLE_UNARY_RHS)
+ ;
+ else if (op0 == *ass_var
+ && (non_ass_var = independent_of_stmt_p (op1, stmt, call)))
;
else if (op1 == *ass_var
&& (non_ass_var = independent_of_stmt_p (op0, stmt, call)))
switch (code)
{
case PLUS_EXPR:
- /* There should be no previous addition. TODO -- it should be fairly
- straightforward to lift this restriction -- just allow storing
- more complicated expressions in *A, and gimplify it in
- adjust_accumulator_values. */
- if (*a)
+ *a = non_ass_var;
+ *ass_var = dest;
+ return true;
+
+ case POINTER_PLUS_EXPR:
+ if (op0 != *ass_var)
return false;
*a = non_ass_var;
*ass_var = dest;
return true;
case MULT_EXPR:
- /* Similar remark applies here. Handling multiplication after addition
- is just slightly more complicated -- we need to multiply both *A and
- *M. */
- if (*a || *m)
- return false;
*m = non_ass_var;
*ass_var = dest;
return true;
- /* TODO -- Handle other codes (NEGATE_EXPR, MINUS_EXPR,
- POINTER_PLUS_EXPR). */
+ case NEGATE_EXPR:
+ *m = build_minus_one_cst (TREE_TYPE (op0));
+ *ass_var = dest;
+ return true;
+
+ case MINUS_EXPR:
+ if (*ass_var == op0)
+ *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
+ else
+ {
+ *m = build_minus_one_cst (TREE_TYPE (non_ass_var));
+ *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
+ }
+
+ *ass_var = dest;
+ return true;
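
      /* (Illustrative note, not from the original sources: together
	 with the way find_tail_calls combines the results, the pair
	 (*M, *A) describes the statement as DEST = *M * (SRC + *A);
	 for DEST = C - SRC this gives *M = -1 and *A = -C, because
	 -1 * (SRC + -C) == C - SRC.)  */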
+
+ /* TODO -- Handle POINTER_PLUS_EXPR. */
default:
return false;
{
basic_block dest = e->dest;
gimple_stmt_iterator gsi;
-
+
for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
tree m, a;
basic_block abb;
size_t idx;
+ tree var;
if (!single_succ_p (bb))
return;
{
stmt = gsi_stmt (gsi);
- /* Ignore labels. */
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ /* Ignore labels, returns, clobbers and debug stmts. */
+ if (gimple_code (stmt) == GIMPLE_LABEL
+ || gimple_code (stmt) == GIMPLE_RETURN
+ || gimple_clobber_p (stmt)
+ || is_gimple_debug (stmt))
continue;
/* Check for a call. */
break;
}
- /* If the statement has virtual or volatile operands, fail. */
- if (!ZERO_SSA_OPERANDS (stmt, (SSA_OP_VUSE | SSA_OP_VIRTUAL_DEFS))
- || gimple_has_volatile_ops (stmt)
- || (!gimple_aliases_computed_p (cfun)
- && gimple_references_memory_p (stmt)))
+ /* If the statement references memory or volatile operands, fail. */
+ if (gimple_references_memory_p (stmt)
+ || gimple_has_volatile_ops (stmt))
return;
}
return;
}
- /* If the LHS of our call is not just a simple register, we can't
+ /* If the LHS of our call is not just a simple register, we can't
transform this into a tail or sibling call. This situation happens,
in (e.g.) "*p = foo()" where foo returns a struct. In this case
we won't have a temporary here, but we need to carry out the side
/* We found the call, check whether it is suitable. */
tail_recursion = false;
func = gimple_call_fndecl (call);
- if (func == current_function_decl)
+ if (func
+ && !DECL_BUILT_IN (func)
+ && recursive_call_p (current_function_decl, func))
{
tree arg;
+
for (param = DECL_ARGUMENTS (func), idx = 0;
param && idx < gimple_call_num_args (call);
- param = TREE_CHAIN (param), idx ++)
+ param = DECL_CHAIN (param), idx ++)
{
arg = gimple_call_arg (call, idx);
if (param != arg)
tail_recursion = true;
}
+ /* Make sure the tail invocation of this function does not refer
+ to local variables. */
+ FOR_EACH_LOCAL_DECL (cfun, idx, var)
+ {
+ if (TREE_CODE (var) != PARM_DECL
+ && auto_var_in_fn_p (var, cfun->decl)
+ && (ref_maybe_used_by_stmt_p (call, var)
+ || call_may_clobber_ref_p (call, var)))
+ return;
+ }
+
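	  /* An illustrative sketch with hypothetical code, not part of
	     this change: in

	       int f (int *p, int n)
	       {
		 int local = n;
		 if (n == 0)
		   return *p;
		 return f (&local, n - 1);
	       }

	     the recursive call reads `local' through the passed pointer,
	     so the current frame cannot be reused for the call and tail
	     recursion has to be rejected.  */
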
/* Now check the statements after the call. None of them has virtual
operands, so they may only depend on the call through its return
value. The return value should also be dependent on each of them,
agsi = gsi;
while (1)
{
+ tree tmp_a = NULL_TREE;
+ tree tmp_m = NULL_TREE;
gsi_next (&agsi);
while (gsi_end_p (agsi))
if (gimple_code (stmt) == GIMPLE_RETURN)
break;
+ if (gimple_clobber_p (stmt))
+ continue;
+
+ if (is_gimple_debug (stmt))
+ continue;
+
if (gimple_code (stmt) != GIMPLE_ASSIGN)
return;
/* This is a gimple assign. */
- if (! process_assignment (stmt, gsi, &m, &a, &ass_var))
+ if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var))
return;
+
+ if (tmp_a)
+ {
+ tree type = TREE_TYPE (tmp_a);
+ if (a)
+ a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
+ else
+ a = tmp_a;
+ }
+ if (tmp_m)
+ {
+ tree type = TREE_TYPE (tmp_m);
+ if (m)
+ m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
+ else
+ m = tmp_m;
+
+ if (a)
+ a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
+ }
}
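
      /* An illustrative sketch, not from the original sources: for

	   return 1 + 2 * f (x);

	 the statement multiplying the call result is seen first and
	 yields tmp_m = 2, the following addition yields tmp_a = 1, and
	 the combination above leaves m = 2 and a = 1, i.e. the value
	 reaching the return equals 2 * x + 1 where x is the call
	 result.  */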
/* See if this is a tail call we can handle. */
if (!tail_recursion && (m || a))
return;
+ /* For pointers only allow additions. */
+ if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
+ return;
+
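  /* For example (an illustrative sketch with hypothetical code, not part
     of this change), for a pointer-valued function such as

       char *
       skip (char *p, int n)
       {
	 if (n == 0)
	   return p;
	 return skip (p, n - 1) + 1;
       }

     only the additive accumulator makes sense; a multiplicative
     accumulator would have to scale a pointer, so a non-NULL M forces
     giving up here.  */
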
nw = XNEW (struct tailcall);
nw->call_gsi = gsi;
break;
gcc_assert (!gsi_end_p (gsi));
- add_phi_arg (gsi_stmt (gsi), phi_arg, e);
+ add_phi_arg (gsi_stmt (gsi), phi_arg, e, UNKNOWN_LOCATION);
}
/* Creates a GIMPLE statement which computes the operation specified by
- CODE, OP0 and OP1 to a new variable with name LABEL and inserts the
- statement in the position specified by GSI and UPDATE. Returns the
+ CODE, ACC and OP1 to a new variable with name LABEL and inserts the
+ statement in the position specified by GSI. Returns the
tree node of the statement's result. */
static tree
-adjust_return_value_with_ops (enum tree_code code, const char *label,
- tree op0, tree op1, gimple_stmt_iterator gsi,
- enum gsi_iterator_update update)
+adjust_return_value_with_ops (enum tree_code code, const char *label,
+ tree acc, tree op1, gimple_stmt_iterator gsi)
{
tree ret_type = TREE_TYPE (DECL_RESULT (current_function_decl));
- tree tmp = create_tmp_var (ret_type, label);
- gimple stmt = gimple_build_assign_with_ops (code, tmp, op0, op1);
- tree result;
-
- add_referenced_var (tmp);
- result = make_ssa_name (tmp, stmt);
- gimple_assign_set_lhs (stmt, result);
- update_stmt (stmt);
- gsi_insert_before (&gsi, stmt, update);
+ tree result = make_temp_ssa_name (ret_type, NULL, label);
+ gimple stmt;
+
+ if (POINTER_TYPE_P (ret_type))
+ {
+ gcc_assert (code == PLUS_EXPR && TREE_TYPE (acc) == sizetype);
+ code = POINTER_PLUS_EXPR;
+ }
+ if (types_compatible_p (TREE_TYPE (acc), TREE_TYPE (op1))
+ && code != POINTER_PLUS_EXPR)
+ stmt = gimple_build_assign_with_ops (code, result, acc, op1);
+ else
+ {
+ tree tem;
+ if (code == POINTER_PLUS_EXPR)
+ tem = fold_build2 (code, TREE_TYPE (op1), op1, acc);
+ else
+ tem = fold_build2 (code, TREE_TYPE (op1),
+ fold_convert (TREE_TYPE (op1), acc), op1);
+ tree rhs = fold_convert (ret_type, tem);
+ rhs = force_gimple_operand_gsi (&gsi, rhs,
+ false, NULL, true, GSI_SAME_STMT);
+ stmt = gimple_build_assign (result, rhs);
+ }
+
+ gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
return result;
}
-/* Creates a new GIMPLE statement that adjusts the value of accumulator ACC by
+/* Creates a new GIMPLE statement that adjusts the value of accumulator ACC by
   the computation specified by CODE and OP1 and inserts the statement
at the position specified by GSI as a new statement. Returns new SSA name
of updated accumulator. */
update_accumulator_with_ops (enum tree_code code, tree acc, tree op1,
gimple_stmt_iterator gsi)
{
- gimple stmt = gimple_build_assign_with_ops (code, SSA_NAME_VAR (acc), acc,
- op1);
- tree var = make_ssa_name (SSA_NAME_VAR (acc), stmt);
- gimple_assign_set_lhs (stmt, var);
- update_stmt (stmt);
+ gimple stmt;
+ tree var = copy_ssa_name (acc, NULL);
+ if (types_compatible_p (TREE_TYPE (acc), TREE_TYPE (op1)))
+ stmt = gimple_build_assign_with_ops (code, var, acc, op1);
+ else
+ {
+ tree rhs = fold_convert (TREE_TYPE (acc),
+ fold_build2 (code,
+ TREE_TYPE (op1),
+ fold_convert (TREE_TYPE (op1), acc),
+ op1));
+ rhs = force_gimple_operand_gsi (&gsi, rhs,
+ false, NULL, false, GSI_CONTINUE_LINKING);
+ stmt = gimple_build_assign (var, rhs);
+ }
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
return var;
}
static void
adjust_accumulator_values (gimple_stmt_iterator gsi, tree m, tree a, edge back)
{
- tree var, a_acc_arg = a_acc, m_acc_arg = m_acc;
+ tree var, a_acc_arg, m_acc_arg;
+ if (m)
+ m = force_gimple_operand_gsi (&gsi, m, true, NULL, true, GSI_SAME_STMT);
+ if (a)
+ a = force_gimple_operand_gsi (&gsi, a, true, NULL, true, GSI_SAME_STMT);
+
+ a_acc_arg = a_acc;
+ m_acc_arg = m_acc;
if (a)
{
if (m_acc)
var = m_acc;
else
var = adjust_return_value_with_ops (MULT_EXPR, "acc_tmp", m_acc,
- a, gsi, GSI_NEW_STMT);
+ a, gsi);
}
else
var = a;
if (m)
retval = adjust_return_value_with_ops (MULT_EXPR, "mul_tmp", m_acc, retval,
- gsi, GSI_SAME_STMT);
+ gsi);
if (a)
retval = adjust_return_value_with_ops (PLUS_EXPR, "acc_tmp", a_acc, retval,
- gsi, GSI_SAME_STMT);
+ gsi);
gimple_return_set_retval (ret_stmt, retval);
update_stmt (ret_stmt);
}
{
tree def;
- if (!is_gimple_reg (param) || !var_ann (param))
+ if (!is_gimple_reg (param))
return false;
-
+
/* Parameters that are only defined but never used need not be copied. */
- def = gimple_default_def (cfun, param);
+ def = ssa_default_def (cfun, param);
if (!def)
return false;
gcc_assert (is_gimple_call (stmt));
- first = single_succ (ENTRY_BLOCK_PTR);
+ first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Remove the code after call_gsi that will become unreachable. The
possibly unreachable code in other blocks is removed later in
/* Number of executions of function has reduced by the tailcall. */
e = single_succ_edge (gsi_bb (t->call_gsi));
- decrease_profile (EXIT_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- decrease_profile (ENTRY_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- if (e->dest != EXIT_BLOCK_PTR)
+ decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e));
+ decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
+ EDGE_FREQUENCY (e));
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));
/* Replace the call by a jump to the start of function. */
for (param = DECL_ARGUMENTS (current_function_decl),
idx = 0, gsi = gsi_start_phis (first);
param;
- param = TREE_CHAIN (param), idx++)
+ param = DECL_CHAIN (param), idx++)
{
if (!arg_needs_copy_p (param))
continue;
phi = gsi_stmt (gsi);
gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi)));
- add_phi_arg (phi, arg, e);
+ add_phi_arg (phi, arg, e, gimple_location (stmt));
gsi_next (&gsi);
}
release_defs (call);
}
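
/* An illustrative sketch (hypothetical SSA names, not from the original
   sources): after eliminating the recursive call in sum () above, the
   first basic block carries a PHI node per copied parameter, e.g.

     n_1 = PHI <n_2(entry), n_3(back edge)>

   where n_3 is the argument of the removed call, so the next iteration
   of the new loop sees the updated argument value.  */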
-/* Add phi nodes for the virtual operands defined in the function to the
- header of the loop created by tail recursion elimination.
-
- Originally, we used to add phi nodes only for call clobbered variables,
- as the value of the non-call clobbered ones obviously cannot be used
- or changed within the recursive call. However, the local variables
- from multiple calls now share the same location, so the virtual ssa form
- requires us to say that the location dies on further iterations of the loop,
- which requires adding phi nodes.
-*/
-static void
-add_virtual_phis (void)
-{
- referenced_var_iterator rvi;
- tree var;
-
- /* The problematic part is that there is no way how to know what
- to put into phi nodes (there in fact does not have to be such
- ssa name available). A solution would be to have an artificial
- use/kill for all virtual operands in EXIT node. Unless we have
- this, we cannot do much better than to rebuild the ssa form for
- possibly affected virtual ssa names from scratch. */
-
- FOR_EACH_REFERENCED_VAR (var, rvi)
- {
- if (!is_gimple_reg (var) && gimple_default_def (cfun, var) != NULL_TREE)
- mark_sym_for_renaming (var);
- }
-}
-
/* Optimizes the tailcall described by T. If OPT_TAILCALLS is true, also
mark the tailcalls for the sibcall optimization. */
gimple stmt = gsi_stmt (t->call_gsi);
gimple_call_set_tail (stmt, true);
+ cfun->tail_call_marked = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Found tail call ");
create_tailcall_accumulator (const char *label, basic_block bb, tree init)
{
tree ret_type = TREE_TYPE (DECL_RESULT (current_function_decl));
- tree tmp = create_tmp_var (ret_type, label);
+ if (POINTER_TYPE_P (ret_type))
+ ret_type = sizetype;
+
+ tree tmp = make_temp_ssa_name (ret_type, NULL, label);
gimple phi;
- add_referenced_var (tmp);
phi = create_phi_node (tmp, bb);
  /* RET_TYPE can be a float when -ffast-math is enabled.  */
- add_phi_arg (phi, fold_convert (ret_type, init), single_pred_edge (bb));
+ add_phi_arg (phi, fold_convert (ret_type, init), single_pred_edge (bb),
+ UNKNOWN_LOCATION);
return PHI_RESULT (phi);
}
-
+
/* Optimizes tail calls in the function, turning the tail recursion
into iteration. */
bool phis_constructed = false;
struct tailcall *tailcalls = NULL, *act, *next;
bool changed = false;
- basic_block first = single_succ (ENTRY_BLOCK_PTR);
+ basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree param;
gimple stmt;
edge_iterator ei;
if (opt_tailcalls)
opt_tailcalls = suitable_for_tail_call_opt_p ();
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
/* Only traverse the normal exits, i.e. those that end with return
statement. */
if (!phis_constructed)
{
- /* Ensure that there is only one predecessor of the block. */
- if (!single_pred_p (first))
- first = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+      /* Ensure that the block has a single predecessor and no
+	 pre-existing degenerate PHI nodes; otherwise split the
+	 entry edge.  */
+ if (!single_pred_p (first)
+ || !gimple_seq_empty_p (phi_nodes (first)))
+ first =
+ split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
/* Copy the args if needed. */
for (param = DECL_ARGUMENTS (current_function_decl);
param;
- param = TREE_CHAIN (param))
+ param = DECL_CHAIN (param))
if (arg_needs_copy_p (param))
{
- tree name = gimple_default_def (cfun, param);
+ tree name = ssa_default_def (cfun, param);
tree new_name = make_ssa_name (param, SSA_NAME_DEF_STMT (name));
gimple phi;
- set_default_def (param, new_name);
+ set_ssa_default_def (cfun, param, new_name);
phi = create_phi_node (name, first);
- SSA_NAME_DEF_STMT (name) = phi;
- add_phi_arg (phi, new_name, single_pred_edge (first));
+ add_phi_arg (phi, new_name, single_pred_edge (first),
+ EXPR_LOCATION (param));
}
phis_constructed = true;
}
integer_one_node);
}
+ if (a_acc || m_acc)
+ {
+ /* When the tail call elimination using accumulators is performed,
+ statements adding the accumulated value are inserted at all exits.
+ This turns all other tail calls to non-tail ones. */
+ opt_tailcalls = false;
+ }
+
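  /* For example (an illustrative sketch, not part of this change), in

       int f (int n)
       {
	 if (n == 0)
	   return g ();
	 return n + f (n - 1);
       }

     the exit returning g () must now add a_acc to its result, so the
     call to g () can no longer be emitted as a sibling call.  */
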
for (; tailcalls; tailcalls = next)
{
next = tailcalls->next;
if (a_acc || m_acc)
{
/* Modify the remaining return statements. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
stmt = last_stmt (e->src);
}
if (changed)
- free_dominance_info (CDI_DOMINATORS);
+ {
+ /* We may have created new loops. Make them magically appear. */
+ loops_state_set (LOOPS_NEED_FIXUP);
+ free_dominance_info (CDI_DOMINATORS);
+ }
+ /* Add phi nodes for the virtual operands defined in the function to the
+ header of the loop created by tail recursion elimination. Do so
+ by triggering the SSA renamer. */
if (phis_constructed)
- add_virtual_phis ();
+ mark_virtual_operands_for_renaming (cfun);
+
if (changed)
return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
return 0;
}
-static unsigned int
-execute_tail_recursion (void)
-{
- return tree_optimize_tail_calls_1 (false);
-}
-
static bool
gate_tail_calls (void)
{
return tree_optimize_tail_calls_1 (true);
}
-struct gimple_opt_pass pass_tail_recursion =
+namespace {
+
+const pass_data pass_data_tail_recursion =
{
- {
- GIMPLE_PASS,
- "tailr", /* name */
- gate_tail_calls, /* gate */
- execute_tail_recursion, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- 0, /* tv_id */
- PROP_cfg | PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */
- }
+ GIMPLE_PASS, /* type */
+ "tailr", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
-struct gimple_opt_pass pass_tail_calls =
+class pass_tail_recursion : public gimple_opt_pass
{
- {
- GIMPLE_PASS,
- "tailc", /* name */
- gate_tail_calls, /* gate */
- execute_tail_calls, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- 0, /* tv_id */
- PROP_cfg | PROP_ssa | PROP_alias, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */
- }
+public:
+ pass_tail_recursion (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_tail_recursion, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_tail_recursion (m_ctxt); }
+ virtual bool gate (function *) { return gate_tail_calls (); }
+ virtual unsigned int execute (function *)
+ {
+ return tree_optimize_tail_calls_1 (false);
+ }
+
+}; // class pass_tail_recursion
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_tail_recursion (gcc::context *ctxt)
+{
+ return new pass_tail_recursion (ctxt);
+}
+
+namespace {
+
+const pass_data pass_data_tail_calls =
+{
+ GIMPLE_PASS, /* type */
+ "tailc", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ ( PROP_cfg | PROP_ssa ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+
+class pass_tail_calls : public gimple_opt_pass
+{
+public:
+ pass_tail_calls (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_tail_calls, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *) { return gate_tail_calls (); }
+ virtual unsigned int execute (function *) { return execute_tail_calls (); }
+
+}; // class pass_tail_calls
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_tail_calls (gcc::context *ctxt)
+{
+ return new pass_tail_calls (ctxt);
+}