/* Control flow functions for trees.
- Copyright (C) 2001-2014 Free Software Foundation, Inc.
+ Copyright (C) 2001-2016 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "hash-table.h"
-#include "hash-map.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
#include "tree.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "cgraph.h"
+#include "gimple-pretty-print.h"
+#include "diagnostic-core.h"
+#include "fold-const.h"
#include "trans-mem.h"
#include "stor-layout.h"
#include "print-tree.h"
-#include "tm_p.h"
-#include "basic-block.h"
-#include "flags.h"
-#include "function.h"
-#include "gimple-pretty-print.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
+#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
-#include "gimple-expr.h"
-#include "is-a.h"
-#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
-#include "gimple-ssa.h"
-#include "cgraph.h"
#include "tree-cfg.h"
-#include "tree-phinodes.h"
-#include "ssa-iterators.h"
-#include "stringpool.h"
-#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
-#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
-#include "tree-dump.h"
-#include "tree-pass.h"
-#include "diagnostic-core.h"
#include "except.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "tree-inline.h"
-#include "target.h"
#include "tree-ssa-live.h"
#include "omp-low.h"
#include "tree-cfgcleanup.h"
-#include "wide-int.h"
-#include "wide-int-print.h"
+#include "gimplify.h"
+#include "attribs.h"
+#include "selftest.h"
/* This file contains functions for building the Control Flow Graph (CFG)
for a function tree. */
static struct cfg_stats_d cfg_stats;
+/* Data to pass to replace_block_vars_by_duplicates_1. */
+struct replace_decls_d
+{
+ hash_map<tree, tree> *vars_map;
+ tree to_context;
+};
+
/* Hash table to store last discriminator assigned for each locus. */
struct locus_discrim_map
{
/* Hashtable helpers. */
-struct locus_discrim_hasher : typed_free_remove <locus_discrim_map>
+struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map>
{
- typedef locus_discrim_map value_type;
- typedef locus_discrim_map compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *, const compare_type *);
+ static inline hashval_t hash (const locus_discrim_map *);
+ static inline bool equal (const locus_discrim_map *,
+ const locus_discrim_map *);
};
/* Trivial hash function for a location_t. ITEM is a pointer to
a hash table entry that maps a location_t to a discriminator. */
inline hashval_t
-locus_discrim_hasher::hash (const value_type *item)
+locus_discrim_hasher::hash (const locus_discrim_map *item)
{
return LOCATION_LINE (item->locus);
}
point to the two hash table entries to compare. */
inline bool
-locus_discrim_hasher::equal (const value_type *a, const compare_type *b)
+locus_discrim_hasher::equal (const locus_discrim_map *a,
+ const locus_discrim_map *b)
{
return LOCATION_LINE (a->locus) == LOCATION_LINE (b->locus);
}
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
-static void make_gimple_switch_edges (basic_block);
+static void make_gimple_switch_edges (gswitch *, basic_block);
static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
/* Various helpers. */
-static inline bool stmt_starts_bb_p (gimple, gimple);
+static inline bool stmt_starts_bb_p (gimple *, gimple *);
static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
-static gimple first_non_label_stmt (basic_block);
-static bool verify_gimple_transaction (gimple);
-static bool call_can_make_abnormal_goto (gimple);
+static gimple *first_non_label_stmt (basic_block);
+static bool verify_gimple_transaction (gtransaction *);
+static bool call_can_make_abnormal_goto (gimple *);
/* Flowgraph optimization and cleanup. */
static void gimple_merge_blocks (basic_block, basic_block);
static void remove_bb (basic_block);
static edge find_taken_edge_computed_goto (basic_block, tree);
static edge find_taken_edge_cond_expr (basic_block, tree);
-static edge find_taken_edge_switch_expr (basic_block, tree);
-static tree find_case_label_for_value (gimple, tree);
+static edge find_taken_edge_switch_expr (gswitch *, basic_block, tree);
+static tree find_case_label_for_value (gswitch *, tree);
void
init_empty_tree_cfg_for_function (struct function *fn)
discriminator_per_locus = NULL;
}
+/* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove
+ them and propagate the information to LOOP. We assume that the annotations
+ come immediately before the condition in BB, if any. */
+
+static void
+replace_loop_annotate_in_block (basic_block bb, struct loop *loop)
+{
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ gimple *stmt = gsi_stmt (gsi);
+
+ if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
+ return;
+
+ for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ break;
+ if (!gimple_call_internal_p (stmt)
+ || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
+ break;
+
+ switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
+ {
+ case annot_expr_ivdep_kind:
+ loop->safelen = INT_MAX;
+ break;
+ case annot_expr_no_vector_kind:
+ loop->dont_vectorize = true;
+ break;
+ case annot_expr_vector_kind:
+ loop->force_vectorize = true;
+ cfun->has_force_vectorize_loops = true;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ stmt = gimple_build_assign (gimple_call_lhs (stmt),
+ gimple_call_arg (stmt, 0));
+ gsi_replace (&gsi, stmt, true);
+ }
+}
/* Look for ANNOTATE calls with loop annotation kind; if found, remove
them and propagate the information to the loop. We assume that the
annotations come immediately before the condition of the loop. */
static void
-replace_loop_annotate ()
+replace_loop_annotate (void)
{
struct loop *loop;
basic_block bb;
gimple_stmt_iterator gsi;
- gimple stmt;
+ gimple *stmt;
FOR_EACH_LOOP (loop, 0)
{
- gsi = gsi_last_bb (loop->header);
- stmt = gsi_stmt (gsi);
- if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
- continue;
- for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
- {
- stmt = gsi_stmt (gsi);
- if (gimple_code (stmt) != GIMPLE_CALL)
- break;
- if (!gimple_call_internal_p (stmt)
- || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
- break;
- switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
- {
- case annot_expr_ivdep_kind:
- loop->safelen = INT_MAX;
- break;
- case annot_expr_no_vector_kind:
- loop->dont_vectorize = true;
- break;
- case annot_expr_vector_kind:
- loop->force_vectorize = true;
- cfun->has_force_vectorize_loops = true;
- break;
- default:
- gcc_unreachable ();
- }
- stmt = gimple_build_assign (gimple_call_lhs (stmt),
- gimple_call_arg (stmt, 0));
- gsi_replace (&gsi, stmt, true);
- }
+ /* First look into the header. */
+ replace_loop_annotate_in_block (loop->header, loop);
+
+ /* Then look into the latch, if any. */
+ if (loop->latch)
+ replace_loop_annotate_in_block (loop->latch, loop);
}
/* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
{
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_CALL)
- break;
+ continue;
if (!gimple_call_internal_p (stmt)
|| gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
- break;
+ continue;
+
switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
{
case annot_expr_ivdep_kind:
default:
gcc_unreachable ();
}
+
warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
stmt = gimple_build_assign (gimple_call_lhs (stmt),
gimple_call_arg (stmt, 0));
/* Return true if T is a computed goto. */
bool
-computed_goto_p (gimple t)
+computed_goto_p (gimple *t)
{
return (gimple_code (t) == GIMPLE_GOTO
&& TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
assert_unreachable_fallthru_edge_p (edge e)
{
basic_block pred_bb = e->src;
- gimple last = last_stmt (pred_bb);
+ gimple *last = last_stmt (pred_bb);
if (last && gimple_code (last) == GIMPLE_COND)
{
basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
if (EDGE_COUNT (other_bb->succs) == 0)
{
gimple_stmt_iterator gsi = gsi_after_labels (other_bb);
- gimple stmt;
+ gimple *stmt;
if (gsi_end_p (gsi))
return false;
CFG build time and only ever clear it later. */
static void
-gimple_call_initialize_ctrl_altering (gimple stmt)
+gimple_call_initialize_ctrl_altering (gimple *stmt)
{
int flags = gimple_call_flags (stmt);
|| ((flags & ECF_TM_BUILTIN)
&& is_tm_ending_fndecl (gimple_call_fndecl (stmt)))
/* BUILT_IN_RETURN call is same as return statement. */
- || gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
+ || gimple_call_builtin_p (stmt, BUILT_IN_RETURN)
+ /* IFN_UNIQUE should be the last insn, to make checking for it
+ as cheap as possible. */
+ || (gimple_call_internal_p (stmt)
+ && gimple_call_internal_unique_p (stmt)))
gimple_call_set_ctrl_altering (stmt, true);
else
gimple_call_set_ctrl_altering (stmt, false);
}
-/* Build a flowgraph for the sequence of stmts SEQ. */
+/* Insert SEQ after BB and build a flowgraph. */
-static void
-make_blocks (gimple_seq seq)
+static basic_block
+make_blocks_1 (gimple_seq seq, basic_block bb)
{
gimple_stmt_iterator i = gsi_start (seq);
- gimple stmt = NULL;
+ gimple *stmt = NULL;
bool start_new_block = true;
bool first_stmt_of_seq = true;
- basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (!gsi_end_p (i))
{
- gimple prev_stmt;
+ gimple *prev_stmt;
prev_stmt = stmt;
stmt = gsi_stmt (i);
{
if (!first_stmt_of_seq)
gsi_split_seq_before (&i, &seq);
- bb = create_basic_block (seq, NULL, bb);
+ bb = create_basic_block (seq, bb);
start_new_block = false;
}
&& is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
{
tree lhs = gimple_get_lhs (stmt);
- tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
- gimple s = gimple_build_assign (lhs, tmp);
+ tree tmp = create_tmp_var (TREE_TYPE (lhs));
+ gimple *s = gimple_build_assign (lhs, tmp);
gimple_set_location (s, gimple_location (stmt));
gimple_set_block (s, gimple_block (stmt));
gimple_set_lhs (stmt, tmp);
gsi_next (&i);
first_stmt_of_seq = false;
}
+ return bb;
}
+/* Build a flowgraph for the sequence of stmts SEQ. */
+
+static void
+make_blocks (gimple_seq seq)
+{
+ make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+}
/* Create and return a new empty basic block after bb AFTER. */
Edge creation
---------------------------------------------------------------------------*/
-/* Fold COND_EXPR_COND of each COND_EXPR. */
-
-void
-fold_cond_expr_cond (void)
-{
- basic_block bb;
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple stmt = last_stmt (bb);
-
- if (stmt && gimple_code (stmt) == GIMPLE_COND)
- {
- location_t loc = gimple_location (stmt);
- tree cond;
- bool zerop, onep;
-
- fold_defer_overflow_warnings ();
- cond = fold_binary_loc (loc, gimple_cond_code (stmt), boolean_type_node,
- gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
- if (cond)
- {
- zerop = integer_zerop (cond);
- onep = integer_onep (cond);
- }
- else
- zerop = onep = false;
-
- fold_undefer_overflow_warnings (zerop || onep,
- stmt,
- WARN_STRICT_OVERFLOW_CONDITIONAL);
- if (zerop)
- gimple_cond_make_false (stmt);
- else if (onep)
- gimple_cond_make_true (stmt);
- }
- }
-}
-
/* If basic block BB has an abnormal edge to a basic block
containing IFN_ABNORMAL_DISPATCHER internal call, return
that the dispatcher's basic block, otherwise return NULL. */
{
gimple_stmt_iterator gsi
= gsi_start_nondebug_after_labels_bb (e->dest);
- gimple g = gsi_stmt (gsi);
+ gimple *g = gsi_stmt (gsi);
if (g
&& is_gimple_call (g)
&& gimple_call_internal_p (g)
}
/* Create the dispatcher bb. */
- *dispatcher = create_basic_block (NULL, NULL, for_bb);
+ *dispatcher = create_basic_block (NULL, for_bb);
if (computed_goto)
{
/* Factor computed gotos into a common computed goto site. Also
factored computed goto. */
tree factored_label_decl
= create_artificial_label (UNKNOWN_LOCATION);
- gimple factored_computed_goto_label
+ gimple *factored_computed_goto_label
= gimple_build_label (factored_label_decl);
gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);
/* Build our new computed goto. */
- gimple factored_computed_goto = gimple_build_goto (var);
+ gimple *factored_computed_goto = gimple_build_goto (var);
gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);
FOR_EACH_VEC_ELT (*bbs, idx, bb)
continue;
gsi = gsi_last_bb (bb);
- gimple last = gsi_stmt (gsi);
+ gimple *last = gsi_stmt (gsi);
gcc_assert (computed_goto_p (last));
/* Copy the original computed goto's destination into VAR. */
- gimple assignment
+ gimple *assignment
= gimple_build_assign (var, gimple_goto_dest (last));
gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
else
{
tree arg = inner ? boolean_true_node : boolean_false_node;
- gimple g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
+ gimple *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
1, arg);
gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
gsi_insert_after (&gsi, g, GSI_NEW_STMT);
make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
}
+/* Creates outgoing edges for BB. Returns 1 when it ends with a
+ computed goto, returns 2 when it ends with a statement that
+ might return to this function via a nonlocal goto, otherwise
+ returns 0. Updates *PCUR_REGION with the OMP region this BB is in. */
+
+static int
+make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
+{
+ gimple *last = last_stmt (bb);
+ bool fallthru = false;
+ int ret = 0;
+
+ if (!last)
+ return ret;
+
+ switch (gimple_code (last))
+ {
+ case GIMPLE_GOTO:
+ if (make_goto_expr_edges (bb))
+ ret = 1;
+ fallthru = false;
+ break;
+ case GIMPLE_RETURN:
+ {
+ edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
+ e->goto_locus = gimple_location (last);
+ fallthru = false;
+ }
+ break;
+ case GIMPLE_COND:
+ make_cond_expr_edges (bb);
+ fallthru = false;
+ break;
+ case GIMPLE_SWITCH:
+ make_gimple_switch_edges (as_a <gswitch *> (last), bb);
+ fallthru = false;
+ break;
+ case GIMPLE_RESX:
+ make_eh_edges (last);
+ fallthru = false;
+ break;
+ case GIMPLE_EH_DISPATCH:
+ fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last));
+ break;
+
+ case GIMPLE_CALL:
+ /* If this function receives a nonlocal goto, then we need to
+ make edges from this call site to all the nonlocal goto
+ handlers. */
+ if (stmt_can_make_abnormal_goto (last))
+ ret = 2;
+
+ /* If this statement has reachable exception handlers, then
+ create abnormal edges to them. */
+ make_eh_edges (last);
+
+ /* BUILTIN_RETURN is really a return statement. */
+ if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
+ {
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
+ fallthru = false;
+ }
+ /* Some calls are known not to return. */
+ else
+ fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
+ break;
+
+ case GIMPLE_ASSIGN:
+ /* A GIMPLE_ASSIGN may throw internally and thus be considered
+ control-altering. */
+ if (is_ctrl_altering_stmt (last))
+ make_eh_edges (last);
+ fallthru = true;
+ break;
+
+ case GIMPLE_ASM:
+ make_gimple_asm_edges (bb);
+ fallthru = true;
+ break;
+
+ CASE_GIMPLE_OMP:
+ fallthru = make_gimple_omp_edges (bb, pcur_region, pomp_index);
+ break;
+
+ case GIMPLE_TRANSACTION:
+ {
+ gtransaction *txn = as_a <gtransaction *> (last);
+ tree label1 = gimple_transaction_label_norm (txn);
+ tree label2 = gimple_transaction_label_uninst (txn);
+
+ if (label1)
+ make_edge (bb, label_to_block (label1), EDGE_FALLTHRU);
+ if (label2)
+ make_edge (bb, label_to_block (label2),
+ EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));
+
+ tree label3 = gimple_transaction_label_over (txn);
+ if (gimple_transaction_subcode (txn)
+ & (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
+ make_edge (bb, label_to_block (label3), EDGE_TM_ABORT);
+
+ fallthru = false;
+ }
+ break;
+
+ default:
+ gcc_assert (!stmt_ends_bb_p (last));
+ fallthru = true;
+ break;
+ }
+
+ if (fallthru)
+ make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
+
+ return ret;
+}
+
/* Join all the blocks in the flowgraph. */
static void
/* Traverse the basic block array placing edges. */
FOR_EACH_BB_FN (bb, cfun)
{
- gimple last = last_stmt (bb);
- bool fallthru;
+ int mer;
if (bb_to_omp_idx)
bb_to_omp_idx[bb->index] = cur_omp_region_idx;
- if (last)
- {
- enum gimple_code code = gimple_code (last);
- switch (code)
- {
- case GIMPLE_GOTO:
- if (make_goto_expr_edges (bb))
- ab_edge_goto.safe_push (bb);
- fallthru = false;
- break;
- case GIMPLE_RETURN:
- {
- edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
- e->goto_locus = gimple_location (last);
- fallthru = false;
- }
- break;
- case GIMPLE_COND:
- make_cond_expr_edges (bb);
- fallthru = false;
- break;
- case GIMPLE_SWITCH:
- make_gimple_switch_edges (bb);
- fallthru = false;
- break;
- case GIMPLE_RESX:
- make_eh_edges (last);
- fallthru = false;
- break;
- case GIMPLE_EH_DISPATCH:
- fallthru = make_eh_dispatch_edges (last);
- break;
-
- case GIMPLE_CALL:
- /* If this function receives a nonlocal goto, then we need to
- make edges from this call site to all the nonlocal goto
- handlers. */
- if (stmt_can_make_abnormal_goto (last))
- ab_edge_call.safe_push (bb);
-
- /* If this statement has reachable exception handlers, then
- create abnormal edges to them. */
- make_eh_edges (last);
-
- /* BUILTIN_RETURN is really a return statement. */
- if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
- {
- make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
- fallthru = false;
- }
- /* Some calls are known not to return. */
- else
- fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
- break;
-
- case GIMPLE_ASSIGN:
- /* A GIMPLE_ASSIGN may throw internally and thus be considered
- control-altering. */
- if (is_ctrl_altering_stmt (last))
- make_eh_edges (last);
- fallthru = true;
- break;
-
- case GIMPLE_ASM:
- make_gimple_asm_edges (bb);
- fallthru = true;
- break;
-
- CASE_GIMPLE_OMP:
- fallthru = make_gimple_omp_edges (bb, &cur_region,
- &cur_omp_region_idx);
- if (cur_region && bb_to_omp_idx == NULL)
- bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
- break;
-
- case GIMPLE_TRANSACTION:
- {
- tree abort_label = gimple_transaction_label (last);
- if (abort_label)
- make_edge (bb, label_to_block (abort_label), EDGE_TM_ABORT);
- fallthru = true;
- }
- break;
+ mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
+ if (mer == 1)
+ ab_edge_goto.safe_push (bb);
+ else if (mer == 2)
+ ab_edge_call.safe_push (bb);
- default:
- gcc_assert (!stmt_ends_bb_p (last));
- fallthru = true;
- }
- }
- else
- fallthru = true;
-
- if (fallthru)
- make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
+ if (cur_region && bb_to_omp_idx == NULL)
+ bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
}
/* Computed gotos are hell to deal with, especially if there are
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple label_stmt = gsi_stmt (gsi);
+ glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
tree target;
- if (gimple_code (label_stmt) != GIMPLE_LABEL)
+ if (!label_stmt)
break;
target = gimple_label_label (label_stmt);
if (!gsi_end_p (gsi))
{
/* Make an edge to every setjmp-like call. */
- gimple call_stmt = gsi_stmt (gsi);
+ gimple *call_stmt = gsi_stmt (gsi);
if (is_gimple_call (call_stmt)
&& ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
|| gimple_call_builtin_p (call_stmt,
XDELETE (bb_to_omp_idx);
free_omp_regions ();
+}
+
+/* Add SEQ after GSI. Start new bb after GSI, and create further bbs as
+ needed. Returns true if new bbs were created.
+ Note: This is transitional code, and should not be used for new code. We
+ should be able to get rid of this by rewriting all target va-arg
+ gimplification hooks to use an interface gimple_build_cond_value as described
+ in https://gcc.gnu.org/ml/gcc-patches/2015-02/msg01194.html. */
- /* Fold COND_EXPR_COND of each COND_EXPR. */
- fold_cond_expr_cond ();
+bool
+gimple_find_sub_bbs (gimple_seq seq, gimple_stmt_iterator *gsi)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+ basic_block bb = gimple_bb (stmt);
+ basic_block lastbb, afterbb;
+ int old_num_bbs = n_basic_blocks_for_fn (cfun);
+ edge e;
+ lastbb = make_blocks_1 (seq, bb);
+ if (old_num_bbs == n_basic_blocks_for_fn (cfun))
+ return false;
+ e = split_block (bb, stmt);
+ /* Move e->dest to come after the new basic blocks. */
+ afterbb = e->dest;
+ unlink_block (afterbb);
+ link_block (afterbb, lastbb);
+ redirect_edge_succ (e, bb->next_bb);
+ bb = bb->next_bb;
+ while (bb != afterbb)
+ {
+ struct omp_region *cur_region = NULL;
+ int cur_omp_region_idx = 0;
+ int mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
+ gcc_assert (!mer && !cur_region);
+ add_bb_to_loop (bb, afterbb->loop_father);
+ bb = bb->next_bb;
+ }
+ return true;
}
/* Find the next available discriminator value for LOCUS. The
{
edge e;
edge_iterator ei;
- gimple last = last_stmt (bb);
+ gimple *last = last_stmt (bb);
location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION;
if (locus == UNKNOWN_LOCATION)
FOR_EACH_EDGE (e, ei, bb->succs)
{
- gimple first = first_non_label_stmt (e->dest);
- gimple last = last_stmt (e->dest);
+ gimple *first = first_non_label_stmt (e->dest);
+ gimple *last = last_stmt (e->dest);
if ((first && same_line_p (locus, gimple_location (first)))
|| (last && same_line_p (locus, gimple_location (last))))
{
static void
make_cond_expr_edges (basic_block bb)
{
- gimple entry = last_stmt (bb);
- gimple then_stmt, else_stmt;
+ gcond *entry = as_a <gcond *> (last_stmt (bb));
+ gimple *then_stmt, *else_stmt;
basic_block then_bb, else_bb;
tree then_label, else_label;
edge e;
/* Called for each element in the hash table (P) as we delete the
edge to cases hash table.
- Clear all the TREE_CHAINs to prevent problems with copying of
+ Clear all the CASE_CHAINs to prevent problems with copying of
SWITCH_EXPRs and structure sharing rules, then free the hash table
element. */
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
if (bb)
{
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
- group_case_labels_stmt (stmt);
+ group_case_labels_stmt (as_a <gswitch *> (stmt));
}
}
BITMAP_FREE (touched_switch_bbs);
Otherwise return NULL. */
static tree
-get_cases_for_edge (edge e, gimple t)
+get_cases_for_edge (edge e, gswitch *t)
{
tree *slot;
size_t i, n;
/* Create the edges for a GIMPLE_SWITCH starting at block BB. */
static void
-make_gimple_switch_edges (basic_block bb)
+make_gimple_switch_edges (gswitch *entry, basic_block bb)
{
- gimple entry = last_stmt (bb);
size_t i, n;
n = gimple_switch_num_labels (entry);
{
gimple_stmt_iterator gsi =
gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
- gimple stmt;
+ gimple *stmt;
stmt = gimple_build_label (dest);
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
make_goto_expr_edges (basic_block bb)
{
gimple_stmt_iterator last = gsi_last_bb (bb);
- gimple goto_t = gsi_stmt (last);
+ gimple *goto_t = gsi_stmt (last);
/* A simple GOTO creates normal edges. */
if (simple_goto_p (goto_t))
static void
make_gimple_asm_edges (basic_block bb)
{
- gimple stmt = last_stmt (bb);
+ gasm *stmt = as_a <gasm *> (last_stmt (bb));
int i, n = gimple_asm_nlabels (stmt);
for (i = 0; i < n; ++i)
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
tree label;
- gimple stmt = gsi_stmt (i);
+ glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
- if (gimple_code (stmt) != GIMPLE_LABEL)
+ if (!label_stmt)
break;
- label = gimple_label_label (stmt);
+ label = gimple_label_label (label_stmt);
/* If we have not yet seen a label for the current block,
remember this one and see if there are more labels. */
First do so for each block ending in a control statement. */
FOR_EACH_BB_FN (bb, cfun)
{
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
tree label, new_label;
if (!stmt)
switch (gimple_code (stmt))
{
case GIMPLE_COND:
- label = gimple_cond_true_label (stmt);
- if (label)
- {
- new_label = main_block_label (label);
- if (new_label != label)
- gimple_cond_set_true_label (stmt, new_label);
- }
+ {
+ gcond *cond_stmt = as_a <gcond *> (stmt);
+ label = gimple_cond_true_label (cond_stmt);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_cond_set_true_label (cond_stmt, new_label);
+ }
- label = gimple_cond_false_label (stmt);
- if (label)
- {
- new_label = main_block_label (label);
- if (new_label != label)
- gimple_cond_set_false_label (stmt, new_label);
- }
+ label = gimple_cond_false_label (cond_stmt);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_cond_set_false_label (cond_stmt, new_label);
+ }
+ }
break;
case GIMPLE_SWITCH:
{
- size_t i, n = gimple_switch_num_labels (stmt);
+ gswitch *switch_stmt = as_a <gswitch *> (stmt);
+ size_t i, n = gimple_switch_num_labels (switch_stmt);
/* Replace all destination labels. */
for (i = 0; i < n; ++i)
{
- tree case_label = gimple_switch_label (stmt, i);
+ tree case_label = gimple_switch_label (switch_stmt, i);
label = CASE_LABEL (case_label);
new_label = main_block_label (label);
if (new_label != label)
case GIMPLE_ASM:
{
- int i, n = gimple_asm_nlabels (stmt);
+ gasm *asm_stmt = as_a <gasm *> (stmt);
+ int i, n = gimple_asm_nlabels (asm_stmt);
for (i = 0; i < n; ++i)
{
- tree cons = gimple_asm_label_op (stmt, i);
+ tree cons = gimple_asm_label_op (asm_stmt, i);
tree label = main_block_label (TREE_VALUE (cons));
TREE_VALUE (cons) = label;
}
case GIMPLE_GOTO:
if (!computed_goto_p (stmt))
{
- label = gimple_goto_dest (stmt);
+ ggoto *goto_stmt = as_a <ggoto *> (stmt);
+ label = gimple_goto_dest (goto_stmt);
new_label = main_block_label (label);
if (new_label != label)
- gimple_goto_set_dest (stmt, new_label);
+ gimple_goto_set_dest (goto_stmt, new_label);
}
break;
case GIMPLE_TRANSACTION:
{
- tree label = gimple_transaction_label (stmt);
+ gtransaction *txn = as_a <gtransaction *> (stmt);
+
+ label = gimple_transaction_label_norm (txn);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_transaction_set_label_norm (txn, new_label);
+ }
+
+ label = gimple_transaction_label_uninst (txn);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_transaction_set_label_uninst (txn, new_label);
+ }
+
+ label = gimple_transaction_label_over (txn);
if (label)
{
- tree new_label = main_block_label (label);
+ new_label = main_block_label (label);
if (new_label != label)
- gimple_transaction_set_label (stmt, new_label);
+ gimple_transaction_set_label_over (txn, new_label);
}
}
break;
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
{
tree label;
- gimple stmt = gsi_stmt (i);
+ glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
- if (gimple_code (stmt) != GIMPLE_LABEL)
+ if (!label_stmt)
break;
- label = gimple_label_label (stmt);
+ label = gimple_label_label (label_stmt);
if (label == label_for_this_bb
|| !DECL_ARTIFICIAL (label)
Eg. three separate entries 1: 2: 3: become one entry 1..3: */
void
-group_case_labels_stmt (gimple stmt)
+group_case_labels_stmt (gswitch *stmt)
{
int old_size = gimple_switch_num_labels (stmt);
int i, j, new_size = old_size;
FOR_EACH_BB_FN (bb, cfun)
{
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
- group_case_labels_stmt (stmt);
+ group_case_labels_stmt (as_a <gswitch *> (stmt));
}
}
static bool
gimple_can_merge_blocks_p (basic_block a, basic_block b)
{
- gimple stmt;
- gimple_stmt_iterator gsi;
+ gimple *stmt;
if (!single_succ_p (a))
return false;
if (!single_pred_p (b))
return false;
- if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ if (a == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || b == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
/* If A ends by a statement causing exceptions or something similar, we
return false;
/* Do not allow a block with only a non-local label to be merged. */
- if (stmt
- && gimple_code (stmt) == GIMPLE_LABEL
- && DECL_NONLOCAL (gimple_label_label (stmt)))
- return false;
+ if (stmt)
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
+ if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
+ return false;
/* Examine the labels at the beginning of B. */
- for (gsi = gsi_start_bb (b); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (b); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
tree lab;
- stmt = gsi_stmt (gsi);
- if (gimple_code (stmt) != GIMPLE_LABEL)
+ glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
+ if (!label_stmt)
break;
- lab = gimple_label_label (stmt);
+ lab = gimple_label_label (label_stmt);
/* Do not remove user forced labels or for -O0 any user labels. */
if (!DECL_ARTIFICIAL (lab) && (!optimize || FORCED_LABEL (lab)))
return false;
}
- /* Protect the loop latches. */
- if (current_loops && b->loop_father->latch == b)
+ /* Protect simple loop latches. We only want to avoid merging
+ the latch with the loop header or with a block in another
+ loop in this case. */
+ if (current_loops
+ && b->loop_father->latch == b
+ && loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES)
+ && (b->loop_father->header == a
+ || b->loop_father != a->loop_father))
return false;
/* It must be possible to eliminate all phi nodes in B. If ssa form
is not up-to-date and a name-mapping is registered, we cannot eliminate
any phis. Symbols marked for renaming are never a problem though. */
- for (gsi = gsi_start_phis (b); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- gimple phi = gsi_stmt (gsi);
+ gphi *phi = gsi.phi ();
/* Technically only new names matter. */
if (name_registered_for_update_p (PHI_RESULT (phi)))
return false;
{
imm_use_iterator imm_iter;
use_operand_p use;
- gimple stmt;
+ gimple *stmt;
edge e;
FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
if (gimple_code (stmt) == GIMPLE_PHI)
{
- e = gimple_phi_arg_edge (stmt, PHI_ARG_INDEX_FROM_USE (use));
- if (e->flags & EDGE_ABNORMAL)
+ e = gimple_phi_arg_edge (as_a <gphi *> (stmt),
+ PHI_ARG_INDEX_FROM_USE (use));
+ if (e->flags & EDGE_ABNORMAL
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
{
/* This can only occur for virtual operands, since
for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
if (gimple_code (stmt) != GIMPLE_PHI)
{
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
- gimple orig_stmt = stmt;
+ gimple *orig_stmt = stmt;
size_t i;
/* FIXME. It shouldn't be required to keep TREE_CONSTANT
static void
gimple_merge_blocks (basic_block a, basic_block b)
{
- gimple_stmt_iterator last, gsi, psi;
+ gimple_stmt_iterator last, gsi;
+ gphi_iterator psi;
if (dump_file)
fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index);
gsi = gsi_last_bb (a);
for (psi = gsi_start_phis (b); !gsi_end_p (psi); )
{
- gimple phi = gsi_stmt (psi);
+ gimple *phi = gsi_stmt (psi);
tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);
- gimple copy;
+ gimple *copy;
bool may_replace_uses = (virtual_operand_p (def)
|| may_propagate_copy (def, use));
{
imm_use_iterator iter;
use_operand_p use_p;
- gimple stmt;
+ gimple *stmt;
FOR_EACH_IMM_USE_STMT (stmt, iter, def)
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
/* Remove labels from B and set gimple_bb to A for other statements. */
for (gsi = gsi_start_bb (b); !gsi_end_p (gsi);)
{
- gimple stmt = gsi_stmt (gsi);
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ gimple *stmt = gsi_stmt (gsi);
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
{
- tree label = gimple_label_label (stmt);
+ tree label = gimple_label_label (label_stmt);
int lp_nr;
gsi_remove (&gsi, false);
/* Other user labels keep around in a form of a debug stmt. */
else if (!DECL_ARTIFICIAL (label) && MAY_HAVE_DEBUG_STMTS)
{
- gimple dbg = gimple_build_debug_bind (label,
- integer_zero_node,
- stmt);
+ gimple *dbg = gimple_build_debug_bind (label,
+ integer_zero_node,
+ stmt);
gimple_debug_bind_reset_value (dbg);
gsi_insert_before (&gsi, dbg, GSI_SAME_STMT);
}
/* T is CALL_EXPR. Set current_function_calls_* flags. */
void
-notice_special_calls (gimple call)
+notice_special_calls (gcall *call)
{
int flags = gimple_call_flags (call);
details. */
for (i = gsi_last_bb (bb); !gsi_end_p (i);)
{
- gimple stmt = gsi_stmt (i);
- if (gimple_code (stmt) == GIMPLE_LABEL
- && (FORCED_LABEL (gimple_label_label (stmt))
- || DECL_NONLOCAL (gimple_label_label (stmt))))
+ gimple *stmt = gsi_stmt (i);
+ glabel *label_stmt = dyn_cast <glabel *> (stmt);
+ if (label_stmt
+ && (FORCED_LABEL (gimple_label_label (label_stmt))
+ || DECL_NONLOCAL (gimple_label_label (label_stmt))))
{
basic_block new_bb;
gimple_stmt_iterator new_gsi;
/* A non-reachable non-local label may still be referenced.
But it no longer needs to carry the extra semantics of
non-locality. */
- if (DECL_NONLOCAL (gimple_label_label (stmt)))
+ if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
{
- DECL_NONLOCAL (gimple_label_label (stmt)) = 0;
- FORCED_LABEL (gimple_label_label (stmt)) = 1;
+ DECL_NONLOCAL (gimple_label_label (label_stmt)) = 0;
+ FORCED_LABEL (gimple_label_label (label_stmt)) = 1;
}
new_bb = bb->prev_bb;
}
else
{
- /* Release SSA definitions if we are in SSA. Note that we
- may be called when not in SSA. For example,
- final_cleanup calls this function via
- cleanup_tree_cfg. */
- if (gimple_in_ssa_p (cfun))
- release_defs (stmt);
-
+ /* Release SSA definitions. */
+ release_defs (stmt);
gsi_remove (&i, true);
}
edge
find_taken_edge (basic_block bb, tree val)
{
- gimple stmt;
+ gimple *stmt;
stmt = last_stmt (bb);
return find_taken_edge_cond_expr (bb, val);
if (gimple_code (stmt) == GIMPLE_SWITCH)
- return find_taken_edge_switch_expr (bb, val);
+ return find_taken_edge_switch_expr (as_a <gswitch *> (stmt), bb, val);
if (computed_goto_p (stmt))
{
NULL if any edge may be taken. */
static edge
-find_taken_edge_switch_expr (basic_block bb, tree val)
+find_taken_edge_switch_expr (gswitch *switch_stmt, basic_block bb,
+ tree val)
{
basic_block dest_bb;
edge e;
- gimple switch_stmt;
tree taken_case;
- switch_stmt = last_stmt (bb);
taken_case = find_case_label_for_value (switch_stmt, val);
dest_bb = label_to_block (CASE_LABEL (taken_case));
sorted: We can do a binary search for a case matching VAL. */
static tree
-find_case_label_for_value (gimple switch_stmt, tree val)
+find_case_label_for_value (gswitch *switch_stmt, tree val)
{
size_t low, high, n = gimple_switch_num_labels (switch_stmt);
tree default_case = gimple_switch_default_label (switch_stmt);
flow. Transfers of control flow associated with EH are excluded. */
static bool
-call_can_make_abnormal_goto (gimple t)
+call_can_make_abnormal_goto (gimple *t)
{
/* If the function has no non-local labels, then a call cannot make an
abnormal transfer of control. */
Transfers of control flow associated with EH are excluded. */
bool
-stmt_can_make_abnormal_goto (gimple t)
+stmt_can_make_abnormal_goto (gimple *t)
{
if (computed_goto_p (t))
return true;
/* Return true if T represents a stmt that always transfers control. */
bool
-is_ctrl_stmt (gimple t)
+is_ctrl_stmt (gimple *t)
{
switch (gimple_code (t))
{
(e.g., a call to a non-returning function). */
bool
-is_ctrl_altering_stmt (gimple t)
+is_ctrl_altering_stmt (gimple *t)
{
gcc_assert (t);
return true;
case GIMPLE_ASM:
- if (gimple_asm_nlabels (t) > 0)
+ if (gimple_asm_nlabels (as_a <gasm *> (t)) > 0)
return true;
break;
/* Return true if T is a simple local goto. */
bool
-simple_goto_p (gimple t)
+simple_goto_p (gimple *t)
{
return (gimple_code (t) == GIMPLE_GOTO
&& TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL);
label. */
static inline bool
-stmt_starts_bb_p (gimple stmt, gimple prev_stmt)
+stmt_starts_bb_p (gimple *stmt, gimple *prev_stmt)
{
if (stmt == NULL)
return false;
/* Labels start a new basic block only if the preceding statement
wasn't a label of the same type. This prevents the creation of
consecutive blocks that have nothing but a single label. */
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
{
/* Nonlocal and computed GOTO targets always start a new block. */
- if (DECL_NONLOCAL (gimple_label_label (stmt))
- || FORCED_LABEL (gimple_label_label (stmt)))
+ if (DECL_NONLOCAL (gimple_label_label (label_stmt))
+ || FORCED_LABEL (gimple_label_label (label_stmt)))
return true;
if (prev_stmt && gimple_code (prev_stmt) == GIMPLE_LABEL)
{
- if (DECL_NONLOCAL (gimple_label_label (prev_stmt)))
+ if (DECL_NONLOCAL (gimple_label_label (
+ as_a <glabel *> (prev_stmt))))
return true;
cfg_stats.num_merged_labels++;
/* Return true if T should end a basic block. */
bool
-stmt_ends_bb_p (gimple t)
+stmt_ends_bb_p (gimple *t)
{
return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t);
}
/* Remove block annotations and other data structures. */
void
-delete_tree_cfg_annotations (void)
+delete_tree_cfg_annotations (struct function *fn)
{
- vec_free (label_to_block_map_for_fn (cfun));
+ vec_free (label_to_block_map_for_fn (fn));
}
+/* Return the first PHI node in basic block BB whose result is a
+   virtual operand, or NULL if BB has no such PHI.  */
+
+gphi *
+get_virtual_phi (basic_block bb)
+{
+  for (gphi_iterator gsi = gsi_start_phis (bb);
+       !gsi_end_p (gsi);
+       gsi_next (&gsi))
+    {
+      gphi *phi = gsi.phi ();
+
+      /* Scan the PHI sequence for the one defining a virtual operand.  */
+      if (virtual_operand_p (PHI_RESULT (phi)))
+	return phi;
+    }
+
+  return NULL;
+}
/* Return the first statement in basic block BB. */
-gimple
+gimple *
first_stmt (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
- gimple stmt = NULL;
+ gimple *stmt = NULL;
while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
{
/* Return the first non-label statement in basic block BB. */
-static gimple
+static gimple *
first_non_label_stmt (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
/* Return the last statement in basic block BB. */
-gimple
+gimple *
last_stmt (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
- gimple stmt = NULL;
+ gimple *stmt = NULL;
while (!gsi_end_p (i) && is_gimple_debug ((stmt = gsi_stmt (i))))
{
if the block is totally empty, or if it contains more than one
statement. */
-gimple
+gimple *
last_and_only_stmt (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_nondebug_bb (bb);
- gimple last, prev;
+ gimple *last, *prev;
if (gsi_end_p (i))
return NULL;
{
edge_var_map *vm;
int i;
- gimple_stmt_iterator phis;
+ gphi_iterator phis;
vec<edge_var_map> *v = redirect_edge_var_map_vector (old_edge);
if (!v)
v->iterate (i, &vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
{
- gimple phi = gsi_stmt (phis);
+ gphi *phi = phis.phi ();
tree result = redirect_edge_var_map_result (vm);
tree arg = redirect_edge_var_map_def (vm);
near its "logical" location. This is of most help to humans looking
at debugging dumps. */
-static basic_block
+basic_block
split_edge_bb_loc (edge edge_in)
{
basic_block dest = edge_in->dest;
}
break;
+ case PARM_DECL:
+ case VAR_DECL:
+ case RESULT_DECL:
+ {
+ tree context = decl_function_context (t);
+ if (context != cfun->decl
+ && !SCOPE_FILE_SCOPE_P (context)
+ && !TREE_STATIC (t)
+ && !DECL_EXTERNAL (t))
+ {
+ error ("Local declaration from a different function");
+ return t;
+ }
+ }
+ break;
+
case INDIRECT_REF:
error ("INDIRECT_REF in gimple IL");
return t;
error ("invalid offset operand of MEM_REF");
return TREE_OPERAND (t, 1);
}
- if (TREE_CODE (x) == ADDR_EXPR
- && (x = verify_address (x, TREE_OPERAND (x, 0))))
- return x;
+ if (TREE_CODE (x) == ADDR_EXPR)
+ {
+ tree va = verify_address (x, TREE_OPERAND (x, 0));
+ if (va)
+ return va;
+ x = TREE_OPERAND (x, 0);
+ }
+ walk_tree (&x, verify_expr, data, NULL);
*walk_subtrees = 0;
break;
}
else if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
&& TYPE_MODE (TREE_TYPE (t)) != BLKmode
- && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t)))
+ && (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (t)))
!= tree_to_uhwi (t1)))
{
- error ("mode precision of non-integral result does not "
+ error ("mode size of non-integral result does not "
"match field size of BIT_FIELD_REF");
return t;
}
error ("invalid reference prefix");
return t;
}
+ walk_tree (&t, verify_expr, data, NULL);
*walk_subtrees = 0;
break;
case PLUS_EXPR:
is a problem, otherwise false. */
static bool
-verify_gimple_call (gimple stmt)
+verify_gimple_call (gcall *stmt)
{
tree fn = gimple_call_fn (stmt);
tree fntype, fndecl;
return true;
}
- if (gimple_call_lhs (stmt)
- && (!is_gimple_lvalue (gimple_call_lhs (stmt))
- || verify_types_in_gimple_reference (gimple_call_lhs (stmt), true)))
+ tree lhs = gimple_call_lhs (stmt);
+ if (lhs
+ && (!is_gimple_lvalue (lhs)
+ || verify_types_in_gimple_reference (lhs, true)))
{
error ("invalid LHS in gimple call");
return true;
}
- if (gimple_call_lhs (stmt) && gimple_call_noreturn_p (stmt))
+ if (gimple_call_ctrl_altering_p (stmt)
+ && gimple_call_noreturn_p (stmt)
+ && should_remove_lhs_p (lhs))
{
error ("LHS in noreturn call");
return true;
fntype = gimple_call_fntype (stmt);
if (fntype
- && gimple_call_lhs (stmt)
- && !useless_type_conversion_p (TREE_TYPE (gimple_call_lhs (stmt)),
- TREE_TYPE (fntype))
+ && lhs
+ && !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (fntype))
/* ??? At least C++ misses conversions at assignments from
void * call results.
??? Java is completely off. Especially with functions
returning java.lang.Object.
For now simply allow arbitrary pointer type conversions. */
- && !(POINTER_TYPE_P (TREE_TYPE (gimple_call_lhs (stmt)))
+ && !(POINTER_TYPE_P (TREE_TYPE (lhs))
&& POINTER_TYPE_P (TREE_TYPE (fntype))))
{
error ("invalid conversion in gimple call");
- debug_generic_stmt (TREE_TYPE (gimple_call_lhs (stmt)));
+ debug_generic_stmt (TREE_TYPE (lhs));
debug_generic_stmt (TREE_TYPE (fntype));
return true;
}
return true;
}
- /* If there is a static chain argument, this should not be an indirect
- call, and the decl should have DECL_STATIC_CHAIN set. */
- if (gimple_call_chain (stmt))
+ /* If there is a static chain argument, the call should either be
+ indirect, or the decl should have DECL_STATIC_CHAIN set. */
+ if (gimple_call_chain (stmt)
+ && fndecl
+ && !DECL_STATIC_CHAIN (fndecl))
{
- if (!gimple_call_fndecl (stmt))
- {
- error ("static chain in indirect gimple call");
- return true;
- }
- fn = TREE_OPERAND (fn, 0);
+ error ("static chain with function that doesn%'t use one");
+ return true;
+ }
- if (!DECL_STATIC_CHAIN (fn))
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ {
+ switch (DECL_FUNCTION_CODE (fndecl))
{
- error ("static chain with function that doesn%'t use one");
- return true;
+ case BUILT_IN_UNREACHABLE:
+ case BUILT_IN_TRAP:
+ if (gimple_call_num_args (stmt) > 0)
+ {
+ /* Built-in unreachable with parameters might not be caught by
+ undefined behavior sanitizer. Front-ends do check users do not
+ call them that way but we also produce calls to
+ __builtin_unreachable internally, for example when IPA figures
+ out a call cannot happen in a legal program. In such cases,
+ we must make sure arguments are stripped off. */
+ error ("__builtin_unreachable or __builtin_trap call with "
+ "arguments");
+ return true;
+ }
+ break;
+ default:
+ break;
}
}
}
/* Verifies the gimple comparison with the result type TYPE and
- the operands OP0 and OP1. */
+ the operands OP0 and OP1, comparison code is CODE. */
static bool
-verify_gimple_comparison (tree type, tree op0, tree op1)
+verify_gimple_comparison (tree type, tree op0, tree op1, enum tree_code code)
{
tree op0_type = TREE_TYPE (op0);
tree op1_type = TREE_TYPE (op1);
&& (TREE_CODE (type) == BOOLEAN_TYPE
|| TYPE_PRECISION (type) == 1))
{
- if (TREE_CODE (op0_type) == VECTOR_TYPE
- || TREE_CODE (op1_type) == VECTOR_TYPE)
- {
- error ("vector comparison returning a boolean");
- debug_generic_expr (op0_type);
- debug_generic_expr (op1_type);
- return true;
+ if ((TREE_CODE (op0_type) == VECTOR_TYPE
+ || TREE_CODE (op1_type) == VECTOR_TYPE)
+ && code != EQ_EXPR && code != NE_EXPR
+ && !VECTOR_BOOLEAN_TYPE_P (op0_type)
+ && !VECTOR_INTEGER_TYPE_P (op0_type))
+ {
+ error ("unsupported operation or type for vector comparison"
+ " returning a boolean");
+ debug_generic_expr (op0_type);
+ debug_generic_expr (op1_type);
+ return true;
}
}
- /* Or an integer vector type with the same size and element count
+ /* Or a boolean vector type with the same element count
as the comparison operand types. */
else if (TREE_CODE (type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE)
+ && TREE_CODE (TREE_TYPE (type)) == BOOLEAN_TYPE)
{
if (TREE_CODE (op0_type) != VECTOR_TYPE
|| TREE_CODE (op1_type) != VECTOR_TYPE)
return true;
}
- if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type)
- || (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)))
- != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0_type))))
- /* The result of a vector comparison is of signed
- integral type. */
- || TYPE_UNSIGNED (TREE_TYPE (type)))
+ if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type))
{
error ("invalid vector comparison resulting type");
debug_generic_expr (type);
Returns true if anything is wrong. */
static bool
-verify_gimple_assign_unary (gimple stmt)
+verify_gimple_assign_unary (gassign *stmt)
{
enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
tree lhs = gimple_assign_lhs (stmt);
return false;
}
-
- case VEC_UNPACK_HI_EXPR:
- case VEC_UNPACK_LO_EXPR:
case REDUC_MAX_EXPR:
case REDUC_MIN_EXPR:
case REDUC_PLUS_EXPR:
+ if (!VECTOR_TYPE_P (rhs1_type)
+ || !useless_type_conversion_p (lhs_type, TREE_TYPE (rhs1_type)))
+ {
+ error ("reduction should convert from vector to element type");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+ return false;
+
+ case VEC_UNPACK_HI_EXPR:
+ case VEC_UNPACK_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
/* FIXME. */
Returns true if anything is wrong. */
static bool
-verify_gimple_assign_binary (gimple stmt)
+verify_gimple_assign_binary (gassign *stmt)
{
enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
tree lhs = gimple_assign_lhs (stmt);
return false;
}
- case VEC_LSHIFT_EXPR:
- case VEC_RSHIFT_EXPR:
- {
- if (TREE_CODE (rhs1_type) != VECTOR_TYPE
- || !(INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
- || POINTER_TYPE_P (TREE_TYPE (rhs1_type))
- || FIXED_POINT_TYPE_P (TREE_TYPE (rhs1_type))
- || SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
- || (!INTEGRAL_TYPE_P (rhs2_type)
- && (TREE_CODE (rhs2_type) != VECTOR_TYPE
- || !INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
- || !useless_type_conversion_p (lhs_type, rhs1_type))
- {
- error ("type mismatch in vector shift expression");
- debug_generic_expr (lhs_type);
- debug_generic_expr (rhs1_type);
- debug_generic_expr (rhs2_type);
- return true;
- }
- /* For shifting a vector of non-integral components we
- only allow shifting by a constant multiple of the element size. */
- if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
- && (TREE_CODE (rhs2) != INTEGER_CST
- || !div_if_zero_remainder (rhs2,
- TYPE_SIZE (TREE_TYPE (rhs1_type)))))
- {
- error ("non-element sized vector shift of floating point vector");
- return true;
- }
-
- return false;
- }
-
case WIDEN_LSHIFT_EXPR:
{
if (!INTEGRAL_TYPE_P (lhs_type)
case LTGT_EXPR:
/* Comparisons are also binary, but the result type is not
connected to the operand types. */
- return verify_gimple_comparison (lhs_type, rhs1, rhs2);
+ return verify_gimple_comparison (lhs_type, rhs1, rhs2, rhs_code);
case WIDEN_MULT_EXPR:
if (TREE_CODE (lhs_type) != INTEGER_TYPE)
Returns true if anything is wrong. */
static bool
-verify_gimple_assign_ternary (gimple stmt)
+verify_gimple_assign_ternary (gassign *stmt)
{
enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
tree lhs = gimple_assign_lhs (stmt);
}
break;
- case COND_EXPR:
case VEC_COND_EXPR:
+ if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
+ || TYPE_VECTOR_SUBPARTS (rhs1_type)
+ != TYPE_VECTOR_SUBPARTS (lhs_type))
+ {
+ error ("the first argument of a VEC_COND_EXPR must be of a "
+ "boolean vector type of the same number of elements "
+ "as the result");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+ /* Fallthrough. */
+ case COND_EXPR:
if (!useless_type_conversion_p (lhs_type, rhs2_type)
|| !useless_type_conversion_p (lhs_type, rhs3_type))
{
case SAD_EXPR:
if (!useless_type_conversion_p (rhs1_type, rhs2_type)
|| !useless_type_conversion_p (lhs_type, rhs3_type)
- || 2 * GET_MODE_BITSIZE (GET_MODE_INNER
- (TYPE_MODE (TREE_TYPE (rhs1_type))))
- > GET_MODE_BITSIZE (GET_MODE_INNER
- (TYPE_MODE (TREE_TYPE (lhs_type)))))
+ || 2 * GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1_type)))
+ > GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (lhs_type))))
{
error ("type mismatch in sad expression");
debug_generic_expr (lhs_type);
debug_generic_expr (rhs3_type);
return true;
}
-
+
+ return false;
+
+ case BIT_INSERT_EXPR:
+ if (! useless_type_conversion_p (lhs_type, rhs1_type))
+ {
+ error ("type mismatch in BIT_INSERT_EXPR");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+ if (! ((INTEGRAL_TYPE_P (rhs1_type)
+ && INTEGRAL_TYPE_P (rhs2_type))
+ || (VECTOR_TYPE_P (rhs1_type)
+ && types_compatible_p (TREE_TYPE (rhs1_type), rhs2_type))))
+ {
+ error ("not allowed type combination in BIT_INSERT_EXPR");
+ debug_generic_expr (rhs1_type);
+ debug_generic_expr (rhs2_type);
+ return true;
+ }
+ if (! tree_fits_uhwi_p (rhs3)
+ || ! tree_fits_uhwi_p (TYPE_SIZE (rhs2_type)))
+ {
+ error ("invalid position or size in BIT_INSERT_EXPR");
+ return true;
+ }
+ if (INTEGRAL_TYPE_P (rhs1_type))
+ {
+ unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
+ if (bitpos >= TYPE_PRECISION (rhs1_type)
+ || (bitpos + TYPE_PRECISION (rhs2_type)
+ > TYPE_PRECISION (rhs1_type)))
+ {
+ error ("insertion out of range in BIT_INSERT_EXPR");
+ return true;
+ }
+ }
+ else if (VECTOR_TYPE_P (rhs1_type))
+ {
+ unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
+ unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TYPE_SIZE (rhs2_type));
+ if (bitpos % bitsize != 0)
+ {
+ error ("vector insertion not at element boundary");
+ return true;
+ }
+ }
return false;
case DOT_PROD_EXPR:
Returns true if anything is wrong. */
static bool
-verify_gimple_assign_single (gimple stmt)
+verify_gimple_assign_single (gassign *stmt)
{
enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
tree lhs = gimple_assign_lhs (stmt);
debug_generic_stmt (rhs1);
return true;
}
+ if (!is_gimple_val (elt_v))
+ {
+ error ("vector CONSTRUCTOR element is not a GIMPLE value");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
}
}
+ else if (CONSTRUCTOR_NELTS (rhs1) != 0)
+ {
+ error ("non-vector CONSTRUCTOR with elements");
+ debug_generic_stmt (rhs1);
+ return true;
+ }
return res;
case OBJ_TYPE_REF:
case ASSERT_EXPR:
is a problem, otherwise false. */
static bool
-verify_gimple_assign (gimple stmt)
+verify_gimple_assign (gassign *stmt)
{
switch (gimple_assign_rhs_class (stmt))
{
is a problem, otherwise false. */
static bool
-verify_gimple_return (gimple stmt)
+verify_gimple_return (greturn *stmt)
{
tree op = gimple_return_retval (stmt);
tree restype = TREE_TYPE (TREE_TYPE (cfun->decl));
is a problem, otherwise false. */
static bool
-verify_gimple_goto (gimple stmt)
+verify_gimple_goto (ggoto *stmt)
{
tree dest = gimple_goto_dest (stmt);
is a problem, otherwise false. */
static bool
-verify_gimple_switch (gimple stmt)
+verify_gimple_switch (gswitch *stmt)
{
unsigned int i, n;
tree elt, prev_upper_bound = NULL_TREE;
Returns true if anything is wrong. */
static bool
-verify_gimple_debug (gimple stmt ATTRIBUTE_UNUSED)
+verify_gimple_debug (gimple *stmt ATTRIBUTE_UNUSED)
{
/* There isn't much that could be wrong in a gimple debug stmt. A
gimple debug bind stmt, for example, maps a tree, that's usually
Returns true if anything is wrong. */
static bool
-verify_gimple_label (gimple stmt)
+verify_gimple_label (glabel *stmt)
{
tree decl = gimple_label_label (stmt);
int uid;
return err;
}
+/* Verify a gimple cond statement STMT.
+   Returns true if anything is wrong.  */
+
+static bool
+verify_gimple_cond (gcond *stmt)
+{
+  /* The condition code must be a genuine comparison operator.  */
+  if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
+    {
+      error ("invalid comparison code in gimple cond");
+      return true;
+    }
+  /* The true/false destination labels, when present, must be
+     LABEL_DECLs.  */
+  if (!(!gimple_cond_true_label (stmt)
+	|| TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
+      || !(!gimple_cond_false_label (stmt)
+	   || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
+    {
+      error ("invalid labels in gimple cond");
+      return true;
+    }
+
+  /* Delegate type-checking of the comparison itself; a GIMPLE_COND
+     always produces a boolean result.  */
+  return verify_gimple_comparison (boolean_type_node,
+				   gimple_cond_lhs (stmt),
+				   gimple_cond_rhs (stmt),
+				   gimple_cond_code (stmt));
+}
+
/* Verify the GIMPLE statement STMT. Returns true if there is an
error, otherwise false. */
static bool
-verify_gimple_stmt (gimple stmt)
+verify_gimple_stmt (gimple *stmt)
{
switch (gimple_code (stmt))
{
case GIMPLE_ASSIGN:
- return verify_gimple_assign (stmt);
+ return verify_gimple_assign (as_a <gassign *> (stmt));
case GIMPLE_LABEL:
- return verify_gimple_label (stmt);
+ return verify_gimple_label (as_a <glabel *> (stmt));
case GIMPLE_CALL:
- return verify_gimple_call (stmt);
+ return verify_gimple_call (as_a <gcall *> (stmt));
case GIMPLE_COND:
- if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
- {
- error ("invalid comparison code in gimple cond");
- return true;
- }
- if (!(!gimple_cond_true_label (stmt)
- || TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
- || !(!gimple_cond_false_label (stmt)
- || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
- {
- error ("invalid labels in gimple cond");
- return true;
- }
-
- return verify_gimple_comparison (boolean_type_node,
- gimple_cond_lhs (stmt),
- gimple_cond_rhs (stmt));
+ return verify_gimple_cond (as_a <gcond *> (stmt));
case GIMPLE_GOTO:
- return verify_gimple_goto (stmt);
+ return verify_gimple_goto (as_a <ggoto *> (stmt));
case GIMPLE_SWITCH:
- return verify_gimple_switch (stmt);
+ return verify_gimple_switch (as_a <gswitch *> (stmt));
case GIMPLE_RETURN:
- return verify_gimple_return (stmt);
+ return verify_gimple_return (as_a <greturn *> (stmt));
case GIMPLE_ASM:
return false;
case GIMPLE_TRANSACTION:
- return verify_gimple_transaction (stmt);
+ return verify_gimple_transaction (as_a <gtransaction *> (stmt));
/* Tuples that do not have tree operands. */
case GIMPLE_NOP:
and false otherwise. */
static bool
-verify_gimple_phi (gimple phi)
+verify_gimple_phi (gimple *phi)
{
bool err = false;
unsigned i;
for (ittr = gsi_start (stmts); !gsi_end_p (ittr); gsi_next (&ittr))
{
- gimple stmt = gsi_stmt (ittr);
+ gimple *stmt = gsi_stmt (ittr);
switch (gimple_code (stmt))
{
case GIMPLE_BIND:
- err |= verify_gimple_in_seq_2 (gimple_bind_body (stmt));
+ err |= verify_gimple_in_seq_2 (
+ gimple_bind_body (as_a <gbind *> (stmt)));
break;
case GIMPLE_TRY:
break;
case GIMPLE_EH_ELSE:
- err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (stmt));
- err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (stmt));
+ {
+ geh_else *eh_else = as_a <geh_else *> (stmt);
+ err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (eh_else));
+ err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (eh_else));
+ }
break;
case GIMPLE_CATCH:
- err |= verify_gimple_in_seq_2 (gimple_catch_handler (stmt));
+ err |= verify_gimple_in_seq_2 (gimple_catch_handler (
+ as_a <gcatch *> (stmt)));
break;
case GIMPLE_TRANSACTION:
- err |= verify_gimple_transaction (stmt);
+ err |= verify_gimple_transaction (as_a <gtransaction *> (stmt));
break;
default:
is a problem, otherwise false. */
static bool
-verify_gimple_transaction (gimple stmt)
+verify_gimple_transaction (gtransaction *stmt)
{
- tree lab = gimple_transaction_label (stmt);
+ tree lab;
+
+ lab = gimple_transaction_label_norm (stmt);
+ if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
+ return true;
+ lab = gimple_transaction_label_uninst (stmt);
if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
return true;
+ lab = gimple_transaction_label_over (stmt);
+ if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
+ return true;
+
return verify_gimple_in_seq_2 (gimple_transaction_body (stmt));
}
}
static bool eh_error_found;
-static int
-verify_eh_throw_stmt_node (void **slot, void *data)
+/* Traversal callback for the EH throw-statement table.  Diagnose STMT
+   if it is still recorded in the EH table but absent from VISITED
+   (the set of statements seen while verifying the IL), setting
+   eh_error_found.  Returns true.  */
+bool
+verify_eh_throw_stmt_node (gimple *const &stmt, const int &,
+			   hash_set<gimple *> *visited)
{
- struct throw_stmt_node *node = (struct throw_stmt_node *)*slot;
- hash_set<void *> *visited = (hash_set<void *> *) data;
-
- if (!visited->contains (node->stmt))
+ if (!visited->contains (stmt))
{
error ("dead STMT in EH table");
- debug_gimple_stmt (node->stmt);
+ debug_gimple_stmt (stmt);
eh_error_found = true;
}
- return 1;
+ return true;
}
/* Verify if the location LOCs block is in BLOCKS. */
timevar_push (TV_TREE_STMT_VERIFY);
hash_set<void *> visited;
- hash_set<gimple> visited_stmts;
+ hash_set<gimple *> visited_stmts;
/* Collect all BLOCKs referenced by the BLOCK tree of FN. */
hash_set<tree> blocks;
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gphi_iterator gpi = gsi_start_phis (bb);
+ !gsi_end_p (gpi);
+ gsi_next (&gpi))
{
- gimple phi = gsi_stmt (gsi);
+ gphi *phi = gpi.phi ();
bool err2 = false;
unsigned i;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
bool err2 = false;
struct walk_stmt_info wi;
tree addr;
}
eh_error_found = false;
- if (get_eh_throw_stmt_table (cfun))
- htab_traverse (get_eh_throw_stmt_table (cfun),
- verify_eh_throw_stmt_node,
- &visited_stmts);
+ hash_map<gimple *, int> *eh_table = get_eh_throw_stmt_table (cfun);
+ if (eh_table)
+ eh_table->traverse<hash_set<gimple *> *, verify_eh_throw_stmt_node>
+ (&visited_stmts);
if (err || eh_error_found)
internal_error ("verify_gimple failed");
int err = 0;
basic_block bb;
gimple_stmt_iterator gsi;
- gimple stmt;
+ gimple *stmt;
edge e;
edge_iterator ei;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree label;
- gimple prev_stmt = stmt;
+ gimple *prev_stmt = stmt;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_LABEL)
break;
- label = gimple_label_label (stmt);
+ label = gimple_label_label (as_a <glabel *> (stmt));
if (prev_stmt && DECL_NONLOCAL (label))
{
error ("nonlocal label ");
/* Verify that body of basic block BB is free of control flow. */
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
if (found_ctrl_stmt)
{
if (stmt_ends_bb_p (stmt))
found_ctrl_stmt = true;
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
{
error ("label ");
- print_generic_expr (stderr, gimple_label_label (stmt), 0);
+ print_generic_expr (stderr, gimple_label_label (label_stmt), 0);
fprintf (stderr, " in the middle of basic block %d", bb->index);
err = 1;
}
case GIMPLE_SWITCH:
{
+ gswitch *switch_stmt = as_a <gswitch *> (stmt);
tree prev;
edge e;
size_t i, n;
- n = gimple_switch_num_labels (stmt);
+ n = gimple_switch_num_labels (switch_stmt);
/* Mark all the destination basic blocks. */
for (i = 0; i < n; ++i)
{
- tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
+ tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
basic_block label_bb = label_to_block (lab);
gcc_assert (!label_bb->aux || label_bb->aux == (void *)1);
label_bb->aux = (void *)1;
}
/* Verify that the case labels are sorted. */
- prev = gimple_switch_label (stmt, 0);
+ prev = gimple_switch_label (switch_stmt, 0);
for (i = 1; i < n; ++i)
{
- tree c = gimple_switch_label (stmt, i);
+ tree c = gimple_switch_label (switch_stmt, i);
if (!CASE_LOW (c))
{
error ("found default case not at the start of "
/* Check that we have all of them. */
for (i = 0; i < n; ++i)
{
- tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
+ tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
basic_block label_bb = label_to_block (lab);
if (label_bb->aux != (void *)2)
break;
case GIMPLE_EH_DISPATCH:
- err |= verify_eh_dispatch_edge (stmt);
+ err |= verify_eh_dispatch_edge (as_a <geh_dispatch *> (stmt));
break;
default:
edge_iterator ei;
basic_block dummy, bb;
tree var;
- gimple_stmt_iterator gsi;
+ gphi_iterator gsi;
dummy = fallthru->src;
bb = fallthru->dest;
start of BB. */
for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
{
- gimple phi, new_phi;
+ gphi *phi, *new_phi;
- phi = gsi_stmt (gsi);
+ phi = gsi.phi ();
var = gimple_phi_result (phi);
new_phi = create_phi_node (var, bb);
gimple_phi_set_result (phi, copy_ssa_name (var, phi));
gimple_stmt_iterator i, s = gsi_start_bb (bb);
bool first = true;
tree label;
- gimple stmt;
+ glabel *stmt;
for (i = s; !gsi_end_p (i); first = false, gsi_next (&i))
{
- stmt = gsi_stmt (i);
- if (gimple_code (stmt) != GIMPLE_LABEL)
+ stmt = dyn_cast <glabel *> (gsi_stmt (i));
+ if (!stmt)
break;
label = gimple_label_label (stmt);
if (!DECL_NONLOCAL (label))
{
basic_block src = e->src;
gimple_stmt_iterator i;
- gimple stmt;
+ gimple *stmt;
/* We can replace or remove a complex jump only when we have exactly
two edges. */
basic_block bb = e->src;
gimple_stmt_iterator gsi;
edge ret;
- gimple stmt;
+ gimple *stmt;
if (e->flags & EDGE_ABNORMAL)
return NULL;
case GIMPLE_SWITCH:
{
+ gswitch *switch_stmt = as_a <gswitch *> (stmt);
tree label = gimple_block_label (dest);
- tree cases = get_cases_for_edge (e, stmt);
+ tree cases = get_cases_for_edge (e, switch_stmt);
/* If we have a list of cases associated with E, then use it
as it's a lot faster than walking the entire case vector. */
to move all the cases associated with E to E2. */
if (e2)
{
- tree cases2 = get_cases_for_edge (e2, stmt);
+ tree cases2 = get_cases_for_edge (e2, switch_stmt);
CASE_CHAIN (last) = CASE_CHAIN (cases2);
CASE_CHAIN (cases2) = first;
}
else
{
- size_t i, n = gimple_switch_num_labels (stmt);
+ size_t i, n = gimple_switch_num_labels (switch_stmt);
for (i = 0; i < n; i++)
{
- tree elt = gimple_switch_label (stmt, i);
+ tree elt = gimple_switch_label (switch_stmt, i);
if (label_to_block (CASE_LABEL (elt)) == e->dest)
CASE_LABEL (elt) = label;
}
case GIMPLE_ASM:
{
- int i, n = gimple_asm_nlabels (stmt);
+ gasm *asm_stmt = as_a <gasm *> (stmt);
+ int i, n = gimple_asm_nlabels (asm_stmt);
tree label = NULL;
for (i = 0; i < n; ++i)
{
- tree cons = gimple_asm_label_op (stmt, i);
+ tree cons = gimple_asm_label_op (asm_stmt, i);
if (label_to_block (TREE_VALUE (cons)) == e->dest)
{
if (!label)
case GIMPLE_EH_DISPATCH:
if (!(e->flags & EDGE_FALLTHRU))
- redirect_eh_dispatch_edge (stmt, e, dest);
+ redirect_eh_dispatch_edge (as_a <geh_dispatch *> (stmt), e, dest);
break;
case GIMPLE_TRANSACTION:
- /* The ABORT edge has a stored label associated with it, otherwise
- the edges are simply redirectable. */
- if (e->flags == 0)
- gimple_transaction_set_label (stmt, gimple_block_label (dest));
+ if (e->flags & EDGE_TM_ABORT)
+ gimple_transaction_set_label_over (as_a <gtransaction *> (stmt),
+ gimple_block_label (dest));
+ else if (e->flags & EDGE_TM_UNINSTRUMENTED)
+ gimple_transaction_set_label_uninst (as_a <gtransaction *> (stmt),
+ gimple_block_label (dest));
+ else
+ gimple_transaction_set_label_norm (as_a <gtransaction *> (stmt),
+ gimple_block_label (dest));
break;
default:
{
gimple_stmt_iterator gsi;
gimple_stmt_iterator gsi_tgt;
- gimple act;
gimple_seq list;
basic_block new_bb;
edge e;
FOR_EACH_EDGE (e, ei, new_bb->succs)
e->src = new_bb;
- if (stmt && gimple_code ((gimple) stmt) == GIMPLE_LABEL)
- stmt = NULL;
-
- /* Move everything from GSI to the new basic block. */
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ /* Get a stmt iterator pointing to the first stmt to move. */
+ if (!stmt || gimple_code ((gimple *) stmt) == GIMPLE_LABEL)
+ gsi = gsi_after_labels (bb);
+ else
{
- act = gsi_stmt (gsi);
- if (gimple_code (act) == GIMPLE_LABEL)
- continue;
-
- if (!stmt)
- break;
-
- if (stmt == act)
- {
- gsi_next (&gsi);
- break;
- }
+ gsi = gsi_for_stmt ((gimple *) stmt);
+ gsi_next (&gsi);
}
-
+
+ /* Move everything from GSI to the new basic block. */
if (gsi_end_p (gsi))
return new_bb;
static basic_block
gimple_split_block_before_cond_jump (basic_block bb)
{
- gimple last, split_point;
+ gimple *last, *split_point;
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
if (gsi_end_p (gsi))
return NULL;
if (gimple_code (last) != GIMPLE_COND
&& gimple_code (last) != GIMPLE_SWITCH)
return NULL;
- gsi_prev_nondebug (&gsi);
+ gsi_prev (&gsi);
split_point = gsi_stmt (gsi);
return split_block (bb, split_point)->dest;
}
gimple_duplicate_bb (basic_block bb)
{
basic_block new_bb;
- gimple_stmt_iterator gsi, gsi_tgt;
- gimple_seq phis = phi_nodes (bb);
- gimple phi, stmt, copy;
+ gimple_stmt_iterator gsi_tgt;
new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
/* Copy the PHI nodes. We ignore PHI node arguments here because
the incoming edges have not been setup yet. */
- for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gphi_iterator gpi = gsi_start_phis (bb);
+ !gsi_end_p (gpi);
+ gsi_next (&gpi))
{
- phi = gsi_stmt (gsi);
+ gphi *phi, *copy;
+ phi = gpi.phi ();
copy = create_phi_node (NULL_TREE, new_bb);
create_new_def_for (gimple_phi_result (phi), copy,
gimple_phi_result_ptr (copy));
}
gsi_tgt = gsi_start_bb (new_bb);
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
def_operand_p def_p;
ssa_op_iter op_iter;
tree lhs;
+ gimple *stmt, *copy;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) == GIMPLE_LABEL)
basic_block bb, bb_copy = e_copy->src, dest;
edge e;
edge_iterator ei;
- gimple phi, phi_copy;
+ gphi *phi, *phi_copy;
tree def;
- gimple_stmt_iterator psi, psi_copy;
+ gphi_iterator psi, psi_copy;
if (gimple_seq_empty_p (phi_nodes (e_copy->dest)))
return;
!gsi_end_p (psi);
gsi_next (&psi), gsi_next (&psi_copy))
{
- phi = gsi_stmt (psi);
- phi_copy = gsi_stmt (psi_copy);
+ phi = psi.phi ();
+ phi_copy = psi_copy.phi ();
def = PHI_ARG_DEF_FROM_EDGE (phi, e);
add_phi_arg (phi_copy, def, e_copy,
gimple_phi_arg_location_from_edge (phi, e));
gcov_type total_count = 0, exit_count = 0;
edge exits[2], nexits[2], e;
gimple_stmt_iterator gsi;
- gimple cond_stmt;
+ gimple *cond_stmt;
edge sorig, snew;
basic_block exit_bb;
- gimple_stmt_iterator psi;
- gimple phi;
+ gphi_iterator psi;
+ gphi *phi;
tree def;
struct loop *target, *aloop, *cloop;
!gsi_end_p (psi);
gsi_next (&psi))
{
- phi = gsi_stmt (psi);
+ phi = psi.phi ();
def = PHI_ARG_DEF (phi, nexits[0]->dest_idx);
add_phi_arg (phi, def, e, gimple_phi_arg_location_from_edge (phi, e));
}
tree decl = SSA_NAME_VAR (name);
if (decl)
{
+ gcc_assert (!SSA_NAME_IS_DEFAULT_DEF (name));
replace_by_duplicate_decl (&decl, vars_map, to_context);
new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
decl, SSA_NAME_DEF_STMT (name));
- if (SSA_NAME_IS_DEFAULT_DEF (name))
- set_ssa_default_def (DECL_STRUCT_FUNCTION (to_context),
- decl, new_name);
}
else
new_name = copy_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
name, SSA_NAME_DEF_STMT (name));
+ /* Now that we've used the def stmt to define new_name, make sure it
+ doesn't define name anymore. */
+ SSA_NAME_DEF_STMT (name) = NULL;
+
vars_map->put (name, new_name);
}
else
|| (p->orig_block == NULL_TREE
&& block != NULL_TREE))
TREE_SET_BLOCK (t, p->new_block);
-#ifdef ENABLE_CHECKING
- else if (block != NULL_TREE)
+ else if (flag_checking && block != NULL_TREE)
{
while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
block = BLOCK_SUPERCONTEXT (block);
gcc_assert (block == p->orig_block);
}
-#endif
}
else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
{
if (TREE_CODE (t) == SSA_NAME)
*tp = replace_ssa_name (t, p->vars_map, p->to_context);
+ else if (TREE_CODE (t) == PARM_DECL
+ && gimple_in_ssa_p (cfun))
+ *tp = *(p->vars_map->get (t));
else if (TREE_CODE (t) == LABEL_DECL)
{
if (p->new_label_map)
struct walk_stmt_info *wi)
{
struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
- gimple stmt = gsi_stmt (*gsi_p);
+ gimple *stmt = gsi_stmt (*gsi_p);
tree block = gimple_block (stmt);
if (block == p->orig_block
case GIMPLE_RESX:
{
- int r = gimple_resx_region (stmt);
+ gresx *resx_stmt = as_a <gresx *> (stmt);
+ int r = gimple_resx_region (resx_stmt);
r = move_stmt_eh_region_nr (r, p);
- gimple_resx_set_region (stmt, r);
+ gimple_resx_set_region (resx_stmt, r);
}
break;
case GIMPLE_EH_DISPATCH:
{
- int r = gimple_eh_dispatch_region (stmt);
+ geh_dispatch *eh_dispatch_stmt = as_a <geh_dispatch *> (stmt);
+ int r = gimple_eh_dispatch_region (eh_dispatch_stmt);
r = move_stmt_eh_region_nr (r, p);
- gimple_eh_dispatch_set_region (stmt, r);
+ gimple_eh_dispatch_set_region (eh_dispatch_stmt, r);
}
break;
(*cfg->x_basic_block_info)[bb->index] = bb;
/* Remap the variables in phi nodes. */
- for (si = gsi_start_phis (bb); !gsi_end_p (si); )
+ for (gphi_iterator psi = gsi_start_phis (bb);
+ !gsi_end_p (psi); )
{
- gimple phi = gsi_stmt (si);
+ gphi *phi = psi.phi ();
use_operand_p use;
tree op = PHI_RESULT (phi);
ssa_op_iter oi;
{
/* Remove the phi nodes for virtual operands (alias analysis will be
run for the new function, anyway). */
- remove_phi_node (&si, true);
+ remove_phi_node (&psi, true);
continue;
}
continue;
if (d->orig_block == NULL_TREE || block == d->orig_block)
{
- if (d->new_block == NULL_TREE)
- locus = LOCATION_LOCUS (locus);
- else
- locus = COMBINE_LOCATION_DATA (line_table, locus, d->new_block);
+ locus = set_block (locus, d->new_block);
gimple_phi_arg_set_location (phi, i, locus);
}
}
- gsi_next (&si);
+ gsi_next (&psi);
}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
- gimple stmt = gsi_stmt (si);
+ gimple *stmt = gsi_stmt (si);
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = d;
walk_gimple_stmt (&si, move_stmt_r, move_stmt_op, &wi);
- if (gimple_code (stmt) == GIMPLE_LABEL)
+ if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
{
- tree label = gimple_label_label (stmt);
+ tree label = gimple_label_label (label_stmt);
int uid = LABEL_DECL_UID (label);
gcc_assert (uid > -1);
tree block = LOCATION_BLOCK (e->goto_locus);
if (d->orig_block == NULL_TREE
|| block == d->orig_block)
- e->goto_locus = d->new_block ?
- COMBINE_LOCATION_DATA (line_table, e->goto_locus, d->new_block) :
- LOCATION_LOCUS (e->goto_locus);
+ e->goto_locus = set_block (e->goto_locus, d->new_block);
}
}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
- gimple stmt = gsi_stmt (si);
+ gimple *stmt = gsi_stmt (si);
eh_region stmt_region;
int lp_nr;
return m->to;
}
+/* walk_tree callback used by replace_block_vars_by_duplicates: rewrite
+ each variable-like decl referenced from a DECL_VALUE_EXPR by its
+ duplicate in the destination function.  DATA is a replace_decls_d
+ carrying the decl map and the destination context. */
+
+static tree
+replace_block_vars_by_duplicates_1 (tree *tp, int *walk_subtrees, void *data)
+{
+  struct replace_decls_d *rd = (struct replace_decls_d *) data;
+  enum tree_code code = TREE_CODE (*tp);
+
+  /* Only storage-owning decls need remapping into the new context.  */
+  if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL)
+    replace_by_duplicate_decl (tp, rd->vars_map, rd->to_context);
+
+  /* Do not descend into types or (possibly just-replaced) decls.  */
+  if (IS_TYPE_OR_DECL_P (*tp))
+    *walk_subtrees = false;
+
+  return NULL;
+}
+
/* Change DECL_CONTEXT of all BLOCK_VARS in block, including
subblocks. */
{
if (TREE_CODE (*tp) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (*tp))
{
- SET_DECL_VALUE_EXPR (t, DECL_VALUE_EXPR (*tp));
+ tree x = DECL_VALUE_EXPR (*tp);
+ struct replace_decls_d rd = { vars_map, to_context };
+ unshare_expr (x);
+ walk_tree (&x, replace_block_vars_by_duplicates_1, &rd, NULL);
+ SET_DECL_VALUE_EXPR (t, x);
DECL_HAS_VALUE_EXPR_P (t) = 1;
}
DECL_CHAIN (t) = DECL_CHAIN (*tp);
fixup_loop_arrays_after_move (fn1, fn2, loop);
}
+/* Verify that the blocks in BBS_P are a single-entry, single-exit region
+ delimited by ENTRY and EXIT.  EXIT may be NULL, in which case the
+ region has no exit block (e.g. it ends in noreturn calls).  Every
+ violation aborts via gcc_assert; this is a checking-only helper. */
+
+DEBUG_FUNCTION void
+verify_sese (basic_block entry, basic_block exit, vec<basic_block> *bbs_p)
+{
+ basic_block bb;
+ edge_iterator ei;
+ edge e;
+ bitmap bbs = BITMAP_ALLOC (NULL);
+ int i;
+
+ gcc_assert (entry != NULL);
+ gcc_assert (entry != exit);
+ gcc_assert (bbs_p != NULL);
+
+ gcc_assert (bbs_p->length () > 0);
+
+ /* Collect the region's block indices for O(1) membership tests. */
+ FOR_EACH_VEC_ELT (*bbs_p, i, bb)
+ bitmap_set_bit (bbs, bb->index);
+
+ /* ENTRY (and EXIT, if present) must themselves be in the region. */
+ gcc_assert (bitmap_bit_p (bbs, entry->index));
+ gcc_assert (exit == NULL || bitmap_bit_p (bbs, exit->index));
+
+ FOR_EACH_VEC_ELT (*bbs_p, i, bb)
+ {
+ /* ENTRY is the only block allowed a predecessor outside the
+ region, and it must have exactly one. */
+ if (bb == entry)
+ {
+ gcc_assert (single_pred_p (entry));
+ gcc_assert (!bitmap_bit_p (bbs, single_pred (entry)->index));
+ }
+ else
+ for (ei = ei_start (bb->preds); !ei_end_p (ei); ei_next (&ei))
+ {
+ e = ei_edge (ei);
+ gcc_assert (bitmap_bit_p (bbs, e->src->index));
+ }
+
+ /* Symmetrically, EXIT is the only block allowed a successor
+ outside the region. */
+ if (bb == exit)
+ {
+ gcc_assert (single_succ_p (exit));
+ gcc_assert (!bitmap_bit_p (bbs, single_succ (exit)->index));
+ }
+ else
+ for (ei = ei_start (bb->succs); !ei_end_p (ei); ei_next (&ei))
+ {
+ e = ei_edge (ei);
+ gcc_assert (bitmap_bit_p (bbs, e->dest->index));
+ }
+ }
+
+ BITMAP_FREE (bbs);
+}
+
+/* Hash-map traversal callback: if the key FROM is an SSA name, record
+ its version number in the bitmap passed as DATA.  Always returns
+ true so the traversal continues over every entry. */
+
+bool
+gather_ssa_name_hash_map_from (tree const &from, tree const &, void *data)
+{
+  if (TREE_CODE (from) == SSA_NAME)
+    {
+      bitmap versions = (bitmap) data;
+      bitmap_set_bit (versions, SSA_NAME_VERSION (from));
+    }
+  return true;
+}
+
/* Move a single-entry, single-exit region delimited by ENTRY_BB and
EXIT_BB to function DEST_CFUN. The whole region is replaced by a
single basic block in the original CFG and the new basic block is
All local variables referenced in the region are assumed to be in
the corresponding BLOCK_VARS and unexpanded variable lists
- associated with DEST_CFUN. */
+ associated with DEST_CFUN.
+
+ TODO: investigate whether we can reuse gimple_duplicate_sese_region to
+ reimplement move_sese_region_to_fn by duplicating the region rather than
+ moving it. */
basic_block
move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
bbs.safe_push (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
+ if (flag_checking)
+ verify_sese (entry_bb, exit_bb, &bbs);
+
/* The blocks that used to be dominated by something in BBS will now be
dominated by the new block. */
dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
if (loops_for_fn (saved_cfun)->exits)
FOR_EACH_EDGE (e, ei, bb->succs)
{
- void **slot = htab_find_slot_with_hash
- (loops_for_fn (saved_cfun)->exits, e,
- htab_hash_pointer (e), NO_INSERT);
+ struct loops *l = loops_for_fn (saved_cfun);
+ loop_exit **slot
+ = l->exits->find_slot_with_hash (e, htab_hash_pointer (e),
+ NO_INSERT);
if (slot)
- htab_clear_slot (loops_for_fn (saved_cfun)->exits, slot);
+ l->exits->clear_slot (slot);
}
}
d.eh_map = eh_map;
d.remap_decls_p = true;
+ if (gimple_in_ssa_p (cfun))
+ for (tree arg = DECL_ARGUMENTS (d.to_context); arg; arg = DECL_CHAIN (arg))
+ {
+ tree narg = make_ssa_name_fn (dest_cfun, arg, gimple_build_nop ());
+ set_ssa_default_def (dest_cfun, arg, narg);
+ vars_map.put (arg, narg);
+ }
+
FOR_EACH_VEC_ELT (bbs, i, bb)
{
/* No need to update edge counts on the last block. It has
if (eh_map)
delete eh_map;
+ if (gimple_in_ssa_p (cfun))
+ {
+ /* We need to release ssa-names in a defined order, so first find them,
+ and then iterate in ascending version order. */
+ bitmap release_names = BITMAP_ALLOC (NULL);
+ vars_map.traverse<void *, gather_ssa_name_hash_map_from> (release_names);
+ bitmap_iterator bi;
+ unsigned i;
+ EXECUTE_IF_SET_IN_BITMAP (release_names, 0, i, bi)
+ release_ssa_name (ssa_name (i));
+ BITMAP_FREE (release_names);
+ }
+
/* Rewire the entry and exit blocks. The successor to the entry
block turns into the successor of DEST_FN's ENTRY_BLOCK_PTR in
the child function. Similarly, the predecessor of DEST_FN's
return bb;
}
+/* Dump the default definition DEF (an SSA name) to FILE as a
+ pseudo-declaration of the form "type ssa_name = var;", indented by
+ SPC spaces and honoring the TDF_* dump FLAGS. */
+
+static void
+dump_default_def (FILE *file, tree def, int spc, int flags)
+{
+  int pad = spc;
+  while (pad-- > 0)
+    fprintf (file, " ");
+  dump_ssaname_info_to_file (file, def, spc);
+
+  print_generic_expr (file, TREE_TYPE (def), flags);
+  fprintf (file, " ");
+  print_generic_expr (file, def, flags);
+  fprintf (file, " = ");
+  print_generic_expr (file, SSA_NAME_VAR (def), flags);
+  fprintf (file, ";\n");
+}
/* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in dumpfile.h)
*/
&& decl_is_tm_clone (fndecl));
struct function *fun = DECL_STRUCT_FUNCTION (fndecl);
+ if (DECL_ATTRIBUTES (fndecl) != NULL_TREE)
+ {
+ fprintf (file, "__attribute__((");
+
+ bool first = true;
+ tree chain;
+ for (chain = DECL_ATTRIBUTES (fndecl); chain;
+ first = false, chain = TREE_CHAIN (chain))
+ {
+ if (!first)
+ fprintf (file, ", ");
+
+ print_generic_expr (file, get_attribute_name (chain), dump_flags);
+ if (TREE_VALUE (chain) != NULL_TREE)
+ {
+ fprintf (file, " (");
+ print_generic_expr (file, TREE_VALUE (chain), dump_flags);
+ fprintf (file, ")");
+ }
+ }
+
+ fprintf (file, "))\n");
+ }
+
current_function_decl = fndecl;
fprintf (file, "%s %s(", function_name (fun), tmclone ? "[tm-clone] " : "");
ignore_topmost_bind = true;
fprintf (file, "{\n");
+ if (gimple_in_ssa_p (fun)
+ && (flags & TDF_ALIAS))
+ {
+ for (arg = DECL_ARGUMENTS (fndecl); arg != NULL;
+ arg = DECL_CHAIN (arg))
+ {
+ tree def = ssa_default_def (fun, arg);
+ if (def)
+ dump_default_def (file, def, 2, flags);
+ }
+
+ tree res = DECL_RESULT (fun->decl);
+ if (res != NULL_TREE
+ && DECL_BY_REFERENCE (res))
+ {
+ tree def = ssa_default_def (fun, res);
+ if (def)
+ dump_default_def (file, def, 2, flags);
+ }
+
+ tree static_chain = fun->static_chain_decl;
+ if (static_chain != NULL_TREE)
+ {
+ tree def = ssa_default_def (fun, static_chain);
+ if (def)
+ dump_default_def (file, def, 2, flags);
+ }
+ }
+
if (!vec_safe_is_empty (fun->local_decls))
FOR_EACH_LOCAL_DECL (fun, ix, var)
{
else
{
if (!ignore_topmost_bind)
- fprintf (file, "{\n");
+ {
+ fprintf (file, "{\n");
+ /* No topmost bind, pretend it's ignored for later. */
+ ignore_topmost_bind = true;
+ }
indent = 2;
}
fprintf (file, ", upper_bound = ");
print_decu (loop->nb_iterations_upper_bound, file);
}
+ if (loop->any_likely_upper_bound)
+ {
+ fprintf (file, ", likely_upper_bound = ");
+ print_decu (loop->nb_iterations_likely_upper_bound, file);
+ }
if (loop->any_estimate)
{
basic_block bb;
bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ fprintf (file, "\nLoops in function: %s\n", current_function_name ());
if (bb && bb->loop_father)
print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
}
static bool
gimple_block_ends_with_condjump_p (const_basic_block bb)
{
- gimple stmt = last_stmt (CONST_CAST_BB (bb));
+ gimple *stmt = last_stmt (CONST_CAST_BB (bb));
return (stmt && gimple_code (stmt) == GIMPLE_COND);
}
-/* Return true if we need to add fake edge to exit at statement T.
- Helper function for gimple_flow_call_edges_add. */
+/* Return true if statement T may terminate execution of BB in ways not
+ explicitly represtented in the CFG. */
-static bool
-need_fake_edge_p (gimple t)
+bool
+stmt_can_terminate_bb_p (gimple *t)
{
tree fndecl = NULL_TREE;
int call_flags = 0;
+ /* Eh exception not handled internally terminates execution of the whole
+ function. */
+ if (stmt_can_throw_external (t))
+ return true;
+
/* NORETURN and LONGJMP calls already have an edge to exit.
CONST and PURE calls do not need one.
We don't currently check for CONST and PURE here, although
edge e;
basic_block bb;
+ if (call_flags & (ECF_PURE | ECF_CONST)
+ && !(call_flags & ECF_LOOPING_CONST_OR_PURE))
+ return false;
+
+ /* Function call may do longjmp, terminate program or do other things.
+ Special case noreturn that have non-abnormal edges out as in this case
+ the fact is sufficiently represented by lack of edges out of T. */
if (!(call_flags & ECF_NORETURN))
return true;
return true;
}
- if (gimple_code (t) == GIMPLE_ASM
- && (gimple_asm_volatile_p (t) || gimple_asm_input_p (t)))
- return true;
+ if (gasm *asm_stmt = dyn_cast <gasm *> (t))
+ if (gimple_asm_volatile_p (asm_stmt) || gimple_asm_input_p (asm_stmt))
+ return true;
return false;
}
{
basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
- gimple t = NULL;
+ gimple *t = NULL;
if (!gsi_end_p (gsi))
t = gsi_stmt (gsi);
- if (t && need_fake_edge_p (t))
+ if (t && stmt_can_terminate_bb_p (t))
{
edge e;
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
gimple_stmt_iterator gsi;
- gimple stmt, last_stmt;
+ gimple *stmt, *last_stmt;
if (!bb)
continue;
do
{
stmt = gsi_stmt (gsi);
- if (need_fake_edge_p (stmt))
+ if (stmt_can_terminate_bb_p (stmt))
{
edge e;
no edge to the exit block in CFG already.
Calling make_edge in such case would cause us to
mark that edge as fake and remove it later. */
-#ifdef ENABLE_CHECKING
- if (stmt == last_stmt)
+ if (flag_checking && stmt == last_stmt)
{
e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
-#endif
/* Note that the following may create a new basic block
and renumber the existing basic blocks. */
basic_block bb, dbb;
bitmap_iterator bi;
+ /* If we are removing a path inside a non-root loop that may change
+ loop ownership of blocks or remove loops. Mark loops for fixup. */
+ if (current_loops
+ && loop_outer (e->src->loop_father) != NULL
+ && e->src->loop_father == e->dest->loop_father)
+ loops_state_set (LOOPS_NEED_FIXUP);
+
if (!dom_info_available_p (CDI_DOMINATORS))
{
remove_edge (e);
bool changed = false;
edge e;
edge_iterator ei;
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (stmt && stmt_can_throw_internal (stmt))
return false;
bool changed = false;
edge e;
edge_iterator ei;
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (!cfun->has_nonlocal_label
&& !cfun->calls_setjmp)
gimple_lv_adjust_loop_header_phi (basic_block first, basic_block second,
basic_block new_head, edge e)
{
- gimple phi1, phi2;
- gimple_stmt_iterator psi1, psi2;
+ gphi *phi1, *phi2;
+ gphi_iterator psi1, psi2;
tree def;
edge e2 = find_edge (new_head, second);
!gsi_end_p (psi2) && !gsi_end_p (psi1);
gsi_next (&psi2), gsi_next (&psi1))
{
- phi1 = gsi_stmt (psi1);
- phi2 = gsi_stmt (psi2);
+ phi1 = psi1.phi ();
+ phi2 = psi2.phi ();
def = PHI_ARG_DEF (phi2, e2->dest_idx);
add_phi_arg (phi1, def, e, gimple_phi_arg_location_from_edge (phi2, e2));
}
basic_block cond_bb, void *cond_e)
{
gimple_stmt_iterator gsi;
- gimple new_cond_expr;
+ gimple *new_cond_expr;
tree cond_expr = (tree) cond_e;
edge e0;
}
+/* Insert COND, which must be a GIMPLE_COND statement, after STMT in
+ basic block BB.  BB is split after STMT; a new empty basic block is
+ created that is executed when COND is true, and the original
+ fallthrough path becomes COND's false edge.  Dominator and loop
+ information are updated when available.
+ Return the newly created (conditionally executed) basic block. */
+basic_block
+insert_cond_bb (basic_block bb, gimple *stmt, gimple *cond)
+{
+ edge fall = split_block (bb, stmt);
+ gimple_stmt_iterator iter = gsi_last_bb (bb);
+ basic_block new_bb;
+
+ /* Insert cond statement at the end of the (now truncated) BB; if the
+ split left BB empty, COND becomes its only statement. */
+ gcc_assert (gimple_code (cond) == GIMPLE_COND);
+ if (gsi_end_p (iter))
+ gsi_insert_before (&iter, cond, GSI_CONTINUE_LINKING);
+ else
+ gsi_insert_after (&iter, cond, GSI_CONTINUE_LINKING);
+
+ /* Create conditionally executed block, reached on COND's true edge
+ and falling through to the second half of the original BB. */
+ new_bb = create_empty_bb (bb);
+ make_edge (bb, new_bb, EDGE_TRUE_VALUE);
+ make_single_succ_edge (new_bb, fall->dest, EDGE_FALLTHRU);
+
+ /* Fix edge for split bb: the old fallthrough is now the false path. */
+ fall->flags = EDGE_FALSE_VALUE;
+
+ /* Update dominance info: BB dominates both the new block and the
+ join block. */
+ if (dom_info_available_p (CDI_DOMINATORS))
+ {
+ set_immediate_dominator (CDI_DOMINATORS, new_bb, bb);
+ set_immediate_dominator (CDI_DOMINATORS, fall->dest, bb);
+ }
+
+ /* Update loop info: the new block lives in BB's loop. */
+ if (current_loops)
+ add_bb_to_loop (new_bb, bb->loop_father);
+
+ return new_bb;
+}
+
/* Build a ternary operation and gimplify it. Emit code before GSI.
Return the gimple_val holding the result. */
}
}
+
+/* Helper for extract_true_false_controlled_edges: return true if PRED,
+ an edge into the PHI block, is fully controlled by CTRL (the true or
+ false edge out of the predicate block): either PRED is CTRL itself,
+ or CTRL's destination has a single predecessor and PRED's source is
+ that destination or is dominated by it. */
+
+static bool
+edge_controlled_by_p (edge pred, edge ctrl)
+{
+  return (pred == ctrl
+	  || (single_pred_p (ctrl->dest)
+	      && (pred->src == ctrl->dest
+		  || dominated_by_p (CDI_DOMINATORS,
+				     pred->src, ctrl->dest))));
+}
+
+/* From a controlling predicate in the immediate dominator DOM of
+ PHIBLOCK determine the edges into PHIBLOCK that are chosen if the
+ predicate evaluates to true and false and store them to
+ *TRUE_CONTROLLED_EDGE and *FALSE_CONTROLLED_EDGE if
+ they are non-NULL.  Returns true if the edges can be determined,
+ else return false. */
+
+bool
+extract_true_false_controlled_edges (basic_block dom, basic_block phiblock,
+				     edge *true_controlled_edge,
+				     edge *false_controlled_edge)
+{
+  edge true_edge, false_edge;
+  edge e0 = NULL, e1 = NULL;
+
+  /* We have to verify that one edge into the PHI node is dominated
+     by the true edge of the predicate block and the other edge
+     dominated by the false edge.  This ensures that the PHI argument
+     we are going to take is completely determined by the path we
+     take from the predicate block.
+     We can only use BB dominance checks below if the destination of
+     the true/false edges are dominated by their edge, thus only
+     have a single predecessor.  */
+  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
+  for (unsigned ix = 0; ix < 2; ++ix)
+    {
+      edge tem = EDGE_PRED (phiblock, ix);
+      if (edge_controlled_by_p (tem, true_edge))
+	e0 = tem;
+      else if (edge_controlled_by_p (tem, false_edge))
+	e1 = tem;
+      else
+	return false;
+    }
+  /* Fail if both predecessors matched the same side of the
+     predicate.  */
+  if (!e0 || !e1)
+    return false;
+
+  if (true_controlled_edge)
+    *true_controlled_edge = e0;
+  if (false_controlled_edge)
+    *false_controlled_edge = e1;
+
+  return true;
+}
+
+
+
/* Emit return warnings. */
namespace {
pass_warn_function_return::execute (function *fun)
{
source_location location;
- gimple last;
+ gimple *last;
edge e;
edge_iterator ei;
{
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
{
- gimple last = last_stmt (e->src);
- if (gimple_code (last) == GIMPLE_RETURN
- && gimple_return_retval (last) == NULL
+ gimple *last = last_stmt (e->src);
+ greturn *return_stmt = dyn_cast <greturn *> (last);
+ if (return_stmt
+ && gimple_return_retval (return_stmt) == NULL
&& !gimple_no_warning_p (last))
{
location = gimple_location (last);
for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
{
- gimple g = gsi_stmt (i);
+ gimple *g = gsi_stmt (i);
switch (gimple_code (g))
{
case GIMPLE_BIND:
- do_warn_unused_result (gimple_bind_body (g));
+ do_warn_unused_result (gimple_bind_body (as_a <gbind *>(g)));
break;
case GIMPLE_TRY:
do_warn_unused_result (gimple_try_eval (g));
do_warn_unused_result (gimple_try_cleanup (g));
break;
case GIMPLE_CATCH:
- do_warn_unused_result (gimple_catch_handler (g));
+ do_warn_unused_result (gimple_catch_handler (
+ as_a <gcatch *> (g)));
break;
case GIMPLE_EH_FILTER:
do_warn_unused_result (gimple_eh_filter_failure (g));
bb->count = apply_scale (bb->count, count_scale);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
- gimple stmt = gsi_stmt (gsi);
+ gimple *stmt = gsi_stmt (gsi);
tree decl = is_gimple_call (stmt)
? gimple_call_fndecl (stmt)
: NULL;
when inlining a noreturn call that does in fact return. */
if (EDGE_COUNT (bb->succs) == 0)
{
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (!stmt
|| (!is_ctrl_stmt (stmt)
&& (!is_gimple_call (stmt)
if (count_scale != REG_BR_PROB_BASE)
compute_function_frequency ();
- /* Dump a textual representation of the flowgraph. */
- if (dump_file)
- gimple_dump_cfg (dump_file, dump_flags);
-
if (current_loops
&& (todo & TODO_cleanup_cfg))
loops_state_set (LOOPS_NEED_FIXUP);
const pass_data pass_data_fixup_cfg =
{
GIMPLE_PASS, /* type */
- "*free_cfg_annotations", /* name */
+ "fixup_cfg", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_cfg, /* properties_required */
/* Garbage collection support for edge_def. */
extern void gt_ggc_mx (tree&);
-extern void gt_ggc_mx (gimple&);
+extern void gt_ggc_mx (gimple *&);
extern void gt_ggc_mx (rtx&);
extern void gt_ggc_mx (basic_block&);
/* PCH support for edge_def. */
extern void gt_pch_nx (tree&);
-extern void gt_pch_nx (gimple&);
+extern void gt_pch_nx (gimple *&);
extern void gt_pch_nx (rtx&);
extern void gt_pch_nx (basic_block&);
op (&(e->insns.r), cookie);
op (&(block), cookie);
}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Helper function for CFG selftests: create a dummy function decl for
+ "int NAME (void)", push it as cfun, and give it an empty CFG (just
+ ENTRY and EXIT, with no edges).  The caller is responsible for
+ calling pop_cfun when done.  Return the new fndecl. */
+
+static tree
+push_fndecl (const char *name)
+{
+ tree fn_type = build_function_type_array (integer_type_node, 0, NULL);
+ /* FIXME: this uses input_location: */
+ tree fndecl = build_fn_decl (name, fn_type);
+ tree retval = build_decl (UNKNOWN_LOCATION, RESULT_DECL,
+ NULL_TREE, integer_type_node);
+ DECL_RESULT (fndecl) = retval;
+ push_struct_function (fndecl);
+ function *fun = DECL_STRUCT_FUNCTION (fndecl);
+ ASSERT_TRUE (fun != NULL);
+ init_empty_tree_cfg_for_function (fun);
+ /* The freshly-initialized CFG contains only ENTRY and EXIT. */
+ ASSERT_EQ (2, n_basic_blocks_for_fn (fun));
+ ASSERT_EQ (0, n_edges_for_fn (fun));
+ return fndecl;
+}
+
+/* These tests directly create CFGs.
+ Compare with the static fns within tree-cfg.c:
+ - build_gimple_cfg
+ - make_blocks: calls create_basic_block (seq, bb);
+ - make_edges. */
+
+/* Verify a simple cfg of the form:
+ ENTRY -> A -> B -> C -> EXIT. */
+
+static void
+test_linear_chain ()
+{
+ gimple_register_cfg_hooks ();
+
+ tree fndecl = push_fndecl ("cfg_test_linear_chain");
+ function *fun = DECL_STRUCT_FUNCTION (fndecl);
+
+ /* Create some empty blocks. */
+ basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
+ basic_block bb_b = create_empty_bb (bb_a);
+ basic_block bb_c = create_empty_bb (bb_b);
+
+ /* ENTRY and EXIT plus the three new blocks. */
+ ASSERT_EQ (5, n_basic_blocks_for_fn (fun));
+ ASSERT_EQ (0, n_edges_for_fn (fun));
+
+ /* Create some edges: a simple linear chain of BBs. */
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
+ make_edge (bb_a, bb_b, 0);
+ make_edge (bb_b, bb_c, 0);
+ make_edge (bb_c, EXIT_BLOCK_PTR_FOR_FN (fun), 0);
+
+ /* Verify the edges.  ENTRY has no preds vector and EXIT no succs
+ vector; every block in the chain has exactly one of each. */
+ ASSERT_EQ (4, n_edges_for_fn (fun));
+ ASSERT_EQ (NULL, ENTRY_BLOCK_PTR_FOR_FN (fun)->preds);
+ ASSERT_EQ (1, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs->length ());
+ ASSERT_EQ (1, bb_a->preds->length ());
+ ASSERT_EQ (1, bb_a->succs->length ());
+ ASSERT_EQ (1, bb_b->preds->length ());
+ ASSERT_EQ (1, bb_b->succs->length ());
+ ASSERT_EQ (1, bb_c->preds->length ());
+ ASSERT_EQ (1, bb_c->succs->length ());
+ ASSERT_EQ (1, EXIT_BLOCK_PTR_FOR_FN (fun)->preds->length ());
+ ASSERT_EQ (NULL, EXIT_BLOCK_PTR_FOR_FN (fun)->succs);
+
+ /* Verify the dominance information
+ Each BB in our simple chain should be dominated by the one before
+ it. */
+ calculate_dominance_info (CDI_DOMINATORS);
+ ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
+ ASSERT_EQ (bb_b, get_immediate_dominator (CDI_DOMINATORS, bb_c));
+ vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
+ ASSERT_EQ (1, dom_by_b.length ());
+ ASSERT_EQ (bb_c, dom_by_b[0]);
+ free_dominance_info (CDI_DOMINATORS);
+ dom_by_b.release ();
+
+ /* Similarly for post-dominance: each BB in our chain is post-dominated
+ by the one after it. */
+ calculate_dominance_info (CDI_POST_DOMINATORS);
+ ASSERT_EQ (bb_b, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
+ ASSERT_EQ (bb_c, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
+ vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
+ ASSERT_EQ (1, postdom_by_b.length ());
+ ASSERT_EQ (bb_a, postdom_by_b[0]);
+ free_dominance_info (CDI_POST_DOMINATORS);
+ postdom_by_b.release ();
+
+ pop_cfun ();
+}
+
+/* Verify a simple CFG of the form:
+ ENTRY
+ |
+ A
+ / \
+ /t \f
+ B C
+ \ /
+ \ /
+ D
+ |
+ EXIT. */
+
+static void
+test_diamond ()
+{
+ gimple_register_cfg_hooks ();
+
+ tree fndecl = push_fndecl ("cfg_test_diamond");
+ function *fun = DECL_STRUCT_FUNCTION (fndecl);
+
+ /* Create some empty blocks. */
+ basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
+ basic_block bb_b = create_empty_bb (bb_a);
+ basic_block bb_c = create_empty_bb (bb_a);
+ basic_block bb_d = create_empty_bb (bb_b);
+
+ /* ENTRY and EXIT plus the four new blocks. */
+ ASSERT_EQ (6, n_basic_blocks_for_fn (fun));
+ ASSERT_EQ (0, n_edges_for_fn (fun));
+
+ /* Create the edges: a branch at A, re-joining at D. */
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
+ make_edge (bb_a, bb_b, EDGE_TRUE_VALUE);
+ make_edge (bb_a, bb_c, EDGE_FALSE_VALUE);
+ make_edge (bb_b, bb_d, 0);
+ make_edge (bb_c, bb_d, 0);
+ make_edge (bb_d, EXIT_BLOCK_PTR_FOR_FN (fun), 0);
+
+ /* Verify the edges. */
+ ASSERT_EQ (6, n_edges_for_fn (fun));
+ ASSERT_EQ (1, bb_a->preds->length ());
+ ASSERT_EQ (2, bb_a->succs->length ());
+ ASSERT_EQ (1, bb_b->preds->length ());
+ ASSERT_EQ (1, bb_b->succs->length ());
+ ASSERT_EQ (1, bb_c->preds->length ());
+ ASSERT_EQ (1, bb_c->succs->length ());
+ ASSERT_EQ (2, bb_d->preds->length ());
+ ASSERT_EQ (1, bb_d->succs->length ());
+
+ /* Verify the dominance information: A dominates everything, and
+ neither arm of the diamond dominates the other. */
+ calculate_dominance_info (CDI_DOMINATORS);
+ ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
+ ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_c));
+ ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_d));
+ vec<basic_block> dom_by_a = get_dominated_by (CDI_DOMINATORS, bb_a);
+ ASSERT_EQ (3, dom_by_a.length ()); /* B, C, D, in some order. */
+ dom_by_a.release ();
+ vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
+ ASSERT_EQ (0, dom_by_b.length ());
+ dom_by_b.release ();
+ free_dominance_info (CDI_DOMINATORS);
+
+ /* Similarly for post-dominance: D post-dominates everything. */
+ calculate_dominance_info (CDI_POST_DOMINATORS);
+ ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
+ ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
+ ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_c));
+ vec<basic_block> postdom_by_d = get_dominated_by (CDI_POST_DOMINATORS, bb_d);
+ ASSERT_EQ (3, postdom_by_d.length ()); /* A, B, C in some order. */
+ postdom_by_d.release ();
+ vec<basic_block> postdom_by_b = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
+ ASSERT_EQ (0, postdom_by_b.length ());
+ postdom_by_b.release ();
+ free_dominance_info (CDI_POST_DOMINATORS);
+
+ pop_cfun ();
+}
+
+/* Verify that we can handle a CFG containing a "complete" aka
+ fully-connected subgraph (where A B C D below all have edges
+ pointing to each other node, also to themselves).
+ e.g.:
+ ENTRY EXIT
+ | ^
+ | /
+ | /
+ | /
+ V/
+ A<--->B
+ ^^ ^^
+ | \ / |
+ | X |
+ | / \ |
+ VV VV
+ C<--->D
+*/
+
+static void
+test_fully_connected ()
+{
+ gimple_register_cfg_hooks ();
+
+ tree fndecl = push_fndecl ("cfg_fully_connected");
+ function *fun = DECL_STRUCT_FUNCTION (fndecl);
+
+ const int n = 4;
+
+ /* Create some empty blocks. */
+ auto_vec <basic_block> subgraph_nodes;
+ for (int i = 0; i < n; i++)
+ subgraph_nodes.safe_push (create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun)));
+
+ /* ENTRY and EXIT plus the N subgraph blocks. */
+ ASSERT_EQ (n + 2, n_basic_blocks_for_fn (fun));
+ ASSERT_EQ (0, n_edges_for_fn (fun));
+
+ /* Create the edges: ENTRY and EXIT attach to node 0 only; every
+ subgraph node gets an edge to every subgraph node (self included). */
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), subgraph_nodes[0], EDGE_FALLTHRU);
+ make_edge (subgraph_nodes[0], EXIT_BLOCK_PTR_FOR_FN (fun), 0);
+ for (int i = 0; i < n; i++)
+ for (int j = 0; j < n; j++)
+ make_edge (subgraph_nodes[i], subgraph_nodes[j], 0);
+
+ /* Verify the edges. */
+ ASSERT_EQ (2 + (n * n), n_edges_for_fn (fun));
+ /* The first one is linked to ENTRY/EXIT as well as itself and
+ everything else. */
+ ASSERT_EQ (n + 1, subgraph_nodes[0]->preds->length ());
+ ASSERT_EQ (n + 1, subgraph_nodes[0]->succs->length ());
+ /* The other ones in the subgraph are linked to everything in
+ the subgraph (including themselves). */
+ for (int i = 1; i < n; i++)
+ {
+ ASSERT_EQ (n, subgraph_nodes[i]->preds->length ());
+ ASSERT_EQ (n, subgraph_nodes[i]->succs->length ());
+ }
+
+ /* Verify the dominance information. */
+ calculate_dominance_info (CDI_DOMINATORS);
+ /* The initial block in the subgraph should be dominated by ENTRY. */
+ ASSERT_EQ (ENTRY_BLOCK_PTR_FOR_FN (fun),
+ get_immediate_dominator (CDI_DOMINATORS,
+ subgraph_nodes[0]));
+ /* Every other block in the subgraph should be dominated by the
+ initial block. */
+ for (int i = 1; i < n; i++)
+ ASSERT_EQ (subgraph_nodes[0],
+ get_immediate_dominator (CDI_DOMINATORS,
+ subgraph_nodes[i]));
+ free_dominance_info (CDI_DOMINATORS);
+
+ /* Similarly for post-dominance. */
+ calculate_dominance_info (CDI_POST_DOMINATORS);
+ /* The initial block in the subgraph should be postdominated by EXIT. */
+ ASSERT_EQ (EXIT_BLOCK_PTR_FOR_FN (fun),
+ get_immediate_dominator (CDI_POST_DOMINATORS,
+ subgraph_nodes[0]));
+ /* Every other block in the subgraph should be postdominated by the
+ initial block, since that leads to EXIT. */
+ for (int i = 1; i < n; i++)
+ ASSERT_EQ (subgraph_nodes[0],
+ get_immediate_dominator (CDI_POST_DOMINATORS,
+ subgraph_nodes[i]));
+ free_dominance_info (CDI_POST_DOMINATORS);
+
+ pop_cfun ();
+}
+
+/* Run all of the selftests within this file.  Each test pushes its
+ own dummy cfun (via push_fndecl) and pops it again on exit, so the
+ tests are independent of one another. */
+
+void
+tree_cfg_c_tests ()
+{
+ test_linear_chain ();
+ test_diamond ();
+ test_fully_connected ();
+}
+
+} // namespace selftest
+
+/* TODO: test the dominator/postdominator logic with various graphs/nodes:
+ - loop
+ - nested loops
+ - switch statement (a block with many out-edges)
+ - something that jumps to itself
+ - etc */
+
+#endif /* CHECKING_P */