#include "builtins.h"
#include "range-op.h"
-/* Set of SSA names found live during the RPO traversal of the function
- for still active basic-blocks. */
-static sbitmap *live;
+
+
+/* Location information for ASSERT_EXPRs. Each instance of this
+ structure describes an ASSERT_EXPR for an SSA name. Since a single
+ SSA name may have more than one assertion associated with it, these
+ locations are kept in a linked list attached to the corresponding
+ SSA name. */
+struct assert_locus
+{
+ /* Basic block where the assertion would be inserted. */
+ basic_block bb;
+
+ /* Some assertions need to be inserted on an edge (e.g., assertions
+ generated by COND_EXPRs). In those cases, BB will be NULL. */
+ edge e;
+
+ /* Pointer to the statement that generated this assertion. */
+ gimple_stmt_iterator si;
+
+ /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
+ enum tree_code comp_code;
+
+ /* Value being compared against. */
+ tree val;
+
+ /* Expression to compare. */
+ tree expr;
+
+ /* Next node in the linked list. */
+ assert_locus *next;
+};
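+
+/* Illustration only (a sketch, not part of this change): if SSA name
+   p_4 is guarded by both "p_4 != 0" and "p_4 > q_7", the list attached
+   to p_4 holds two assert_locus nodes, one per predicate, chained
+   through their NEXT fields.  */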
+
+class vrp_insert
+{
+ public:
+
+ vrp_insert (struct function *fn)
+ {
+ fun = fn;
+ }
+
+  /* Traverse the flowgraph looking for conditional jumps at which to
+     insert range expressions.  These range expressions are meant to
+     provide information to optimizations that need to reason in terms
+     of value ranges.  They will not be expanded into RTL.  See the
+     method implementation comment for an example.  */
+ void insert_range_assertions ();
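+
+  /* Illustration only (a sketch based on the pass documentation, not
+     part of this change): a guard such as
+
+       if (x_3 == 42)
+         S1;
+       else
+         S2;
+
+     is annotated so that each arm knows the outcome of the predicate:
+
+       if (x_3 == 42)
+         {
+           x_4 = ASSERT_EXPR <x_3, x_3 == 42>;
+           S1;
+         }
+       else
+         {
+           x_5 = ASSERT_EXPR <x_3, x_3 != 42>;
+           S2;
+         }
+
+     Uses of x_4 in S1 may then assume x_3 == 42, and uses of x_5 in S2
+     may assume x_3 != 42.  */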
+
+
+ /* Dump all the registered assertions for all the names to FILE. */
+ void dump_all_asserts (FILE *);
+
+ /* Dump all the registered assertions for NAME to FILE. */
+ void dump_asserts_for (FILE *file, tree name);
+
+ /* Dump all the registered assertions for NAME to stderr. */
+ DEBUG_FUNCTION void debug_asserts_for (tree name)
+ {
+ dump_asserts_for (stderr, name);
+ }
+
+  /* Dump all the registered assertions for all the names to stderr.  */
+  DEBUG_FUNCTION void debug_all_asserts ()
+ {
+ dump_all_asserts (stderr);
+ }
+
+ private:
+ /* Set of SSA names found live during the RPO traversal of the function
+ for still active basic-blocks. */
+ sbitmap *live;
+
+ /* Function to work on. */
+ struct function *fun;
+
+ /* If bit I is present, it means that SSA name N_i has a list of
+ assertions that should be inserted in the IL. */
+ bitmap need_assert_for;
+
+  /* Array of location lists describing where to insert assertions.
+     ASSERTS_FOR[I] holds a list of assert_locus nodes that describe where
+ ASSERT_EXPRs for SSA name N_I should be inserted. */
+ assert_locus **asserts_for;
+
+ /* Return true if the SSA name NAME is live on the edge E. */
+ bool live_on_edge (edge e, tree name);
+
+ /* Finish found ASSERTS for E and register them at GSI. */
+ void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
+ vec<assert_info> &asserts);
+
+ /* Determine whether the outgoing edges of BB should receive an
+ ASSERT_EXPR for each of the operands of BB's LAST statement.
+ The last statement of BB must be a SWITCH_EXPR.
+
+ If any of the sub-graphs rooted at BB have an interesting use of
+ the predicate operands, an assert location node is added to the
+ list of assertions for the corresponding operands. */
+ void find_switch_asserts (basic_block bb, gswitch *last);
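+
+  /* Illustration only (not part of this change): for
+
+       switch (x_5)
+         {
+         case 3:  ...
+         default: ...
+         }
+
+     the edge leading to the "case 3" block may receive an assertion of
+     the form x_6 = ASSERT_EXPR <x_5, x_5 == 3>, while a case range
+     LOW ... HIGH would typically be expressed with a pair of >= LOW
+     and <= HIGH assertions.  */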
+
+
+ /* Do an RPO walk over the function computing SSA name liveness
+ on-the-fly and deciding on assert expressions to insert. */
+ void find_assert_locations ();
+
+ /* Traverse all the statements in block BB looking for statements that
+     may generate useful assertions for the SSA names in their operands.
+     See the method implementation commentary for more information.  */
+ void find_assert_locations_1 (basic_block bb, sbitmap live);
+
+ /* Determine whether the outgoing edges of BB should receive an
+ ASSERT_EXPR for each of the operands of BB's LAST statement.
+ The last statement of BB must be a COND_EXPR.
+
+ If any of the sub-graphs rooted at BB have an interesting use of
+ the predicate operands, an assert location node is added to the
+ list of assertions for the corresponding operands. */
+ void find_conditional_asserts (basic_block bb, gcond *last);
+
+
+  /* Process all the insertions registered for every name N_i in
+     NEED_ASSERT_FOR.  The list of assertions to be inserted is found
+     in ASSERTS_FOR[i].  */
+  void process_assert_insertions ();
+
+
+ /* If NAME doesn't have an ASSERT_EXPR registered for asserting
+ 'EXPR COMP_CODE VAL' at a location that dominates block BB or
+ E->DEST, then register this location as a possible insertion point
+ for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
+
+ BB, E and SI provide the exact insertion point for the new
+ ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
+ on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
+ BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
+ must not be NULL. */
+ void register_new_assert_for (tree name, tree expr,
+ enum tree_code comp_code,
+ tree val,
+ basic_block bb,
+ edge e,
+ gimple_stmt_iterator si);
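+
+  /* Illustration only (not part of this change): for the true edge of
+     a guard "if (p_4 != 0)", this would typically be called with NAME
+     and EXPR set to p_4, COMP_CODE set to NE_EXPR, VAL set to the
+     constant 0, BB set to NULL, and E set to that edge.  */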
+};
void
value_range_equiv::set_equiv (bitmap equiv)
/* Return true if the SSA name NAME is live on the edge E. */
-static bool
-live_on_edge (edge e, tree name)
+bool
+vrp_insert::live_on_edge (edge e, tree name)
{
return (live[e->dest->index]
&& bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}
-/* Location information for ASSERT_EXPRs. Each instance of this
- structure describes an ASSERT_EXPR for an SSA name. Since a single
- SSA name may have more than one assertion associated with it, these
- locations are kept in a linked list attached to the corresponding
- SSA name. */
-struct assert_locus
-{
- /* Basic block where the assertion would be inserted. */
- basic_block bb;
-
- /* Some assertions need to be inserted on an edge (e.g., assertions
- generated by COND_EXPRs). In those cases, BB will be NULL. */
- edge e;
-
- /* Pointer to the statement that generated this assertion. */
- gimple_stmt_iterator si;
-
- /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
- enum tree_code comp_code;
-
- /* Value being compared against. */
- tree val;
-
- /* Expression to compare. */
- tree expr;
-
- /* Next node in the linked list. */
- assert_locus *next;
-};
-
-/* If bit I is present, it means that SSA name N_i has a list of
- assertions that should be inserted in the IL. */
-static bitmap need_assert_for;
-
-/* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
- holds a list of ASSERT_LOCUS_T nodes that describe where
- ASSERT_EXPRs for SSA name N_I should be inserted. */
-static assert_locus **asserts_for;
/* VR_TYPE describes a range with mininum value *MIN and maximum
value *MAX. Restrict the range to the set of values that have
}
-void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);
/* Dump all the registered assertions for NAME to FILE. */
void
-dump_asserts_for (FILE *file, tree name)
+vrp_insert::dump_asserts_for (FILE *file, tree name)
{
assert_locus *loc;
}
-/* Dump all the registered assertions for NAME to stderr. */
-
-DEBUG_FUNCTION void
-debug_asserts_for (tree name)
-{
- dump_asserts_for (stderr, name);
-}
-
-
/* Dump all the registered assertions for all the names to FILE. */
void
-dump_all_asserts (FILE *file)
+vrp_insert::dump_all_asserts (FILE *file)
{
unsigned i;
bitmap_iterator bi;
}
-/* Dump all the registered assertions for all the names to stderr. */
-
-DEBUG_FUNCTION void
-debug_all_asserts (void)
-{
- dump_all_asserts (stderr);
-}
-
/* Dump assert_info structure. */
void
BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
must not be NULL. */
-static void
-register_new_assert_for (tree name, tree expr,
- enum tree_code comp_code,
- tree val,
- basic_block bb,
- edge e,
- gimple_stmt_iterator si)
+void
+vrp_insert::register_new_assert_for (tree name, tree expr,
+ enum tree_code comp_code,
+ tree val,
+ basic_block bb,
+ edge e,
+ gimple_stmt_iterator si)
{
assert_locus *n, *loc, *last_loc;
basic_block dest_bb;
/* Finish found ASSERTS for E and register them at GSI. */
-static void
-finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
- vec<assert_info> &asserts)
+void
+vrp_insert::finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
+ vec<assert_info> &asserts)
{
for (unsigned i = 0; i < asserts.length (); ++i)
/* Only register an ASSERT_EXPR if NAME was found in the sub-graph
the predicate operands, an assert location node is added to the
list of assertions for the corresponding operands. */
-static void
-find_conditional_asserts (basic_block bb, gcond *last)
+void
+vrp_insert::find_conditional_asserts (basic_block bb, gcond *last)
{
gimple_stmt_iterator bsi;
tree op;
the predicate operands, an assert location node is added to the
list of assertions for the corresponding operands. */
-static void
-find_switch_asserts (basic_block bb, gswitch *last)
+void
+vrp_insert::find_switch_asserts (basic_block bb, gswitch *last)
{
gimple_stmt_iterator bsi;
tree op;
for (idx = 0; idx < n; ++idx)
{
ci[idx].expr = gimple_switch_label (last, idx);
- ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
+ ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
}
edge default_edge = find_edge (bb, ci[0].bb);
qsort (ci, n, sizeof (struct case_info), compare_case_labels);
dominator tree, only the location dominating all the dereference of
P_4 will receive an ASSERT_EXPR. */
-static void
-find_assert_locations_1 (basic_block bb, sbitmap live)
+void
+vrp_insert::find_assert_locations_1 (basic_block bb, sbitmap live)
{
gimple *last;
/* Do an RPO walk over the function computing SSA name liveness
on-the-fly and deciding on assert expressions to insert. */
-static void
-find_assert_locations (void)
+void
+vrp_insert::find_assert_locations (void)
{
- int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
- int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
- int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
+ int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
+ int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
int rpo_cnt, i;
- live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
+ live = XCNEWVEC (sbitmap, last_basic_block_for_fn (fun));
rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
for (i = 0; i < rpo_cnt; ++i)
bb_rpo[rpo[i]] = i;
for (i = rpo_cnt - 1; i >= 0; --i)
{
- basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
+ basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
edge e;
edge_iterator ei;
XDELETEVEC (rpo);
XDELETEVEC (bb_rpo);
XDELETEVEC (last_rpo);
- for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
+ for (i = 0; i < last_basic_block_for_fn (fun); ++i)
if (live[i])
sbitmap_free (live[i]);
XDELETEVEC (live);
in NEED_ASSERT_FOR. The list of assertions to be inserted are
found in ASSERTS_FOR[i]. */
-static void
-process_assert_insertions (void)
+void
+vrp_insert::process_assert_insertions ()
{
unsigned i;
bitmap_iterator bi;
if (update_edges_p)
gsi_commit_edge_inserts ();
- statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
+ statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
num_asserts);
}
take in different paths of the code, simply by checking the reaching
definition of 'x'. */
-static void
-insert_range_assertions (void)
+void
+vrp_insert::insert_range_assertions (void)
{
need_assert_for = BITMAP_ALLOC (NULL);
asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
- void vrp_initialize (void);
+  /* Function to work on.  */
+  struct function *fun;
+
+ void vrp_initialize (struct function *);
void vrp_finalize (bool);
void check_all_array_refs (void);
bool check_array_ref (location_t, tree, bool);
vrp_prop::check_all_array_refs ()
{
check_array_bounds_dom_walker w (this);
- w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ w.walk (ENTRY_BLOCK_PTR_FOR_FN (fun));
}
/* Return true if all imm uses of VAR are either in STMT, or
multiple ranges to be associated with one SSA_NAME. */
static void
-remove_range_assertions (void)
+remove_range_assertions (struct function *fun)
{
basic_block bb;
gimple_stmt_iterator si;
/* Note that the BSI iterator bump happens at the bottom of the
loop and no bump is necessary if we're removing the statement
referenced by the current BSI. */
- FOR_EACH_BB_FN (bb, cfun)
+ FOR_EACH_BB_FN (bb, fun)
for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
{
gimple *stmt = gsi_stmt (si);
/* Initialization required by ssa_propagate engine. */
void
-vrp_prop::vrp_initialize ()
+vrp_prop::vrp_initialize (struct function *fn)
{
basic_block bb;
-  FOR_EACH_BB_FN (bb, cfun)
+  fun = fn;
+  FOR_EACH_BB_FN (bb, fun)
{
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
for later realization. */
static void
-identify_jump_threads (class vr_values *vr_values)
+identify_jump_threads (struct function *fun, class vr_values *vr_values)
{
/* Ugh. When substituting values earlier in this pass we can
wipe the dominance information. So rebuild the dominator
vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
walker.vr_values = vr_values;
- walker.walk (cfun->cfg->x_entry_block_ptr);
+ walker.walk (fun->cfg->x_entry_block_ptr);
/* We do not actually update the CFG or SSA graphs at this point as
ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
check_array_bounds_dom_walker's ctor; vrp_folder may clear
it from some edges. */
if (warn_array_bounds && warn_array_bounds_p)
- set_all_edges_as_executable (cfun);
+ set_all_edges_as_executable (fun);
class vrp_folder vrp_folder;
vrp_folder.vr_values = &vr_values;
probabilities to aid branch prediction. */
static unsigned int
-execute_vrp (bool warn_array_bounds_p)
+execute_vrp (struct function *fun, bool warn_array_bounds_p)
{
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
/* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
Inserting assertions may split edges which will invalidate
EDGE_DFS_BACK. */
- insert_range_assertions ();
+ vrp_insert (fun).insert_range_assertions ();
threadedge_initialize_values ();
mark_dfs_back_edges ();
class vrp_prop vrp_prop;
- vrp_prop.vrp_initialize ();
+ vrp_prop.vrp_initialize (fun);
vrp_prop.ssa_propagate ();
vrp_prop.vrp_finalize (warn_array_bounds_p);
/* We must identify jump threading opportunities before we release
the datastructures built by VRP. */
- identify_jump_threads (&vrp_prop.vr_values);
+ identify_jump_threads (fun, &vrp_prop.vr_values);
/* A comparison of an SSA_NAME against a constant where the SSA_NAME
was set by a type conversion can often be rewritten to use the
So that transformation is not performed until after jump threading
is complete. */
basic_block bb;
- FOR_EACH_BB_FN (bb, cfun)
+ FOR_EACH_BB_FN (bb, fun)
{
gimple *last = last_stmt (bb);
if (last && gimple_code (last) == GIMPLE_COND)
vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
}
- free_numbers_of_iterations_estimates (cfun);
+ free_numbers_of_iterations_estimates (fun);
/* ASSERT_EXPRs must be removed before finalizing jump threads
as finalizing jump threads calls the CFG cleanup code which
does not properly handle ASSERT_EXPRs. */
- remove_range_assertions ();
+ remove_range_assertions (fun);
/* If we exposed any new variables, go ahead and put them into
SSA form now, before we handle jump threading. This simplifies
warn_array_bounds_p = param;
}
virtual bool gate (function *) { return flag_tree_vrp != 0; }
- virtual unsigned int execute (function *)
- { return execute_vrp (warn_array_bounds_p); }
+ virtual unsigned int execute (function *fun)
+ { return execute_vrp (fun, warn_array_bounds_p); }
private:
bool warn_array_bounds_p;