{
public:
name_prop (loop_info &li) : m_li (li) {}
- tree get_value (tree) FINAL OVERRIDE;
+ tree get_value (tree, gimple *) FINAL OVERRIDE;
private:
/* Information about the versioning we've performed on the loop. */
Return the new value if so, otherwise return null. */
tree
-loop_versioning::name_prop::get_value (tree val)
+loop_versioning::name_prop::get_value (tree val,
+ gimple *stmt ATTRIBUTE_UNUSED)
{
if (TREE_CODE (val) == SSA_NAME
&& bitmap_bit_p (&m_li.unity_names, SSA_NAME_VERSION (val)))
#include "gimple-ssa-evrp-analyze.h"
class evrp_folder : public substitute_and_fold_engine
-{
- public:
- tree get_value (tree) FINAL OVERRIDE;
- evrp_folder (class vr_values *vr_values_) : vr_values (vr_values_) { }
- bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
- { return vr_values->simplify_stmt_using_ranges (gsi); }
- class vr_values *vr_values;
-
- private:
- DISABLE_COPY_AND_ASSIGN (evrp_folder);
-};
-
-tree
-evrp_folder::get_value (tree op)
-{
- return vr_values->op_with_constant_singleton_value_range (op);
-}
-
-/* evrp_dom_walker visits the basic blocks in the dominance order and set
- the Value Ranges (VR) for SSA_NAMEs in the scope. Use this VR to
- discover more VRs. */
-
-class evrp_dom_walker : public dom_walker
{
public:
- evrp_dom_walker ()
- : dom_walker (CDI_DOMINATORS),
- evrp_range_analyzer (true),
- evrp_folder (evrp_range_analyzer.get_vr_values ())
- {
- need_eh_cleanup = BITMAP_ALLOC (NULL);
- }
- ~evrp_dom_walker ()
- {
- BITMAP_FREE (need_eh_cleanup);
- }
- virtual edge before_dom_children (basic_block);
- virtual void after_dom_children (basic_block);
- void cleanup (void);
-
- private:
- DISABLE_COPY_AND_ASSIGN (evrp_dom_walker);
- bitmap need_eh_cleanup;
- auto_vec<gimple *> stmts_to_fixup;
- auto_vec<gimple *> stmts_to_remove;
-
- class evrp_range_analyzer evrp_range_analyzer;
- class evrp_folder evrp_folder;
+ evrp_folder () : m_range_analyzer (/*update_global_ranges=*/true),
+ m_vr_values (m_range_analyzer.get_vr_values ())
+ {
+ }
+
+ ~evrp_folder ()
+ {
+ m_vr_values->cleanup_edges_and_switches ();
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
+ m_range_analyzer.dump_all_value_ranges (dump_file);
+ fprintf (dump_file, "\n");
+ }
+ }
+
+ tree get_value (tree op, gimple *stmt ATTRIBUTE_UNUSED) OVERRIDE
+ {
+ return m_vr_values->op_with_constant_singleton_value_range (op);
+ }
+
+ void pre_fold_bb (basic_block bb) OVERRIDE
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "evrp visiting BB%d\n", bb->index);
+ m_range_analyzer.enter (bb);
+ }
+
+ void pre_fold_stmt (gimple *stmt) OVERRIDE
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "evrp visiting stmt ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+ m_range_analyzer.record_ranges_from_stmt (stmt, false);
+ }
+
+ bool fold_stmt (gimple_stmt_iterator *gsi) OVERRIDE
+ {
+ return m_vr_values->simplify_stmt_using_ranges (gsi);
+ }
+
+ void post_fold_bb (basic_block bb) OVERRIDE
+ {
+ m_range_analyzer.leave (bb);
+ }
+
+ void post_new_stmt (gimple *stmt) OVERRIDE
+ {
+ m_vr_values->set_defs_to_varying (stmt);
+ }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (evrp_folder);
+ class evrp_range_analyzer m_range_analyzer;
+ class vr_values *m_vr_values;
};
-edge
-evrp_dom_walker::before_dom_children (basic_block bb)
-{
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Visiting BB%d\n", bb->index);
-
- evrp_range_analyzer.enter (bb);
-
- for (gphi_iterator gpi = gsi_start_phis (bb);
- !gsi_end_p (gpi); gsi_next (&gpi))
- {
- gphi *phi = gpi.phi ();
- tree lhs = PHI_RESULT (phi);
- if (virtual_operand_p (lhs))
- continue;
-
- const value_range_equiv *vr = evrp_range_analyzer.get_value_range (lhs);
- /* Mark PHIs whose lhs we fully propagate for removal. */
- tree val;
- if (vr->singleton_p (&val) && may_propagate_copy (lhs, val))
- {
- stmts_to_remove.safe_push (phi);
- continue;
- }
- }
-
- edge taken_edge = NULL;
-
- /* Visit all other stmts and discover any new VRs possible. */
- for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
- !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple *stmt = gsi_stmt (gsi);
- tree output = NULL_TREE;
- gimple *old_stmt = stmt;
- bool was_noreturn = (is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt));
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Visiting stmt ");
- print_gimple_stmt (dump_file, stmt, 0);
- }
-
- evrp_range_analyzer.record_ranges_from_stmt (stmt, false);
-
- if (gcond *cond = dyn_cast <gcond *> (stmt))
- {
- evrp_range_analyzer.vrp_visit_cond_stmt (cond, &taken_edge);
- if (taken_edge)
- {
- if (taken_edge->flags & EDGE_TRUE_VALUE)
- gimple_cond_make_true (cond);
- else if (taken_edge->flags & EDGE_FALSE_VALUE)
- gimple_cond_make_false (cond);
- else
- gcc_unreachable ();
- update_stmt (stmt);
- }
- }
- else if (stmt_interesting_for_vrp (stmt))
- {
- output = get_output_for_vrp (stmt);
- if (output)
- {
- const value_range_equiv *vr
- = evrp_range_analyzer.get_value_range (output);
-
- /* Mark stmts whose output we fully propagate for removal. */
- tree val;
- if (vr->singleton_p (&val)
- && may_propagate_copy (output, val)
- && !stmt_could_throw_p (cfun, stmt)
- && !gimple_has_side_effects (stmt))
- {
- stmts_to_remove.safe_push (stmt);
- continue;
- }
- }
- }
-
- /* Try folding stmts with the VR discovered. */
- bool did_replace = evrp_folder.replace_uses_in (stmt);
- gimple_stmt_iterator prev_gsi = gsi;
- gsi_prev (&prev_gsi);
- if (fold_stmt (&gsi, follow_single_use_edges)
- || did_replace)
- {
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
- did_replace = true;
- }
- if (evrp_folder.simplify_stmt_using_ranges (&gsi))
- {
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
- did_replace = true;
- }
-
- if (did_replace)
- {
- /* If we wound up generating new stmts during folding
- drop all their defs to VARYING. We can't easily
- process them because we've already instantiated
- ranges on uses on STMT that only hold after it. */
- if (gsi_end_p (prev_gsi))
- prev_gsi = gsi_start_bb (bb);
- else
- gsi_next (&prev_gsi);
- while (gsi_stmt (prev_gsi) != gsi_stmt (gsi))
- {
- evrp_range_analyzer.get_vr_values ()
- ->set_defs_to_varying (gsi_stmt (prev_gsi));
- gsi_next (&prev_gsi);
- }
-
- /* If we cleaned up EH information from the statement,
- remove EH edges. */
- if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
- bitmap_set_bit (need_eh_cleanup, bb->index);
-
- /* If we turned a not noreturn call into a noreturn one
- schedule it for fixup. */
- if (!was_noreturn
- && is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt))
- stmts_to_fixup.safe_push (stmt);
-
- if (gimple_assign_single_p (stmt))
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- if (TREE_CODE (rhs) == ADDR_EXPR)
- recompute_tree_invariant_for_addr_expr (rhs);
- }
- }
- }
-
- /* Visit BB successor PHI nodes and replace PHI args. */
- edge e;
- edge_iterator ei;
- FOR_EACH_EDGE (e, ei, bb->succs)
- {
- for (gphi_iterator gpi = gsi_start_phis (e->dest);
- !gsi_end_p (gpi); gsi_next (&gpi))
- {
- gphi *phi = gpi.phi ();
- use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
- tree arg = USE_FROM_PTR (use_p);
- if (TREE_CODE (arg) != SSA_NAME
- || virtual_operand_p (arg))
- continue;
- const value_range_equiv
- *vr = evrp_range_analyzer.get_value_range (arg);
- tree val;
- if (vr->singleton_p (&val) && may_propagate_copy (arg, val))
- propagate_value (use_p, val);
- }
- }
-
- return taken_edge;
-}
-
-void
-evrp_dom_walker::after_dom_children (basic_block bb)
-{
- evrp_range_analyzer.leave (bb);
-}
-
-/* Perform any cleanups after the main phase of EVRP has completed. */
-
-void
-evrp_dom_walker::cleanup (void)
-{
- if (dump_file)
- {
- fprintf (dump_file, "\nValue ranges after Early VRP:\n\n");
- evrp_range_analyzer.dump_all_value_ranges (dump_file);
- fprintf (dump_file, "\n");
- }
-
- /* Remove stmts in reverse order to make debug stmt creation possible. */
- while (! stmts_to_remove.is_empty ())
- {
- gimple *stmt = stmts_to_remove.pop ();
- if (dump_file && dump_flags & TDF_DETAILS)
- {
- fprintf (dump_file, "Removing dead stmt ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
- gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
- if (gimple_code (stmt) == GIMPLE_PHI)
- remove_phi_node (&gsi, true);
- else
- {
- unlink_stmt_vdef (stmt);
- gsi_remove (&gsi, true);
- release_defs (stmt);
- }
- }
-
- if (!bitmap_empty_p (need_eh_cleanup))
- gimple_purge_all_dead_eh_edges (need_eh_cleanup);
-
- /* Fixup stmts that became noreturn calls. This may require splitting
- blocks and thus isn't possible during the dominator walk. Do this
- in reverse order so we don't inadvertedly remove a stmt we want to
- fixup by visiting a dominating now noreturn call first. */
- while (!stmts_to_fixup.is_empty ())
- {
- gimple *stmt = stmts_to_fixup.pop ();
- fixup_noreturn_call (stmt);
- }
-
- evrp_folder.vr_values->cleanup_edges_and_switches ();
-}
-
/* Main entry point for the early vrp pass which is a simplified non-iterative
version of vrp where basic blocks are visited in dominance order. Value
ranges discovered in early vrp will also be used by ipa-vrp. */
static unsigned int
execute_early_vrp ()
{
- /* Ideally this setup code would move into the ctor for the dominator
- walk. However, this setup can change the number of blocks which
+  /* Ideally this setup code would move into the ctor for the folder.
+     However, this setup can change the number of blocks which
invalidates the internal arrays that are set up by the dominator
- walker. */
+ walker in substitute_and_fold_engine. */
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
scev_initialize ();
calculate_dominance_info (CDI_DOMINATORS);
- /* Walk stmts in dominance order and propagate VRP. */
- evrp_dom_walker walker;
- walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- walker.cleanup ();
+ evrp_folder folder;
+ folder.substitute_and_fold ();
scev_finalize ();
loop_optimizer_finalize ();
{
return new pass_early_vrp (ctxt);
}
-
{
char d[33];
- /* Bcopy is transformed into memmove and those calls are expanded
- inline in EVRP, before DSE runs, so this test doesn't actually
- verify that DSE does its job. */
+ /* Bcopy is transformed into memmove before DSE runs, so this test
+ doesn't actually verify that DSE does its job. */
__builtin_bcopy (s, d, sizeof d);
__builtin_bcopy (s, d, sizeof d);
}
/* { dg-final { scan-tree-dump-times "builtin_memset" 1 "dse1" } } */
-/* { dg-final { scan-tree-dump-not "builtin_(bcopy|bzero|memcpy|memmove)" "dse1" } } */
+
+/* Merging the evrp folder into substitute_and_fold_engine shuffled
+ the order of gimple_fold a bit, so evrp is no longer folding the
+ memmove inline. This folding is instead done by forwprop. Thus, I
+   have removed the |memmove in the test below as this is not done
+ until after dse.
+
+ What happened was that the propagator engine only called gimple
+ fold if replace_uses_in() was successful. On the other hand, EVRP
+ called gimple fold regardless.
+
+   If we really care about previous behavior, we could put a call to
+   the generic gimple ::fold_stmt into evrp_folder::fold_stmt ().  */
+/* { dg-final { scan-tree-dump-not "builtin_(bcopy|bzero|memcpy)" "dse1" } } */
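A minimal sketch of that suggestion, assuming the evrp_folder class added by
this patch, the two-argument ::fold_stmt overload, and the
follow_single_use_edges valueizer already used by the old evrp_dom_walker;
illustrative only:

  bool fold_stmt (gimple_stmt_iterator *gsi) OVERRIDE
  {
    /* Run generic gimple folding first, mirroring the old
       evrp_dom_walker, so calls such as memmove can still be
       expanded inline before DSE.  */
    bool changed = ::fold_stmt (gsi, follow_single_use_edges);
    /* Then simplify using the discovered value ranges.  */
    changed |= m_vr_values->simplify_stmt_using_ranges (gsi);
    return changed;
  }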
class ccp_folder : public substitute_and_fold_engine
{
public:
- tree get_value (tree) FINAL OVERRIDE;
+ tree get_value (tree, gimple *) FINAL OVERRIDE;
bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
};
of calling member functions. */
tree
-ccp_folder::get_value (tree op)
+ccp_folder::get_value (tree op, gimple *stmt ATTRIBUTE_UNUSED)
{
return get_constant_value (op);
}
class copy_folder : public substitute_and_fold_engine
{
public:
- tree get_value (tree) FINAL OVERRIDE;
+ tree get_value (tree, gimple *) FINAL OVERRIDE;
};
/* Callback for substitute_and_fold to get at the final copy-of values. */
tree
-copy_folder::get_value (tree name)
+copy_folder::get_value (tree name, gimple *stmt ATTRIBUTE_UNUSED)
{
tree val;
if (SSA_NAME_VERSION (name) >= n_copy_of)
FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
{
tree tuse = USE_FROM_PTR (use);
- tree val = get_value (tuse);
+ tree val = get_value (tuse, stmt);
if (val == tuse || val == NULL_TREE)
continue;
size_t i;
bool replaced = false;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Folding PHI node: ");
- print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
- }
-
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree arg = gimple_phi_arg_def (phi, i);
if (TREE_CODE (arg) == SSA_NAME)
{
- tree val = get_value (arg);
+ tree val = get_value (arg, phi);
if (val && val != arg && may_propagate_copy (arg, val))
{
}
virtual edge before_dom_children (basic_block);
- virtual void after_dom_children (basic_block) {}
+ virtual void after_dom_children (basic_block bb)
+ {
+ substitute_and_fold_engine->post_fold_bb (bb);
+ }
bool something_changed;
vec<gimple *> stmts_to_remove;
bitmap need_eh_cleanup;
class substitute_and_fold_engine *substitute_and_fold_engine;
+
+private:
+ void foreach_new_stmt_in_bb (gimple_stmt_iterator old_gsi,
+ gimple_stmt_iterator new_gsi);
};
+/* Call post_new_stmt for each new statement that has been added
+   to the current BB.  OLD_GSI is the statement iterator before the BB
+   changes occurred.  NEW_GSI is the iterator which may contain new
+   statements.  */
+
+void
+substitute_and_fold_dom_walker::foreach_new_stmt_in_bb
+ (gimple_stmt_iterator old_gsi,
+ gimple_stmt_iterator new_gsi)
+{
+ basic_block bb = gsi_bb (new_gsi);
+ if (gsi_end_p (old_gsi))
+ old_gsi = gsi_start_bb (bb);
+ else
+ gsi_next (&old_gsi);
+ while (gsi_stmt (old_gsi) != gsi_stmt (new_gsi))
+ {
+ gimple *stmt = gsi_stmt (old_gsi);
+ substitute_and_fold_engine->post_new_stmt (stmt);
+ gsi_next (&old_gsi);
+ }
+}
+
+void
+substitute_and_fold_engine::propagate_into_phi_args (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+ /* Visit BB successor PHI nodes and replace PHI args. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ for (gphi_iterator gpi = gsi_start_phis (e->dest);
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
+ tree arg = USE_FROM_PTR (use_p);
+ if (TREE_CODE (arg) != SSA_NAME
+ || virtual_operand_p (arg))
+ continue;
+ tree val = get_value (arg, phi);
+ if (val && may_propagate_copy (arg, val))
+ propagate_value (use_p, val);
+ }
+ }
+}
+
edge
substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
{
+ substitute_and_fold_engine->pre_fold_bb (bb);
+
/* Propagate known values into PHI nodes. */
for (gphi_iterator i = gsi_start_phis (bb);
!gsi_end_p (i);
tree res = gimple_phi_result (phi);
if (virtual_operand_p (res))
continue;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Folding PHI node: ");
+ print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
+ }
if (res && TREE_CODE (res) == SSA_NAME)
{
- tree sprime = substitute_and_fold_engine->get_value (res);
+ tree sprime = substitute_and_fold_engine->get_value (res, phi);
if (sprime
&& sprime != res
&& may_propagate_copy (res, sprime))
{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Queued PHI for removal. Folds to: ");
+ print_generic_expr (dump_file, sprime);
+ fprintf (dump_file, "\n");
+ }
stmts_to_remove.safe_push (phi);
continue;
}
bool did_replace;
gimple *stmt = gsi_stmt (i);
+ substitute_and_fold_engine->pre_fold_stmt (stmt);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Folding statement: ");
+ print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+ }
+
/* No point propagating into a stmt we have a value for we
can propagate into all uses. Mark it for removal instead. */
tree lhs = gimple_get_lhs (stmt);
if (lhs && TREE_CODE (lhs) == SSA_NAME)
{
- tree sprime = substitute_and_fold_engine->get_value (lhs);
+ tree sprime = substitute_and_fold_engine->get_value (lhs, stmt);
if (sprime
&& sprime != lhs
&& may_propagate_copy (lhs, sprime)
&& (!is_gimple_assign (stmt)
|| gimple_assign_rhs_code (stmt) != ASSERT_EXPR))
{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Queued stmt for removal. Folds to: ");
+ print_generic_expr (dump_file, sprime);
+ fprintf (dump_file, "\n");
+ }
stmts_to_remove.safe_push (stmt);
continue;
}
/* Replace the statement with its folded version and mark it
folded. */
did_replace = false;
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Folding statement: ");
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
- }
-
gimple *old_stmt = stmt;
bool was_noreturn = (is_gimple_call (stmt)
&& gimple_call_noreturn_p (stmt));
/* Replace real uses in the statement. */
did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);
+ gimple_stmt_iterator prev_gsi = i;
+ gsi_prev (&prev_gsi);
+
/* If we made a replacement, fold the statement. */
if (did_replace)
{
specific information. Do this before propagating
into the stmt to not disturb pass specific information. */
update_stmt_if_modified (stmt);
- if (substitute_and_fold_engine->fold_stmt(&i))
+ if (substitute_and_fold_engine->fold_stmt (&i))
{
did_replace = true;
prop_stats.num_stmts_folded++;
/* Now cleanup. */
if (did_replace)
{
+ foreach_new_stmt_in_bb (prev_gsi, i);
+
/* If we cleaned up EH information from the statement,
remove EH edges. */
if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
fprintf (dump_file, "Not folded\n");
}
}
+
+ substitute_and_fold_engine->propagate_into_phi_args (bb);
+
return NULL;
}
: fold_all_stmts (fold_all_stmts) { }
virtual ~substitute_and_fold_engine (void) { }
virtual bool fold_stmt (gimple_stmt_iterator *) { return false; }
- virtual tree get_value (tree) { return NULL_TREE; }
+ virtual tree get_value (tree, gimple *) { return NULL_TREE; }
bool substitute_and_fold (basic_block = NULL);
bool replace_uses_in (gimple *);
bool replace_phi_args_in (gphi *);
+  /* Called when the dominator walk enters a basic block.  */
+  virtual void pre_fold_bb (basic_block) { }
+  /* Called when the dominator walk leaves a basic block.  */
+  virtual void post_fold_bb (basic_block) { }
+  /* Called before each statement is folded.  */
+  virtual void pre_fold_stmt (gimple *) { }
+  /* Called for each statement created while folding.  */
+  virtual void post_new_stmt (gimple *) { }
+
+  /* Propagate known values into the PHI arguments on BB's outgoing
+     edges.  */
+  void propagate_into_phi_args (basic_block);
+
/* Users like VRP can set this when they want to perform
folding for every propagation. */
bool fold_all_stmts;
{
public:
vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
- tree get_value (tree) FINAL OVERRIDE;
+  tree get_value (tree, gimple *) FINAL OVERRIDE;
bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
class vr_values *vr_values;
Implemented as a pure wrapper right now, but this will change. */
tree
-vrp_folder::get_value (tree op)
+vrp_folder::get_value (tree op, gimple *stmt ATTRIBUTE_UNUSED)
{
return op_with_constant_singleton_value_range (op);
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "\nVisiting statement:\n");
+ fprintf (dump_file, "\nextract_range_from_stmt visiting:\n");
print_gimple_stmt (dump_file, stmt, 0, dump_flags);
}
return true;
}
+/* If COND can be folded entirely as TRUE or FALSE, rewrite the
+ conditional as such, and return TRUE. */
+
+bool
+vr_values::fold_cond (gcond *cond)
+{
+ /* ?? vrp_folder::fold_predicate_in() is a superset of this. At
+ some point we should merge all variants of this code. */
+ edge taken_edge;
+ vrp_visit_cond_stmt (cond, &taken_edge);
+ if (taken_edge)
+ {
+ if (taken_edge->flags & EDGE_TRUE_VALUE)
+ gimple_cond_make_true (cond);
+ else if (taken_edge->flags & EDGE_FALSE_VALUE)
+ gimple_cond_make_false (cond);
+ else
+ gcc_unreachable ();
+ update_stmt (cond);
+ return true;
+ }
+ return false;
+}
+
/* Simplify a conditional using a relational operator to an equality
test if the range information indicates only one value can satisfy
the original conditional. */
tree op1 = gimple_cond_rhs (stmt);
enum tree_code cond_code = gimple_cond_code (stmt);
+ if (fold_cond (stmt))
+ return true;
+
if (cond_code != NE_EXPR
&& cond_code != EQ_EXPR
&& TREE_CODE (op0) == SSA_NAME
bool simplify_bit_ops_using_ranges (gimple_stmt_iterator *, gimple *);
bool simplify_min_or_max_using_ranges (gimple_stmt_iterator *, gimple *);
bool simplify_cond_using_ranges_1 (gcond *);
+ bool fold_cond (gcond *);
bool simplify_switch_using_ranges (gswitch *);
bool simplify_float_conversion_using_ranges (gimple_stmt_iterator *,
gimple *);