{
value_range nr;
nr.type = rtype;
- nr.min = wide_int_to_tree (TREE_TYPE (var), min);
- nr.max = wide_int_to_tree (TREE_TYPE (var), max);
+ /* Range info on SSA names doesn't carry overflow information,
+    so make sure to preserve the overflow bit on the lattice. */
+ if (new_vr->type == VR_RANGE
+     && is_negative_overflow_infinity (new_vr->min)
+     && wi::eq_p (new_vr->min, min))
+   nr.min = new_vr->min;
+ else
+   nr.min = wide_int_to_tree (TREE_TYPE (var), min);
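+ /* Likewise preserve a positive overflow infinity in the upper bound. */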
+ if (new_vr->type == VR_RANGE
+     && is_positive_overflow_infinity (new_vr->max)
+     && wi::eq_p (new_vr->max, max))
+   nr.max = new_vr->max;
+ else
+   nr.max = wide_int_to_tree (TREE_TYPE (var), max);
nr.equiv = NULL;
vrp_intersect_ranges (new_vr, &nr);
}
{
/* LT is folded faster than GE and others. Inline the common case. */
if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
- return tree_int_cst_lt (val, val2);
+ {
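+   /* Do not use tree_int_cst_lt on a positive overflow infinity;
+      it would be compared like a regular TYPE_MAX value. */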
+   if (! is_positive_overflow_infinity (val2))
+     return tree_int_cst_lt (val, val2);
+ }
else
{
tree tcmp;
value_range_constant_singleton (value_range *vr)
{
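+ /* Use vrp_operand_equal_p so that overflow infinities are taken into
+    account when comparing the bounds. */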
if (vr->type == VR_RANGE
- && operand_equal_p (vr->min, vr->max, 0)
+ && vrp_operand_equal_p (vr->min, vr->max)
&& is_gimple_min_invariant (vr->min))
return vr->min;
{
value_range *vr = get_value_range (name);
if (vr->type == VR_RANGE
- && (vr->min == vr->max
- || operand_equal_p (vr->min, vr->max, 0)))
+ && vrp_operand_equal_p (vr->min, vr->max))
return vr->min;
}
return name;
enum value_range_type vr1type,
tree vr1min, tree vr1max)
{
- bool mineq = operand_equal_p (*vr0min, vr1min, 0);
- bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
+ bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+ bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
/* [] is vr0, () is vr1 in the following classification comments. */
if (mineq && maxeq)
enum value_range_type vr1type,
tree vr1min, tree vr1max)
{
- bool mineq = operand_equal_p (*vr0min, vr1min, 0);
- bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
+ bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+ bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
/* [] is vr0, () is vr1 in the following classification comments. */
if (mineq && maxeq)
print_gimple_stmt (dump_file, phi, 0, dump_flags);
}
- bool may_simulate_again = false;
+ bool may_simulate_backedge_again = false;
edges = 0;
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
/* See if we are eventually going to change one of the args. */
gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
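+ /* Record whether this PHI may be simulated again because an argument
+    on a backedge may still change. */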
if (! gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt))
- may_simulate_again = true;
+ && prop_simulate_again_p (def_stmt)
+ && e->flags & EDGE_DFS_BACK)
+ may_simulate_backedge_again = true;
vr_arg = *(get_value_range (arg));
/* To prevent infinite iterations in the algorithm, derive ranges
   when the new value is slightly bigger or smaller than the
   previous one. We don't do this if we have seen a new executable
   edge; this helps us avoid an overflow infinity for conditionals
   which are not in a loop. If the old value-range was VR_UNDEFINED
   use the updated range and iterate one more time. If we will not
- simulate this PHI again with the same number of edges then iterate
- one more time. */
+ simulate this PHI again via the backedge, allow us to iterate. */
if (edges > 0
&& gimple_phi_num_args (phi) > 1
&& edges == old_edges
&& lhs_vr->type != VR_UNDEFINED
- && may_simulate_again)
+ && may_simulate_backedge_again)
{
/* Compare old and new ranges, fall back to varying if the
values are not comparable. */