--- /dev/null
+/* PR tree-optimization/95852 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized -masm=att" } */
+/* { dg-final { scan-tree-dump-times " = \.MUL_OVERFLOW " 32 "optimized" } } */
+/* { dg-final { scan-assembler-times "\timull\t" 32 } } */
+/* { dg-final { scan-assembler-times "\tseto\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tsetno\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tjn\?o\t" 16 } } */
+
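+/* Each function below open-codes a multiplication overflow check: the
+   product is computed in the unsigned type (or converted with (int)
+   casts) and then divided by one operand and compared against the
+   other.  All 32 variants should be recognized as .MUL_OVERFLOW, as
+   verified by the scan-tree-dump-times directive above.  */
+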
+unsigned fn (void);
+
+int
+f1 (unsigned x, unsigned y, unsigned *res)
+{
+ *res = x * y;
+ return x && ((int) *res / (int) x) != (int) y;
+}
+
+unsigned
+f2 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (x && ((int) r / (int) x) != (int) y)
+ return fn ();
+ return r;
+}
+
+int
+f3 (unsigned x, unsigned y, unsigned *res)
+{
+ *res = x * y;
+ return !x || ((int) *res / (int) x) == (int) y;
+}
+
+unsigned
+f4 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (!x || ((int) r / (int) x) == (int) y)
+ return fn ();
+ return r;
+}
+
+int
+f5 (int x, int y, int *res)
+{
+ *res = (unsigned) x * y;
+ return x && (*res / x) != y;
+}
+
+int
+f6 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (x && (r / x) != y)
+ return fn ();
+ return r;
+}
+
+int
+f7 (int x, int y, int *res)
+{
+ *res = (unsigned) x * y;
+ return !x || (*res / x) == y;
+}
+
+int
+f8 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (!x || (r / x) == y)
+ return fn ();
+ return r;
+}
+
+int
+f9 (unsigned x, unsigned y, unsigned *res)
+{
+ *res = x * y;
+ return y && ((int) *res / (int) y) != (int) x;
+}
+
+unsigned
+f10 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (y && ((int) r / (int) y) != (int) x)
+ return fn ();
+ return r;
+}
+
+int
+f11 (unsigned x, unsigned y, unsigned *res)
+{
+ *res = x * y;
+ return !y || ((int) *res / (int) y) == (int) x;
+}
+
+unsigned
+f12 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (!y || ((int) r / (int) y) == (int) x)
+ return fn ();
+ return r;
+}
+
+int
+f13 (int x, int y, int *res)
+{
+ *res = (unsigned) x * y;
+ return y && (*res / y) != x;
+}
+
+int
+f14 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (y && (r / y) != x)
+ return fn ();
+ return r;
+}
+
+int
+f15 (int x, int y, int *res)
+{
+ *res = (unsigned) x * y;
+ return !y || (*res / y) == x;
+}
+
+int
+f16 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (!y || (r / y) == x)
+ return fn ();
+ return r;
+}
+
+int
+f17 (unsigned x, unsigned *res)
+{
+ *res = x * 35U;
+ return x && ((int) *res / (int) x) != 35;
+}
+
+unsigned
+f18 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (x && ((int) r / (int) x) != 35)
+ return fn ();
+ return r;
+}
+
+int
+f19 (unsigned x, unsigned *res)
+{
+ *res = x * 35U;
+ return !x || ((int) *res / (int) x) == 35;
+}
+
+unsigned
+f20 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (!x || ((int) r / (int) x) == 35)
+ return fn ();
+ return r;
+}
+
+int
+f21 (int x, int *res)
+{
+ *res = (unsigned) x * 35;
+ return x && (*res / x) != 35;
+}
+
+int
+f22 (int x)
+{
+ int r = (unsigned) x * 35;
+ if (x && (r / x) != 35)
+ return fn ();
+ return r;
+}
+
+int
+f23 (int x, int *res)
+{
+ *res = (unsigned) x * 35;
+ return !x || (*res / x) == 35;
+}
+
+int
+f24 (int x)
+{
+ int r = (unsigned) x * 35;
+ if (!x || (r / x) == 35)
+ return fn ();
+ return r;
+}
+
+int
+f25 (unsigned x, unsigned *res)
+{
+ *res = x * 35U;
+ return ((int) *res / 35) != (int) x;
+}
+
+unsigned
+f26 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (((int) r / 35) != (int) x)
+ return fn ();
+ return r;
+}
+
+int
+f27 (unsigned x, unsigned *res)
+{
+ *res = x * 35U;
+ return ((int) *res / 35) == (int) x;
+}
+
+unsigned
+f28 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (((int) r / 35) == (int) x)
+ return fn ();
+ return r;
+}
+
+int
+f29 (int x, int *res)
+{
+ *res = (unsigned) x * 35;
+ return 35 && (*res / 35) != x;
+}
+
+int
+f30 (int x)
+{
+ int r = (unsigned) x * 35;
+ if ((r / 35) != x)
+ return fn ();
+ return r;
+}
+
+int
+f31 (int x, int *res)
+{
+ *res = (unsigned) x * 35;
+ return (*res / 35) == x;
+}
+
+int
+f32 (int x)
+{
+ int r = (unsigned) x * 35;
+ if ((r / 35) == x)
+ return fn ();
+ return r;
+}
--- /dev/null
+/* PR tree-optimization/95852 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized -masm=att" } */
+/* { dg-final { scan-tree-dump-times " = \.MUL_OVERFLOW " 32 "optimized" } } */
+/* { dg-final { scan-assembler-times "\timull\t" 32 } } */
+/* { dg-final { scan-assembler-times "\tseto\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tsetno\t" 8 } } */
+/* { dg-final { scan-assembler-times "\tjn\?o\t" 16 } } */
+
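+/* Like the companion test above, but the product only lives in a local
+   variable whose only use feeds the guarded division, so the original
+   multiplication itself can be replaced by the .MUL_OVERFLOW,
+   REALPART_EXPR and IMAGPART_EXPR sequence.  */
+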
+unsigned fn (void);
+
+int
+f1 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ return x && ((int) r / (int) x) != (int) y;
+}
+
+unsigned
+f2 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (x && ((int) r / (int) x) != (int) y)
+ return fn ();
+ return 0;
+}
+
+int
+f3 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ return !x || ((int) r / (int) x) == (int) y;
+}
+
+unsigned
+f4 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (!x || ((int) r / (int) x) == (int) y)
+ return fn ();
+ return 0;
+}
+
+int
+f5 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ return x && (r / x) != y;
+}
+
+int
+f6 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (x && (r / x) != y)
+ return fn ();
+ return 0;
+}
+
+int
+f7 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ return !x || (r / x) == y;
+}
+
+int
+f8 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (!x || (r / x) == y)
+ return fn ();
+ return 0;
+}
+
+int
+f9 (unsigned x, unsigned y)
+{
+ unsigned r = x * y;
+ return y && ((int) r / (int) y) != (int) x;
+}
+
+unsigned
+f10 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (y && ((int) r / (int) y) != (int) x)
+ return fn ();
+ return 0;
+}
+
+int
+f11 (unsigned x, unsigned y)
+{
+ unsigned r = x * y;
+ return !y || ((int) r / (int) y) == (int) x;
+}
+
+unsigned
+f12 (unsigned x, unsigned y)
+{
+ unsigned int r = x * y;
+ if (!y || ((int) r / (int) y) == (int) x)
+ return fn ();
+ return 0;
+}
+
+int
+f13 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ return y && (r / y) != x;
+}
+
+int
+f14 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (y && (r / y) != x)
+ return fn ();
+ return 0;
+}
+
+int
+f15 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ return !y || (r / y) == x;
+}
+
+int
+f16 (int x, int y)
+{
+ int r = (unsigned) x * y;
+ if (!y || (r / y) == x)
+ return fn ();
+ return 0;
+}
+
+int
+f17 (unsigned x)
+{
+ unsigned r = x * 35U;
+ return x && ((int) r / (int) x) != 35;
+}
+
+unsigned
+f18 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (x && ((int) r / (int) x) != 35)
+ return fn ();
+ return 0;
+}
+
+int
+f19 (unsigned x)
+{
+ unsigned r = x * 35U;
+ return !x || ((int) r / (int) x) == 35;
+}
+
+unsigned
+f20 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (!x || ((int) r / (int) x) == 35)
+ return fn ();
+ return 0;
+}
+
+int
+f21 (int x)
+{
+ int r = (unsigned) x * 35;
+ return x && (r / x) != 35;
+}
+
+int
+f22 (int x)
+{
+ int r = (unsigned) x * 35;
+ if (x && (r / x) != 35)
+ return fn ();
+ return 0;
+}
+
+int
+f23 (int x)
+{
+ int r = (unsigned) x * 35;
+ return !x || (r / x) == 35;
+}
+
+int
+f24 (int x)
+{
+ int r = (unsigned) x * 35;
+ if (!x || (r / x) == 35)
+ return fn ();
+ return 0;
+}
+
+int
+f25 (unsigned x)
+{
+ unsigned r = x * 35U;
+ return ((int) r / 35) != (int) x;
+}
+
+unsigned
+f26 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (((int) r / 35) != (int) x)
+ return fn ();
+ return 0;
+}
+
+int
+f27 (unsigned x)
+{
+ unsigned r = x * 35U;
+ return !35 || ((int) r / 35) == (int) x;
+}
+
+unsigned
+f28 (unsigned x)
+{
+ unsigned int r = x * 35U;
+ if (((int) r / 35) == (int) x)
+ return fn ();
+ return 0;
+}
+
+int
+f29 (int x)
+{
+ int r = (unsigned) x * 35;
+ return 35 && (r / 35) != x;
+}
+
+int
+f30 (int x)
+{
+ int r = (unsigned) x * 35;
+ if ((r / 35) != x)
+ return fn ();
+ return 0;
+}
+
+int
+f31 (int x)
+{
+ int r = (unsigned) x * 35;
+ return (r / 35) == x;
+}
+
+int
+f32 (int x)
+{
+ int r = (unsigned) x * 35;
+ if ((r / 35) == x)
+ return fn ();
+ return 0;
+}
optimize the x_4(D) != 0 condition to 1. */
static void
-maybe_optimize_guarding_check (gimple **mul_stmts, gimple *cond_stmt,
+maybe_optimize_guarding_check (vec<gimple *> &mul_stmts, gimple *cond_stmt,
gimple *div_stmt, bool *cfg_changed)
{
basic_block bb = gimple_bb (cond_stmt);
!= TYPE_PRECISION (TREE_TYPE (rhs2))))
return;
}
- gimple_stmt_iterator gsi = gsi_for_stmt (div_stmt);
- gsi_prev_nondebug (&gsi);
- if (!gsi_end_p (gsi))
+ gimple_stmt_iterator gsi = gsi_after_labels (bb);
+ mul_stmts.quick_push (div_stmt);
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+ unsigned cast_count = 0;
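+ /* Verify that every statement between the start of BB and COND_STMT
+    is either one of the statements recorded in MUL_STMTS (which now
+    also contains DIV_STMT) or one of a few casts.  */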
+ while (gsi_stmt (gsi) != cond_stmt)
{
/* If original mul_stmt has a single use, allow it in the same bb,
we are looking then just at __builtin_mul_overflow_p.
Though, in that case the original mul_stmt will be replaced
by .MUL_OVERFLOW, REALPART_EXPR and IMAGPART_EXPR stmts. */
- for (int i = 2; i >= 0; --i)
- {
- if (gsi_stmt (gsi) != mul_stmts[i])
- return;
- gsi_prev_nondebug (&gsi);
- }
- /* Allow up to 2 extra casts. Given the way we check PHIs,
- nothing from this bb should be consumed by any other bb
- anyway. */
- for (int i = 0; i < 2 && !gsi_end_p (gsi); i++)
+ gimple *mul_stmt;
+ unsigned int i;
+ bool ok = false;
+ FOR_EACH_VEC_ELT (mul_stmts, i, mul_stmt)
{
- gimple *g = gsi_stmt (gsi);
- if (!gimple_assign_cast_p (g))
- return;
- gsi_prev_nondebug (&gsi);
+ if (gsi_stmt (gsi) == mul_stmt)
+ {
+ ok = true;
+ break;
+ }
}
- if (!gsi_end_p (gsi))
+ if (!ok && gimple_assign_cast_p (gsi_stmt (gsi)) && ++cast_count < 4)
+ ok = true;
+ if (!ok)
return;
+ gsi_next_nondebug (&gsi);
}
- gsi = gsi_for_stmt (div_stmt);
- gsi_next_nondebug (&gsi);
- if (gsi_stmt (gsi) != cond_stmt)
- return;
if (gimple_code (cond_stmt) == GIMPLE_COND)
{
basic_block succ_bb = other_edge->dest;
*cfg_changed = true;
}
+/* Helper function for arith_overflow_check_p.  Return true
+   if VAL1 is equal to VAL2 cast to the corresponding integral type
+   with the opposite signedness, or vice versa.  */
+
+static bool
+arith_cast_equal_p (tree val1, tree val2)
+{
+ if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
+ return wi::eq_p (wi::to_wide (val1), wi::to_wide (val2));
+ else if (TREE_CODE (val1) != SSA_NAME || TREE_CODE (val2) != SSA_NAME)
+ return false;
+ if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (val1))
+ && gimple_assign_rhs1 (SSA_NAME_DEF_STMT (val1)) == val2)
+ return true;
+ if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (val2))
+ && gimple_assign_rhs1 (SSA_NAME_DEF_STMT (val2)) == val1)
+ return true;
+ return false;
+}
+
/* Helper function of match_arith_overflow. Return 1
if USE_STMT is unsigned overflow check ovf != 0 for
STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
and 0 otherwise. */
static int
-arith_overflow_check_p (gimple *stmt, gimple *&use_stmt, tree maxval,
- tree *other)
+arith_overflow_check_p (gimple *stmt, gimple *cast_stmt, gimple *&use_stmt,
+ tree maxval, tree *other)
{
enum tree_code ccode = ERROR_MARK;
tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
enum tree_code code = gimple_assign_rhs_code (stmt);
- tree lhs = gimple_assign_lhs (stmt);
+ tree lhs = gimple_assign_lhs (cast_stmt ? cast_stmt : stmt);
tree rhs1 = gimple_assign_rhs1 (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
tree multop = NULL_TREE, divlhs = NULL_TREE;
return 0;
if (gimple_assign_rhs1 (use_stmt) != lhs)
return 0;
- if (gimple_assign_rhs2 (use_stmt) == rhs1)
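+ /* When the check is performed on a cast of the multiplication result,
+    the divisor may likewise be a cast of one of the multiplication
+    operands.  */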
+ if (cast_stmt)
+ {
+ if (arith_cast_equal_p (gimple_assign_rhs2 (use_stmt), rhs1))
+ multop = rhs2;
+ else if (arith_cast_equal_p (gimple_assign_rhs2 (use_stmt), rhs2))
+ multop = rhs1;
+ else
+ return 0;
+ }
+ else if (gimple_assign_rhs2 (use_stmt) == rhs1)
multop = rhs2;
else if (operand_equal_p (gimple_assign_rhs2 (use_stmt), rhs2, 0))
multop = rhs1;
r = a * b; _1 = r / b; _1 == a
r = a * b; _1 = r / a; _1 != b
r = a * b; _1 = r / b; _1 != a. */
- if (code == MULT_EXPR
- && ((crhs1 == divlhs && operand_equal_p (crhs2, multop, 0))
- || (crhs2 == divlhs && crhs1 == multop)))
- return ccode == NE_EXPR ? 1 : -1;
+ if (code == MULT_EXPR)
+ {
+ if (cast_stmt)
+ {
+ if ((crhs1 == divlhs && arith_cast_equal_p (crhs2, multop))
+ || (crhs2 == divlhs && arith_cast_equal_p (crhs1, multop)))
+ return ccode == NE_EXPR ? 1 : -1;
+ }
+ else if ((crhs1 == divlhs && operand_equal_p (crhs2, multop, 0))
+ || (crhs2 == divlhs && crhs1 == multop))
+ return ccode == NE_EXPR ? 1 : -1;
+ }
break;
default:
break;
gimple *add_stmt = NULL;
bool add_first = false;
gimple *cond_stmt = NULL;
+ gimple *cast_stmt = NULL;
+ tree cast_lhs = NULL_TREE;
gcc_checking_assert (code == PLUS_EXPR
|| code == MINUS_EXPR
continue;
tree other = NULL_TREE;
- if (arith_overflow_check_p (stmt, use_stmt, NULL_TREE, &other))
+ if (arith_overflow_check_p (stmt, NULL, use_stmt, NULL_TREE, &other))
{
if (code == BIT_NOT_EXPR)
{
}
ovf_use_seen = true;
}
- else
- use_seen = true;
+ else
+ {
+ use_seen = true;
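+ /* For multiplications, remember one cast of the result to the signed
+    type of the same precision; the overflow check may be performed on
+    that cast value instead of on LHS.  */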
+ if (code == MULT_EXPR
+ && cast_stmt == NULL
+ && gimple_assign_cast_p (use_stmt))
+ {
+ cast_lhs = gimple_assign_lhs (use_stmt);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (cast_lhs))
+ && !TYPE_UNSIGNED (TREE_TYPE (cast_lhs))
+ && (TYPE_PRECISION (TREE_TYPE (cast_lhs))
+ == TYPE_PRECISION (TREE_TYPE (lhs))))
+ cast_stmt = use_stmt;
+ else
+ cast_lhs = NULL_TREE;
+ }
+ }
if (ovf_use_seen && use_seen)
break;
}
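+ /* If no overflow check was found on the multiplication result itself,
+    look for one performed on the recorded cast of the result.  */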
+ if (!ovf_use_seen
+ && code == MULT_EXPR
+ && cast_stmt)
+ {
+ if (TREE_CODE (rhs1) != SSA_NAME
+ || (TREE_CODE (rhs2) != SSA_NAME && TREE_CODE (rhs2) != INTEGER_CST))
+ return false;
+ FOR_EACH_IMM_USE_FAST (use_p, iter, cast_lhs)
+ {
+ use_stmt = USE_STMT (use_p);
+ if (is_gimple_debug (use_stmt))
+ continue;
+
+ if (arith_overflow_check_p (stmt, cast_stmt, use_stmt,
+ NULL_TREE, NULL))
+ ovf_use_seen = true;
+ }
+ }
+ else
+ {
+ cast_stmt = NULL;
+ cast_lhs = NULL_TREE;
+ }
+
tree maxval = NULL_TREE;
if (!ovf_use_seen
|| (code != MULT_EXPR && (code == BIT_NOT_EXPR ? use_seen : !use_seen))
&& optab_handler (uaddv4_optab,
TYPE_MODE (type)) == CODE_FOR_nothing)
|| (code == MULT_EXPR
- && optab_handler (umulv4_optab,
+ && optab_handler (cast_stmt ? mulv4_optab : umulv4_optab,
TYPE_MODE (type)) == CODE_FOR_nothing))
{
if (code != PLUS_EXPR)
if (is_gimple_debug (use_stmt))
continue;
- if (arith_overflow_check_p (stmt, use_stmt, maxval, NULL))
+ if (arith_overflow_check_p (stmt, NULL, use_stmt, maxval, NULL))
{
ovf_use_seen = true;
use_bb = gimple_bb (use_stmt);
if (code == BIT_NOT_EXPR)
*gsi = gsi_for_stmt (cond_stmt);
+ auto_vec<gimple *, 8> mul_stmts;
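+ /* If the overflow check was performed on a cast of the result, emit
+    .MUL_OVERFLOW in the signed type of that cast: reuse the pre-cast
+    operands when the inputs are themselves casts from that type,
+    otherwise cast the operands first.  */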
+ if (code == MULT_EXPR && cast_stmt)
+ {
+ type = TREE_TYPE (cast_lhs);
+ gimple *g = SSA_NAME_DEF_STMT (rhs1);
+ if (gimple_assign_cast_p (g)
+ && useless_type_conversion_p (type,
+ TREE_TYPE (gimple_assign_rhs1 (g)))
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (g)))
+ rhs1 = gimple_assign_rhs1 (g);
+ else
+ {
+ g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, rhs1);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ rhs1 = gimple_assign_lhs (g);
+ mul_stmts.quick_push (g);
+ }
+ if (TREE_CODE (rhs2) == INTEGER_CST)
+ rhs2 = fold_convert (type, rhs2);
+ else
+ {
+ g = SSA_NAME_DEF_STMT (rhs2);
+ if (gimple_assign_cast_p (g)
+ && useless_type_conversion_p (type,
+ TREE_TYPE (gimple_assign_rhs1 (g)))
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (g)))
+ rhs2 = gimple_assign_rhs1 (g);
+ else
+ {
+ g = gimple_build_assign (make_ssa_name (type), NOP_EXPR, rhs2);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ rhs2 = gimple_assign_lhs (g);
+ mul_stmts.quick_push (g);
+ }
+ }
+ }
tree ctype = build_complex_type (type);
gcall *g = gimple_build_call_internal (code == MULT_EXPR
? IFN_MUL_OVERFLOW
tree ctmp = make_ssa_name (ctype);
gimple_call_set_lhs (g, ctmp);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
- tree new_lhs = maxval ? make_ssa_name (type) : lhs;
- gimple *mul_stmts[3] = { NULL, NULL, NULL };
+ tree new_lhs = (maxval || cast_stmt) ? make_ssa_name (type) : lhs;
gassign *g2;
if (code != BIT_NOT_EXPR)
{
g2 = gimple_build_assign (new_lhs, REALPART_EXPR,
build1 (REALPART_EXPR, type, ctmp));
- if (maxval)
+ if (maxval || cast_stmt)
{
gsi_insert_before (gsi, g2, GSI_SAME_STMT);
if (add_first)
gsi_replace (gsi, g2, true);
if (code == MULT_EXPR)
{
- mul_stmts[0] = g;
- mul_stmts[1] = g2;
+ mul_stmts.quick_push (g);
+ mul_stmts.quick_push (g2);
+ if (cast_stmt)
+ {
+ g2 = gimple_build_assign (lhs, NOP_EXPR, new_lhs);
+ gsi_replace (gsi, g2, true);
+ mul_stmts.quick_push (g2);
+ }
}
}
tree ovf = make_ssa_name (type);
else
gsi_insert_before (gsi, g2, GSI_SAME_STMT);
if (code == MULT_EXPR)
- mul_stmts[2] = g2;
+ mul_stmts.quick_push (g2);
- FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, cast_lhs ? cast_lhs : lhs)
{
if (is_gimple_debug (use_stmt))
continue;
gimple *orig_use_stmt = use_stmt;
- int ovf_use = arith_overflow_check_p (stmt, use_stmt, maxval, NULL);
+ int ovf_use = arith_overflow_check_p (stmt, cast_stmt, use_stmt,
+ maxval, NULL);
if (ovf_use == 0)
{
gcc_assert (code != BIT_NOT_EXPR);