From d0d8b5d83614d8f0d0e40c0520d4f40ffa01f8d9 Mon Sep 17 00:00:00 2001
From: Andrew MacLeod
Date: Thu, 19 Nov 2020 17:41:30 -0500
Subject: [PATCH] Process only valid shift ranges.

When the shift amount can fall outside the valid range of [0, precision-1],
we can process just the valid values, since shifting by anything else is
undefined.  This allows us to produce results for x << [0,2][+INF, +INF]
by discarding the invalid subranges and processing just [0,2].

gcc/
	PR tree-optimization/93781
	* range-op.cc (get_shift_range): Rename from
	undefined_shift_range_check and now return valid shift ranges.
	(operator_lshift::fold_range): Use result from get_shift_range.
	(operator_rshift::fold_range): Ditto.

gcc/testsuite/
	* gcc.dg/tree-ssa/pr93781-1.c: New.
	* gcc.dg/tree-ssa/pr93781-2.c: New.
	* gcc.dg/tree-ssa/pr93781-3.c: New.
---
 gcc/range-op.cc                           | 64 +++++++++++++----------
 gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c | 18 +++++++
 gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c | 17 ++++++
 gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c | 21 ++++++++
 4 files changed, 92 insertions(+), 28 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
 create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
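
For illustration only (not part of the committed change): a minimal sketch
distilled from the new pr93781-2.c testcase below, showing the kind of code
this improves.  After the guard, arg's range is [0,2][+INF, +INF] (using the
notation from the commit message); only [0,2] is a valid shift count, so the
shift folds to [1,4], b can never reach 5, and evrp can now remove the call
to kill ().

  void kill (void);

  void
  foo (unsigned int arg)
  {
    unsigned int b = 4;

    /* The guard is true for arg in [0,2] and also for arg == UINT_MAX,
       so arg's range on the taken path is [0,2][+INF, +INF].  */
    if (arg + 1 < 4)
      /* Only [0,2] is a valid shift count; 1 << [0,2] yields [1,4].  */
      b = 1u << arg;

    /* b is in [1,4] on every path, so this test is always false and
       the call can be eliminated.  */
    if (b >= 5)
      kill ();
  }
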
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
index 6be60073d19..5bf37e1ad82 100644
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -80,30 +80,25 @@ empty_range_varying (irange &r, tree type,
   return false;
 }
 
-// Return TRUE if shifting by OP is undefined behavior, and set R to
-// the appropriate range.
+// Return false if shifting by OP is undefined behavior.  Otherwise, return
+// true and the range it is to be shifted by.  This allows trimming out of
+// undefined ranges, leaving only valid ranges if there are any.
 
 static inline bool
-undefined_shift_range_check (irange &r, tree type, const irange &op)
+get_shift_range (irange &r, tree type, const irange &op)
 {
   if (op.undefined_p ())
-    {
-      r.set_undefined ();
-      return true;
-    }
+    return false;
 
-  // Shifting by any values outside [0..prec-1], gets undefined
-  // behavior from the shift operation.  We cannot even trust
-  // SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
-  // shifts, and the operation at the tree level may be widened.
-  if (wi::lt_p (op.lower_bound (), 0, TYPE_SIGN (op.type ()))
-      || wi::ge_p (op.upper_bound (),
-		   TYPE_PRECISION (type), TYPE_SIGN (op.type ())))
-    {
-      r.set_varying (type);
-      return true;
-    }
-  return false;
+  // Build valid range and intersect it with the shift range.
+  r = value_range (build_int_cst_type (op.type (), 0),
+		   build_int_cst_type (op.type (), TYPE_PRECISION (type) - 1));
+  r.intersect (op);
+
+  // If there are no valid ranges in the shift range, return false.
+  if (r.undefined_p ())
+    return false;
+  return true;
 }
 
 // Return TRUE if 0 is within [WMIN, WMAX].
@@ -1465,13 +1460,20 @@ operator_lshift::fold_range (irange &r, tree type,
 			     const irange &op1,
 			     const irange &op2) const
 {
-  if (undefined_shift_range_check (r, type, op2))
-    return true;
+  int_range_max shift_range;
+  if (!get_shift_range (shift_range, type, op2))
+    {
+      if (op2.undefined_p ())
+	r.set_undefined ();
+      else
+	r.set_varying (type);
+      return true;
+    }
 
   // Transform left shifts by constants into multiplies.
-  if (op2.singleton_p ())
+  if (shift_range.singleton_p ())
     {
-      unsigned shift = op2.lower_bound ().to_uhwi ();
+      unsigned shift = shift_range.lower_bound ().to_uhwi ();
       wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
       int_range<1> mult (type, tmp, tmp);
 
@@ -1487,7 +1489,7 @@ operator_lshift::fold_range (irange &r, tree type,
     }
   else
     // Otherwise, invoke the generic fold routine.
-    return range_operator::fold_range (r, type, op1, op2);
+    return range_operator::fold_range (r, type, op1, shift_range);
 }
 
 void
@@ -1709,11 +1711,17 @@ operator_rshift::fold_range (irange &r, tree type,
 			     const irange &op1,
 			     const irange &op2) const
 {
-  // Invoke the generic fold routine if not undefined..
-  if (undefined_shift_range_check (r, type, op2))
-    return true;
+  int_range_max shift;
+  if (!get_shift_range (shift, type, op2))
+    {
+      if (op2.undefined_p ())
+	r.set_undefined ();
+      else
+	r.set_varying (type);
+      return true;
+    }
 
-  return range_operator::fold_range (r, type, op1, op2);
+  return range_operator::fold_range (r, type, op1, shift);
 }
 
 void
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
new file mode 100644
index 00000000000..5ebd8053965
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+void kill (void);
+
+void foo (unsigned int arg)
+{
+  int a = arg - 3;
+  unsigned int b = 4;
+  int x = 0x1 << arg;
+
+  if (a < 0)
+    b = x;
+
+  /* In the fullness of time, we will delete this call.  */
+  if (b >= 5)
+    kill ();
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
new file mode 100644
index 00000000000..c9b28783c12
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+void kill (void);
+
+void foo (unsigned int arg)
+{
+  unsigned int C000003FE = 4;
+
+  if (arg + 1 < 4)  // similar to if (arg < 3), but arg's range also includes UINT_MAX
+    C000003FE = 0x1 << arg;
+
+  if (C000003FE >= 5)
+    kill ();
+}
+
+/* { dg-final { scan-tree-dump-not "kill" "evrp" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
new file mode 100644
index 00000000000..e1d2be0ea7f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+void kill (void);
+
+void foo (unsigned int arg)
+{
+  int a = arg - 3;
+  unsigned int b = 4;
+
+  if (a < 0)
+    {
+      int x = 0x1 << arg;
+      b = x;
+    }
+
+  if (b >= 5)
+    kill ();
+}
+
+/* { dg-final { scan-tree-dump-not "kill" "evrp" } } */
-- 
2.30.2