/* Generic tree predicates we inherit. */
(define_predicates
integer_onep integer_zerop integer_all_onesp integer_minus_onep
- integer_each_onep integer_truep
+ integer_each_onep integer_truep integer_nonzerop
real_zerop real_onep real_minus_onep
+ zerop
CONSTANT_CLASS_P
- tree_expr_nonnegative_p)
+ tree_expr_nonnegative_p
+ integer_valued_real_p
+ integer_pow2p
+ HONOR_NANS)
/* Operator lists. */
(define_operator_list tcc_comparison
(define_operator_list POW10 BUILT_IN_POW10F BUILT_IN_POW10 BUILT_IN_POW10L)
(define_operator_list SQRT BUILT_IN_SQRTF BUILT_IN_SQRT BUILT_IN_SQRTL)
(define_operator_list CBRT BUILT_IN_CBRTF BUILT_IN_CBRT BUILT_IN_CBRTL)
-
+(define_operator_list SIN BUILT_IN_SINF BUILT_IN_SIN BUILT_IN_SINL)
+(define_operator_list COS BUILT_IN_COSF BUILT_IN_COS BUILT_IN_COSL)
+(define_operator_list TAN BUILT_IN_TANF BUILT_IN_TAN BUILT_IN_TANL)
+(define_operator_list ATAN BUILT_IN_ATANF BUILT_IN_ATAN BUILT_IN_ATANL)
+(define_operator_list COSH BUILT_IN_COSHF BUILT_IN_COSH BUILT_IN_COSHL)
+(define_operator_list CEXPI BUILT_IN_CEXPIF BUILT_IN_CEXPI BUILT_IN_CEXPIL)
+(define_operator_list CPROJ BUILT_IN_CPROJF BUILT_IN_CPROJ BUILT_IN_CPROJL)
+(define_operator_list CCOS BUILT_IN_CCOSF BUILT_IN_CCOS BUILT_IN_CCOSL)
+(define_operator_list CCOSH BUILT_IN_CCOSHF BUILT_IN_CCOSH BUILT_IN_CCOSHL)
+(define_operator_list HYPOT BUILT_IN_HYPOTF BUILT_IN_HYPOT BUILT_IN_HYPOTL)
+(define_operator_list COPYSIGN BUILT_IN_COPYSIGNF
+ BUILT_IN_COPYSIGN
+ BUILT_IN_COPYSIGNL)
+(define_operator_list CABS BUILT_IN_CABSF BUILT_IN_CABS BUILT_IN_CABSL)
+(define_operator_list TRUNC BUILT_IN_TRUNCF BUILT_IN_TRUNC BUILT_IN_TRUNCL)
+(define_operator_list FLOOR BUILT_IN_FLOORF BUILT_IN_FLOOR BUILT_IN_FLOORL)
+(define_operator_list CEIL BUILT_IN_CEILF BUILT_IN_CEIL BUILT_IN_CEILL)
+(define_operator_list ROUND BUILT_IN_ROUNDF BUILT_IN_ROUND BUILT_IN_ROUNDL)
+(define_operator_list NEARBYINT BUILT_IN_NEARBYINTF
+ BUILT_IN_NEARBYINT
+ BUILT_IN_NEARBYINTL)
+(define_operator_list RINT BUILT_IN_RINTF BUILT_IN_RINT BUILT_IN_RINTL)
/* Simplifications of operations with one constant operand and
simplifications to constants or single values. */
wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
}
(if (!overflow_p)
- (div @0 { wide_int_to_tree (type, mul); }))
- (if (overflow_p
- && (TYPE_UNSIGNED (type)
- || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)))
- { build_zero_cst (type); }))))
+ (div @0 { wide_int_to_tree (type, mul); })
+ (if (TYPE_UNSIGNED (type)
+ || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
+ { build_zero_cst (type); })))))
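+/* E.g. (X / 10) / 20 becomes X / 200. When C1 * C2 overflows, X / C1 / C2
+ is 0 for unsigned types, since X < C1 * C2. */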
/* Optimize A / A to 1.0 if we don't care about
NaNs or Infinities. */
(with
{ tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
(if (tem)
- (mult @0 { tem; } ))))
- (if (cst != COMPLEX_CST)
- (with { tree inverse = exact_inverse (type, @1); }
- (if (inverse)
- (mult @0 { inverse; } )))))))
+ (mult @0 { tem; } )))
+ (if (cst != COMPLEX_CST)
+ (with { tree inverse = exact_inverse (type, @1); }
+ (if (inverse)
+ (mult @0 { inverse; } ))))))))
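+/* E.g. x / 4.0 becomes x * 0.25, since 0.25 is the exact inverse
+ of 4.0. */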
/* Same applies to modulo operations, but fold is inconsistent here
and simplifies 0 % x to 0, only preserving literal 0 % 0. */
/* (X % Y) % Y is just X % Y. */
(simplify
(mod (mod@2 @0 @1) @1)
- @2))
+ @2)
+ /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
+ (simplify
+ (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
+ (if (ANY_INTEGRAL_TYPE_P (type)
+ && TYPE_OVERFLOW_UNDEFINED (type)
+ && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
+ { build_zero_cst (type); })))
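+ /* E.g. with undefined signed overflow, (x * 6) % 3 folds to 0; with
+ wrapping arithmetic x * 6 could wrap to a value that is not a
+ multiple of 3, hence the TYPE_OVERFLOW_UNDEFINED check. */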
/* X % -C is the same as X % C. */
(simplify
/* X - (X / Y) * Y is the same as X % Y. */
(simplify
(minus (convert1? @0) (convert2? (mult (trunc_div @0 @1) @1)))
- (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
+ (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
+ && TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (type))
(trunc_mod (convert @0) (convert @1))))
/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
&& integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
(bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
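+/* E.g. for unsigned X, X % 8 becomes X & 7. */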
+/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
+(simplify
+ (trunc_div (mult @0 integer_pow2p@1) @1)
+ (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
+ (bit_and @0 { wide_int_to_tree
+ (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
+ false, TYPE_PRECISION (type))); })))
+
+/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
+(simplify
+ (mult (trunc_div @0 integer_pow2p@1) @1)
+ (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
+ (bit_and @0 (negate @1))))
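+/* For a power of two C, -C in wrapping unsigned arithmetic is ~(C - 1),
+ so (negate @1) yields the mask that clears the low log2(C) bits,
+ e.g. -2 == ~1. */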
+
+/* Simplify (t * 2) / 2 -> t. */
+(for div (trunc_div ceil_div floor_div round_div exact_div)
+ (simplify
+ (div (mult @0 @1) @1)
+ (if (ANY_INTEGRAL_TYPE_P (type)
+ && TYPE_OVERFLOW_UNDEFINED (type))
+ @0)))
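+/* The TYPE_OVERFLOW_UNDEFINED check is required: for wrapping 32-bit
+ unsigned t == 0x80000001, (t * 2) / 2 is 1, not t. */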
+
+(for op (negate abs)
+ /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
+ (for coss (COS COSH)
+ (simplify
+ (coss (op @0))
+ (coss @0)))
+ /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
+ (for pows (POW)
+ (simplify
+ (pows (op @0) REAL_CST@1)
+ (with { HOST_WIDE_INT n; }
+ (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
+ (pows @0 @1)))))
+ /* Strip negate and abs from both operands of hypot. */
+ (for hypots (HYPOT)
+ (simplify
+ (hypots (op @0) @1)
+ (hypots @0 @1))
+ (simplify
+ (hypots @0 (op @1))
+ (hypots @0 @1)))
+ /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
+ (for copysigns (COPYSIGN)
+ (simplify
+ (copysigns (op @0) @1)
+ (copysigns @0 @1))))
+
+/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
+(simplify
+ (mult (abs@1 @0) @1)
+ (mult @0 @0))
+
+/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
+(for coss (COS COSH)
+ copysigns (COPYSIGN)
+ (simplify
+ (coss (copysigns @0 @1))
+ (coss @0)))
+
+/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
+(for pows (POW)
+ copysigns (COPYSIGN)
+ (simplify
+ (pows (copysigns @0 @1) REAL_CST@1)
+ (with { HOST_WIDE_INT n; }
+ (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
+ (pows @0 @1)))))
+
+(for hypots (HYPOT)
+ copysigns (COPYSIGN)
+ /* hypot(copysign(x, y), z) -> hypot(x, z). */
+ (simplify
+ (hypots (copysigns @0 @1) @2)
+ (hypots @0 @2))
+ /* hypot(x, copysign(y, z)) -> hypot(x, y). */
+ (simplify
+ (hypots @0 (copysigns @1 @2))
+ (hypots @0 @1)))
+
+/* copysign(copysign(x, y), z) -> copysign(x, z). */
+(for copysigns (COPYSIGN)
+ (simplify
+ (copysigns (copysigns @0 @1) @2)
+ (copysigns @0 @2)))
+
+/* copysign(x,y)*copysign(x,y) -> x*x. */
+(for copysigns (COPYSIGN)
+ (simplify
+ (mult (copysigns@2 @0 @1) @2)
+ (mult @0 @0)))
+
+/* ccos(-x) -> ccos(x). Similarly for ccosh. */
+(for ccoss (CCOS CCOSH)
+ (simplify
+ (ccoss (negate @0))
+ (ccoss @0)))
+
+/* cabs(-x) and cabs(conj(x)) -> cabs(x). */
+(for ops (conj negate)
+ (for cabss (CABS)
+ (simplify
+ (cabss (ops @0))
+ (cabss @0))))
+
+/* Fold (a * (1 << b)) into (a << b). */
+(simplify
+ (mult:c @0 (convert? (lshift integer_onep@1 @2)))
+ (if (! FLOAT_TYPE_P (type)
+ && tree_nop_conversion_p (type, TREE_TYPE (@1)))
+ (lshift @0 @2)))
+
+/* Fold (C1/X)*C2 into (C1*C2)/X. */
+(simplify
+ (mult (rdiv:s REAL_CST@0 @1) REAL_CST@2)
+ (if (flag_associative_math)
+ (with
+ { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
+ (if (tem)
+ (rdiv { tem; } @1)))))
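+/* E.g. (2.0 / x) * 3.0 becomes 6.0 / x; the reassociation can change
+ rounding, hence the flag_associative_math guard. */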
+
+/* Simplify ~X & X as zero. */
+(simplify
+ (bit_and:c (convert? @0) (convert? (bit_not @0)))
+ { build_zero_cst (type); })
+
/* X % Y is smaller than Y. */
(for cmp (lt ge)
(simplify
/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
- (plus:c @0 (bit_and@2 @0 integer_onep@1))
- (if (single_use (@2))
- (bit_and (plus @0 @1) (bit_not @1))))
+ (plus:c @0 (bit_and:s @0 integer_onep@1))
+ (bit_and (plus @0 @1) (bit_not @1)))
/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y */
(for bitop (bit_and bit_ior)
(simplify
- (bitop:c @0 (bit_not (bitop:c@2 @0 @1)))
- (if (single_use (@2))
- (bitop @0 (bit_not @1)))))
+ (bitop:c @0 (bit_not (bitop:cs @0 @1)))
+ (bitop @0 (bit_not @1))))
/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
&& tree_nop_conversion_p (type, TREE_TYPE (@1)))
(bit_not (rop (convert @0) (convert @1))))))
-/* If we are XORing two BIT_AND_EXPR's, both of which are and'ing
+/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
with a constant, and the two constants have no bits in common,
we should treat this as a BIT_IOR_EXPR since this may produce more
simplifications. */
-(simplify
- (bit_xor (convert1? (bit_and@4 @0 INTEGER_CST@1))
- (convert2? (bit_and@5 @2 INTEGER_CST@3)))
- (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
- && tree_nop_conversion_p (type, TREE_TYPE (@2))
- && wi::bit_and (@1, @3) == 0)
- (bit_ior (convert @4) (convert @5))))
+(for op (bit_xor plus)
+ (simplify
+ (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
+ (convert2? (bit_and@5 @2 INTEGER_CST@3)))
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
+ && tree_nop_conversion_p (type, TREE_TYPE (@2))
+ && wi::bit_and (@1, @3) == 0)
+ (bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~ X*/
(simplify
(bit_xor:c (bit_and:c @0 @1) @1)
(bit_and (bit_not @0) @1))
+/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
+ operands are another bit-wise operation with a common input. If so,
+ distribute the bit operations to save an operation and possibly two if
+ constants are involved. For example, convert
+ (A | B) & (A | C) into A | (B & C)
+ Further simplification will occur if B and C are constants. */
+(for op (bit_and bit_ior)
+ rop (bit_ior bit_and)
+ (simplify
+ (op (convert? (rop:c @0 @1)) (convert? (rop @0 @2)))
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
+ (rop (convert @0) (op (convert @1) (convert @2))))))
+
(simplify
(abs (abs@1 @0))
(abs tree_expr_nonnegative_p@0)
@0)
-/* A - B -> A + (-B) if B is easily negatable. This just covers
- very few cases of "easily negatable", effectively inlining negate_expr_p. */
-(simplify
- (minus @0 INTEGER_CST@1)
+/* A few cases of the fold-const.c negate_expr_p predicate. */
+(match negate_expr_p
+ INTEGER_CST
(if ((INTEGRAL_TYPE_P (type)
&& TYPE_OVERFLOW_WRAPS (type))
|| (!TYPE_OVERFLOW_SANITIZED (type)
- && may_negate_without_overflow_p (@1)))
- (plus @0 (negate @1))))
-(simplify
- (minus @0 REAL_CST@1)
- (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
- (plus @0 (negate @1))))
+ && may_negate_without_overflow_p (t)))))
+(match negate_expr_p
+ FIXED_CST)
+(match negate_expr_p
+ (negate @0)
+ (if (!TYPE_OVERFLOW_SANITIZED (type))))
+(match negate_expr_p
+ REAL_CST
+ (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
+/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
+ ways. */
+(match negate_expr_p
+ VECTOR_CST
+ (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
+
+/* (-A) * (-B) -> A * B */
+(simplify
+ (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
+ && tree_nop_conversion_p (type, TREE_TYPE (@1)))
+ (mult (convert @0) (convert (negate @1)))))
+
+/* -(A + B) -> (-B) - A. */
(simplify
- (minus @0 VECTOR_CST@1)
- (if (FLOAT_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
- (plus @0 (negate @1))))
+ (negate (plus:c @0 negate_expr_p@1))
+ (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
+ && !HONOR_SIGNED_ZEROS (element_mode (type)))
+ (minus (negate @1) @0)))
+/* A - B -> A + (-B) if B is easily negatable. */
+(simplify
+ (minus @0 negate_expr_p@1)
+ (if (!FIXED_POINT_TYPE_P (type))
+ (plus @0 (negate @1))))
/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
when profitable.
(match truth_valued_p
(truth_not @0))
+(match (logical_inverted_value @0)
+ (truth_not @0))
(match (logical_inverted_value @0)
(bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
(simplify
(op:c truth_valued_p@0 (logical_inverted_value @0))
{ constant_boolean_node (true, type); }))
+/* X ==/!= !X is false/true. */
+(for op (eq ne)
+ (simplify
+ (op:c truth_valued_p@0 (logical_inverted_value @0))
+ { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
/* If arg1 and arg2 are booleans (or any single bit type)
then try to simplify:
/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
- (bit_ior:c (bit_and:c@3 @0 (bit_not @2)) (bit_and:c@4 @1 @2))
- (if (single_use (@3) && single_use (@4))
- (bit_xor (bit_and (bit_xor @0 @1) @2) @0)))
+ (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
+ (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
+/* Fold A - (A & B) into ~B & A. */
+(simplify
+ (minus (convert? @0) (convert?:s (bit_and:cs @0 @1)))
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
+ && tree_nop_conversion_p (type, TREE_TYPE (@1)))
+ (convert (bit_and (bit_not @1) @0))))
+
+
+
+/* ((X inner_op C0) outer_op C1)
+ With X being a tree where value_range has reasoned certain bits to always be
+ zero throughout its computed value range,
+ inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
+ where zero_mask has 1's for all bits that are sure to be 0 in X
+ and 0's otherwise.
+ if (inner_op == '^') C0 &= ~C1;
+ if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
+ if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
+*/
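+/* Example: if get_nonzero_bits shows that X fits in the low 8 bits,
+ (X | 0x100) ^ 0x300 becomes X ^ 0x200: bit 8 of X is known to be zero,
+ so the inner | behaves like ^ and the two constants combine. */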
+(for inner_op (bit_ior bit_xor)
+ outer_op (bit_xor bit_ior)
+(simplify
+ (outer_op
+ (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
+ (with
+ {
+ bool fail = false;
+ wide_int zero_mask_not;
+ wide_int C0;
+ wide_int cst_emit;
+
+ if (TREE_CODE (@2) == SSA_NAME)
+ zero_mask_not = get_nonzero_bits (@2);
+ else
+ fail = true;
+
+ if (inner_op == BIT_XOR_EXPR)
+ {
+ C0 = wi::bit_and_not (@0, @1);
+ cst_emit = wi::bit_or (C0, @1);
+ }
+ else
+ {
+ C0 = @0;
+ cst_emit = wi::bit_xor (@0, @1);
+ }
+ }
+ (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
+ (outer_op @2 { wide_int_to_tree (type, cst_emit); })
+ (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
+ (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
(simplify
- (pointer_plus (pointer_plus@2 @0 @1) @3)
- (if (single_use (@2)
- || (TREE_CODE (@1) == INTEGER_CST && TREE_CODE (@3) == INTEGER_CST))
- (pointer_plus @0 (plus @1 @3))))
+ (pointer_plus (pointer_plus:s @0 @1) @3)
+ (pointer_plus @0 (plus @1 @3)))
/* Pattern match
tem1 = (long) ptr1;
|| (POINTER_TYPE_P (TREE_TYPE (@0))
&& TREE_CODE (@1) == INTEGER_CST
&& tree_int_cst_sign_bit (@1) == 0))
- (convert @1))))))
+ (convert @1))))
+
+ /* (T)P - (T)(P + A) -> -(T) A */
+ (for add (plus pointer_plus)
+ (simplify
+ (minus (convert @0)
+ (convert (add @0 @1)))
+ (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
+ /* For integer types, if A has a smaller type
+ than T the result depends on the possible
+ overflow in P + A.
+ E.g. T=size_t, A=(unsigned)4294967295, P>0.
+ However, if an overflow in P + A would cause
+ undefined behavior, we can assume that there
+ is no overflow. */
+ || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
+ /* For pointer types, if the conversion of A to the
+ final type requires a sign- or zero-extension,
+ then we have to punt - it is not defined which
+ one is correct. */
+ || (POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (@1) == INTEGER_CST
+ && tree_int_cst_sign_bit (@1) == 0))
+ (negate (convert @1)))))
+
+ /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
+ (for add (plus pointer_plus)
+ (simplify
+ (minus (convert (add @0 @1))
+ (convert (add @0 @2)))
+ (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
+ /* For integer types, if A has a smaller type
+ than T the result depends on the possible
+ overflow in P + A.
+ E.g. T=size_t, A=(unsigned)4294967295, P>0.
+ However, if an overflow in P + A would cause
+ undefined behavior, we can assume that there
+ is no overflow. */
+ || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
+ /* For pointer types, if the conversion of A to the
+ final type requires a sign- or zero-extension,
+ then we have to punt - it is not defined which
+ one is correct. */
+ || (POINTER_TYPE_P (TREE_TYPE (@0))
+ && TREE_CODE (@1) == INTEGER_CST
+ && tree_int_cst_sign_bit (@1) == 0
+ && TREE_CODE (@2) == INTEGER_CST
+ && tree_int_cst_sign_bit (@2) == 0))
+ (minus (convert @1) (convert @2)))))))
/* Simplifications of MIN_EXPR and MAX_EXPR. */
&& tree_expr_nonnegative_p (@1))
@0))
+/* Optimize (x >> c) << c into x & (-1<<c). */
+(simplify
+ (lshift (rshift @0 INTEGER_CST@1) @1)
+ (if (wi::ltu_p (@1, element_precision (type)))
+ (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
+
+/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
+ types. */
+(simplify
+ (rshift (lshift @0 INTEGER_CST@1) @1)
+ (if (TYPE_UNSIGNED (type)
+ && (wi::ltu_p (@1, element_precision (type))))
+ (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
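+/* E.g. on a 32-bit type, (x >> 4) << 4 becomes x & 0xfffffff0, and for
+ unsigned x, (x << 4) >> 4 becomes x & 0x0fffffff. */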
+
(for shiftrotate (lrotate rrotate lshift rshift)
(simplify
(shiftrotate @0 integer_zerop)
build_int_cst (TREE_TYPE (@1),
element_precision (type)), @1); }))
+/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
+(for op (lrotate rrotate rshift lshift)
+ (simplify
+ (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
+ (with { unsigned int prec = element_precision (type); }
+ (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
+ && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
+ && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
+ (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
+ /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
+ being well defined. */
+ (if (low >= prec)
+ (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
+ (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
+ (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
+ { build_zero_cst (type); }
+ (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
+ (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
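+/* E.g. with a 32-bit type, (x >> 20) >> 20 becomes 0 for unsigned x and
+ x >> 31 for signed x, and (x << 20) << 20 becomes 0, even though a
+ single shift by 40 would be undefined. */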
+
+
/* ((1 << A) & 1) != 0 -> A == 0
((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
(if (cand < 0
|| (!integer_zerop (@2)
&& wi::ne_p (wi::lshift (@0, cand), @2)))
- { constant_boolean_node (cmp == NE_EXPR, type); })
- (if (!integer_zerop (@2)
- && wi::eq_p (wi::lshift (@0, cand), @2))
- (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); })))))
+ { constant_boolean_node (cmp == NE_EXPR, type); }
+ (if (!integer_zerop (@2)
+ && wi::eq_p (wi::lshift (@0, cand), @2))
+ (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
(X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
if the new mask might be further optimized. */
(for shift (lshift rshift)
(simplify
- (bit_and (convert?@4 (shift@5 (convert1?@3 @0) INTEGER_CST@1)) INTEGER_CST@2)
+ (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
+ INTEGER_CST@2)
(if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
&& TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
&& tree_fits_uhwi_p (@1)
}
/* ((X << 16) & 0xff00) is (X, 0). */
(if ((mask & zerobits) == mask)
- { build_int_cst (type, 0); })
- (with { newmask = mask | zerobits; }
- (if (newmask != mask && (newmask & (newmask + 1)) == 0)
- (with
- {
- /* Only do the transformation if NEWMASK is some integer
- mode's mask. */
- for (prec = BITS_PER_UNIT;
- prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
- if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
- break;
- }
- (if (prec < HOST_BITS_PER_WIDE_INT
- || newmask == ~(unsigned HOST_WIDE_INT) 0)
- (with
- { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
- (if (!tree_int_cst_equal (newmaskt, @2))
- (if (shift_type != TREE_TYPE (@3)
- && single_use (@4) && single_use (@5))
- (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; }))
- (if (shift_type == TREE_TYPE (@3))
- (bit_and @4 { newmaskt; }))))))))))))
+ { build_int_cst (type, 0); }
+ (with { newmask = mask | zerobits; }
+ (if (newmask != mask && (newmask & (newmask + 1)) == 0)
+ (with
+ {
+ /* Only do the transformation if NEWMASK is some integer
+ mode's mask. */
+ for (prec = BITS_PER_UNIT;
+ prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
+ if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
+ break;
+ }
+ (if (prec < HOST_BITS_PER_WIDE_INT
+ || newmask == ~(unsigned HOST_WIDE_INT) 0)
+ (with
+ { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
+ (if (!tree_int_cst_equal (newmaskt, @2))
+ (if (shift_type != TREE_TYPE (@3))
+ (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
+ (bit_and @4 { newmaskt; })))))))))))))
+
+/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
+ (X {&,^,|} C2) >> C1 into (X >> C1) {&,^,|} (C2 >> C1). */
+(for shift (lshift rshift)
+ (for bit_op (bit_and bit_xor bit_ior)
+ (simplify
+ (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
+ (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
+ (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
+ (bit_op (shift (convert @0) @1) { mask; }))))))
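+/* E.g. (x & 0xff) << 8 becomes (x << 8) & 0xff00, exposing the shifted
+ mask to further simplification. */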
+
/* Simplifications of conversions. */
unsigned int final_prec = TYPE_PRECISION (type);
int final_unsignedp = TYPE_UNSIGNED (type);
}
- /* In addition to the cases of two conversions in a row
- handled below, if we are converting something to its own
- type via an object of identical or wider precision, neither
- conversion is needed. */
- (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
- || (GENERIC
- && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
- && (((inter_int || inter_ptr) && final_int)
- || (inter_float && final_float))
- && inter_prec >= final_prec)
- (ocvt @0))
-
- /* Likewise, if the intermediate and initial types are either both
- float or both integer, we don't need the middle conversion if the
- former is wider than the latter and doesn't change the signedness
- (for integers). Avoid this if the final type is a pointer since
- then we sometimes need the middle conversion. Likewise if the
- final type has a precision not equal to the size of its mode. */
- (if (((inter_int && inside_int) || (inter_float && inside_float))
- && (final_int || final_float)
- && inter_prec >= inside_prec
- && (inter_float || inter_unsignedp == inside_unsignedp)
- && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
- && TYPE_MODE (type) == TYPE_MODE (inter_type)))
- (ocvt @0))
-
- /* If we have a sign-extension of a zero-extended value, we can
- replace that by a single zero-extension. Likewise if the
- final conversion does not change precision we can drop the
- intermediate conversion. */
- (if (inside_int && inter_int && final_int
- && ((inside_prec < inter_prec && inter_prec < final_prec
- && inside_unsignedp && !inter_unsignedp)
- || final_prec == inter_prec))
- (ocvt @0))
-
- /* Two conversions in a row are not needed unless:
+ (switch
+ /* In addition to the cases of two conversions in a row
+ handled below, if we are converting something to its own
+ type via an object of identical or wider precision, neither
+ conversion is needed. */
+ (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
+ || (GENERIC
+ && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
+ && (((inter_int || inter_ptr) && final_int)
+ || (inter_float && final_float))
+ && inter_prec >= final_prec)
+ (ocvt @0))
+
+ /* Likewise, if the intermediate and initial types are either both
+ float or both integer, we don't need the middle conversion if the
+ former is wider than the latter and doesn't change the signedness
+ (for integers). Avoid this if the final type is a pointer since
+ then we sometimes need the middle conversion. Likewise if the
+ final type has a precision not equal to the size of its mode. */
+ (if (((inter_int && inside_int) || (inter_float && inside_float))
+ && (final_int || final_float)
+ && inter_prec >= inside_prec
+ && (inter_float || inter_unsignedp == inside_unsignedp)
+ && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
+ && TYPE_MODE (type) == TYPE_MODE (inter_type)))
+ (ocvt @0))
+
+ /* If we have a sign-extension of a zero-extended value, we can
+ replace that by a single zero-extension. Likewise if the
+ final conversion does not change precision we can drop the
+ intermediate conversion. */
+ (if (inside_int && inter_int && final_int
+ && ((inside_prec < inter_prec && inter_prec < final_prec
+ && inside_unsignedp && !inter_unsignedp)
+ || final_prec == inter_prec))
+ (ocvt @0))
+
+ /* Two conversions in a row are not needed unless:
- some conversion is floating-point (overstrict for now), or
- some conversion is a vector (overstrict for now), or
- the intermediate type is narrower than both initial and
final, or
- the intermediate type and innermost type differ in signedness,
and the outermost type is wider than the intermediate, or
- the initial type is a pointer type and the precisions of the
intermediate and final types differ, or
- the final type is a pointer type and the precisions of the
initial and intermediate types differ. */
- (if (! inside_float && ! inter_float && ! final_float
- && ! inside_vec && ! inter_vec && ! final_vec
- && (inter_prec >= inside_prec || inter_prec >= final_prec)
- && ! (inside_int && inter_int
- && inter_unsignedp != inside_unsignedp
- && inter_prec < final_prec)
- && ((inter_unsignedp && inter_prec > inside_prec)
- == (final_unsignedp && final_prec > inter_prec))
- && ! (inside_ptr && inter_prec != final_prec)
- && ! (final_ptr && inside_prec != inter_prec)
- && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
- && TYPE_MODE (type) == TYPE_MODE (inter_type)))
- (ocvt @0))
-
- /* A truncation to an unsigned type (a zero-extension) should be
- canonicalized as bitwise and of a mask. */
- (if (final_int && inter_int && inside_int
- && final_prec == inside_prec
- && final_prec > inter_prec
- && inter_unsignedp)
- (convert (bit_and @0 { wide_int_to_tree
- (inside_type,
- wi::mask (inter_prec, false,
- TYPE_PRECISION (inside_type))); })))
-
- /* If we are converting an integer to a floating-point that can
- represent it exactly and back to an integer, we can skip the
- floating-point conversion. */
- (if (GIMPLE /* PR66211 */
- && inside_int && inter_float && final_int &&
- (unsigned) significand_size (TYPE_MODE (inter_type))
- >= inside_prec - !inside_unsignedp)
- (convert @0))))))
+ (if (! inside_float && ! inter_float && ! final_float
+ && ! inside_vec && ! inter_vec && ! final_vec
+ && (inter_prec >= inside_prec || inter_prec >= final_prec)
+ && ! (inside_int && inter_int
+ && inter_unsignedp != inside_unsignedp
+ && inter_prec < final_prec)
+ && ((inter_unsignedp && inter_prec > inside_prec)
+ == (final_unsignedp && final_prec > inter_prec))
+ && ! (inside_ptr && inter_prec != final_prec)
+ && ! (final_ptr && inside_prec != inter_prec)
+ && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
+ && TYPE_MODE (type) == TYPE_MODE (inter_type)))
+ (ocvt @0))
+
+ /* A truncation to an unsigned type (a zero-extension) should be
+ canonicalized as bitwise and of a mask. */
+ (if (final_int && inter_int && inside_int
+ && final_prec == inside_prec
+ && final_prec > inter_prec
+ && inter_unsignedp)
+ (convert (bit_and @0 { wide_int_to_tree
+ (inside_type,
+ wi::mask (inter_prec, false,
+ TYPE_PRECISION (inside_type))); })))
+
+ /* If we are converting an integer to a floating-point that can
+ represent it exactly and back to an integer, we can skip the
+ floating-point conversion. */
+ (if (GIMPLE /* PR66211 */
+ && inside_int && inter_float && final_int &&
+ (unsigned) significand_size (TYPE_MODE (inter_type))
+ >= inside_prec - !inside_unsignedp)
+ (convert @0)))))))
/* If we have a narrowing conversion to an integral type that is fed by a
BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
(imagpart (complex @0 @1))
@1)
+/* Sometimes we only care about half of a complex expression. */
+(simplify
+ (realpart (convert?:s (conj:s @0)))
+ (convert (realpart @0)))
+(simplify
+ (imagpart (convert?:s (conj:s @0)))
+ (convert (negate (imagpart @0))))
+(for part (realpart imagpart)
+ (for op (plus minus)
+ (simplify
+ (part (convert?:s@2 (op:s @0 @1)))
+ (convert (op (part @0) (part @1))))))
+(simplify
+ (realpart (convert?:s (CEXPI:s @0)))
+ (convert (COS @0)))
+(simplify
+ (imagpart (convert?:s (CEXPI:s @0)))
+ (convert (SIN @0)))
+
+/* conj(conj(x)) -> x */
+(simplify
+ (conj (convert? (conj @0)))
+ (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
+ (convert @0)))
+
+/* conj({x,y}) -> {x,-y} */
+(simplify
+ (conj (convert?:s (complex:s @0 @1)))
+ (with { tree itype = TREE_TYPE (type); }
+ (complex (convert:itype @0) (negate (convert:itype @1)))))
/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
genmatch cannot handle. */
(simplify
(cond INTEGER_CST@0 @1 @2)
- (if (integer_zerop (@0)
- && (!VOID_TYPE_P (TREE_TYPE (@2))
- || VOID_TYPE_P (type)))
- @2)
- (if (!integer_zerop (@0)
- && (!VOID_TYPE_P (TREE_TYPE (@1))
- || VOID_TYPE_P (type)))
- @1))
+ (if (integer_zerop (@0))
+ (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
+ @2)
+ (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
+ @1)))
(simplify
(vec_cond VECTOR_CST@0 @1 @2)
(if (integer_all_onesp (@0))
- @1)
- (if (integer_zerop (@0))
- @2))
+ @1
+ (if (integer_zerop (@0))
+ @2)))
(for cnd (cond vec_cond)
/* A ? B : (A ? X : C) -> A ? B : C. */
/* Simplifications of comparisons. */
+/* See if we can reduce the magnitude of a constant involved in a
+ comparison by changing the comparison code. This is a canonicalization
+ formerly done by maybe_canonicalize_comparison_1. */
+(for cmp (le gt)
+ acmp (lt ge)
+ (simplify
+ (cmp @0 INTEGER_CST@1)
+ (if (tree_int_cst_sgn (@1) == -1)
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
+(for cmp (ge lt)
+ acmp (gt le)
+ (simplify
+ (cmp @0 INTEGER_CST@1)
+ (if (tree_int_cst_sgn (@1) == 1)
+ (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
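+/* E.g. x <= -1 becomes x < 0 and x >= 1 becomes x > 0, preferring the
+ constant of smaller magnitude. */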
+
+
/* We can simplify a logical negation of a comparison to the
inverted comparison. As we cannot compute an expression
operator using invert_tree_comparison we have to simulate
(with { enum tree_code ic = invert_tree_comparison
(cmp, HONOR_NANS (@0)); }
(if (ic == icmp)
- (icmp @0 @1))
- (if (ic == ncmp)
- (ncmp @0 @1)))))
+ (icmp @0 @1)
+ (if (ic == ncmp)
+ (ncmp @0 @1))))))
(simplify
(bit_xor (cmp @0 @1) integer_truep)
(with { enum tree_code ic = invert_tree_comparison
(cmp, HONOR_NANS (@0)); }
(if (ic == icmp)
- (icmp @0 @1))
- (if (ic == ncmp)
- (ncmp @0 @1)))))
+ (icmp @0 @1)
+ (if (ic == ncmp)
+ (ncmp @0 @1))))))
/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
??? The transformation is valid for the other operators if overflow
attempts to synthesize ABS_EXPR. */
(for cmp (eq ne)
(simplify
- (cmp (minus @0 @1) integer_zerop)
- (cmp @0 @1)))
+ (cmp (minus@2 @0 @1) integer_zerop)
+ (if (single_use (@2))
+ (cmp @0 @1))))
/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
signed arithmetic case. That form is created by the compiler
(cmp (mult @0 INTEGER_CST@1) integer_zerop@2)
/* Handle unfolded multiplication by zero. */
(if (integer_zerop (@1))
- (cmp @1 @2))
- (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
- /* If @1 is negative we swap the sense of the comparison. */
- (if (tree_int_cst_sgn (@1) < 0)
- (scmp @0 @2))
- (cmp @0 @2))))
+ (cmp @1 @2)
+ (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
+ /* If @1 is negative we swap the sense of the comparison. */
+ (if (tree_int_cst_sgn (@1) < 0)
+ (scmp @0 @2)
+ (cmp @0 @2))))))
/* Simplify comparison of something with itself. For IEEE
floating-point, we can only do some of these simplifications. */
|| ! FLOAT_TYPE_P (TREE_TYPE (@0))
|| ! HONOR_NANS (TYPE_MODE (TREE_TYPE (@0))))
{ constant_boolean_node (false, type); })))
+(for cmp (unle unge uneq)
+ (simplify
+ (cmp @0 @0)
+ { constant_boolean_node (true, type); }))
+(simplify
+ (ltgt @0 @0)
+ (if (!flag_trapping_math)
+ { constant_boolean_node (false, type); }))
/* Fold ~X op ~Y as Y op X. */
(for cmp (simple_comparison)
(simplify
(cmp @0 REAL_CST@1)
/* IEEE doesn't distinguish +0 and -0 in comparisons. */
- /* a CMP (-0) -> a CMP 0 */
- (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
- (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
- /* x != NaN is always true, other ops are always false. */
- (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
- && ! HONOR_SNANS (@1))
- { constant_boolean_node (cmp == NE_EXPR, type); })
- /* Fold comparisons against infinity. */
- (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
- && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
- (with
- {
- REAL_VALUE_TYPE max;
- enum tree_code code = cmp;
- bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
- if (neg)
- code = swap_tree_comparison (code);
- }
- /* x > +Inf is always false, if with ignore sNANs. */
- (if (code == GT_EXPR
- && ! HONOR_SNANS (@0))
- { constant_boolean_node (false, type); })
- (if (code == LE_EXPR)
- /* x <= +Inf is always true, if we don't case about NaNs. */
- (if (! HONOR_NANS (@0))
- { constant_boolean_node (true, type); })
- /* x <= +Inf is the same as x == x, i.e. isfinite(x). */
- (eq @0 @0))
- /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
- (if (code == EQ_EXPR || code == GE_EXPR)
- (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
- (if (neg)
- (lt @0 { build_real (TREE_TYPE (@0), max); }))
- (gt @0 { build_real (TREE_TYPE (@0), max); })))
- /* x < +Inf is always equal to x <= DBL_MAX. */
- (if (code == LT_EXPR)
- (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
- (if (neg)
- (ge @0 { build_real (TREE_TYPE (@0), max); }))
- (le @0 { build_real (TREE_TYPE (@0), max); })))
- /* x != +Inf is always equal to !(x > DBL_MAX). */
- (if (code == NE_EXPR)
- (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
- (if (! HONOR_NANS (@0))
- (if (neg)
- (ge @0 { build_real (TREE_TYPE (@0), max); }))
- (le @0 { build_real (TREE_TYPE (@0), max); }))
- (if (neg)
- (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
- { build_one_cst (type); }))
- (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
- { build_one_cst (type); }))))))
+ (switch
+ /* a CMP (-0) -> a CMP 0 */
+ (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
+ (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
+ /* x != NaN is always true, other ops are always false. */
+ (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
+ && ! HONOR_SNANS (@1))
+ { constant_boolean_node (cmp == NE_EXPR, type); })
+ /* Fold comparisons against infinity. */
+ (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
+ && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
+ (with
+ {
+ REAL_VALUE_TYPE max;
+ enum tree_code code = cmp;
+ bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
+ if (neg)
+ code = swap_tree_comparison (code);
+ }
+ (switch
+ /* x > +Inf is always false, if we ignore sNaNs. */
+ (if (code == GT_EXPR
+ && ! HONOR_SNANS (@0))
+ { constant_boolean_node (false, type); })
+ (if (code == LE_EXPR)
+ /* x <= +Inf is always true, if we don't care about NaNs. */
+ (if (! HONOR_NANS (@0))
+ { constant_boolean_node (true, type); }
+ /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
+ (eq @0 @0)))
+ /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
+ (if (code == EQ_EXPR || code == GE_EXPR)
+ (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+ (if (neg)
+ (lt @0 { build_real (TREE_TYPE (@0), max); })
+ (gt @0 { build_real (TREE_TYPE (@0), max); }))))
+ /* x < +Inf is always equal to x <= DBL_MAX. */
+ (if (code == LT_EXPR)
+ (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+ (if (neg)
+ (ge @0 { build_real (TREE_TYPE (@0), max); })
+ (le @0 { build_real (TREE_TYPE (@0), max); }))))
+ /* x != +Inf is always equal to !(x > DBL_MAX). */
+ (if (code == NE_EXPR)
+ (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
+ (if (! HONOR_NANS (@0))
+ (if (neg)
+ (ge @0 { build_real (TREE_TYPE (@0), max); })
+ (le @0 { build_real (TREE_TYPE (@0), max); }))
+ (if (neg)
+ (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
+ { build_one_cst (type); })
+ (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
+ { build_one_cst (type); }))))))))))
/* If this is a comparison of a real constant with a PLUS_EXPR
or a MINUS_EXPR of a real constant, we can convert it into a
tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
TREE_TYPE (@1), @2, @1);
}
- (if (!TREE_OVERFLOW (tem))
+ (if (tem && !TREE_OVERFLOW (tem))
(cmp @0 { tem; }))))))
/* Likewise, we can simplify a comparison of a real constant with
(simplify
(cmp (minus REAL_CST@0 @1) REAL_CST@2)
(with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
- (if (!TREE_OVERFLOW (tem))
+ (if (tem && !TREE_OVERFLOW (tem))
(cmp { tem; } @1)))))
/* Fold comparisons against built-in math functions. */
(for sq (SQRT)
(simplify
(cmp (sq @0) REAL_CST@1)
- (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
- /* sqrt(x) < y is always false, if y is negative. */
- (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
- { constant_boolean_node (false, type); })
- /* sqrt(x) > y is always true, if y is negative and we
- don't care about NaNs, i.e. negative values of x. */
- (if (cmp == NE_EXPR || !HONOR_NANS (@0))
- { constant_boolean_node (true, type); })
- /* sqrt(x) > y is the same as x >= 0, if y is negative. */
- (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
- (if (cmp == GT_EXPR || cmp == GE_EXPR)
- (with
- {
- REAL_VALUE_TYPE c2;
- REAL_ARITHMETIC (c2, MULT_EXPR, TREE_REAL_CST (@1), TREE_REAL_CST (@1));
- real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
- }
- (if (REAL_VALUE_ISINF (c2))
- /* sqrt(x) > y is x == +Inf, when y is very large. */
- (if (HONOR_INFINITIES (@0))
- (eq @0 { build_real (TREE_TYPE (@0), c2); }))
- { constant_boolean_node (false, type); })
- /* sqrt(x) > c is the same as x > c*c. */
- (cmp @0 { build_real (TREE_TYPE (@0), c2); })))
- (if (cmp == LT_EXPR || cmp == LE_EXPR)
- (with
- {
- REAL_VALUE_TYPE c2;
- REAL_ARITHMETIC (c2, MULT_EXPR, TREE_REAL_CST (@1), TREE_REAL_CST (@1));
- real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
- }
- (if (REAL_VALUE_ISINF (c2))
- /* sqrt(x) < y is always true, when y is a very large
- value and we don't care about NaNs or Infinities. */
- (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
- { constant_boolean_node (true, type); })
- /* sqrt(x) < y is x != +Inf when y is very large and we
- don't care about NaNs. */
- (if (! HONOR_NANS (@0))
- (ne @0 { build_real (TREE_TYPE (@0), c2); }))
- /* sqrt(x) < y is x >= 0 when y is very large and we
- don't care about Infinities. */
- (if (! HONOR_INFINITIES (@0))
- (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
- /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
- (if (GENERIC)
- (truth_andif
- (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
- (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
- /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
- (if (! REAL_VALUE_ISINF (c2)
- && ! HONOR_NANS (@0))
- (cmp @0 { build_real (TREE_TYPE (@0), c2); }))
- /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
- (if (! REAL_VALUE_ISINF (c2)
- && GENERIC)
- (truth_andif
- (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
- (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
+ (switch
+ (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
+ (switch
+ /* sqrt(x) < y is always false, if y is negative. */
+ (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
+ { constant_boolean_node (false, type); })
+ /* sqrt(x) > y is always true, if y is negative and we
+ don't care about NaNs, i.e. negative values of x. */
+ (if (cmp == NE_EXPR || !HONOR_NANS (@0))
+ { constant_boolean_node (true, type); })
+ /* sqrt(x) > y is the same as x >= 0, if y is negative. */
+ (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
+ (if (cmp == GT_EXPR || cmp == GE_EXPR)
+ (with
+ {
+ REAL_VALUE_TYPE c2;
+ real_arithmetic (&c2, MULT_EXPR,
+ &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
+ real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
+ }
+ (if (REAL_VALUE_ISINF (c2))
+ /* sqrt(x) > y is x == +Inf, when y is very large. */
+ (if (HONOR_INFINITIES (@0))
+ (eq @0 { build_real (TREE_TYPE (@0), c2); })
+ { constant_boolean_node (false, type); })
+ /* sqrt(x) > c is the same as x > c*c. */
+ (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
+ (if (cmp == LT_EXPR || cmp == LE_EXPR)
+ (with
+ {
+ REAL_VALUE_TYPE c2;
+ real_arithmetic (&c2, MULT_EXPR,
+ &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
+ real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
+ }
+ (if (REAL_VALUE_ISINF (c2))
+ (switch
+ /* sqrt(x) < y is always true, when y is a very large
+ value and we don't care about NaNs or Infinities. */
+ (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
+ { constant_boolean_node (true, type); })
+ /* sqrt(x) < y is x != +Inf when y is very large and we
+ don't care about NaNs. */
+ (if (! HONOR_NANS (@0))
+ (ne @0 { build_real (TREE_TYPE (@0), c2); }))
+ /* sqrt(x) < y is x >= 0 when y is very large and we
+ don't care about Infinities. */
+ (if (! HONOR_INFINITIES (@0))
+ (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
+ /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
+ (if (GENERIC)
+ (truth_andif
+ (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
+ (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
+ /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
+ (if (! HONOR_NANS (@0))
+ (cmp @0 { build_real (TREE_TYPE (@0), c2); })
+ /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
+ (if (GENERIC)
+ (truth_andif
+ (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
+ (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
/* Unordered tests if either argument is a NaN. */
(simplify
(if (tem && !TREE_OVERFLOW (tem))
(scmp @0 { tem; }))))))
+/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
+(for op (eq ne)
+ (simplify
+ (op (abs @0) zerop@1)
+ (op @0 @1)))
+
+/* From fold_sign_changed_comparison and fold_widened_comparison. */
+(for cmp (simple_comparison)
+ (simplify
+ (cmp (convert@0 @00) (convert?@1 @10))
+ (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
+ /* Disable this optimization if we're casting a function pointer
+ type on targets that require function pointer canonicalization. */
+ && !(targetm.have_canonicalize_funcptr_for_compare ()
+ && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
+ && single_use (@0))
+ (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
+ && (TREE_CODE (@10) == INTEGER_CST
+ || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
+ && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
+ || cmp == NE_EXPR
+ || cmp == EQ_EXPR)
+ && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
+ /* ??? The special-casing of INTEGER_CST conversion was in the original
+ code and here to avoid a spurious overflow flag on the resulting
+ constant which fold_convert produces. */
+ (if (TREE_CODE (@1) == INTEGER_CST)
+ (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
+ TREE_OVERFLOW (@1)); })
+ (cmp @00 (convert @1)))
+
+ (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
+ /* If possible, express the comparison in the shorter mode. */
+ (if ((cmp == EQ_EXPR || cmp == NE_EXPR
+ || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00)))
+ && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
+ || ((TYPE_PRECISION (TREE_TYPE (@00))
+ >= TYPE_PRECISION (TREE_TYPE (@10)))
+ && (TYPE_UNSIGNED (TREE_TYPE (@00))
+ == TYPE_UNSIGNED (TREE_TYPE (@10))))
+ || (TREE_CODE (@10) == INTEGER_CST
+ && (TREE_CODE (TREE_TYPE (@00)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (@00)) == BOOLEAN_TYPE)
+ && int_fits_type_p (@10, TREE_TYPE (@00)))))
+ (cmp @00 (convert @10))
+ (if (TREE_CODE (@10) == INTEGER_CST
+ && TREE_CODE (TREE_TYPE (@00)) == INTEGER_TYPE
+ && !int_fits_type_p (@10, TREE_TYPE (@00)))
+ (with
+ {
+ tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
+ tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
+ bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
+ bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
+ }
+ (if (above || below)
+ (if (cmp == EQ_EXPR || cmp == NE_EXPR)
+ { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
+ (if (cmp == LT_EXPR || cmp == LE_EXPR)
+ { constant_boolean_node (above ? true : false, type); }
+ (if (cmp == GT_EXPR || cmp == GE_EXPR)
+ { constant_boolean_node (above ? false : true, type); }))))))))))))
+
+(for cmp (eq ne)
+ /* A local variable can never be pointed to by
+ the default SSA name of an incoming parameter.
+ SSA names are canonicalized to 2nd place. */
+ (simplify
+ (cmp addr@0 SSA_NAME@1)
+ (if (SSA_NAME_IS_DEFAULT_DEF (@1)
+ && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
+ (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
+ (if (TREE_CODE (base) == VAR_DECL
+ && auto_var_in_fn_p (base, current_function_decl))
+ (if (cmp == NE_EXPR)
+ { constant_boolean_node (true, type); }
+ { constant_boolean_node (false, type); }))))))
/* Equality compare simplifications from fold_binary */
(for cmp (eq ne)
(simplify
(cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
(if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
- (cmp @0 (bit_xor @1 (convert @2))))))
+ (cmp @0 (bit_xor @1 (convert @2)))))
-/* Simplification of math builtins. */
+ (simplify
+ (cmp (convert? addr@0) integer_zerop)
+ (if (tree_single_nonzero_warnv_p (@0, NULL))
+ { constant_boolean_node (cmp == NE_EXPR, type); })))
+
+/* If we have (A & C) == C where C is a power of 2, convert this into
+ (A & C) != 0. Similarly for NE_EXPR. */
+(for cmp (eq ne)
+ icmp (ne eq)
+ (simplify
+ (cmp (bit_and@2 @0 integer_pow2p@1) @1)
+ (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
+
+/* If we have (A & C) != 0 where C is the sign bit of A, convert
+ this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
+(for cmp (eq ne)
+ ncmp (ge lt)
+ (simplify
+ (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && (TYPE_PRECISION (TREE_TYPE (@0))
+ == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
+ && element_precision (@2) >= element_precision (@0)
+ && wi::only_sign_bit_p (@1, element_precision (@0)))
+ (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
+ (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
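+/* E.g. for 32-bit unsigned x, (x & 0x80000000) != 0 becomes (int) x < 0
+ and (x & 0x80000000) == 0 becomes (int) x >= 0. */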
+
+/* When the addresses are not directly of decls compare base and offset.
+ This implements some remaining parts of fold_comparison address
+ comparisons but is still not a complete replacement for it. It is
+ good enough to make fold_stmt not regress when not dispatching to fold_binary. */
+(for cmp (simple_comparison)
+ (simplify
+ (cmp (convert1?@2 addr@0) (convert2? addr@1))
+ (with
+ {
+ HOST_WIDE_INT off0, off1;
+ tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
+ tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
+ if (base0 && TREE_CODE (base0) == MEM_REF)
+ {
+ off0 += mem_ref_offset (base0).to_short_addr ();
+ base0 = TREE_OPERAND (base0, 0);
+ }
+ if (base1 && TREE_CODE (base1) == MEM_REF)
+ {
+ off1 += mem_ref_offset (base1).to_short_addr ();
+ base1 = TREE_OPERAND (base1, 0);
+ }
+ }
+ (if (base0 && base1)
+ (with
+ {
+ int equal = 2;
+ if (decl_in_symtab_p (base0)
+ && decl_in_symtab_p (base1))
+ equal = symtab_node::get_create (base0)
+ ->equal_address_to (symtab_node::get_create (base1));
+ else if ((DECL_P (base0)
+ || TREE_CODE (base0) == SSA_NAME
+ || TREE_CODE (base0) == STRING_CST)
+ && (DECL_P (base1)
+ || TREE_CODE (base1) == SSA_NAME
+ || TREE_CODE (base1) == STRING_CST))
+ equal = (base0 == base1);
+ }
+ (if (equal == 1
+ && (cmp == EQ_EXPR || cmp == NE_EXPR
+ /* If the offsets are equal we can ignore overflow. */
+ || off0 == off1
+ || POINTER_TYPE_OVERFLOW_UNDEFINED
+ /* Or if we compare using pointers to decls or strings. */
+ || (POINTER_TYPE_P (TREE_TYPE (@2))
+ && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
+ (switch
+ (if (cmp == EQ_EXPR)
+ { constant_boolean_node (off0 == off1, type); })
+ (if (cmp == NE_EXPR)
+ { constant_boolean_node (off0 != off1, type); })
+ (if (cmp == LT_EXPR)
+ { constant_boolean_node (off0 < off1, type); })
+ (if (cmp == LE_EXPR)
+ { constant_boolean_node (off0 <= off1, type); })
+ (if (cmp == GE_EXPR)
+ { constant_boolean_node (off0 >= off1, type); })
+ (if (cmp == GT_EXPR)
+ { constant_boolean_node (off0 > off1, type); }))
+ (if (equal == 0
+ && DECL_P (base0) && DECL_P (base1)
+ /* If we compare this as integers require equal offset. */
+ && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
+ || off0 == off1))
+ (switch
+ (if (cmp == EQ_EXPR)
+ { constant_boolean_node (false, type); })
+ (if (cmp == NE_EXPR)
+ { constant_boolean_node (true, type); })))))))))
+
+/* Non-equality compare simplifications from fold_binary */
+(for cmp (lt gt le ge)
+ /* Comparisons with the highest or lowest possible integer of
+ the specified precision will have known values. */
+ (simplify
+ (cmp (convert?@2 @0) INTEGER_CST@1)
+ (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
+ && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
+ (with
+ {
+ tree arg1_type = TREE_TYPE (@1);
+ unsigned int prec = TYPE_PRECISION (arg1_type);
+ wide_int max = wi::max_value (arg1_type);
+ wide_int signed_max = wi::max_value (prec, SIGNED);
+ wide_int min = wi::min_value (arg1_type);
+ }
+ (switch
+ (if (wi::eq_p (@1, max))
+ (switch
+ (if (cmp == GT_EXPR)
+ { constant_boolean_node (false, type); })
+ (if (cmp == GE_EXPR)
+ (eq @2 @1))
+ (if (cmp == LE_EXPR)
+ { constant_boolean_node (true, type); })
+ (if (cmp == LT_EXPR)
+ (ne @2 @1))))
+ (if (wi::eq_p (@1, min))
+ (switch
+ (if (cmp == LT_EXPR)
+ { constant_boolean_node (false, type); })
+ (if (cmp == LE_EXPR)
+ (eq @2 @1))
+ (if (cmp == GE_EXPR)
+ { constant_boolean_node (true, type); })
+ (if (cmp == GT_EXPR)
+ (ne @2 @1))))
+ (if (wi::eq_p (@1, max - 1))
+ (switch
+ (if (cmp == GT_EXPR)
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
+ (if (cmp == LE_EXPR)
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
+ (if (wi::eq_p (@1, min + 1))
+ (switch
+ (if (cmp == GE_EXPR)
+ (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
+ (if (cmp == LT_EXPR)
+ (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
+ (if (wi::eq_p (@1, signed_max)
+ && TYPE_UNSIGNED (arg1_type)
+ /* We will flip the signedness of the comparison operator
+ associated with the mode of @1, so the sign bit is
+ specified by this mode. Check that @1 is the signed
+ max associated with this sign bit. */
+ && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
+ /* signed_type does not work on pointer types. */
+ && INTEGRAL_TYPE_P (arg1_type))
+ /* The following case also applies to X < signed_max+1
+ and X >= signed_max+1 because of the previous transformations. */
+ (if (cmp == LE_EXPR || cmp == GT_EXPR)
+ (with { tree st = signed_type_for (arg1_type); }
+ (if (cmp == LE_EXPR)
+ (ge (convert:st @0) { build_zero_cst (st); })
+ (lt (convert:st @0) { build_zero_cst (st); }))))))))))
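+/* E.g. for 32-bit unsigned x, x <= 0x7fffffff becomes (int) x >= 0 and
+ x > 0x7fffffff becomes (int) x < 0. */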
+
+(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
+ /* If the second operand is NaN, the result is constant. */
+ (simplify
+ (cmp @0 REAL_CST@1)
+ (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
+ && (cmp != LTGT_EXPR || ! flag_trapping_math))
+ { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
+ ? false : true, type); })))
+
+/* bool_var != 0 becomes bool_var. */
+(simplify
+ (ne @0 integer_zerop)
+ (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
+ && types_match (type, TREE_TYPE (@0)))
+ (non_lvalue @0)))
+/* bool_var == 1 becomes bool_var. */
+(simplify
+ (eq @0 integer_onep)
+ (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
+ && types_match (type, TREE_TYPE (@0)))
+ (non_lvalue @0)))
+/* Do not simplify
+ bool_var == 0 to !bool_var or
+ bool_var != 1 to !bool_var
+ here because that is only a win in assignment context: as long
+ as we require a tcc_comparison in GIMPLE_CONDs, it would
+ replace if (x == 0) with tem = ~x; if (tem != 0), which is
+ clearly less optimal and which we'll transform again in forwprop. */
+
+
+/* Simplification of math builtins. These rules must all be optimizations
+ as well as IL simplifications. If there is a possibility that the new
+ form could be a pessimization, the rule should go in the canonicalization
+ section that follows this one.
+
+ Rules can generally go in this section if they satisfy one of
+ the following:
+
+ - the rule describes an identity
+
+ - the rule replaces calls with something as simple as addition or
+ multiplication
+
+ - the rule contains unary calls only and simplifies the surrounding
+ arithmetic. (The idea here is to exclude non-unary calls in which
+ one operand is constant and in which the call is known to be cheap
+ when the operand has that value.) */
-/* fold_builtin_logarithm */
(if (flag_unsafe_math_optimizations)
+ /* Simplify sqrt(x) * sqrt(x) -> x. */
+ (simplify
+ (mult (SQRT@1 @0) @1)
+ (if (!HONOR_SNANS (type))
+ @0))
+
+ /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
+ (for root (SQRT CBRT)
+ (simplify
+ (mult (root:s @0) (root:s @1))
+ (root (mult @0 @1))))
+
+ /* Simplify expN(x) * expN(y) -> expN(x+y). */
+ (for exps (EXP EXP2 EXP10 POW10)
+ (simplify
+ (mult (exps:s @0) (exps:s @1))
+ (exps (plus @0 @1))))
+
+ /* Simplify a/root(b/c) into a*root(c/b). */
+ (for root (SQRT CBRT)
+ (simplify
+ (rdiv @0 (root:s (rdiv:s @1 @2)))
+ (mult @0 (root (rdiv @2 @1)))))
+
+ /* Simplify x/expN(y) into x*expN(-y). */
+ (for exps (EXP EXP2 EXP10 POW10)
+ (simplify
+ (rdiv @0 (exps:s @1))
+ (mult @0 (exps (negate @1)))))
+
/* Special case, optimize logN(expN(x)) = x. */
- (for logs (LOG LOG2 LOG10)
- exps (EXP EXP2 EXP10)
+ (for logs (LOG LOG2 LOG10 LOG10)
+ exps (EXP EXP2 EXP10 POW10)
(simplify
(logs (exps @0))
@0))
+
/* Optimize logN(func()) for various exponential functions. We
want to determine the value "x" and the power "exponent" in
order to transform logN(x**exponent) into exponent*logN(x). */
- (for logs (LOG LOG LOG LOG
- LOG2 LOG2 LOG2 LOG2
- LOG10 LOG10 LOG10 LOG10)
- exps (EXP EXP2 EXP10 POW10)
+ (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
+ exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
(simplify
(logs (exps @0))
(with {
switch (exps)
{
CASE_FLT_FN (BUILT_IN_EXP):
- /* Prepare to do logN(exp(exponent) -> exponent*logN(e). */
- x = build_real (type, real_value_truncate (TYPE_MODE (type),
- dconst_e ()));
+ /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
+ x = build_real_truncate (type, dconst_e ());
break;
CASE_FLT_FN (BUILT_IN_EXP2):
- /* Prepare to do logN(exp2(exponent) -> exponent*logN(2). */
+ /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
x = build_real (type, dconst2);
break;
CASE_FLT_FN (BUILT_IN_EXP10):
CASE_FLT_FN (BUILT_IN_POW10):
- /* Prepare to do logN(exp10(exponent) -> exponent*logN(10). */
+ /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
{
REAL_VALUE_TYPE dconst10;
real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
x = build_real (type, dconst10);
}
break;
+ default:
+ gcc_unreachable ();
}
}
(mult (logs { x; }) @0))))
+
(for logs (LOG LOG
LOG2 LOG2
LOG10 LOG10)
switch (exps)
{
CASE_FLT_FN (BUILT_IN_SQRT):
- /* Prepare to do logN(sqrt(x) -> 0.5*logN(x). */
+ /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
x = build_real (type, dconsthalf);
break;
CASE_FLT_FN (BUILT_IN_CBRT):
- /* Prepare to do logN(cbrt(x) -> (1/3)*logN(x). */
- x = build_real (type, real_value_truncate (TYPE_MODE (type),
- dconst_third ()));
+ /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
+ x = build_real_truncate (type, dconst_third ());
break;
+ default:
+ gcc_unreachable ();
}
}
(mult { x; } (logs @0)))))
- /* logN(pow(x,exponent) -> exponent*logN(x). */
+
+ /* logN(pow(x,exponent)) -> exponent*logN(x). */
(for logs (LOG LOG2 LOG10)
pows (POW)
(simplify
(logs (pows @0 @1))
- (mult @1 (logs @0)))))
+ (mult @1 (logs @0))))
+
+ (for sqrts (SQRT)
+ cbrts (CBRT)
+ exps (EXP EXP2 EXP10 POW10)
+ /* sqrt(expN(x)) -> expN(x*0.5). */
+ (simplify
+ (sqrts (exps @0))
+ (exps (mult @0 { build_real (type, dconsthalf); })))
+ /* cbrt(expN(x)) -> expN(x/3). */
+ (simplify
+ (cbrts (exps @0))
+ (exps (mult @0 { build_real_truncate (type, dconst_third ()); }))))
+
+ /* tan(atan(x)) -> x. */
+ (for tans (TAN)
+ atans (ATAN)
+ (simplify
+ (tans (atans @0))
+ @0)))
+
+/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
+(simplify
+ (CABS (complex:c @0 real_zerop@1))
+ (abs @0))
+
+/* trunc(trunc(x)) -> trunc(x), etc. */
+(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
+ (simplify
+ (fns (fns @0))
+ (fns @0)))
+/* f(x) -> x if x is integer valued and f does nothing for such values. */
+(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT)
+ (simplify
+ (fns integer_valued_real_p@0)
+ @0))
+/* Same for rint.  We have to check flag_trapping_math because
+   integer_valued_real_p accepts +Inf, -Inf and NaNs as integers,
+   and dropping the call changes which FP exceptions are raised.  */
+(if (!flag_trapping_math)
+ (simplify
+ (RINT integer_valued_real_p@0)
+ @0))
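+
   For example (a sketch in C; the second function relies on the conversion
   from int being recognized as integer valued):

     #include <math.h>

     double f (double x)
     {
       /* trunc (trunc (x)) -> trunc (x).  */
       return trunc (trunc (x));
     }

     double g (int n)
     {
       /* (double) n is integer valued, so floor ((double) n) -> (double) n.  */
       return floor ((double) n);
     }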
+
+/* Canonicalization of sequences of math builtins. These rules represent
+ IL simplifications but are not necessarily optimizations.
+
+ The sincos pass is responsible for picking "optimal" implementations
+ of math builtins, which may be more complicated and can sometimes go
+ the other way, e.g. converting pow into a sequence of sqrts.
+ We only want to do these canonicalizations before the pass has run. */
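+
   To make the ordering concrete (a sketch, not taken from the patch):

     #include <math.h>

     double f (double x)
     {
       /* Canonicalized early to pow (x, 0.25).  After the sincos pass has
          run, the opposite expansion of pow into sqrt chains may be the
          better code, so canonicalize_math_p () gates these rules off.  */
       return sqrt (sqrt (x));
     }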
+
+(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
+ /* Simplify tan(x) * cos(x) -> sin(x). */
+ (simplify
+ (mult:c (TAN:s @0) (COS:s @0))
+ (SIN @0))
+
+ /* Simplify x * pow(x,c) -> pow(x,c+1). */
+ (simplify
+ (mult @0 (POW:s @0 REAL_CST@1))
+ (if (!TREE_OVERFLOW (@1))
+ (POW @0 (plus @1 { build_one_cst (type); }))))
+
+ /* Simplify sin(x) / cos(x) -> tan(x). */
+ (simplify
+ (rdiv (SIN:s @0) (COS:s @0))
+ (TAN @0))
+
+ /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
+ (simplify
+ (rdiv (COS:s @0) (SIN:s @0))
+ (rdiv { build_one_cst (type); } (TAN @0)))
+
+ /* Simplify sin(x) / tan(x) -> cos(x). */
+ (simplify
+ (rdiv (SIN:s @0) (TAN:s @0))
+ (if (! HONOR_NANS (@0)
+ && ! HONOR_INFINITIES (@0))
+    (COS @0)))
+
+ /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
+ (simplify
+ (rdiv (TAN:s @0) (SIN:s @0))
+ (if (! HONOR_NANS (@0)
+ && ! HONOR_INFINITIES (@0))
+ (rdiv { build_one_cst (type); } (COS @0))))
+
+ /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
+ (simplify
+ (mult (POW:s @0 @1) (POW:s @0 @2))
+ (POW @0 (plus @1 @2)))
+
+ /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
+ (simplify
+ (mult (POW:s @0 @1) (POW:s @2 @1))
+ (POW (mult @0 @2) @1))
+
+ /* Simplify pow(x,c) / x -> pow(x,c-1). */
+ (simplify
+ (rdiv (POW:s @0 REAL_CST@1) @0)
+ (if (!TREE_OVERFLOW (@1))
+ (POW @0 (minus @1 { build_one_cst (type); }))))
+
+ /* Simplify x / pow (y,z) -> x * pow(y,-z). */
+ (simplify
+ (rdiv @0 (POW:s @1 @2))
+ (mult @0 (POW @1 (negate @2))))
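+
   The pow rules compose exponents rather than values, e.g. (hedged C
   sketch, assuming -funsafe-math-optimizations):

     #include <math.h>

     double f (double x, double y, double z)
     {
       /* pow (x, y) * pow (x, z) may become pow (x, y + z).  */
       return pow (x, y) * pow (x, z);
     }

     double g (double x, double y, double z)
     {
       /* x / pow (y, z) may become x * pow (y, -z).  */
       return x / pow (y, z);
     }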
+
+ (for sqrts (SQRT)
+ cbrts (CBRT)
+ pows (POW)
+ /* sqrt(sqrt(x)) -> pow(x,1/4). */
+ (simplify
+ (sqrts (sqrts @0))
+ (pows @0 { build_real (type, dconst_quarter ()); }))
+ /* sqrt(cbrt(x)) -> pow(x,1/6). */
+ (simplify
+ (sqrts (cbrts @0))
+ (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
+ /* cbrt(sqrt(x)) -> pow(x,1/6). */
+ (simplify
+ (cbrts (sqrts @0))
+ (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
+ /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
+ (simplify
+ (cbrts (cbrts tree_expr_nonnegative_p@0))
+ (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
+ /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
+ (simplify
+ (sqrts (pows @0 @1))
+ (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
+ /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
+ (simplify
+ (cbrts (pows tree_expr_nonnegative_p@0 @1))
+ (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); }))))
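+
   For instance (illustrative C):

     #include <math.h>

     double f (double x)
     {
       /* sqrt (cbrt (x)) may become pow (x, 1.0 / 6.0).  */
       return sqrt (cbrt (x));
     }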
+
+ /* cabs(x+xi) -> fabs(x)*sqrt(2). */
+ (simplify
+ (CABS (complex @0 @0))
+ (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); })))
+
+(if (canonicalize_math_p ())
+ /* floor(x) -> trunc(x) if x is nonnegative. */
+ (for floors (FLOOR)
+ truncs (TRUNC)
+ (simplify
+ (floors tree_expr_nonnegative_p@0)
+ (truncs @0))))
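+
   A value converted from an unsigned type is one easy way to satisfy
   tree_expr_nonnegative_p (illustrative C):

     #include <math.h>

     double f (unsigned int n)
     {
       double d = n;      /* provably nonnegative */
       return floor (d);  /* may become trunc (d) */
     }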
+
+(match double_value_p
+ @0
+ (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
+(for froms (BUILT_IN_TRUNCL
+ BUILT_IN_FLOORL
+ BUILT_IN_CEILL
+ BUILT_IN_ROUNDL
+ BUILT_IN_NEARBYINTL
+ BUILT_IN_RINTL)
+ tos (BUILT_IN_TRUNC
+ BUILT_IN_FLOOR
+ BUILT_IN_CEIL
+ BUILT_IN_ROUND
+ BUILT_IN_NEARBYINT
+ BUILT_IN_RINT)
+ /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
+ (if (optimize && canonicalize_math_p ())
+ (simplify
+ (froms (convert double_value_p@0))
+ (convert (tos @0)))))
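+
   The shape being matched is a rounding call applied to a widened value,
   e.g. (sketch in C):

     #include <math.h>

     long double f (double d)
     {
       /* truncl ((long double) d) may become (long double) trunc (d),
          doing the rounding in the narrower type.  */
       return truncl (d);
     }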
+
+(match float_value_p
+ @0
+ (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
+(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
+ BUILT_IN_FLOORL BUILT_IN_FLOOR
+ BUILT_IN_CEILL BUILT_IN_CEIL
+ BUILT_IN_ROUNDL BUILT_IN_ROUND
+ BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
+ BUILT_IN_RINTL BUILT_IN_RINT)
+ tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
+ BUILT_IN_FLOORF BUILT_IN_FLOORF
+ BUILT_IN_CEILF BUILT_IN_CEILF
+ BUILT_IN_ROUNDF BUILT_IN_ROUNDF
+ BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
+ BUILT_IN_RINTF BUILT_IN_RINTF)
+ /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
+ if x is a float. */
+ (if (optimize && canonicalize_math_p ())
+ (simplify
+ (froms (convert float_value_p@0))
+ (convert (tos @0)))))
+
+/* cproj(x) -> x if we're ignoring infinities. */
+(simplify
+ (CPROJ @0)
+ (if (!HONOR_INFINITIES (type))
+ @0))
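+
   For example (illustrative C, compiled with -ffinite-math-only):

     #include <complex.h>

     double _Complex f (double _Complex z)
     {
       /* With infinities ignored, cproj (z) may fold to z.  */
       return cproj (z);
     }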
+
+/* If the real part is inf and the imag part is known to be
+ nonnegative, return (inf + 0i). */
+(simplify
+ (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
+ (if (real_isinf (TREE_REAL_CST_PTR (@0)))
+ { build_complex_inf (type, false); }))
+
+/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
+(simplify
+ (CPROJ (complex @0 REAL_CST@1))
+ (if (real_isinf (TREE_REAL_CST_PTR (@1)))
+ { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
+
/* Narrowing of arithmetic and logical operations.

   If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion, convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behaviour), perform the
   operation and convert the result to the desired type.  */
(for op (plus minus)
(simplify
- (convert (op@4 (convert@2 @0) (convert@3 @1)))
+ (convert (op:s (convert@2 @0) (convert@3 @1)))
(if (INTEGRAL_TYPE_P (type)
/* We check for type compatibility between @0 and @1 below,
so there's no need to check that @1/@3 are integral types. */
/* The inner conversion must be a widening conversion. */
&& TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
&& types_match (@0, @1)
- && types_match (@0, type)
- && single_use (@4))
+ && types_match (@0, type))
(if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
- (convert (op @0 @1)))
- (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
- (convert (op (convert:utype @0) (convert:utype @1)))))))
+ (convert (op @0 @1))
+ (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
+ (convert (op (convert:utype @0) (convert:utype @1))))))))
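+
   The classic source of this pattern is C's integer promotions, e.g.
   (sketch):

     short f (short a, short b)
     {
       /* GIMPLE sees (short) ((int) a + (int) b); the addition can be done
          in unsigned short and converted back, skipping the widening.  */
       return a + b;
     }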
/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behaviour for the
   arithmetic operation.  */
(for op (minus plus)
- (simplify
- (bit_and (op@5 (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
- (if (INTEGRAL_TYPE_P (type)
- /* We check for type compatibility between @0 and @1 below,
- so there's no need to check that @1/@3 are integral types. */
- && INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && INTEGRAL_TYPE_P (TREE_TYPE (@2))
- /* The precision of the type of each operand must match the
- precision of the mode of each operand, similarly for the
- result. */
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
- && (TYPE_PRECISION (TREE_TYPE (@1))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
- && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
- /* The inner conversion must be a widening conversion. */
- && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
- && types_match (@0, @1)
- && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
- <= TYPE_PRECISION (TREE_TYPE (@0)))
- && (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
- || tree_int_cst_sgn (@4) >= 0)
- && single_use (@5))
- (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
- (with { tree ntype = TREE_TYPE (@0); }
- (convert (bit_and (op @0 @1) (convert:ntype @4)))))
- (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
- (convert (bit_and (op (convert:utype @0) (convert:utype @1))
- (convert:utype @4)))))))
+ (simplify
+ (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
+ (if (INTEGRAL_TYPE_P (type)
+ /* We check for type compatibility between @0 and @1 below,
+ so there's no need to check that @1/@3 are integral types. */
+ && INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && INTEGRAL_TYPE_P (TREE_TYPE (@2))
+ /* The precision of the type of each operand must match the
+ precision of the mode of each operand, similarly for the
+ result. */
+ && (TYPE_PRECISION (TREE_TYPE (@0))
+ == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
+ && (TYPE_PRECISION (TREE_TYPE (@1))
+ == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
+ && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+ /* The inner conversion must be a widening conversion. */
+ && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
+ && types_match (@0, @1)
+ && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
+ <= TYPE_PRECISION (TREE_TYPE (@0)))
+ && (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
+ || tree_int_cst_sgn (@4) >= 0))
+ (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
+ (with { tree ntype = TREE_TYPE (@0); }
+ (convert (bit_and (op @0 @1) (convert:ntype @4))))
+ (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
+ (convert (bit_and (op (convert:utype @0) (convert:utype @1))
+ (convert:utype @4))))))))
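+
   For example (sketch):

     int f (short a, short b)
     {
       /* ((int) a - (int) b) & 0xff: the mask discards every bit the
          narrow type cannot represent, so the subtraction can be done
          in unsigned short.  */
       return (a - b) & 0xff;
     }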
+
+/* Transform (@0 < @1 and @0 < @2) to use min,
+   (@0 > @1 and @0 > @2) to use max.  */
+(for op (lt le gt ge)
+ ext (min min max max)
+ (simplify
+ (bit_and (op:s @0 @1) (op:s @0 @2))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
+ (op @0 (ext @1 @2)))))
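+
   For example (sketch; the pattern matches the bitwise form, which is how
   the && typically reaches GIMPLE once if-converted):

     int f (int x, int a, int b)
     {
       /* May become x < MIN (a, b).  */
       return (x < a) & (x < b);
     }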