return negate_expr_p (TREE_OPERAND (t, 0));
case PLUS_EXPR:
- if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
- || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
+ if (HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
+ || HONOR_SIGNED_ZEROS (element_mode (type)))
return false;
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1))
case MINUS_EXPR:
/* We can't turn -(A-B) into B-A when we honor signed zeros. */
- return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ return !HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
+ && !HONOR_SIGNED_ZEROS (element_mode (type))
&& reorder_operands_p (TREE_OPERAND (t, 0),
TREE_OPERAND (t, 1));
/* Fall through. */
case RDIV_EXPR:
- if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
+ if (! HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (TREE_TYPE (t))))
return negate_expr_p (TREE_OPERAND (t, 1))
|| negate_expr_p (TREE_OPERAND (t, 0));
break;
break;
case PLUS_EXPR:
- if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
+ if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
+ && !HONOR_SIGNED_ZEROS (element_mode (type)))
{
/* -(A + B) -> (-B) - A. */
if (negate_expr_p (TREE_OPERAND (t, 1))
case MINUS_EXPR:
/* - (A - B) -> B - A */
- if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
+ && !HONOR_SIGNED_ZEROS (element_mode (type))
&& reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
return fold_build2_loc (loc, MINUS_EXPR, type,
TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
/* Fall through. */
case RDIV_EXPR:
- if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
+ if (! HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type)))
{
tem = TREE_OPERAND (t, 1);
if (negate_expr_p (tem))
enum tree_code rcode, tree truth_type,
tree ll_arg, tree lr_arg)
{
- bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
+ bool honor_nans = HONOR_NANS (element_mode (ll_arg));
enum comparison_code lcompcode = comparison_to_compcode (lcode);
enum comparison_code rcompcode = comparison_to_compcode (rcode);
int compcode;
Note that all these transformations are correct if A is
NaN, since the two alternatives (A and -A) are also NaNs. */
- if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ if (!HONOR_SIGNED_ZEROS (element_mode (type))
&& (FLOAT_TYPE_P (TREE_TYPE (arg01))
? real_zerop (arg01)
: integer_zerop (arg01))
both transformations are correct when A is NaN: A != 0
is then true, and A == 0 is false. */
- if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ if (!HONOR_SIGNED_ZEROS (element_mode (type))
&& integer_zerop (arg01) && integer_zerop (arg2))
{
if (comp_code == NE_EXPR)
a number and A is not. The conditions in the original
expressions will be false, so all four give B. The min()
and max() versions would give a NaN instead. */
- if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ if (!HONOR_SIGNED_ZEROS (element_mode (type))
&& operand_equal_for_comparison_p (arg01, arg2, arg00)
/* Avoid these transformations if the COND_EXPR may be used
as an lvalue in the C++ front-end. PR c++/19199. */
operand which will be used if they are equal first
so that we can convert this back to the
corresponding COND_EXPR. */
- if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1))))
+ if (!HONOR_NANS (element_mode (arg1)))
{
comp_op0 = fold_convert_loc (loc, comp_type, comp_op0);
comp_op1 = fold_convert_loc (loc, comp_type, comp_op1);
case GT_EXPR:
case UNGE_EXPR:
case UNGT_EXPR:
- if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1))))
+ if (!HONOR_NANS (element_mode (arg1)))
{
comp_op0 = fold_convert_loc (loc, comp_type, comp_op0);
comp_op1 = fold_convert_loc (loc, comp_type, comp_op1);
}
break;
case UNEQ_EXPR:
- if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1))))
+ if (!HONOR_NANS (element_mode (arg1)))
return pedantic_non_lvalue_loc (loc,
fold_convert_loc (loc, type, arg2));
break;
case LTGT_EXPR:
- if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1))))
+ if (!HONOR_NANS (element_mode (arg1)))
return pedantic_non_lvalue_loc (loc,
fold_convert_loc (loc, type, arg1));
break;
return false;
/* Don't allow the fold with -fsignaling-nans. */
- if (HONOR_SNANS (TYPE_MODE (type)))
+ if (HONOR_SNANS (element_mode (type)))
return false;
/* Allow the fold if zeros aren't signed, or their sign isn't important. */
- if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
+ if (!HONOR_SIGNED_ZEROS (element_mode (type)))
return true;
/* In a vector or complex, we would need to check the sign of all zeros. */
In this situation, there is only one case we can return true for.
X - 0 is the same as X unless rounding towards -infinity is
supported. */
- return negate && !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type));
+ return negate && !HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type));
}
/* Subroutine of fold() that checks comparisons of built-in math
{
case EQ_EXPR:
if (! FLOAT_TYPE_P (TREE_TYPE (arg0))
- || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))))
+ || ! HONOR_NANS (element_mode (arg0)))
return constant_boolean_node (1, type);
break;
case GE_EXPR:
case LE_EXPR:
if (! FLOAT_TYPE_P (TREE_TYPE (arg0))
- || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))))
+ || ! HONOR_NANS (element_mode (arg0)))
return constant_boolean_node (1, type);
return fold_build2_loc (loc, EQ_EXPR, type, arg0, arg1);
/* For NE, we can only do this simplification if integer
or we don't honor IEEE floating point NaNs. */
if (FLOAT_TYPE_P (TREE_TYPE (arg0))
- && HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))))
+ && HONOR_NANS (element_mode (arg0)))
break;
/* ... fall through ... */
case GT_EXPR:
/* Fold __complex__ ( x, 0 ) + __complex__ ( 0, y )
to __complex__ ( x, y ). This is not the same for SNaNs or
if signed zeros are involved. */
- if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
+ if (!HONOR_SNANS (element_mode (arg0))
+ && !HONOR_SIGNED_ZEROS (element_mode (arg0))
&& COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)))
{
tree rtype = TREE_TYPE (TREE_TYPE (arg0));
/* Fold __complex__ ( x, 0 ) - __complex__ ( 0, y ) to
__complex__ ( x, -y ). This is not the same for SNaNs or if
signed zeros are involved. */
- if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
+ if (!HONOR_SNANS (element_mode (arg0))
+ && !HONOR_SIGNED_ZEROS (element_mode (arg0))
&& COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)))
{
tree rtype = TREE_TYPE (TREE_TYPE (arg0));
/* Fold z * +-I to __complex__ (-+__imag z, +-__real z).
This is not the same for NaNs or if signed zeros are
involved. */
- if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
+ if (!HONOR_NANS (element_mode (arg0))
+ && !HONOR_SIGNED_ZEROS (element_mode (arg0))
&& COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0))
&& TREE_CODE (arg1) == COMPLEX_CST
&& real_zerop (TREE_REALPART (arg1)))
/* Optimize sqrt(x)*sqrt(x) as x. */
if (BUILTIN_SQRT_P (fcode0)
&& operand_equal_p (arg00, arg10, 0)
- && ! HONOR_SNANS (TYPE_MODE (type)))
+ && ! HONOR_SNANS (element_mode (type)))
return arg00;
/* Optimize root(x)*root(y) as root(x*y). */
if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
&& TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
- prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
+ prec = element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)));
wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
if (mask == -1)
tree arg00 = CALL_EXPR_ARG (arg0, 0);
tree arg01 = CALL_EXPR_ARG (arg1, 0);
- if (! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg00)))
- && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg00)))
+ if (! HONOR_NANS (element_mode (arg00))
+ && ! HONOR_INFINITIES (element_mode (arg00))
&& operand_equal_p (arg00, arg01, 0))
{
tree cosfn = mathfn_built_in (type, BUILT_IN_COS);
tree arg00 = CALL_EXPR_ARG (arg0, 0);
tree arg01 = CALL_EXPR_ARG (arg1, 0);
- if (! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg00)))
- && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg00)))
+ if (! HONOR_NANS (element_mode (arg00))
+ && ! HONOR_INFINITIES (element_mode (arg00))
&& operand_equal_p (arg00, arg01, 0))
{
tree cosfn = mathfn_built_in (type, BUILT_IN_COS);
strict_overflow_p = false;
if (code == GE_EXPR
&& (integer_zerop (arg1)
- || (! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))
+ || (! HONOR_NANS (element_mode (arg0))
&& real_zerop (arg1)))
&& tree_expr_nonnegative_warnv_p (arg0, &strict_overflow_p))
{
&& TYPE_UNSIGNED (TREE_TYPE (arg0))
&& CONVERT_EXPR_P (arg1)
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
- && (TYPE_PRECISION (TREE_TYPE (arg1))
- >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0))))
+ && (element_precision (TREE_TYPE (arg1))
+ >= element_precision (TREE_TYPE (TREE_OPERAND (arg1, 0))))
&& (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0)))
- || (TYPE_PRECISION (TREE_TYPE (arg1))
- == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)))))
+ || (element_precision (TREE_TYPE (arg1))
+ == element_precision (TREE_TYPE (TREE_OPERAND (arg1, 0)))))
&& integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
{
tem = build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
if (COMPARISON_CLASS_P (arg0)
&& operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
arg1, TREE_OPERAND (arg0, 1))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
+ && !HONOR_SIGNED_ZEROS (element_mode (arg1)))
{
tem = fold_cond_expr_with_comparison (loc, type, arg0, op1, op2);
if (tem)
&& operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
op2,
TREE_OPERAND (arg0, 1))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op2))))
+ && !HONOR_SIGNED_ZEROS (element_mode (op2)))
{
location_t loc0 = expr_location_or (arg0, loc);
tem = fold_invert_truthvalue (loc0, arg0);
CASE_FLT_FN (BUILT_IN_SQRT):
/* sqrt(-0.0) is -0.0. */
- if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
+ if (!HONOR_SIGNED_ZEROS (element_mode (type)))
return true;
return tree_expr_nonnegative_warnv_p (arg0,
strict_overflow_p);
case MULT_EXPR:
case RDIV_EXPR:
- if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (exp))))
+ if (HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (exp)))
return NULL_TREE;
arg0 = fold_strip_sign_ops (TREE_OPERAND (exp, 0));
arg1 = fold_strip_sign_ops (TREE_OPERAND (exp, 1));
/* Generic tree predicates we inherit. */
(define_predicates
integer_onep integer_zerop integer_all_onesp integer_minus_onep
- integer_each_onep
+ integer_each_onep integer_truep
real_zerop real_onep real_minus_onep
CONSTANT_CLASS_P
tree_expr_nonnegative_p)
is volatile. */
(simplify
(minus @0 @0)
- (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
+ (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (element_mode (type)))
{ build_zero_cst (type); }))
(simplify
negative value by 0 gives -0, not +0. */
(simplify
(mult @0 real_zerop@1)
- (if (!HONOR_NANS (TYPE_MODE (type))
- && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
+ (if (!HONOR_NANS (element_mode (type))
+ && !HONOR_SIGNED_ZEROS (element_mode (type)))
@1))
/* In IEEE floating point, x*1 is not equivalent to x for snans.
Likewise for complex arithmetic with signed zeros. */
(simplify
(mult @0 real_onep)
- (if (!HONOR_SNANS (TYPE_MODE (type))
- && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ (if (!HONOR_SNANS (element_mode (type))
+ && (!HONOR_SIGNED_ZEROS (element_mode (type))
|| !COMPLEX_FLOAT_TYPE_P (type)))
(non_lvalue @0)))
/* Transform x * -1.0 into -x. */
(simplify
(mult @0 real_minus_onep)
- (if (!HONOR_SNANS (TYPE_MODE (type))
- && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ (if (!HONOR_SNANS (element_mode (type))
+ && (!HONOR_SIGNED_ZEROS (element_mode (type))
|| !COMPLEX_FLOAT_TYPE_P (type)))
(negate @0)))
/* X / -1 is -X. */
(for div (trunc_div ceil_div floor_div round_div exact_div)
(simplify
- (div @0 INTEGER_CST@1)
- (if (!TYPE_UNSIGNED (type)
- && wi::eq_p (@1, -1))
+ (div @0 integer_minus_onep@1)
+ (if (!TYPE_UNSIGNED (type))
(negate @0))))
/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
(simplify
(floor_div @0 @1)
- (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type))
+ (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
+ && TYPE_UNSIGNED (type))
(trunc_div @0 @1)))
/* Optimize A / A to 1.0 if we don't care about
- NaNs or Infinities. Skip the transformation
- for non-real operands. */
+ NaNs or Infinities. */
(simplify
(rdiv @0 @0)
- (if (SCALAR_FLOAT_TYPE_P (type)
- && ! HONOR_NANS (TYPE_MODE (type))
- && ! HONOR_INFINITIES (TYPE_MODE (type)))
- { build_real (type, dconst1); })
- /* The complex version of the above A / A optimization. */
- (if (COMPLEX_FLOAT_TYPE_P (type)
- && ! HONOR_NANS (TYPE_MODE (TREE_TYPE (type)))
- && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (type))))
- { build_complex (type, build_real (TREE_TYPE (type), dconst1),
- build_real (TREE_TYPE (type), dconst0)); }))
+ (if (FLOAT_TYPE_P (type)
+ && ! HONOR_NANS (element_mode (type))
+ && ! HONOR_INFINITIES (element_mode (type)))
+ { build_one_cst (type); }))
+
+/* Optimize -A / A to -1.0 if we don't care about
+ NaNs or Infinities. */
+(simplify
+ (rdiv:c @0 (negate @0))
+ (if (FLOAT_TYPE_P (type)
+ && ! HONOR_NANS (element_mode (type))
+ && ! HONOR_INFINITIES (element_mode (type)))
+ { build_minus_one_cst (type); }))
/* In IEEE floating point, x/1 is not equivalent to x for snans. */
(simplify
(rdiv @0 real_onep)
- (if (!HONOR_SNANS (TYPE_MODE (type)))
+ (if (!HONOR_SNANS (element_mode (type)))
(non_lvalue @0)))
/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
(simplify
(rdiv @0 real_minus_onep)
- (if (!HONOR_SNANS (TYPE_MODE (type)))
+ (if (!HONOR_SNANS (element_mode (type)))
(negate @0)))
/* If ARG1 is a constant, we can convert this to a multiply by the
{ build_zero_cst (type); })
/* X % -1 is zero. */
(simplify
- (mod @0 INTEGER_CST@1)
- (if (!TYPE_UNSIGNED (type)
- && wi::eq_p (@1, -1))
+ (mod @0 integer_minus_onep@1)
+ (if (!TYPE_UNSIGNED (type))
{ build_zero_cst (type); })))
/* X % -C is the same as X % C. */
(match (logical_inverted_value @0)
(bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
- (eq @0 integer_zerop)
- (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
+ (eq @0 integer_zerop))
(match (logical_inverted_value @0)
- (ne truth_valued_p@0 integer_onep)
- (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
+ (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
- (bit_xor truth_valued_p@0 integer_onep)
- (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))))
+ (bit_xor truth_valued_p@0 integer_truep))
/* X & !X -> 0. */
(simplify
(simplify
(minus (convert (add @0 @1))
(convert @0))
- (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
+ (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
/* For integer types, if A has a smaller type
than T the result depends on the possible
overflow in P + A.
int inside_int = INTEGRAL_TYPE_P (inside_type);
int inside_ptr = POINTER_TYPE_P (inside_type);
int inside_float = FLOAT_TYPE_P (inside_type);
- int inside_vec = TREE_CODE (inside_type) == VECTOR_TYPE;
+ int inside_vec = VECTOR_TYPE_P (inside_type);
unsigned int inside_prec = TYPE_PRECISION (inside_type);
int inside_unsignedp = TYPE_UNSIGNED (inside_type);
int inter_int = INTEGRAL_TYPE_P (inter_type);
int inter_ptr = POINTER_TYPE_P (inter_type);
int inter_float = FLOAT_TYPE_P (inter_type);
- int inter_vec = TREE_CODE (inter_type) == VECTOR_TYPE;
+ int inter_vec = VECTOR_TYPE_P (inter_type);
unsigned int inter_prec = TYPE_PRECISION (inter_type);
int inter_unsignedp = TYPE_UNSIGNED (inter_type);
int final_int = INTEGRAL_TYPE_P (type);
int final_ptr = POINTER_TYPE_P (type);
int final_float = FLOAT_TYPE_P (type);
- int final_vec = TREE_CODE (type) == VECTOR_TYPE;
+ int final_vec = VECTOR_TYPE_P (type);
unsigned int final_prec = TYPE_PRECISION (type);
int final_unsignedp = TYPE_UNSIGNED (type);
}
&& inter_prec >= inside_prec
&& (inter_float || inter_vec
|| inter_unsignedp == inside_unsignedp)
- && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
- && TYPE_MODE (type) == TYPE_MODE (inter_type))
+ && ! (final_prec != GET_MODE_PRECISION (element_mode (type))
+ && element_mode (type) == element_mode (inter_type))
&& ! final_ptr
&& (! final_vec || inter_prec == inside_prec))
(ocvt @0))
/* A ? B : B -> B. */
(simplify
(cnd @0 @1 @1)
- @1))
+ @1)
-/* !A ? B : C -> A ? C : B. */
-(simplify
- (cond (logical_inverted_value truth_valued_p@0) @1 @2)
- (cond @0 @2 @1))
+ /* !A ? B : C -> A ? C : B. */
+ (simplify
+ (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
+ (cnd @0 @2 @1)))
/* Simplifications of comparisons. */
a computed operator in the replacement tree thus we have
to play the trick below. */
(with { enum tree_code ic = invert_tree_comparison
- (cmp, HONOR_NANS (TYPE_MODE (TREE_TYPE (@0)))); }
+ (cmp, HONOR_NANS (element_mode (@0))); }
(if (ic == icmp)
(icmp @0 @1))
(if (ic == ncmp)
(ncmp @0 @1)))))
(simplify
- (bit_xor (cmp @0 @1) integer_onep)
- (if (INTEGRAL_TYPE_P (type))
- (with { enum tree_code ic = invert_tree_comparison
- (cmp, HONOR_NANS (TYPE_MODE (TREE_TYPE (@0)))); }
- (if (ic == icmp)
- (icmp @0 @1))
- (if (ic == ncmp)
- (ncmp @0 @1))))))
+ (bit_xor (cmp @0 @1) integer_truep)
+ (with { enum tree_code ic = invert_tree_comparison
+ (cmp, HONOR_NANS (element_mode (@0))); }
+ (if (ic == icmp)
+ (icmp @0 @1))
+ (if (ic == ncmp)
+ (ncmp @0 @1)))))
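
The recurring change across these hunks is TYPE_MODE (...) -> element_mode (...) (and TYPE_PRECISION -> element_precision), so that the HONOR_NANS / HONOR_SNANS / HONOR_SIGNED_ZEROS / HONOR_SIGN_DEPENDENT_ROUNDING checks consult the scalar element of vector and complex operands rather than the aggregate mode. A minimal sketch of the behaviour the callers above rely on; the name element_mode_sketch and this body are illustrative only, assuming element_mode acts as its uses here suggest, not the definition added by the patch:

  /* Sketch: mode of the scalar element of T's type.  T may be a type
     or an expression; for non-vector, non-complex types this is just
     TYPE_MODE.  */
  static machine_mode
  element_mode_sketch (const_tree t)
  {
    if (!TYPE_P (t))
      t = TREE_TYPE (t);
    if (VECTOR_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
      t = TREE_TYPE (t);
    return TYPE_MODE (t);
  }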