#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
+#include "selftest.h"
+#include "selftest-rtl.h"
/* Simplification and canonicalization of RTL. */
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
-static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
- unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
{
unsigned HOST_WIDE_INT val = -UINTVAL (i);
- if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
+ if (!HWI_COMPUTABLE_MODE_P (mode)
&& val == UINTVAL (i))
return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
mode);
{
tree decl = MEM_EXPR (x);
machine_mode mode = GET_MODE (x);
- HOST_WIDE_INT offset = 0;
+ poly_int64 offset = 0;
switch (TREE_CODE (decl))
{
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
{
- HOST_WIDE_INT bitsize, bitpos;
+ poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
tree toffset;
int unsignedp, reversep, volatilep = 0;
decl
= get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
&unsignedp, &reversep, &volatilep);
- if (bitsize != GET_MODE_BITSIZE (mode)
- || (bitpos % BITS_PER_UNIT)
- || (toffset && !tree_fits_shwi_p (toffset)))
+ if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
+ || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+ || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
decl = NULL;
else
- {
- offset += bitpos / BITS_PER_UNIT;
- if (toffset)
- offset += tree_to_shwi (toffset);
- }
+ offset += bytepos + toffset_val;
break;
}
}
if (MEM_P (newx))
{
rtx n = XEXP (newx, 0), o = XEXP (x, 0);
+ poly_int64 n_offset, o_offset;
/* Avoid creating a new MEM needlessly if we already had
the same address. We do if there's no OFFSET and the
old address X is identical to NEWX, or if X is of the
form (plus NEWX OFFSET), or the NEWX is of the form
(plus Y (const_int Z)) and X is that with the offset
added: (plus Y (const_int Z+OFFSET)). */
- if (!((offset == 0
- || (GET_CODE (o) == PLUS
- && GET_CODE (XEXP (o, 1)) == CONST_INT
- && (offset == INTVAL (XEXP (o, 1))
- || (GET_CODE (n) == PLUS
- && GET_CODE (XEXP (n, 1)) == CONST_INT
- && (INTVAL (XEXP (n, 1)) + offset
- == INTVAL (XEXP (o, 1)))
- && (n = XEXP (n, 0))))
- && (o = XEXP (o, 0))))
+ n = strip_offset (n, &n_offset);
+ o = strip_offset (o, &o_offset);
+ if (!(known_eq (o_offset, n_offset + offset)
&& rtx_equal_p (o, n)))
x = adjust_address_nv (newx, mode, offset);
}
else if (GET_MODE (x) == GET_MODE (newx)
- && offset == 0)
+ && known_eq (offset, 0))
x = newx;
}
}
rtx tem;
/* If this simplifies, use it. */
- if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
- op0, op1, op2)))
+ if ((tem = simplify_ternary_operation (code, mode, op0_mode,
+ op0, op1, op2)) != 0)
return tem;
return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
{
rtx tem;
- if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
- op0, op1)))
+ if ((tem = simplify_relational_operation (code, mode, cmp_mode,
+ op0, op1)) != 0)
return tem;
return gen_rtx_fmt_ee (code, mode, op0, op1);
&& (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
&& UINTVAL (XEXP (op, 1)) < op_precision)
{
- int byte = subreg_lowpart_offset (mode, op_mode);
+ poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
(WORDS_BIG_ENDIAN
if the MEM has a mode-dependent address. */
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
+ && is_a <scalar_int_mode> (mode, &int_mode)
&& is_a <scalar_int_mode> (op_mode, &int_op_mode)
&& MEM_P (XEXP (op, 0))
&& CONST_INT_P (XEXP (op, 1))
- && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
+ && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
&& INTVAL (XEXP (op, 1)) > 0
&& INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
&& ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
MEM_ADDR_SPACE (XEXP (op, 0)))
&& ! MEM_VOLATILE_P (XEXP (op, 0))
- && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
+ && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
|| WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
{
- int byte = subreg_lowpart_offset (mode, int_op_mode);
+ poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
- return adjust_address_nv (XEXP (op, 0), mode,
+ return adjust_address_nv (XEXP (op, 0), int_mode,
(WORDS_BIG_ENDIAN
? byte - shifted_bytes
: byte + shifted_bytes));
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
enum rtx_code reversed;
- rtx temp;
- scalar_int_mode inner, int_mode;
+ rtx temp, elt, base, step;
+ scalar_int_mode inner, int_mode, op_mode, op0_mode;
switch (code)
{
XEXP (op, 0), const0_rtx);
- if (GET_CODE (op) == SUBREG
+ if (partial_subreg_p (op)
&& subreg_lowpart_p (op)
- && (GET_MODE_SIZE (GET_MODE (op))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
&& GET_CODE (SUBREG_REG (op)) == ASHIFT
&& XEXP (SUBREG_REG (op), 0) == const1_rtx)
{
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
&& XEXP (op, 1) == const0_rtx
&& is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
{
+ int_mode = as_a <scalar_int_mode> (mode);
int isize = GET_MODE_PRECISION (inner);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
- GEN_INT (isize - 1));
- if (mode == inner)
+ gen_int_shift_amount (inner,
+ isize - 1));
+ if (int_mode == inner)
return temp;
- if (GET_MODE_PRECISION (mode) > isize)
- return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
- return simplify_gen_unary (TRUNCATE, mode, temp, inner);
+ if (GET_MODE_PRECISION (int_mode) > isize)
+ return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
+ return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
}
else if (STORE_FLAG_VALUE == -1)
{
temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
- GEN_INT (isize - 1));
- if (mode == inner)
+ gen_int_shift_amount (inner,
+ isize - 1));
+ if (int_mode == inner)
return temp;
- if (GET_MODE_PRECISION (mode) > isize)
- return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
- return simplify_gen_unary (TRUNCATE, mode, temp, inner);
+ if (GET_MODE_PRECISION (int_mode) > isize)
+ return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
+ return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
+ }
+ }
+
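+ /* Negating a series negates both the base and the step:
+ -(BASE + i * STEP) == -BASE + i * -STEP. */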
+ if (vec_series_p (op, &base, &step))
+ {
+ /* Only create a new series if we can simplify both parts. In other
+ cases this isn't really a simplification, and it's not necessarily
+ a win to replace a vector operation with a scalar operation. */
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
+ base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
+ if (base)
+ {
+ step = simplify_unary_operation (NEG, inner_mode,
+ step, inner_mode);
+ if (step)
+ return gen_vec_series (mode, base, step);
}
}
break;
if ((GET_CODE (op) == FLOAT_TRUNCATE
&& flag_unsafe_math_optimizations)
|| GET_CODE (op) == FLOAT_EXTEND)
- return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
- 0)))
- > GET_MODE_SIZE (mode)
- ? FLOAT_TRUNCATE : FLOAT_EXTEND,
+ return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
+ > GET_MODE_UNIT_SIZE (mode)
+ ? FLOAT_TRUNCATE : FLOAT_EXTEND,
mode,
XEXP (op, 0), mode);
if (lcode == ASHIFTRT)
/* Number of bits not shifted off the end. */
- bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ bits = (GET_MODE_UNIT_PRECISION (lmode)
+ - INTVAL (XEXP (lhs, 1)));
else /* lcode == SIGN_EXTEND */
/* Size of inner mode. */
- bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+ bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
if (rcode == ASHIFTRT)
- bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ bits += (GET_MODE_UNIT_PRECISION (rmode)
+ - INTVAL (XEXP (rhs, 1)));
else /* rcode == SIGN_EXTEND */
- bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+ bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
/* We can only widen multiplies if the result is mathematically
equivalent. I.e. if overflow was impossible. */
- if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
return simplify_gen_binary
(MULT, mode,
simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
(sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
{
- gcc_assert (GET_MODE_PRECISION (mode)
- > GET_MODE_PRECISION (GET_MODE (op)));
+ gcc_assert (GET_MODE_UNIT_PRECISION (mode)
+ > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
GET_MODE (XEXP (op, 0)));
}
&& is_a <scalar_int_mode> (mode, &int_mode)
&& CONST_INT_P (XEXP (op, 1))
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
- && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
+ GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
{
scalar_int_mode tmode;
gcc_assert (GET_MODE_BITSIZE (int_mode)
- > GET_MODE_BITSIZE (GET_MODE (op)));
- if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ > GET_MODE_BITSIZE (op_mode));
+ if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
- INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
if (lcode == LSHIFTRT)
/* Number of bits not shifted off the end. */
- bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ bits = (GET_MODE_UNIT_PRECISION (lmode)
+ - INTVAL (XEXP (lhs, 1)));
else /* lcode == ZERO_EXTEND */
/* Size of inner mode. */
- bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+ bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
if (rcode == LSHIFTRT)
- bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ bits += (GET_MODE_UNIT_PRECISION (rmode)
+ - INTVAL (XEXP (rhs, 1)));
else /* rcode == ZERO_EXTEND */
- bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+ bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
/* We can only widen multiplies if the result is mathematically
equivalent. I.e. if overflow was impossible. */
- if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
return simplify_gen_binary
(MULT, mode,
simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
&& is_a <scalar_int_mode> (mode, &int_mode)
&& CONST_INT_P (XEXP (op, 1))
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
- && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+ && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
+ GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
{
scalar_int_mode tmode;
- if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
+ if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
- INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
of mode N. E.g.
(zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
(and:SI (reg:SI) (const_int 63)). */
- if (GET_CODE (op) == SUBREG
- && GET_MODE_PRECISION (GET_MODE (op))
- < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_PRECISION (mode)
- >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
+ if (partial_subreg_p (op)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
+ && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
&& subreg_lowpart_p (op)
- && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
+ && (nonzero_bits (SUBREG_REG (op), op0_mode)
& ~GET_MODE_MASK (GET_MODE (op))) == 0)
{
- if (GET_MODE_PRECISION (mode)
- == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
+ if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
return SUBREG_REG (op);
- return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
- GET_MODE (SUBREG_REG (op)));
+ return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
+ op0_mode);
}
#if defined(POINTERS_EXTEND_UNSIGNED)
break;
}
+ if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
+ {
+ /* Try applying the operator to ELT and see if that simplifies.
+ We can duplicate the result if so.
+
+ The reason we don't use simplify_gen_unary is that it isn't
+ necessarily a win to convert things like:
+
+ (neg:V (vec_duplicate:V (reg:S R)))
+
+ to:
+
+ (vec_duplicate:V (neg:S (reg:S R)))
+
+ The first might be done entirely in vector registers while the
+ second might need a move between register files. */
+ temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
+ elt, GET_MODE_INNER (GET_MODE (op)));
+ if (temp)
+ return gen_vec_duplicate (mode, temp);
+ }
+
return 0;
}
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
rtx op, machine_mode op_mode)
{
- unsigned int width = GET_MODE_PRECISION (mode);
+ scalar_int_mode result_mode;
if (code == VEC_DUPLICATE)
{
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
(GET_MODE (op)));
}
- if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
- || GET_CODE (op) == CONST_VECTOR)
+ if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
+ return gen_const_vec_duplicate (mode, op);
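+ /* Duplicating a narrower constant vector repeats its elements until
+ the wider vector mode is filled. */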
+ if (GET_CODE (op) == CONST_VECTOR)
{
- int elt_size = GET_MODE_UNIT_SIZE (mode);
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ unsigned int n_elts = GET_MODE_NUNITS (mode);
+ unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
+ gcc_assert (in_n_elts < n_elts);
+ gcc_assert ((n_elts % in_n_elts) == 0);
rtvec v = rtvec_alloc (n_elts);
- unsigned int i;
-
- if (GET_CODE (op) != CONST_VECTOR)
- for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = op;
- else
- {
- machine_mode inmode = GET_MODE (op);
- int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
- unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
-
- gcc_assert (in_n_elts < n_elts);
- gcc_assert ((n_elts % in_n_elts) == 0);
- for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
- }
+ for (unsigned i = 0; i < n_elts; i++)
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
return gen_rtx_CONST_VECTOR (mode, v);
}
}
return const_double_from_real_value (d, mode);
}
- if (CONST_SCALAR_INT_P (op) && width > 0)
+ if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
{
+ unsigned int width = GET_MODE_PRECISION (result_mode);
wide_int result;
- machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
+ scalar_int_mode imode = (op_mode == VOIDmode
+ ? result_mode
+ : as_a <scalar_int_mode> (op_mode));
rtx_mode_t op0 = rtx_mode_t (op, imode);
int int_value;
break;
case FFS:
- result = wi::shwi (wi::ffs (op0), mode);
+ result = wi::shwi (wi::ffs (op0), result_mode);
break;
case CLZ:
if (wi::ne_p (op0, 0))
int_value = wi::clz (op0);
- else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
- int_value = GET_MODE_PRECISION (mode);
- result = wi::shwi (int_value, mode);
+ else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
+ int_value = GET_MODE_PRECISION (imode);
+ result = wi::shwi (int_value, result_mode);
break;
case CLRSB:
- result = wi::shwi (wi::clrsb (op0), mode);
+ result = wi::shwi (wi::clrsb (op0), result_mode);
break;
case CTZ:
if (wi::ne_p (op0, 0))
int_value = wi::ctz (op0);
- else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
- int_value = GET_MODE_PRECISION (mode);
- result = wi::shwi (int_value, mode);
+ else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
+ int_value = GET_MODE_PRECISION (imode);
+ result = wi::shwi (int_value, result_mode);
break;
case POPCOUNT:
- result = wi::shwi (wi::popcount (op0), mode);
+ result = wi::shwi (wi::popcount (op0), result_mode);
break;
case PARITY:
- result = wi::shwi (wi::parity (op0), mode);
+ result = wi::shwi (wi::parity (op0), result_mode);
break;
case BSWAP:
return 0;
}
- return immed_wide_int_const (result, mode);
+ return immed_wide_int_const (result, result_mode);
}
else if (CONST_DOUBLE_AS_FLOAT_P (op)
}
else if (CONST_DOUBLE_AS_FLOAT_P (op)
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
- && GET_MODE_CLASS (mode) == MODE_INT
- && width > 0)
+ && is_int_mode (mode, &result_mode))
{
+ unsigned int width = GET_MODE_PRECISION (result_mode);
/* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
operators are intentionally left unspecified (to ease implementation
by target backends), for consistency, this routine implements the
}
}
+ /* Handle polynomial integers. */
+ else if (CONST_POLY_INT_P (op))
+ {
+ poly_wide_int result;
+ switch (code)
+ {
+ case NEG:
+ result = -const_poly_int_value (op);
+ break;
+
+ case NOT:
+ result = ~const_poly_int_value (op);
+ break;
+
+ default:
+ return NULL_RTX;
+ }
+ return immed_wide_int_const (result, mode);
+ }
+
return NULL_RTX;
}
\f
return NULL_RTX;
}
+/* Subroutine of simplify_binary_operation_1 that looks for cases in
+ which OP0 and OP1 are both vector series or vector duplicates
+ (which are really just series with a step of 0). If so, try to
+ form a new series by applying CODE to the bases and to the steps.
+ Return null if no simplification is possible.
+
+ MODE is the mode of the operation and is known to be a vector
+ integer mode. */
+
+static rtx
+simplify_binary_operation_series (rtx_code code, machine_mode mode,
+ rtx op0, rtx op1)
+{
+ rtx base0, step0;
+ if (vec_duplicate_p (op0, &base0))
+ step0 = const0_rtx;
+ else if (!vec_series_p (op0, &base0, &step0))
+ return NULL_RTX;
+
+ rtx base1, step1;
+ if (vec_duplicate_p (op1, &base1))
+ step1 = const0_rtx;
+ else if (!vec_series_p (op1, &base1, &step1))
+ return NULL_RTX;
+
+ /* Only create a new series if we can simplify both parts. In other
+ cases this isn't really a simplification, and it's not necessarily
+ a win to replace a vector operation with a scalar operation. */
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
+ rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
+ if (!new_base)
+ return NULL_RTX;
+
+ rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
+ if (!new_step)
+ return NULL_RTX;
+
+ return gen_vec_series (mode, new_base, new_step);
+}
+
/* Subroutine of simplify_binary_operation. Simplify a binary operation
CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
- rtx tem, reversed, opleft, opright;
+ rtx tem, reversed, opleft, opright, elt0, elt1;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_PRECISION (mode);
scalar_int_mode int_mode, inner_mode;
+ poly_int64 offset;
/* Even if we can't compute a constant result,
there are some cases worth simplifying. */
if (tem)
return tem;
}
+
+ /* Handle vector series. */
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ {
+ tem = simplify_binary_operation_series (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
break;
case COMPARE:
return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
}
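+ /* Subtracting a constant (or poly_int) offset from a symbolic
+ constant is the same as adding the negated offset. */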
+ if ((GET_CODE (op0) == CONST
+ || GET_CODE (op0) == SYMBOL_REF
+ || GET_CODE (op0) == LABEL_REF)
+ && poly_int_rtx_p (op1, &offset))
+ return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
+
/* Don't let a relocatable value get a negative coeff. */
if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
return simplify_gen_binary (PLUS, mode,
|| plus_minus_operand_p (op1))
&& (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
return tem;
+
+ /* Handle vector series. */
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ {
+ tem = simplify_binary_operation_series (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
break;
case MULT:
{
val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
if (val >= 0)
- return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (ASHIFT, mode, op0,
+ gen_int_shift_amount (mode, val));
}
/* x*2 is x+x and x*(-1) is -x */
HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
HOST_WIDE_INT c2 = INTVAL (trueop1);
- /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
+ /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
if ((c1 & c2) == c1
&& !side_effects_p (XEXP (op0, 0)))
return trueop1;
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
if (((c1|c2) & mask) == mask)
return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
-
- /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
- if (((c1 & ~c2) & mask) != (c1 & mask))
- {
- tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
- gen_int_mode (c1 & ~c2, mode));
- return simplify_gen_binary (IOR, mode, tem, op1);
- }
}
/* Convert (A & B) | A to A. */
&& CONST_INT_P (XEXP (opleft, 1))
&& CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_PRECISION (mode)))
+ == GET_MODE_UNIT_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
/* Same, but for ashift that has been "simplified" to a wider mode
by simplify_shift_const. */
if (GET_CODE (opleft) == SUBREG
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
+ &inner_mode)
&& GET_CODE (SUBREG_REG (opleft)) == ASHIFT
&& GET_CODE (opright) == LSHIFTRT
&& GET_CODE (XEXP (opright, 0)) == SUBREG
- && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
- && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
- && (GET_MODE_SIZE (GET_MODE (opleft))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
+ && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
+ && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
&& CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
&& CONST_INT_P (XEXP (opright, 1))
- && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_PRECISION (mode)))
- return gen_rtx_ROTATE (mode, XEXP (opright, 0),
- XEXP (SUBREG_REG (opleft), 1));
-
- /* If we have (ior (and (X C1) C2)), simplify this by making
- C1 as small as possible if C1 actually changes. */
- if (CONST_INT_P (op1)
- && (HWI_COMPUTABLE_MODE_P (mode)
- || INTVAL (op1) > 0)
- && GET_CODE (op0) == AND
- && CONST_INT_P (XEXP (op0, 1))
- && CONST_INT_P (op1)
- && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
- {
- rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
- gen_int_mode (UINTVAL (XEXP (op0, 1))
- & ~UINTVAL (op1),
- mode));
- return simplify_gen_binary (IOR, mode, tmp, op1);
- }
+ && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
+ + INTVAL (XEXP (opright, 1))
+ == GET_MODE_PRECISION (int_mode)))
+ return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
+ XEXP (SUBREG_REG (opleft), 1));
/* If OP0 is (ashiftrt (plus ...) C), it might actually be
a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
is (lt foo (const_int 0)), so we can perform the above
simplification if STORE_FLAG_VALUE is 1. */
- if (STORE_FLAG_VALUE == 1
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && STORE_FLAG_VALUE == 1
&& trueop1 == const1_rtx
&& GET_CODE (op0) == LSHIFTRT
&& CONST_INT_P (XEXP (op0, 1))
- && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
- return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
+ return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit))
when STORE_FLAG_VALUE is the sign bit. */
- if (val_signbit_p (mode, STORE_FLAG_VALUE)
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && val_signbit_p (int_mode, STORE_FLAG_VALUE)
&& trueop1 == const_true_rtx
&& COMPARISON_P (op0)
- && (reversed = reversed_comparison (op0, mode)))
+ && (reversed = reversed_comparison (op0, int_mode)))
return reversed;
tem = simplify_byte_swapping_operation (code, mode, op0, op1);
/* Convert divide by power of two into shift. */
if (CONST_INT_P (trueop1)
&& (val = exact_log2 (UINTVAL (trueop1))) > 0)
- return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (LSHIFTRT, mode, op0,
+ gen_int_shift_amount (mode, val));
break;
case DIV:
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
if (CONST_INT_P (trueop1)
&& IN_RANGE (INTVAL (trueop1),
- GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
- GET_MODE_PRECISION (mode) - 1))
- return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
- mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
- - INTVAL (trueop1)));
+ GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
+ GET_MODE_UNIT_PRECISION (mode) - 1))
+ {
+ int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
+ rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
+ return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
+ mode, op0, new_amount_rtx);
+ }
#endif
/* FALLTHRU */
case ASHIFTRT:
if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
return op0;
/* Rotating ~0 always results in ~0. */
- if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
+ if (CONST_INT_P (trueop0)
+ && HWI_COMPUTABLE_MODE_P (mode)
&& UINTVAL (trueop0) == GET_MODE_MASK (mode)
&& ! side_effects_p (op1))
return op0;
== GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
&& subreg_lowpart_p (op0))
{
- rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
- + INTVAL (op1));
+ rtx tmp = gen_int_shift_amount
+ (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
tmp = simplify_gen_binary (code, inner_mode,
XEXP (SUBREG_REG (op0), 0),
tmp);
if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
{
- val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
+ val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
if (val != INTVAL (op1))
- return simplify_gen_binary (code, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (code, mode, op0,
+ gen_int_shift_amount (mode, val));
}
break;
return op0;
/* Optimize (lshiftrt (clz X) C) as (eq X 0). */
if (GET_CODE (op0) == CLZ
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
&& CONST_INT_P (trueop1)
&& STORE_FLAG_VALUE == 1
- && INTVAL (trueop1) < (HOST_WIDE_INT)width)
+ && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
{
- machine_mode imode = GET_MODE (XEXP (op0, 0));
unsigned HOST_WIDE_INT zero_val = 0;
- if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
- && zero_val == GET_MODE_PRECISION (imode)
+ if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
+ && zero_val == GET_MODE_PRECISION (inner_mode)
&& INTVAL (trueop1) == exact_log2 (zero_val))
- return simplify_gen_relational (EQ, mode, imode,
+ return simplify_gen_relational (EQ, mode, inner_mode,
XEXP (op0, 0), const0_rtx);
}
goto canonicalize_shift;
case SMIN:
- if (width <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& mode_signbit_p (mode, trueop1)
&& ! side_effects_p (op0))
return op1;
break;
case SMAX:
- if (width <= HOST_BITS_PER_WIDE_INT
+ if (HWI_COMPUTABLE_MODE_P (mode)
&& CONST_INT_P (trueop1)
&& (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
&& ! side_effects_p (op0))
/* ??? There are simplifications that can be done. */
return 0;
+ case VEC_SERIES:
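+ /* A series with a zero step is just a duplicate of the base, and a
+ series with two constant operands folds to a constant vector. */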
+ if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
+ return gen_vec_duplicate (mode, op0);
+ if (CONSTANT_P (op0) && CONSTANT_P (op1))
+ return gen_const_vec_series (mode, op0, op1);
+ return 0;
+
case VEC_SELECT:
if (!VECTOR_MODE_P (mode))
{
gcc_assert (XVECLEN (trueop1, 0) == 1);
gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
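+ /* Selecting any single element of a VEC_DUPLICATE gives the
+ duplicated scalar itself. */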
+ if (vec_duplicate_p (trueop0, &elt0))
+ return elt0;
+
if (GET_CODE (trueop0) == CONST_VECTOR)
return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
(trueop1, 0, 0)));
rtx op0 = XEXP (trueop0, 0);
rtx op1 = XEXP (trueop0, 1);
- machine_mode opmode = GET_MODE (op0);
- int elt_size = GET_MODE_UNIT_SIZE (opmode);
- int n_elts = GET_MODE_SIZE (opmode) / elt_size;
+ int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
int i = INTVAL (XVECEXP (trueop1, 0, 0));
int elem;
mode01 = GET_MODE (op01);
/* Find out number of elements of each operand. */
- if (VECTOR_MODE_P (mode00))
- {
- elt_size = GET_MODE_UNIT_SIZE (mode00);
- n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
- }
- else
- n_elts00 = 1;
-
- if (VECTOR_MODE_P (mode01))
- {
- elt_size = GET_MODE_UNIT_SIZE (mode01);
- n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
- }
- else
- n_elts01 = 1;
+ n_elts00 = GET_MODE_NUNITS (mode00);
+ n_elts01 = GET_MODE_NUNITS (mode01);
gcc_assert (n_elts == n_elts00 + n_elts01);
tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
return tmp;
}
- if (GET_CODE (trueop0) == VEC_DUPLICATE
- && GET_MODE (XEXP (trueop0, 0)) == mode)
- return XEXP (trueop0, 0);
}
else
{
== GET_MODE_INNER (GET_MODE (trueop0)));
gcc_assert (GET_CODE (trueop1) == PARALLEL);
+ if (vec_duplicate_p (trueop0, &elt0))
+ /* It doesn't matter which elements are selected by trueop1,
+ because they are all the same. */
+ return gen_vec_duplicate (mode, elt0);
+
if (GET_CODE (trueop0) == CONST_VECTOR)
{
int elt_size = GET_MODE_UNIT_SIZE (mode);
rtx subop1 = XEXP (trueop0, 1);
machine_mode mode0 = GET_MODE (subop0);
machine_mode mode1 = GET_MODE (subop1);
- int li = GET_MODE_UNIT_SIZE (mode0);
- int l0 = GET_MODE_SIZE (mode0) / li;
- int l1 = GET_MODE_SIZE (mode1) / li;
+ int l0 = GET_MODE_NUNITS (mode0);
+ int l1 = GET_MODE_NUNITS (mode1);
int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
{
|| CONST_SCALAR_INT_P (trueop1)
|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
{
- int elt_size = GET_MODE_UNIT_SIZE (mode);
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ unsigned n_elts = GET_MODE_NUNITS (mode);
+ unsigned in_n_elts = GET_MODE_NUNITS (op0_mode);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- unsigned in_n_elts = 1;
-
- if (VECTOR_MODE_P (op0_mode))
- in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
for (i = 0; i < n_elts; i++)
{
if (i < in_n_elts)
gcc_unreachable ();
}
+ if (mode == GET_MODE (op0)
+ && mode == GET_MODE (op1)
+ && vec_duplicate_p (op0, &elt0)
+ && vec_duplicate_p (op1, &elt1))
+ {
+ /* Try applying the operator to ELT and see if that simplifies.
+ We can duplicate the result if so.
+
+ The reason we don't use simplify_gen_binary is that it isn't
+ necessarily a win to convert things like:
+
+ (plus:V (vec_duplicate:V (reg:S R1))
+ (vec_duplicate:V (reg:S R2)))
+
+ to:
+
+ (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
+
+ The first might be done entirely in vector registers while the
+ second might need a move between register files. */
+ tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
+ elt0, elt1);
+ if (tem)
+ return gen_vec_duplicate (mode, tem);
+ }
+
return 0;
}
&& GET_CODE (op0) == CONST_VECTOR
&& GET_CODE (op1) == CONST_VECTOR)
{
- unsigned n_elts = GET_MODE_NUNITS (mode);
- machine_mode op0mode = GET_MODE (op0);
- unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
- machine_mode op1mode = GET_MODE (op1);
- unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
+ unsigned int n_elts = CONST_VECTOR_NUNITS (op0);
+ gcc_assert (n_elts == (unsigned int) CONST_VECTOR_NUNITS (op1));
+ gcc_assert (n_elts == GET_MODE_NUNITS (mode));
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- gcc_assert (op0_n_elts == n_elts);
- gcc_assert (op1_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
return immed_wide_int_const (result, int_mode);
}
+ /* Handle polynomial integers. */
+ if (NUM_POLY_INT_COEFFS > 1
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && poly_int_rtx_p (op0)
+ && poly_int_rtx_p (op1))
+ {
+ poly_wide_int result;
+ switch (code)
+ {
+ case PLUS:
+ result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
+ break;
+
+ case MINUS:
+ result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
+ break;
+
+ case MULT:
+ if (CONST_SCALAR_INT_P (op1))
+ result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
+ else
+ return NULL_RTX;
+ break;
+
+ case ASHIFT:
+ if (CONST_SCALAR_INT_P (op1))
+ {
+ wide_int shift = rtx_mode_t (op1, mode);
+ if (SHIFT_COUNT_TRUNCATED)
+ shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
+ else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
+ return NULL_RTX;
+ result = wi::to_poly_wide (op0, mode) << shift;
+ }
+ else
+ return NULL_RTX;
+ break;
+
+ case IOR:
+ if (!CONST_SCALAR_INT_P (op1)
+ || !can_ior_p (wi::to_poly_wide (op0, mode),
+ rtx_mode_t (op1, mode), &result))
+ return NULL_RTX;
+ break;
+
+ default:
+ return NULL_RTX;
+ }
+ return immed_wide_int_const (result, int_mode);
+ }
+
return NULL_RTX;
}
return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
{
- int i, units;
- rtvec v;
-
rtx val = VECTOR_STORE_FLAG_VALUE (mode);
if (val == NULL_RTX)
return NULL_RTX;
if (val == const1_rtx)
return CONST1_RTX (mode);
- units = GET_MODE_NUNITS (mode);
- v = rtvec_alloc (units);
- for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) = val;
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ return gen_const_vec_duplicate (mode, val);
}
#else
return NULL_RTX;
&& (code == EQ || code == NE)
&& ! ((REG_P (op0) || CONST_INT_P (trueop0))
&& (REG_P (op1) || CONST_INT_P (trueop1)))
- && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
+ && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
/* We cannot do this if tem is a nonzero address. */
&& ! nonzero_address_p (tem))
return simplify_const_relational_operation (signed_condition (code),
}
/* Optimize integer comparisons with zero. */
- if (trueop1 == const0_rtx && !side_effects_p (trueop0))
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && trueop1 == const0_rtx
+ && !side_effects_p (trueop0))
{
/* Some addresses are known to be nonzero. We don't know
their sign, but equality comparisons are known. */
rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
{
- int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
+ int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
&& (UINTVAL (inner_const)
& (HOST_WIDE_INT_1U
return NULL_RTX;
HOST_WIDE_INT op_val;
- if (((op_code == CLZ
- && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
- || (op_code == CTZ
- && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
+ scalar_int_mode mode ATTRIBUTE_UNUSED
+ = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
+ if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
+ || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
&& op_val == INTVAL (on_zero))
return on_nonzero;
machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
- unsigned int width = GET_MODE_PRECISION (mode);
bool any_change = false;
rtx tem, trueop2;
-
- /* VOIDmode means "infinite" precision. */
- if (width == 0)
- width = HOST_BITS_PER_WIDE_INT;
+ scalar_int_mode int_mode, int_op0_mode;
switch (code)
{
if (CONST_INT_P (op0)
&& CONST_INT_P (op1)
&& CONST_INT_P (op2)
- && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
- && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
+ && HWI_COMPUTABLE_MODE_P (int_mode))
{
/* Extracting a bit-field from a constant */
unsigned HOST_WIDE_INT val = UINTVAL (op0);
HOST_WIDE_INT op1val = INTVAL (op1);
HOST_WIDE_INT op2val = INTVAL (op2);
- if (BITS_BIG_ENDIAN)
- val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
- else
+ if (!BITS_BIG_ENDIAN)
val >>= op2val;
+ else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
+ val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
+ else
+ /* Not enough information to calculate the bit position. */
+ break;
if (HOST_BITS_PER_WIDE_INT != op1val)
{
val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
}
- return gen_int_mode (val, mode);
+ return gen_int_mode (val, int_mode);
}
break;
XEXP (op0, 0), XEXP (op0, 1));
}
- if (cmp_mode == VOIDmode)
- cmp_mode = op0_mode;
temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
cmp_mode, XEXP (op0, 0),
XEXP (op0, 1));
trueop2 = avoid_constant_pool_reference (op2);
if (CONST_INT_P (trueop2))
{
- int elt_size = GET_MODE_UNIT_SIZE (mode);
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ unsigned n_elts = GET_MODE_NUNITS (mode);
unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
unsigned HOST_WIDE_INT mask;
if (n_elts == HOST_BITS_PER_WIDE_INT)
return op1;
}
}
+ /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
+ (const_int N))
+ with (vec_concat (X) (B)) if N == 1 or
+ (vec_concat (A) (X)) if N == 2. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == CONST_VECTOR
+ && CONST_VECTOR_NUNITS (op1) == 2
+ && GET_MODE_NUNITS (GET_MODE (op0)) == 2
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
+ /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
+ with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
+ Only applies for vectors of two elements. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == VEC_CONCAT
+ && GET_MODE_NUNITS (GET_MODE (op0)) == 2
+ && GET_MODE_NUNITS (GET_MODE (op1)) == 2
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = XEXP (op1, 2 - sel);
+ rtx otherop = XEXP (op1, sel - 1);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+ /* Don't want to throw away the other part of the vec_concat if
+ it has side-effects. */
+ if (!side_effects_p (otherop))
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
+
+ /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
+ (const_int n))
+ with (vec_concat x y) or (vec_concat y x) depending on value
+ of N. */
+ if (GET_CODE (op0) == VEC_DUPLICATE
+ && GET_CODE (op1) == VEC_DUPLICATE
+ && GET_MODE_NUNITS (GET_MODE (op0)) == 2
+ && GET_MODE_NUNITS (GET_MODE (op1)) == 2
+ && IN_RANGE (sel, 1, 2))
+ {
+ rtx newop0 = XEXP (op0, 0);
+ rtx newop1 = XEXP (op1, 0);
+ if (sel == 2)
+ std::swap (newop0, newop1);
+
+ return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
+ }
}
if (rtx_equal_p (op0, op1)
and then repacking them again for OUTERMODE. */
static rtx
-simplify_immed_subreg (machine_mode outermode, rtx op,
- machine_mode innermode, unsigned int byte)
+simplify_immed_subreg (fixed_size_mode outermode, rtx op,
+ fixed_size_mode innermode, unsigned int byte)
{
enum {
value_bit = 8,
rtx result_s = NULL;
rtvec result_v = NULL;
enum mode_class outer_class;
- machine_mode outer_submode;
+ scalar_mode outer_submode;
int max_bitsize;
/* Some ports misuse CCmode. */
case CONST_WIDE_INT:
{
- rtx_mode_t val = rtx_mode_t (el, innermode);
+ rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
unsigned char extend = wi::sign_mask (val);
int prec = wi::get_precision (val);
Return 0 if no simplifications are possible. */
rtx
simplify_subreg (machine_mode outermode, rtx op,
- machine_mode innermode, unsigned int byte)
+ machine_mode innermode, poly_uint64 byte)
{
/* Little bit of sanity checking. */
gcc_assert (innermode != VOIDmode);
gcc_assert (GET_MODE (op) == innermode
|| GET_MODE (op) == VOIDmode);
- if ((byte % GET_MODE_SIZE (outermode)) != 0)
+ if (!multiple_p (byte, GET_MODE_SIZE (outermode)))
return NULL_RTX;
- if (byte >= GET_MODE_SIZE (innermode))
+ if (maybe_ge (byte, GET_MODE_SIZE (innermode)))
return NULL_RTX;
- if (outermode == innermode && !byte)
+ if (outermode == innermode && known_eq (byte, 0U))
return op;
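+ /* A subreg of a VEC_DUPLICATE taken at an element boundary is either
+ the duplicated scalar itself or a narrower duplicate of it. */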
+ if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
+ {
+ rtx elt;
+
+ if (VECTOR_MODE_P (outermode)
+ && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
+ && vec_duplicate_p (op, &elt))
+ return gen_vec_duplicate (outermode, elt);
+
+ if (outermode == GET_MODE_INNER (innermode)
+ && vec_duplicate_p (op, &elt))
+ return elt;
+ }
+
if (CONST_SCALAR_INT_P (op)
|| CONST_DOUBLE_AS_FLOAT_P (op)
|| GET_CODE (op) == CONST_FIXED
|| GET_CODE (op) == CONST_VECTOR)
- return simplify_immed_subreg (outermode, op, innermode, byte);
+ {
+ /* simplify_immed_subreg deconstructs OP into bytes and constructs
+ the result from bytes, so it only works if the sizes of the modes
+ and the value of the offset are known at compile time. Cases
+ that apply to general modes and offsets should be handled here
+ before calling simplify_immed_subreg. */
+ fixed_size_mode fs_outermode, fs_innermode;
+ unsigned HOST_WIDE_INT cbyte;
+ if (is_a <fixed_size_mode> (outermode, &fs_outermode)
+ && is_a <fixed_size_mode> (innermode, &fs_innermode)
+ && byte.is_constant (&cbyte))
+ return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte);
+
+ return NULL_RTX;
+ }
/* Changing mode twice with SUBREG => just change it once,
or not at all if changing back op starting mode. */
if (GET_CODE (op) == SUBREG)
{
machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
- int final_offset = byte + SUBREG_BYTE (op);
rtx newx;
if (outermode == innermostmode
- && byte == 0 && SUBREG_BYTE (op) == 0)
+ && known_eq (byte, 0U)
+ && known_eq (SUBREG_BYTE (op), 0))
return SUBREG_REG (op);
- /* The SUBREG_BYTE represents offset, as if the value were stored
- in memory. Irritating exception is paradoxical subreg, where
- we define SUBREG_BYTE to be 0. On big endian machines, this
- value should be negative. For a moment, undo this exception. */
- if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
- {
- int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
- if (WORDS_BIG_ENDIAN)
- final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- final_offset += difference % UNITS_PER_WORD;
- }
- if (SUBREG_BYTE (op) == 0
- && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
- {
- int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
- if (WORDS_BIG_ENDIAN)
- final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- final_offset += difference % UNITS_PER_WORD;
- }
+ /* Work out the memory offset of the final OUTERMODE value relative
+ to the inner value of OP. */
+ poly_int64 mem_offset = subreg_memory_offset (outermode,
+ innermode, byte);
+ poly_int64 op_mem_offset = subreg_memory_offset (op);
+ poly_int64 final_offset = mem_offset + op_mem_offset;
/* See whether resulting subreg will be paradoxical. */
if (!paradoxical_subreg_p (outermode, innermostmode))
{
/* In nonparadoxical subregs we can't handle negative offsets. */
- if (final_offset < 0)
+ if (maybe_lt (final_offset, 0))
return NULL_RTX;
/* Bail out in case resulting subreg would be incorrect. */
- if (final_offset % GET_MODE_SIZE (outermode)
- || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
+ if (!multiple_p (final_offset, GET_MODE_SIZE (outermode))
+ || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode)))
return NULL_RTX;
}
else
{
- int offset = 0;
- int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
-
- /* In paradoxical subreg, see if we are still looking on lower part.
- If so, our SUBREG_BYTE will be 0. */
- if (WORDS_BIG_ENDIAN)
- offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- offset += difference % UNITS_PER_WORD;
- if (offset == final_offset)
- final_offset = 0;
- else
+ poly_int64 required_offset = subreg_memory_offset (outermode,
+ innermostmode, 0);
+ if (maybe_ne (final_offset, required_offset))
return NULL_RTX;
+ /* Paradoxical subregs always have byte offset 0. */
+ final_offset = 0;
}
/* Recurse for further possible simplifications. */
final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
if (HARD_REGISTER_NUM_P (final_regno))
{
- rtx x;
- int final_offset = byte;
-
- /* Adjust offset for paradoxical subregs. */
- if (byte == 0
- && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
- {
- int difference = (GET_MODE_SIZE (innermode)
- - GET_MODE_SIZE (outermode));
- if (WORDS_BIG_ENDIAN)
- final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
- if (BYTES_BIG_ENDIAN)
- final_offset += difference % UNITS_PER_WORD;
- }
-
- x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
+ rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
+ subreg_memory_offset (outermode,
+ innermode, byte));
/* Propagate original regno. We don't have any way to specify
the offset inside original regno, so do so only for lowpart.
The information is used only by alias analysis that cannot
grok partial registers anyway. */
- if (subreg_lowpart_offset (outermode, innermode) == byte)
+ if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
return x;
}
if (GET_CODE (op) == CONCAT
|| GET_CODE (op) == VEC_CONCAT)
{
- unsigned int part_size, final_offset;
+ unsigned int part_size;
+ poly_uint64 final_offset;
rtx part, res;
machine_mode part_mode = GET_MODE (XEXP (op, 0));
if (part_mode == VOIDmode)
part_mode = GET_MODE_INNER (GET_MODE (op));
part_size = GET_MODE_SIZE (part_mode);
- if (byte < part_size)
+ if (known_lt (byte, part_size))
{
part = XEXP (op, 0);
final_offset = byte;
}
- else
+ else if (known_ge (byte, part_size))
{
part = XEXP (op, 1);
final_offset = byte - part_size;
}
+ else
+ return NULL_RTX;
- if (final_offset + GET_MODE_SIZE (outermode) > part_size)
+ if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size))
return NULL_RTX;
part_mode = GET_MODE (part);
it extracts higher bits that the ZERO_EXTEND's source bits. */
if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
{
- unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
- if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
+ poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
+ if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
return CONST0_RTX (outermode);
}
scalar_int_mode int_outermode, int_innermode;
if (is_a <scalar_int_mode> (outermode, &int_outermode)
&& is_a <scalar_int_mode> (innermode, &int_innermode)
- && (GET_MODE_PRECISION (int_outermode)
- < GET_MODE_PRECISION (int_innermode))
- && byte == subreg_lowpart_offset (int_outermode, int_innermode))
+ && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
{
- rtx tem = simplify_truncation (int_outermode, op, int_innermode);
- if (tem)
- return tem;
+ /* Handle polynomial integers. The upper bits of a paradoxical
+ subreg are undefined, so this is safe regardless of whether
+ we're truncating or extending. */
+ if (CONST_POLY_INT_P (op))
+ {
+ poly_wide_int val
+ = poly_wide_int::from (const_poly_int_value (op),
+ GET_MODE_PRECISION (int_outermode),
+ SIGNED);
+ return immed_wide_int_const (val, int_outermode);
+ }
+
+ if (GET_MODE_PRECISION (int_outermode)
+ < GET_MODE_PRECISION (int_innermode))
+ {
+ rtx tem = simplify_truncation (int_outermode, op, int_innermode);
+ if (tem)
+ return tem;
+ }
}
return NULL_RTX;
rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
- machine_mode innermode, unsigned int byte)
+ machine_mode innermode, poly_uint64 byte)
{
rtx newx;
}
return NULL;
}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Make a unique pseudo REG of mode MODE for use by selftests. */
+
+static rtx
+make_test_reg (machine_mode mode)
+{
+ static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
+
+ return gen_rtx_REG (mode, test_reg_num++);
+}
+
+/* Test vector simplifications involving VEC_DUPLICATE in which the
+ operands and result have vector mode MODE. SCALAR_REG is a pseudo
+ register that holds one element of MODE. */
+
+static void
+test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
+{
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
+ rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
+ unsigned int nunits = GET_MODE_NUNITS (mode);
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ {
+ /* Test some simple unary cases with VEC_DUPLICATE arguments. */
+ rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
+ rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
+ ASSERT_RTX_EQ (duplicate,
+ simplify_unary_operation (NOT, mode,
+ duplicate_not, mode));
+
+ rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
+ rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
+ ASSERT_RTX_EQ (duplicate,
+ simplify_unary_operation (NEG, mode,
+ duplicate_neg, mode));
+
+ /* Test some simple binary cases with VEC_DUPLICATE arguments. */
+ ASSERT_RTX_EQ (duplicate,
+ simplify_binary_operation (PLUS, mode, duplicate,
+ CONST0_RTX (mode)));
+
+ ASSERT_RTX_EQ (duplicate,
+ simplify_binary_operation (MINUS, mode, duplicate,
+ CONST0_RTX (mode)));
+
+ ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
+ simplify_binary_operation (MINUS, mode, duplicate,
+ duplicate));
+ }
+
+ /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
+ rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
+ ASSERT_RTX_PTR_EQ (scalar_reg,
+ simplify_binary_operation (VEC_SELECT, inner_mode,
+ duplicate, zero_par));
+
+ /* And again with the final element. */
+ rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
+ rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
+ ASSERT_RTX_PTR_EQ (scalar_reg,
+ simplify_binary_operation (VEC_SELECT, inner_mode,
+ duplicate, last_par));
+
+ /* Test a scalar subreg of a VEC_DUPLICATE. */
+ poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
+ ASSERT_RTX_EQ (scalar_reg,
+ simplify_gen_subreg (inner_mode, duplicate,
+ mode, offset));
+
+ machine_mode narrower_mode;
+ if (nunits > 2
+ && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
+ && VECTOR_MODE_P (narrower_mode))
+ {
+ /* Test VEC_SELECT of a vector. */
+ rtx vec_par
+ = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
+ rtx narrower_duplicate
+ = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
+ ASSERT_RTX_EQ (narrower_duplicate,
+ simplify_binary_operation (VEC_SELECT, narrower_mode,
+ duplicate, vec_par));
+
+ /* Test a vector subreg of a VEC_DUPLICATE. */
+ poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
+ ASSERT_RTX_EQ (narrower_duplicate,
+ simplify_gen_subreg (narrower_mode, duplicate,
+ mode, offset));
+ }
+}
+
+/* Test vector simplifications involving VEC_SERIES in which the
+ operands and result have vector mode MODE. SCALAR_REG is a pseudo
+ register that holds one element of MODE. */
+
+static void
+test_vector_ops_series (machine_mode mode, rtx scalar_reg)
+{
+ /* Test unary cases with VEC_SERIES arguments. */
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
+ rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
+ rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
+ rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
+ rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
+ rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
+ rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
+ rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
+ rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
+ neg_scalar_reg);
+ ASSERT_RTX_EQ (series_0_r,
+ simplify_unary_operation (NEG, mode, series_0_nr, mode));
+ ASSERT_RTX_EQ (series_r_m1,
+ simplify_unary_operation (NEG, mode, series_nr_1, mode));
+ ASSERT_RTX_EQ (series_r_r,
+ simplify_unary_operation (NEG, mode, series_nr_nr, mode));
+
+ /* Test that a VEC_SERIES with a zero step is simplified away. */
+ ASSERT_RTX_EQ (duplicate,
+ simplify_binary_operation (VEC_SERIES, mode,
+ scalar_reg, const0_rtx));
+
+ /* Test PLUS and MINUS with VEC_SERIES. */
+ rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
+ rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
+ rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
+ ASSERT_RTX_EQ (series_r_r,
+ simplify_binary_operation (PLUS, mode, series_0_r,
+ duplicate));
+ ASSERT_RTX_EQ (series_r_1,
+ simplify_binary_operation (PLUS, mode, duplicate,
+ series_0_1));
+ ASSERT_RTX_EQ (series_r_m1,
+ simplify_binary_operation (PLUS, mode, duplicate,
+ series_0_m1));
+ ASSERT_RTX_EQ (series_0_r,
+ simplify_binary_operation (MINUS, mode, series_r_r,
+ duplicate));
+ ASSERT_RTX_EQ (series_r_m1,
+ simplify_binary_operation (MINUS, mode, duplicate,
+ series_0_1));
+ ASSERT_RTX_EQ (series_r_1,
+ simplify_binary_operation (MINUS, mode, duplicate,
+ series_0_m1));
+ ASSERT_RTX_EQ (series_0_m1,
+ simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
+ constm1_rtx));
+}
+
+/* Verify some simplifications involving vectors. */
+
+static void
+test_vector_ops ()
+{
+ for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
+ {
+ machine_mode mode = (machine_mode) i;
+ if (VECTOR_MODE_P (mode))
+ {
+ rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
+ test_vector_ops_duplicate (mode, scalar_reg);
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ && GET_MODE_NUNITS (mode) > 2)
+ test_vector_ops_series (mode, scalar_reg);
+ }
+ }
+}
+
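+/* Wrapper around the CONST_POLY_INT tests so that they can be skipped
+ when the target has only one polynomial coefficient. */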
+template<unsigned int N>
+struct simplify_const_poly_int_tests
+{
+ static void run ();
+};
+
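+/* With a single coefficient there are no CONST_POLY_INTs, so there is
+ nothing to test. */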
+template<>
+struct simplify_const_poly_int_tests<1>
+{
+ static void run () {}
+};
+
+/* Test various CONST_POLY_INT properties. */
+
+template<unsigned int N>
+void
+simplify_const_poly_int_tests<N>::run ()
+{
+ rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
+ rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
+ rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
+ rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
+ rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
+ rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
+ rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
+ rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
+ rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
+ rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
+ rtx two = GEN_INT (2);
+ rtx six = GEN_INT (6);
+ poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
+
+ /* These tests only try limited operation combinations. Fuller arithmetic
+ testing is done directly on poly_ints. */
+ ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
+ ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
+ ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
+ ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
+ ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
+ ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
+ ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
+ ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
+ ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
+ ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
+ ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
+}
+
+/* Run all of the selftests within this file. */
+
+void
+simplify_rtx_c_tests ()
+{
+ test_vector_ops ();
+ simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */