+2015-07-30 David Sherwood <david.sherwood@arm.com>
+
+ * config/aarch64/aarch64-simd.md (aarch64_ext<mode>): Replace call to
+ GET_MODE_SIZE (GET_MODE_INNER (m)) with GET_MODE_UNIT_SIZE (m).
+ * config/aarch64/aarch64.c (aarch64_simd_valid_immediate): Likewise.
+ * config/arm/arm.c (neon_valid_immediate, neon_immediate_valid_for_shift)
+ (neon_pairwise_reduce): Likewise.
+ * config/i386/i386.c (classify_argument, ix86_expand_int_vcond)
+ (expand_vec_perm_blend, expand_vec_perm_pshufb): Likewise.
+ (expand_vec_perm_pshufb2, expand_vec_perm_vpshufb2_vpermq): Likewise.
+ (expand_vec_perm_vpshufb2_vpermq_even_odd): Likewise.
+ (expand_vec_perm_vpshufb4_vpermq2): Likewise.
+ * config/i386/sse.md
+ (<extract_type>_vinsert<shuffletype><extract_suf>_mask): Likewise.
+ (*ssse3_palignr<mode>_perm): Likewise.
+ * config/rs6000/rs6000.c (rs6000_complex_function_value): Likewise.
+ * config/spu/spu.c (arith_immediate_p, exp2_immediate_p): Likewise.
+ * simplify-rtx.c (simplify_const_unary_operation): Likewise.
+ (simplify_binary_operation_1, simplify_ternary_operation): Likewise.
+
2015-07-30 Richard Biener <rguenther@suse.de>

* genmatch.c (decision_tree::gen_gimple): Merge with ...
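
Note (not part of the patch): GET_MODE_UNIT_SIZE (m) yields the size in
bytes of one element of m, which equals GET_MODE_SIZE (GET_MODE_INNER (m))
for vector modes and is simply GET_MODE_SIZE (m) for scalars, so every hunk
below is a pure simplification.  A self-contained sketch of the equivalence,
with toy mode tables standing in for machmode.h (names and values here are
illustrative, not GCC's):

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for GCC's mode tables.  A scalar mode is its own
   inner mode, matching GET_MODE_INNER's behaviour for scalars.  */
enum mode { SImode, DImode, V4SImode, NUM_MODES };

static const unsigned mode_size[NUM_MODES] = { 4, 8, 16 };
static const enum mode mode_inner[NUM_MODES] = { SImode, DImode, SImode };

static unsigned get_mode_size (enum mode m) { return mode_size[m]; }
static enum mode get_mode_inner (enum mode m) { return mode_inner[m]; }

/* The shorthand the patch switches to: the size of one element.  */
static unsigned get_mode_unit_size (enum mode m)
{
  return get_mode_size (get_mode_inner (m));
}

int main (void)
{
  /* Old and new spellings agree for vectors and for scalars.  */
  assert (get_mode_unit_size (V4SImode)
          == get_mode_size (get_mode_inner (V4SImode)));
  assert (get_mode_unit_size (DImode) == get_mode_size (DImode));
  printf ("V4SImode: %u elements of %u bytes\n",
          get_mode_size (V4SImode) / get_mode_unit_size (V4SImode),
          get_mode_unit_size (V4SImode));
  return 0;
}
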
"TARGET_SIMD"
{
operands[3] = GEN_INT (INTVAL (operands[3])
- * GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)));
+ * GET_MODE_UNIT_SIZE (<MODE>mode));
return "ext\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>, #%3";
}
[(set_attr "type" "neon_ext<q>")]
)
unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
- unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned int innersize = GET_MODE_UNIT_SIZE (mode);
unsigned char bytes[16];
int immtype = -1, matches;
unsigned int invmask = inverse ? 0xff : 0;
mode = DImode;
}
- innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ innersize = GET_MODE_UNIT_SIZE (mode);
/* Vectors of float constants. */
if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
rtx *modconst, int *elementwidth,
bool isleftshift)
{
- unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned int innersize = GET_MODE_UNIT_SIZE (mode);
unsigned int n_elts = CONST_VECTOR_NUNITS (op), i;
unsigned HOST_WIDE_INT last_elt = 0;
unsigned HOST_WIDE_INT maxshift;
neon_pairwise_reduce (rtx op0, rtx op1, machine_mode mode,
rtx (*reduc) (rtx, rtx, rtx))
{
- machine_mode inner = GET_MODE_INNER (mode);
- unsigned int i, parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (inner);
+ unsigned int i, parts = GET_MODE_SIZE (mode) / GET_MODE_UNIT_SIZE (mode);
rtx tmpsum = op1;
for (i = parts / 2; i >= 1; i /= 2)
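
Note (illustration only): parts is the element count, GET_MODE_SIZE /
GET_MODE_UNIT_SIZE, and each pass of the loop halves it via the reduc
builder.  A toy scalar model of the same loop shape, with addition over
array halves standing in for the NEON pairwise operation:

#include <stdio.h>

int main (void)
{
  float v[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
  /* parts = GET_MODE_SIZE (mode) / GET_MODE_UNIT_SIZE (mode).  */
  unsigned parts = sizeof v / sizeof v[0];

  /* Each pass halves the number of live values; '+' stands in for
     the reduc callback.  */
  for (unsigned i = parts / 2; i >= 1; i /= 2)
    for (unsigned j = 0; j < i; j++)
      v[j] = v[j] + v[j + i];

  printf ("reduced: %g\n", v[0]);   /* 10 */
  return 0;
}
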
gcc_assert (GET_CODE (x) == CONST_VECTOR);
units = CONST_VECTOR_NUNITS (x);
- size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ size = GET_MODE_UNIT_SIZE (mode);
if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
for (i = 0; i < units; i++)
/* for V1xx modes, just use the base mode */
if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
- && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
+ && GET_MODE_UNIT_SIZE (mode) == bytes)
mode = GET_MODE_INNER (mode);
/* Classification of atomic types. */
&& data_mode == mode
&& cop1 == CONST0_RTX (mode)
&& operands[1 + (code == LT)] == CONST0_RTX (data_mode)
- && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) > 1
- && GET_MODE_SIZE (GET_MODE_INNER (data_mode)) <= 8
+ && GET_MODE_UNIT_SIZE (data_mode) > 1
+ && GET_MODE_UNIT_SIZE (data_mode) <= 8
&& (GET_MODE_SIZE (data_mode) == 16
|| (TARGET_AVX2 && GET_MODE_SIZE (data_mode) == 32)))
{
return false;
if (TARGET_AVX512F && GET_MODE_SIZE (vmode) == 64
&& (TARGET_AVX512BW
- || GET_MODE_SIZE (GET_MODE_INNER (vmode)) >= 4))
+ || GET_MODE_UNIT_SIZE (vmode) >= 4))
;
else if (TARGET_AVX2 && GET_MODE_SIZE (vmode) == 32)
;
rperm[i] = GEN_INT ((d->perm[i * nelt / 16] * 16 / nelt) & 15);
else
{
- eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
+ eltsz = GET_MODE_UNIT_SIZE (d->vmode);
if (!d->one_operand_p)
mask = 2 * nelt - 1;
else if (vmode == V16QImode)
return true;
nelt = d->nelt;
- eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
+ eltsz = GET_MODE_UNIT_SIZE (d->vmode);
/* Generate two permutation masks. If the required element is within
the given vector it is shuffled into the proper lane. If the required
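
Note (toy byte-level model, not the SSE intrinsics): the comment above
describes the scheme shared by the expand_vec_perm_pshufb2 family: one
selector pulls in-range bytes from the first operand, a second pulls the
rest from the second operand, out-of-range lanes are zeroed by setting
bit 7 of the selector byte, and the two partial results are OR'd together:

#include <stdint.h>
#include <stdio.h>

/* pshufb semantics for one 16-byte source: selector bit 7 yields 0,
   otherwise the low 4 bits index the source.  */
static void pshufb (const uint8_t *src, const uint8_t *sel, uint8_t *dst)
{
  for (int i = 0; i < 16; i++)
    dst[i] = (sel[i] & 0x80) ? 0 : src[sel[i] & 15];
}

int main (void)
{
  uint8_t a[16], b[16], m1[16], m2[16], t1[16], t2[16];

  for (int i = 0; i < 16; i++)
    {
      a[i] = i;              /* first operand: bytes 0..15 */
      b[i] = 16 + i;         /* second operand: bytes 16..31 */
      /* Example permutation: result byte i is byte (i + 8) of the
         32-byte concatenation {a, b}.  */
      unsigned e = (i + 8) & 31;
      m1[i] = e < 16 ? e : 0x80;        /* from a, else force zero */
      m2[i] = e >= 16 ? e - 16 : 0x80;  /* from b, else force zero */
    }

  pshufb (a, m1, t1);
  pshufb (b, m2, t2);
  for (int i = 0; i < 16; i++)
    printf ("%d ", t1[i] | t2[i]);      /* 8 9 ... 23 */
  printf ("\n");
  return 0;
}
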
return true;
nelt = d->nelt;
- eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
+ eltsz = GET_MODE_UNIT_SIZE (d->vmode);
/* Generate two permutation masks. If the required element is within
the same lane, it is shuffled in. If the required element from the
return true;
nelt = d->nelt;
- eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
+ eltsz = GET_MODE_UNIT_SIZE (d->vmode);
/* Generate two permutation masks. In the first permutation mask
the first quarter will contain indexes for the first half
return true;
nelt = d->nelt;
- eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
+ eltsz = GET_MODE_UNIT_SIZE (d->vmode);
/* Generate 4 permutation masks. If the required element is within
the same lane, it is shuffled in. If the required element from the
{
int mask,selector;
mask = INTVAL (operands[3]);
- selector = GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) == 4 ?
+ selector = GET_MODE_UNIT_SIZE (<MODE>mode) == 4 ?
0xFFFF ^ (0xF000 >> mask * 4)
: 0xFF ^ (0xC0 >> mask * 2);
emit_insn (gen_<extract_type>_vinsert<shuffletype><extract_suf>_1_mask
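
Note (values only; how the mask bits are consumed is defined by the insn
pattern itself): a quick check of the selector arithmetic above, which
clears one four-bit group (4-byte units) or one two-bit group (8-byte
units) chosen by the immediate:

#include <stdio.h>

int main (void)
{
  /* 4-byte units give a 16-bit mask with four bits per 128-bit chunk;
     8-byte units give an 8-bit mask with two bits per chunk.  */
  for (int mask = 0; mask < 4; mask++)
    printf ("mask=%d  unit=4: 0x%04X  unit=8: 0x%02X\n", mask,
            0xFFFF ^ (0xF000 >> mask * 4), 0xFF ^ (0xC0 >> mask * 2));
  return 0;
}
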
[(match_operand 3 "const_int_operand" "n, n")])))]
"TARGET_SSSE3"
{
- machine_mode imode = GET_MODE_INNER (GET_MODE (operands[0]));
- operands[2] = GEN_INT (INTVAL (operands[3]) * GET_MODE_SIZE (imode));
+ operands[2] = GEN_INT (INTVAL (operands[3])
+ * GET_MODE_UNIT_SIZE (GET_MODE (operands[0])));
switch (which_alternative)
{
unsigned int regno;
rtx r1, r2;
machine_mode inner = GET_MODE_INNER (mode);
- unsigned int inner_bytes = GET_MODE_SIZE (inner);
+ unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
regno = FP_ARG_RETURN;
constant_to_array (mode, op, arr);
- mode = GET_MODE_INNER (mode);
- bytes = GET_MODE_SIZE (mode);
- mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
+ bytes = GET_MODE_UNIT_SIZE (mode);
+ mode = mode_for_size (GET_MODE_BITSIZE (GET_MODE_INNER (mode)), MODE_INT, 0);
/* Check that bytes are repeated. */
for (i = bytes; i < 16; i += bytes)
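
Note (toy version of the repetition test above): the constant is only
usable when its first BYTES bytes tile the whole 16-byte array:

#include <stdbool.h>
#include <stdio.h>

static bool repeats (const unsigned char arr[16], int bytes)
{
  for (int i = bytes; i < 16; i += bytes)
    for (int j = 0; j < bytes; j++)
      if (arr[j] != arr[i + j])
        return false;
  return true;
}

int main (void)
{
  unsigned char splat[16] = { 1, 2, 3, 4, 1, 2, 3, 4,
                              1, 2, 3, 4, 1, 2, 3, 4 };
  unsigned char ramp[16]  = { 0, 1, 2, 3, 4, 5, 6, 7,
                              8, 9, 10, 11, 12, 13, 14, 15 };
  printf ("%d %d\n", repeats (splat, 4), repeats (ramp, 4));  /* 1 0 */
  return 0;
}
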
constant_to_array (mode, op, arr);
- if (VECTOR_MODE_P (mode))
- mode = GET_MODE_INNER (mode);
+ mode = GET_MODE_INNER (mode);
bytes = GET_MODE_SIZE (mode);
int_mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
|| GET_CODE (op) == CONST_VECTOR)
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
else
{
machine_mode inmode = GET_MODE (op);
- int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
+ int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
gcc_assert (in_n_elts < n_elts);
if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
machine_mode opmode = GET_MODE (op);
- int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
+ int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
rtx op1 = XEXP (trueop0, 1);
machine_mode opmode = GET_MODE (op0);
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
+ int elt_size = GET_MODE_UNIT_SIZE (opmode);
int n_elts = GET_MODE_SIZE (opmode) / elt_size;
int i = INTVAL (XVECEXP (trueop1, 0, 0));
/* Find out number of elements of each operand. */
if (VECTOR_MODE_P (mode00))
{
- elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
+ elt_size = GET_MODE_UNIT_SIZE (mode00);
n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
}
else
if (VECTOR_MODE_P (mode01))
{
- elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
+ elt_size = GET_MODE_UNIT_SIZE (mode01);
n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
}
else
if (GET_CODE (trueop0) == CONST_VECTOR)
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
rtx subop1 = XEXP (trueop0, 1);
machine_mode mode0 = GET_MODE (subop0);
machine_mode mode1 = GET_MODE (subop1);
- int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
+ int li = GET_MODE_UNIT_SIZE (mode0);
int l0 = GET_MODE_SIZE (mode0) / li;
int l1 = GET_MODE_SIZE (mode1) / li;
int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
|| CONST_SCALAR_INT_P (trueop1)
|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
trueop2 = avoid_constant_pool_reference (op2);
if (CONST_INT_P (trueop2))
{
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
unsigned HOST_WIDE_INT mask;
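
Note (scalar model of the fold that follows this setup): with a CONST_INT
selector, VEC_MERGE takes result element i from the first operand when
bit i of sel is set and from the second operand otherwise:

#include <stdio.h>

int main (void)
{
  int x[4] = { 10, 11, 12, 13 };   /* operand 0 */
  int y[4] = { 20, 21, 22, 23 };   /* operand 1 */
  unsigned sel = 0x5;              /* bits 0 and 2 set */
  int r[4];

  for (int i = 0; i < 4; i++)
    r[i] = (sel & (1u << i)) ? x[i] : y[i];

  for (int i = 0; i < 4; i++)
    printf ("%d ", r[i]);          /* 10 21 12 23 */
  printf ("\n");
  return 0;
}
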