+2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
+            Alan Hayward  <alan.hayward@arm.com>
+            David Sherwood  <david.sherwood@arm.com>
+
+ * emit-rtl.h (gen_const_vec_duplicate): Declare.
+ (gen_vec_duplicate): Likewise.
+ * emit-rtl.c (gen_const_vec_duplicate_1): New function, split
+ out from...
+ (gen_const_vector): ...here.
+ (gen_const_vec_duplicate, gen_vec_duplicate): New functions.
+ (gen_rtx_CONST_VECTOR): Use gen_const_vec_duplicate for constants
+ whose elements are all equal.
+ * optabs.c (expand_vector_broadcast): Use gen_const_vec_duplicate.
+ * simplify-rtx.c (simplify_const_unary_operation): Likewise.
+ (simplify_relational_operation): Likewise.
+ * config/aarch64/aarch64.c (aarch64_simd_gen_const_vector_dup):
+ Likewise.
+ (aarch64_simd_dup_constant): Use gen_vec_duplicate.
+ (aarch64_expand_vector_init): Likewise.
+ * config/arm/arm.c (neon_vdup_constant): Likewise.
+ (neon_expand_vector_init): Likewise.
+ (arm_expand_vec_perm): Use gen_const_vec_duplicate.
+ (arm_block_set_unaligned_vect): Likewise.
+ (arm_block_set_aligned_vect): Likewise.
+ * config/arm/neon.md (neon_copysignf<mode>): Likewise.
+ * config/i386/i386.c (ix86_expand_vec_perm): Likewise.
+ (expand_vec_perm_even_odd_pack): Likewise.
+ (ix86_vector_duplicate_value): Use gen_vec_duplicate.
+ * config/i386/sse.md (one_cmpl<mode>2): Use CONSTM1_RTX.
+ * config/ia64/ia64.c (ia64_expand_vecint_compare): Use
+ gen_const_vec_duplicate.
+ * config/ia64/vect.md (addv2sf3, subv2sf3): Use CONST1_RTX.
+ * config/mips/mips.c (mips_gen_const_int_vector): Use
+ gen_const_vec_duplicate.
+ (mips_expand_vector_init): Use CONST0_RTX.
+ * config/powerpcspe/altivec.md (abs<mode>2, nabs<mode>2): Likewise.
+ (define_split): Use gen_const_vec_duplicate.
+ * config/rs6000/altivec.md (abs<mode>2, nabs<mode>2): Use CONST0_RTX.
+ (define_split): Use gen_const_vec_duplicate.
+ * config/s390/vx-builtins.md (vec_genmask<mode>): Likewise.
+ (vec_ctd_s64, vec_ctd_u64, vec_ctsl, vec_ctul): Likewise.
+ * config/spu/spu.c (spu_const): Likewise.
+
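For reference, a minimal caller-side sketch of the idiom this patch replaces. The helper names build_splat_old and build_splat_new are hypothetical, illustration only; the rtl calls (gen_int_mode, rtvec_alloc, gen_const_vec_duplicate) are the ones used in the hunks below, mirroring the mips_gen_const_int_vector change.

/* Old style: allocate an rtvec and fill every element by hand.  */
static rtx
build_splat_old (machine_mode mode, HOST_WIDE_INT val)
{
  int nunits = GET_MODE_NUNITS (mode);
  rtvec v = rtvec_alloc (nunits);
  for (int i = 0; i < nunits; ++i)
    RTVEC_ELT (v, i) = gen_int_mode (val, GET_MODE_INNER (mode));
  return gen_rtx_CONST_VECTOR (mode, v);
}

/* New style: one call; canonicalization to CONST0_RTX, CONST1_RTX and
   CONSTM1_RTX happens centrally in gen_const_vec_duplicate.  */
static rtx
build_splat_new (machine_mode mode, HOST_WIDE_INT val)
{
  rtx elt = gen_int_mode (val, GET_MODE_INNER (mode));
  return gen_const_vec_duplicate (mode, elt);
}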
2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
            Alan Hayward  <alan.hayward@arm.com>
            David Sherwood  <david.sherwood@arm.com>
rtx
aarch64_simd_gen_const_vector_dup (machine_mode mode, HOST_WIDE_INT val)
{
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits);
- int i;
-
- rtx cache = GEN_INT (val);
-
- for (i=0; i < nunits; i++)
- RTVEC_ELT (v, i) = cache;
-
- return gen_rtx_CONST_VECTOR (mode, v);
+ rtx c = gen_int_mode (val, GET_MODE_INNER (mode));
+ return gen_const_vec_duplicate (mode, c);
}
/* Check OP is a legal scalar immediate for the MOVI instruction. */
single ARM register. This will be cheaper than a vector
load. */
x = copy_to_mode_reg (inner_mode, x);
- return gen_rtx_VEC_DUPLICATE (mode, x);
+ return gen_vec_duplicate (mode, x);
}
if (all_same)
{
rtx x = copy_to_mode_reg (inner_mode, v0);
- aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
+ aarch64_emit_move (target, gen_vec_duplicate (mode, x));
return;
}
/* Create a duplicate of the most common element. */
rtx x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, maxelement));
- aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
+ aarch64_emit_move (target, gen_vec_duplicate (mode, x));
/* Insert the rest. */
for (int i = 0; i < n_elts; i++)
load. */
x = copy_to_mode_reg (inner_mode, x);
- return gen_rtx_VEC_DUPLICATE (mode, x);
+ return gen_vec_duplicate (mode, x);
}
/* Generate code to load VALS, which is a PARALLEL containing only
if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
{
x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
+ emit_insn (gen_rtx_SET (target, gen_vec_duplicate (mode, x)));
return;
}
arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
{
machine_mode vmode = GET_MODE (target);
- unsigned int i, nelt = GET_MODE_NUNITS (vmode);
+ unsigned int nelt = GET_MODE_NUNITS (vmode);
bool one_vector_p = rtx_equal_p (op0, op1);
- rtx rmask[MAX_VECT_LEN], mask;
+ rtx mask;
/* TODO: ARM's VTBL indexing is little-endian. In order to handle GCC's
numbering of elements for big-endian, we must reverse the order. */
/* The VTBL instruction does not use a modulo index, so we must take care
of that ourselves. */
mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1);
- for (i = 0; i < nelt; ++i)
- rmask[i] = mask;
- mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask));
+ mask = gen_const_vec_duplicate (vmode, mask);
sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
arm_expand_vec_perm_1 (target, op0, op1, sel);
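A side note on the mask built above: for a power-of-two element count, ANDing the selector with nelt - 1 (or 2 * nelt - 1 when two vectors form the table) implements the modulo wrap that VTBL does not perform itself. A small arithmetic sketch, illustration only:

/* Illustration only: power-of-two wrap via AND.  */
static unsigned int
vtbl_wrap_example (void)
{
  unsigned int nelt = 8;        /* single-vector case */
  unsigned int sel_elt = 11;    /* out-of-range index */
  return sel_elt & (nelt - 1);  /* 11 % 8 == 3 */
}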
unsigned HOST_WIDE_INT value,
unsigned HOST_WIDE_INT align)
{
- unsigned int i, j, nelt_v16, nelt_v8, nelt_mode;
+ unsigned int i, nelt_v16, nelt_v8, nelt_mode;
rtx dst, mem;
- rtx val_elt, val_vec, reg;
- rtx rval[MAX_VECT_LEN];
+ rtx val_vec, reg;
rtx (*gen_func) (rtx, rtx);
machine_mode mode;
unsigned HOST_WIDE_INT v = value;
mem = adjust_automodify_address (dstbase, mode, dst, offset);
v = sext_hwi (v, BITS_PER_WORD);
- val_elt = GEN_INT (v);
- for (j = 0; j < nelt_mode; j++)
- rval[j] = val_elt;
reg = gen_reg_rtx (mode);
- val_vec = gen_rtx_CONST_VECTOR (mode, gen_rtvec_v (nelt_mode, rval));
+ val_vec = gen_const_vec_duplicate (mode, GEN_INT (v));
/* Emit instruction loading the constant value. */
emit_move_insn (reg, val_vec);
unsigned HOST_WIDE_INT value,
unsigned HOST_WIDE_INT align)
{
- unsigned int i, j, nelt_v8, nelt_v16, nelt_mode;
+ unsigned int i, nelt_v8, nelt_v16, nelt_mode;
rtx dst, addr, mem;
- rtx val_elt, val_vec, reg;
- rtx rval[MAX_VECT_LEN];
+ rtx val_vec, reg;
machine_mode mode;
unsigned HOST_WIDE_INT v = value;
unsigned int offset = 0;
dst = copy_addr_to_reg (XEXP (dstbase, 0));
v = sext_hwi (v, BITS_PER_WORD);
- val_elt = GEN_INT (v);
- for (j = 0; j < nelt_mode; j++)
- rval[j] = val_elt;
reg = gen_reg_rtx (mode);
- val_vec = gen_rtx_CONST_VECTOR (mode, gen_rtvec_v (nelt_mode, rval));
+ val_vec = gen_const_vec_duplicate (mode, GEN_INT (v));
/* Emit instruction loading the constant value. */
emit_move_insn (reg, val_vec);
"{
rtx v_bitmask_cast;
rtx v_bitmask = gen_reg_rtx (<VCVTF:V_cmp_result>mode);
- int i, n_elt = GET_MODE_NUNITS (<MODE>mode);
- rtvec v = rtvec_alloc (n_elt);
-
- /* Create bitmask for vector select. */
- for (i = 0; i < n_elt; ++i)
- RTVEC_ELT (v, i) = GEN_INT (0x80000000);
+ rtx c = GEN_INT (0x80000000);
emit_move_insn (v_bitmask,
- gen_rtx_CONST_VECTOR (<VCVTF:V_cmp_result>mode, v));
+ gen_const_vec_duplicate (<VCVTF:V_cmp_result>mode, c));
emit_move_insn (operands[0], operands[2]);
v_bitmask_cast = simplify_gen_subreg (<MODE>mode, v_bitmask,
<VCVTF:V_cmp_result>mode, 0);
t2 = gen_reg_rtx (V32QImode);
t3 = gen_reg_rtx (V32QImode);
vt2 = GEN_INT (-128);
- for (i = 0; i < 32; i++)
- vec[i] = vt2;
- vt = gen_rtx_CONST_VECTOR (V32QImode, gen_rtvec_v (32, vec));
+ vt = gen_const_vec_duplicate (V32QImode, vt2);
vt = force_reg (V32QImode, vt);
for (i = 0; i < 32; i++)
vec[i] = i < 16 ? vt2 : const0_rtx;
vt = GEN_INT (w - 1);
}
- for (i = 0; i < w; i++)
- vec[i] = vt;
- vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
+ vt = gen_const_vec_duplicate (maskmode, vt);
mask = expand_simple_binop (maskmode, AND, mask, vt,
NULL_RTX, 0, OPTAB_DIRECT);
e = w = 4;
}
- for (i = 0; i < w; i++)
- vec[i] = vt;
- vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
+ vt = gen_const_vec_duplicate (maskmode, vt);
vt = force_reg (maskmode, vt);
mask = expand_simple_binop (maskmode, AND, mask, vt,
NULL_RTX, 0, OPTAB_DIRECT);
rtx dup;
/* First attempt to recognize VAL as-is. */
- dup = gen_rtx_VEC_DUPLICATE (mode, val);
+ dup = gen_vec_duplicate (mode, val);
insn = emit_insn (gen_rtx_SET (target, dup));
if (recog_memoized (insn) < 0)
{
static bool
expand_vec_perm_even_odd_pack (struct expand_vec_perm_d *d)
{
- rtx op, dop0, dop1, t, rperm[16];
+ rtx op, dop0, dop1, t;
unsigned i, odd, c, s, nelt = d->nelt;
bool end_perm = false;
machine_mode half_mode;
dop1 = gen_reg_rtx (half_mode);
if (odd == 0)
{
- for (i = 0; i < nelt / 2; i++)
- rperm[i] = GEN_INT (c);
- t = gen_rtx_CONST_VECTOR (half_mode, gen_rtvec_v (nelt / 2, rperm));
+ t = gen_const_vec_duplicate (half_mode, GEN_INT (c));
t = force_reg (half_mode, t);
emit_insn (gen_and (dop0, t, gen_lowpart (half_mode, d->op0)));
emit_insn (gen_and (dop1, t, gen_lowpart (half_mode, d->op1)));
(match_dup 2)))]
"TARGET_SSE"
{
- int i, n = GET_MODE_NUNITS (<MODE>mode);
- rtvec v = rtvec_alloc (n);
-
- for (i = 0; i < n; ++i)
- RTVEC_ELT (v, i) = constm1_rtx;
-
- operands[2] = force_reg (<MODE>mode, gen_rtx_CONST_VECTOR (<MODE>mode, v));
+ operands[2] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
})
(define_expand "<sse2_avx2>_andnot<mode>3"
/* Subtract (-(INT MAX) - 1) from both operands to make
them signed. */
mask = gen_int_mode (0x80000000, SImode);
- mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
+ mask = gen_const_vec_duplicate (V2SImode, mask);
mask = force_reg (mode, mask);
t1 = gen_reg_rtx (mode);
emit_insn (gen_subv2si3 (t1, op0, mask));
(match_operand:V2SF 2 "fr_register_operand" "")))]
""
{
- rtvec v = gen_rtvec (2, CONST1_RTX (SFmode), CONST1_RTX (SFmode));
- operands[3] = force_reg (V2SFmode, gen_rtx_CONST_VECTOR (V2SFmode, v));
+ operands[3] = force_reg (V2SFmode, CONST1_RTX (V2SFmode));
})
(define_expand "subv2sf3"
(neg:V2SF (match_operand:V2SF 2 "fr_register_operand" ""))))]
""
{
- rtvec v = gen_rtvec (2, CONST1_RTX (SFmode), CONST1_RTX (SFmode));
- operands[3] = force_reg (V2SFmode, gen_rtx_CONST_VECTOR (V2SFmode, v));
+ operands[3] = force_reg (V2SFmode, CONST1_RTX (V2SFmode));
})
(define_insn "mulv2sf3"
rtx
mips_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val)
{
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits);
- int i;
-
- for (i = 0; i < nunits; i++)
- RTVEC_ELT (v, i) = gen_int_mode (val, GET_MODE_INNER (mode));
-
- return gen_rtx_CONST_VECTOR (mode, v);
+ rtx c = gen_int_mode (val, GET_MODE_INNER (mode));
+ return gen_const_vec_duplicate (mode, c);
}
/* Return a vector of repeated 4-element sets generated from
}
else
{
- rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
-
- for (i = 0; i < nelt; ++i)
- RTVEC_ELT (vec, i) = CONST0_RTX (imode);
-
- emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
+ emit_move_insn (target, CONST0_RTX (vmode));
for (i = 0; i < nelt; ++i)
{
HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
rtx rtx_val = GEN_INT (val);
int shift = vspltis_shifted (op1);
- int nunits = GET_MODE_NUNITS (<MODE>mode);
- int i;
gcc_assert (shift != 0);
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, rtvec_alloc (nunits));
+ operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
operands[4] = gen_reg_rtx (<MODE>mode);
if (shift < 0)
operands[5] = CONST0_RTX (<MODE>mode);
operands[6] = GEN_INT (shift);
}
-
- /* Populate the constant vectors. */
- for (i = 0; i < nunits; i++)
- XVECEXP (operands[3], 0, i) = rtx_val;
})
(define_insn "get_vrsave_internal"
(smax:VI2 (match_dup 1) (match_dup 4)))]
"<VI_unit>"
{
- int i, n_elt = GET_MODE_NUNITS (<MODE>mode);
- rtvec v = rtvec_alloc (n_elt);
-
- /* Create an all 0 constant. */
- for (i = 0; i < n_elt; ++i)
- RTVEC_ELT (v, i) = const0_rtx;
-
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v);
+ operands[3] = CONST0_RTX (<MODE>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
})
(smin:VI2 (match_dup 1) (match_dup 4)))]
"<VI_unit>"
{
- int i;
- int n_elt = GET_MODE_NUNITS (<MODE>mode);
-
- rtvec v = rtvec_alloc (n_elt);
-
- /* Create an all 0 constant. */
- for (i = 0; i < n_elt; ++i)
- RTVEC_ELT (v, i) = const0_rtx;
-
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v);
+ operands[3] = CONST0_RTX (<MODE>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
})
HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt);
rtx rtx_val = GEN_INT (val);
int shift = vspltis_shifted (op1);
- int nunits = GET_MODE_NUNITS (<MODE>mode);
- int i;
gcc_assert (shift != 0);
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, rtvec_alloc (nunits));
+ operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val);
operands[4] = gen_reg_rtx (<MODE>mode);
if (shift < 0)
operands[5] = CONST0_RTX (<MODE>mode);
operands[6] = GEN_INT (shift);
}
-
- /* Populate the constant vectors. */
- for (i = 0; i < nunits; i++)
- XVECEXP (operands[3], 0, i) = rtx_val;
})
(define_insn "get_vrsave_internal"
(smax:VI2 (match_dup 1) (match_dup 4)))]
"<VI_unit>"
{
- int i, n_elt = GET_MODE_NUNITS (<MODE>mode);
- rtvec v = rtvec_alloc (n_elt);
-
- /* Create an all 0 constant. */
- for (i = 0; i < n_elt; ++i)
- RTVEC_ELT (v, i) = const0_rtx;
-
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v);
+ operands[3] = CONST0_RTX (<MODE>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
})
(smin:VI2 (match_dup 1) (match_dup 4)))]
"<VI_unit>"
{
- int i;
- int n_elt = GET_MODE_NUNITS (<MODE>mode);
-
- rtvec v = rtvec_alloc (n_elt);
-
- /* Create an all 0 constant. */
- for (i = 0; i < n_elt; ++i)
- RTVEC_ELT (v, i) = const0_rtx;
-
operands[2] = gen_reg_rtx (<MODE>mode);
- operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v);
+ operands[3] = CONST0_RTX (<MODE>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
})
(match_operand:QI 2 "const_int_operand" "C")]
"TARGET_VX"
{
- int nunits = GET_MODE_NUNITS (<VI_HW:MODE>mode);
int bitlen = GET_MODE_UNIT_BITSIZE (<VI_HW:MODE>mode);
/* To bit little endian style. */
int end = bitlen - 1 - INTVAL (operands[1]);
int start = bitlen - 1 - INTVAL (operands[2]);
- rtx const_vec[16];
int i;
unsigned HOST_WIDE_INT mask;
bool swapped_p = false;
if (swapped_p)
mask = ~mask;
- for (i = 0; i < nunits; i++)
- const_vec[i] = GEN_INT (trunc_int_for_mode (mask,
- GET_MODE_INNER (<VI_HW:MODE>mode)));
+ rtx mask_rtx = gen_int_mode (mask, GET_MODE_INNER (<VI_HW:MODE>mode));
emit_insn (gen_rtx_SET (operands[0],
- gen_rtx_CONST_VECTOR (<VI_HW:MODE>mode,
- gen_rtvec_v (nunits, const_vec))));
+ gen_const_vec_duplicate (<VI_HW:MODE>mode,
+ mask_rtx)));
DONE;
})
real_2expN (&f, -INTVAL (operands[2]), DFmode);
c = const_double_from_real_value (f, DFmode);
- operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+ operands[3] = gen_const_vec_duplicate (V2DFmode, c);
operands[3] = force_reg (V2DFmode, operands[3]);
})
real_2expN (&f, -INTVAL (operands[2]), DFmode);
c = const_double_from_real_value (f, DFmode);
- operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+ operands[3] = gen_const_vec_duplicate (V2DFmode, c);
operands[3] = force_reg (V2DFmode, operands[3]);
})
real_2expN (&f, INTVAL (operands[2]), DFmode);
c = const_double_from_real_value (f, DFmode);
- operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+ operands[3] = gen_const_vec_duplicate (V2DFmode, c);
operands[3] = force_reg (V2DFmode, operands[3]);
operands[4] = gen_reg_rtx (V2DFmode);
})
real_2expN (&f, INTVAL (operands[2]), DFmode);
c = const_double_from_real_value (f, DFmode);
- operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+ operands[3] = gen_const_vec_duplicate (V2DFmode, c);
operands[3] = force_reg (V2DFmode, operands[3]);
operands[4] = gen_reg_rtx (V2DFmode);
})
spu_const (machine_mode mode, HOST_WIDE_INT val)
{
rtx inner;
- rtvec v;
- int units, i;
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
|| GET_MODE_CLASS (mode) == MODE_FLOAT
else
inner = hwint_to_const_double (GET_MODE_INNER (mode), val);
- units = GET_MODE_NUNITS (mode);
-
- v = rtvec_alloc (units);
-
- for (i = 0; i < units; ++i)
- RTVEC_ELT (v, i) = inner;
-
- return gen_rtx_CONST_VECTOR (mode, v);
+ return gen_const_vec_duplicate (mode, inner);
}
/* Create a MODE vector constant from 4 ints. */
#endif
}
-/* Generate a vector constant for mode MODE and constant value CONSTANT. */
+/* Like gen_const_vec_duplicate, but ignore const_tiny_rtx. */
static rtx
-gen_const_vector (machine_mode mode, int constant)
+gen_const_vec_duplicate_1 (machine_mode mode, rtx el)
{
- rtx tem;
- rtvec v;
- int units, i;
- machine_mode inner;
+ int nunits = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (nunits);
+ for (int i = 0; i < nunits; ++i)
+ RTVEC_ELT (v, i) = el;
+ return gen_rtx_raw_CONST_VECTOR (mode, v);
+}
- units = GET_MODE_NUNITS (mode);
- inner = GET_MODE_INNER (mode);
+/* Generate a vector constant of mode MODE in which every element has
+ value ELT. */
- gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));
+rtx
+gen_const_vec_duplicate (machine_mode mode, rtx elt)
+{
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
+ if (elt == CONST0_RTX (inner_mode))
+ return CONST0_RTX (mode);
+ else if (elt == CONST1_RTX (inner_mode))
+ return CONST1_RTX (mode);
+ else if (elt == CONSTM1_RTX (inner_mode))
+ return CONSTM1_RTX (mode);
+
+ return gen_const_vec_duplicate_1 (mode, elt);
+}
+
+/* Return a vector rtx of mode MODE in which every element has value X.
+ The result will be a constant if X is constant. */
+
+rtx
+gen_vec_duplicate (machine_mode mode, rtx x)
+{
+ if (CONSTANT_P (x))
+ return gen_const_vec_duplicate (mode, x);
+ return gen_rtx_VEC_DUPLICATE (mode, x);
+}
- v = rtvec_alloc (units);
+/* Generate a new vector constant for mode MODE and constant value
+ CONSTANT. */
- /* We need to call this function after we set the scalar const_tiny_rtx
- entries. */
- gcc_assert (const_tiny_rtx[constant][(int) inner]);
+static rtx
+gen_const_vector (machine_mode mode, int constant)
+{
+ machine_mode inner = GET_MODE_INNER (mode);
- for (i = 0; i < units; ++i)
- RTVEC_ELT (v, i) = const_tiny_rtx[constant][(int) inner];
+ gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));
+
+ rtx el = const_tiny_rtx[constant][(int) inner];
+ gcc_assert (el);
- tem = gen_rtx_raw_CONST_VECTOR (mode, v);
- return tem;
+ return gen_const_vec_duplicate_1 (mode, el);
}
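A hedged illustration of the invariants the new helpers provide, following directly from the definitions above; the wrapper function is hypothetical and for illustration only.

static void
vec_duplicate_examples (void)
{
  /* Duplicating a shared tiny rtx returns the shared vector constant,
     not a freshly allocated CONST_VECTOR.  */
  rtx zero = gen_const_vec_duplicate (V4SImode, const0_rtx);
  gcc_assert (zero == CONST0_RTX (V4SImode));

  /* A non-constant element yields a VEC_DUPLICATE expression instead.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx dup = gen_vec_duplicate (V4SImode, reg);
  gcc_assert (GET_CODE (dup) == VEC_DUPLICATE);
}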
/* Generate a vector like gen_rtx_raw_CONST_VEC, but use the zero vector when
rtx
gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
{
- machine_mode inner = GET_MODE_INNER (mode);
- int nunits = GET_MODE_NUNITS (mode);
- rtx x;
- int i;
-
- /* Check to see if all of the elements have the same value. */
- x = RTVEC_ELT (v, nunits - 1);
- for (i = nunits - 2; i >= 0; i--)
- if (RTVEC_ELT (v, i) != x)
- break;
+ gcc_assert (GET_MODE_NUNITS (mode) == GET_NUM_ELEM (v));
/* If the values are all the same, check to see if we can use one of the
standard constant vectors. */
- if (i == -1)
- {
- if (x == CONST0_RTX (inner))
- return CONST0_RTX (mode);
- else if (x == CONST1_RTX (inner))
- return CONST1_RTX (mode);
- else if (x == CONSTM1_RTX (inner))
- return CONSTM1_RTX (mode);
- }
+ if (rtvec_all_equal_p (v))
+ return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0));
return gen_rtx_raw_CONST_VECTOR (mode, v);
}
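Similarly, a sketch (hypothetical example function) of the effect on gen_rtx_CONST_VECTOR: an all-equal rtvec now funnels through gen_const_vec_duplicate, so the standard constants come back as the shared objects.

static rtx
const_vector_example (void)
{
  rtvec v = rtvec_alloc (4);
  for (int i = 0; i < 4; ++i)
    RTVEC_ELT (v, i) = const1_rtx;
  /* rtvec_all_equal_p holds and the element is CONST1_RTX (SImode),
     so this returns the shared CONST1_RTX (V4SImode).  */
  return gen_rtx_CONST_VECTOR (V4SImode, v);
}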
return crtl->emit.x_cur_insn_uid;
}
+extern rtx gen_const_vec_duplicate (machine_mode, rtx);
+extern rtx gen_vec_duplicate (machine_mode, rtx);
+
extern void set_decl_incoming_rtl (tree, rtx, bool);
/* Return a memory reference like MEMREF, but with its mode changed
gcc_checking_assert (VECTOR_MODE_P (vmode));
- n = GET_MODE_NUNITS (vmode);
- vec = rtvec_alloc (n);
- for (i = 0; i < n; ++i)
- RTVEC_ELT (vec, i) = op;
-
if (CONSTANT_P (op))
- return gen_rtx_CONST_VECTOR (vmode, vec);
+ return gen_const_vec_duplicate (vmode, op);
/* ??? If the target doesn't have a vec_init, then we have no easy way
of performing this operation. Most of this sort of generic support
if (icode == CODE_FOR_nothing)
return NULL;
+ n = GET_MODE_NUNITS (vmode);
+ vec = rtvec_alloc (n);
+ for (i = 0; i < n; ++i)
+ RTVEC_ELT (vec, i) = op;
ret = gen_reg_rtx (vmode);
emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
(GET_MODE (op)));
}
- if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
- || GET_CODE (op) == CONST_VECTOR)
+ if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
+ return gen_const_vec_duplicate (mode, op);
+ if (GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_UNIT_SIZE (mode);
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (GET_CODE (op) != CONST_VECTOR)
- for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = op;
- else
- {
- machine_mode inmode = GET_MODE (op);
- int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
- unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
+ machine_mode inmode = GET_MODE (op);
+ int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
+ unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
- gcc_assert (in_n_elts < n_elts);
- gcc_assert ((n_elts % in_n_elts) == 0);
- for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
- }
+ gcc_assert (in_n_elts < n_elts);
+ gcc_assert ((n_elts % in_n_elts) == 0);
+ for (i = 0; i < n_elts; i++)
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
return gen_rtx_CONST_VECTOR (mode, v);
}
}
return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
{
- int i, units;
- rtvec v;
-
rtx val = VECTOR_STORE_FLAG_VALUE (mode);
if (val == NULL_RTX)
return NULL_RTX;
if (val == const1_rtx)
return CONST1_RTX (mode);
- units = GET_MODE_NUNITS (mode);
- v = rtvec_alloc (units);
- for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) = val;
- return gen_rtx_raw_CONST_VECTOR (mode, v);
+ return gen_const_vec_duplicate (mode, val);
}
#else
return NULL_RTX;