; generic vectorizer code. It ends up creating a V2DI constructor with
; SImode elements.
-(define_insn "vashl<mode>3"
- [(set (match_operand:VDQIW 0 "s_register_operand" "=w,w")
- (ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w,w")
- (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "w,Dm")))]
- "TARGET_NEON"
- {
- switch (which_alternative)
- {
- case 0: return "vshl.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2";
- case 1: return neon_output_shift_immediate ("vshl", 'i', &operands[2],
- <MODE>mode,
- VALID_NEON_QREG_MODE (<MODE>mode),
- true);
- default: gcc_unreachable ();
- }
- }
- [(set_attr "type" "neon_shift_reg<q>, neon_shift_imm<q>")]
-)
-
(define_insn "vashr<mode>3_imm"
[(set (match_operand:VDQIW 0 "s_register_operand" "=w")
(ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w")
if (!neon_vector_mem_operand (adjust_mem, 2, true))
XEXP (adjust_mem, 0) = force_reg (Pmode, XEXP (adjust_mem, 0));
})
+
+(define_insn "mve_vshlq_<supf><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w,w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w,w")
+ (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "w,Dm")]
+ VSHLQ))]
+ "ARM_HAVE_<MODE>_ARITH"
+ "@
+ vshl.<supf>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
+ * return neon_output_shift_immediate (\"vshl\", 'i', &operands[2], <MODE>mode, VALID_NEON_QREG_MODE (<MODE>mode), true);"
+ [(set_attr "type" "neon_shift_reg<q>, neon_shift_imm<q>")]
+)
+
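+;; The first alternative handles a register shift count; the second
+;; emits vshl.i when operand 2 is an immediate vector (Dm).  A left
+;; shift is the same operation for signed and unsigned elements, which
+;; is why the expander below can always forward to the unsigned (u)
+;; variant of this pattern.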
+(define_expand "vashl<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "")
+ (ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "")
+ (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "")))]
+ "ARM_HAVE_<MODE>_ARITH"
+{
+ emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
+ DONE;
+})
\ No newline at end of file
--- /dev/null
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP b[i]; \
+ } \
+}
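+
+/* For reference, FUNC(s, int, 32, 2, <<, vshl) expands to:
+
+   void test_vshl_s32x2 (int32_t * __restrict__ dest,
+                         int32_t *a, int32_t *b) {
+     int i;
+     for (i=0; i<2; i++) {
+       dest[i] = a[i] << b[i];
+     }
+   }  */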
+
+#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP 5; \
+ } \
+}
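+
+/* Likewise, FUNC_IMM(s, int, 32, 2, <<, vshlimm) expands to
+   test_vshlimm_s32x2, which shifts each element left by the
+   constant 5.  */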
+
+/* 64-bit vectors. */
+FUNC(s, int, 32, 2, <<, vshl)
+FUNC(u, uint, 32, 2, <<, vshl)
+FUNC(s, int, 16, 4, <<, vshl)
+FUNC(u, uint, 16, 4, <<, vshl)
+FUNC(s, int, 8, 8, <<, vshl)
+FUNC(u, uint, 8, 8, <<, vshl)
+
+/* 128-bit vectors. */
+FUNC(s, int, 32, 4, <<, vshl)
+FUNC(u, uint, 32, 4, <<, vshl)
+FUNC(s, int, 16, 8, <<, vshl) /* FIXME: not vectorized */
+FUNC(u, uint, 16, 8, <<, vshl) /* FIXME: not vectorized */
+FUNC(s, int, 8, 16, <<, vshl) /* FIXME: not vectorized */
+FUNC(u, uint, 8, 16, <<, vshl) /* FIXME: not vectorized */
+
+/* 64-bit vectors. */
+FUNC_IMM(s, int, 32, 2, <<, vshlimm)
+FUNC_IMM(u, uint, 32, 2, <<, vshlimm)
+FUNC_IMM(s, int, 16, 4, <<, vshlimm)
+FUNC_IMM(u, uint, 16, 4, <<, vshlimm)
+FUNC_IMM(s, int, 8, 8, <<, vshlimm)
+FUNC_IMM(u, uint, 8, 8, <<, vshlimm)
+
+/* 128-bit vectors. */
+FUNC_IMM(s, int, 32, 4, <<, vshlimm)
+FUNC_IMM(u, uint, 32, 4, <<, vshlimm)
+FUNC_IMM(s, int, 16, 8, <<, vshlimm)
+FUNC_IMM(u, uint, 16, 8, <<, vshlimm)
+FUNC_IMM(s, int, 8, 16, <<, vshlimm)
+FUNC_IMM(u, uint, 8, 16, <<, vshlimm)
+
+/* MVE has only 128-bit vectors, so we can vectorize only half of the
+   functions above.  Of the 128-bit register-shift cases, the four
+   marked FIXME are not vectorized yet, leaving the two matches
+   expected below.  */
+/* We only emit vshl.u: for left shifts it is equivalent to vshl.s
+   anyway.  */
+/* { dg-final { scan-assembler-times {vshl.u[0-9]+\tq[0-9]+, q[0-9]+} 2 } } */
+
+/* We emit vshl.i when the shift amount is an immediate. */
+/* { dg-final { scan-assembler-times {vshl.i[0-9]+\tq[0-9]+, q[0-9]+} 6 } } */
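+
+/* For illustration only (not actual compiler output), the vectorized
+   loop body of test_vshl_s32x4 is expected to look roughly like:
+
+     vldrw.u32  q2, [r1]
+     vldrw.u32  q1, [r2]
+     vshl.u32   q2, q2, q1
+     vstrw.32   q2, [r0]
+
+   Register allocation and scheduling vary across compiler versions;
+   the scan-assembler patterns above only match the vshl mnemonics.  */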