(define_mode_iterator VN [V8HI V4SI V2DI])
-;; All supported vector modes (except singleton DImode).
+;; All supported vector modes (except singleton DImode).  MVE supports
+;; only 128-bit vectors, so the 64-bit modes and V2DI are restricted
+;; to !TARGET_HAVE_MVE; see the sketch after this iterator.
-(define_mode_iterator VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V4HF V8HF V2SF V4SF V2DI])
+(define_mode_iterator VDQ [(V8QI "!TARGET_HAVE_MVE") V16QI
+ (V4HI "!TARGET_HAVE_MVE") V8HI
+ (V2SI "!TARGET_HAVE_MVE") V4SI
+ (V4HF "!TARGET_HAVE_MVE") V8HF
+ (V2SF "!TARGET_HAVE_MVE") V4SF
+ (V2DI "!TARGET_HAVE_MVE")])
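With MVE enabled, only the 128-bit modes remain in VDQ, so the
vectorizer can no longer select a 64-bit vector mode. A minimal C
sketch of the effect (mirroring the test at the end of this patch;
assumes -O3 and an MVE target, function names are illustrative):

#include <stdint.h>

/* V2SI is gated out under MVE: this 2-element loop should stay scalar.  */
void and2 (int32_t *__restrict__ d, int32_t *a, int32_t *b)
{
  for (int i = 0; i < 2; i++)
    d[i] = a[i] & b[i];
}

/* V4SI is still available: this 4-element loop should become a vand.  */
void and4 (int32_t *__restrict__ d, int32_t *a, int32_t *b)
{
  for (int i = 0; i < 4; i++)
    d[i] = a[i] & b[i];
}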
;; All supported floating-point vector modes (except V2DF).
(define_mode_iterator VF [(V4HF "TARGET_NEON_FP16INST")
(VADDLVQ_P_U "u") (VCMPNEQ_U "u") (VCMPNEQ_S "s")
(VABDQ_M_S "s") (VABDQ_M_U "u") (VABDQ_S "s")
(VABDQ_U "u") (VADDQ_N_S "s") (VADDQ_N_U "u")
- (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VANDQ_S "s")
- (VANDQ_U "u") (VBICQ_S "s") (VBICQ_U "u")
+ (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VBICQ_S "s") (VBICQ_U "u")
(VBRSRQ_N_S "s") (VBRSRQ_N_U "u") (VCADDQ_ROT270_S "s")
(VCADDQ_ROT270_U "u") (VCADDQ_ROT90_S "s")
(VCMPEQQ_S "s") (VCMPEQQ_U "u") (VCADDQ_ROT90_U "u")
(define_int_iterator VADDQ_N [VADDQ_N_S VADDQ_N_U])
(define_int_iterator VADDVAQ [VADDVAQ_S VADDVAQ_U])
(define_int_iterator VADDVQ_P [VADDVQ_P_U VADDVQ_P_S])
-(define_int_iterator VANDQ [VANDQ_U VANDQ_S])
(define_int_iterator VBICQ [VBICQ_S VBICQ_U])
(define_int_iterator VBRSRQ_N [VBRSRQ_N_U VBRSRQ_N_S])
(define_int_iterator VCADDQ_ROT270 [VCADDQ_ROT270_S VCADDQ_ROT270_U])
;;
;; [vandq_u, vandq_s])
;;
-(define_insn "mve_vandq_<supf><mode>"
+;; Signed and unsigned versions are the same: define the unsigned
+;; insn, and use an expander for the signed one, as we still reference
+;; both names from arm_mve.h (see the sketch after the expander below).
+;; We use the same code as in neon.md (TODO: avoid this duplication).
+(define_insn "mve_vandq_u<mode>"
[
- (set (match_operand:MVE_2 0 "s_register_operand" "=w")
- (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
- (match_operand:MVE_2 2 "s_register_operand" "w")]
- VANDQ))
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w,w")
+ (and:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w,0")
+ (match_operand:MVE_2 2 "neon_inv_logic_op2" "w,DL")))
]
"TARGET_HAVE_MVE"
- "vand %q0, %q1, %q2"
+ "@
+ vand\t%q0, %q1, %q2
+ * return neon_output_logic_immediate (\"vand\", &operands[2], <MODE>mode, 1, VALID_NEON_QREG_MODE (<MODE>mode));"
[(set_attr "type" "mve_move")
])
+(define_expand "mve_vandq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand")
+ (and:MVE_2 (match_operand:MVE_2 1 "s_register_operand")
+ (match_operand:MVE_2 2 "neon_inv_logic_op2")))
+ ]
+ "TARGET_HAVE_MVE"
+)
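The expander keeps the signed name alive for arm_mve.h while the
unsigned define_insn does the actual matching. A minimal sketch of
intrinsics code exercising both names (function names are illustrative;
assumes an MVE-enabled invocation):

#include <arm_mve.h>

/* Should go through the new mve_vandq_s<mode> expander.  */
int32x4_t and_s (int32x4_t a, int32x4_t b)
{
  return vandq_s32 (a, b);
}

/* Should match mve_vandq_u<mode> directly.  */
uint32x4_t and_u (uint32x4_t a, uint32x4_t b)
{
  return vandq_u32 (a, b);
}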
;;
;; [vbicq_s, vbicq_u])
(define_insn "mve_vandq_f<mode>"
[
(set (match_operand:MVE_0 0 "s_register_operand" "=w")
- (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
- (match_operand:MVE_0 2 "s_register_operand" "w")]
- VANDQ_F))
+ (and:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
]
"TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
"vand %q0, %q1, %q2"
--- /dev/null
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP b[i]; \
+ } \
+}
+
+#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP 1; \
+ } \
+}
+
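+/* For reference, FUNC(s, int, 32, 2, &, vand) expands to:
+
+   void test_vand_s32x2 (int32_t * __restrict__ dest, int32_t *a, int32_t *b) {
+     int i;
+     for (i=0; i<2; i++) {
+       dest[i] = a[i] & b[i];
+     }
+   }
+*/
+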
+/* 64-bit vectors. */
+FUNC(s, int, 32, 2, &, vand)
+FUNC(u, uint, 32, 2, &, vand)
+FUNC(s, int, 16, 4, &, vand)
+FUNC(u, uint, 16, 4, &, vand)
+FUNC(s, int, 8, 8, &, vand)
+FUNC(u, uint, 8, 8, &, vand)
+
+/* 128-bit vectors. */
+FUNC(s, int, 32, 4, &, vand)
+FUNC(u, uint, 32, 4, &, vand)
+FUNC(s, int, 16, 8, &, vand)
+FUNC(u, uint, 16, 8, &, vand)
+FUNC(s, int, 8, 16, &, vand)
+FUNC(u, uint, 8, 16, &, vand)
+
+/* 64-bit vectors. */
+FUNC_IMM(s, int, 32, 2, &, vandimm)
+FUNC_IMM(u, uint, 32, 2, &, vandimm)
+FUNC_IMM(s, int, 16, 4, &, vandimm)
+FUNC_IMM(u, uint, 16, 4, &, vandimm)
+FUNC_IMM(s, int, 8, 8, &, vandimm)
+FUNC_IMM(u, uint, 8, 8, &, vandimm)
+
+/* 128-bit vectors. */
+FUNC_IMM(s, int, 32, 4, &, vandimm)
+FUNC_IMM(u, uint, 32, 4, &, vandimm)
+FUNC_IMM(s, int, 16, 8, &, vandimm)
+FUNC_IMM(u, uint, 16, 8, &, vandimm)
+FUNC_IMM(s, int, 8, 16, &, vandimm)
+FUNC_IMM(u, uint, 8, 16, &, vandimm)
+
+/* MVE has only 128-bit vectors, so we can vectorize only half of the
+ functions above. */
+/* Although float16 and float32 types are supported at assembly level,
+ we cannot test them with the '&' operator, so we check only the
+ integer variants. */
+/* For some reason, we do not generate the immediate version; we still
+   use vldr to load the vector of immediates. */
+/* { dg-final { scan-assembler-times {vand\tq[0-9]+, q[0-9]+, q[0-9]+} 12 } } */
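
To reproduce outside the DejaGnu harness, a compile line along these
lines should show the expected vand instructions in the assembly
output (the file name is illustrative and the flags are typical for a
v8.1-M MVE target; adjust to your toolchain):

arm-none-eabi-gcc -march=armv8.1-m.main+mve -mfloat-abi=hard -O3 -S mve-vand.c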