(define_int_attr supf [(VCVTQ_TO_F_S "s") (VCVTQ_TO_F_U "u") (VREV16Q_S "s")
(VREV16Q_U "u") (VMVNQ_N_S "s") (VMVNQ_N_U "u")
(VCVTAQ_U "u") (VCVTAQ_S "s") (VREV64Q_S "s")
- (VREV64Q_U "u") (VMVNQ_S "s") (VMVNQ_U "u")
+ (VREV64Q_U "u")
(VDUPQ_N_U "u") (VDUPQ_N_S "s") (VADDVQ_S "s")
(VADDVQ_U "u")
(VMOVLTQ_U "u") (VMOVLTQ_S "s") (VMOVLBQ_S "s")
(define_int_iterator VCVTQ_FROM_F [VCVTQ_FROM_F_S VCVTQ_FROM_F_U])
(define_int_iterator VREV16Q [VREV16Q_U VREV16Q_S])
(define_int_iterator VCVTAQ [VCVTAQ_U VCVTAQ_S])
-(define_int_iterator VMVNQ [VMVNQ_U VMVNQ_S])
(define_int_iterator VDUPQ_N [VDUPQ_N_U VDUPQ_N_S])
(define_int_iterator VCLZQ [VCLZQ_U VCLZQ_S])
(define_int_iterator VADDVQ [VADDVQ_U VADDVQ_S])
;;
;; [vmvnq_u, vmvnq_s])
;;
-(define_insn "mve_vmvnq_<supf><mode>"
+(define_insn "mve_vmvnq_u<mode>"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
- (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")]
- VMVNQ))
+ (not:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")))
]
"TARGET_HAVE_MVE"
- "vmvn %q0, %q1"
+ "vmvn\t%q0, %q1"
[(set_attr "type" "mve_move")
])
+(define_expand "mve_vmvnq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand")
+ (not:MVE_2 (match_operand:MVE_2 1 "s_register_operand")))
+ ]
+ "TARGET_HAVE_MVE"
+)
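
Taken together, these two patterns mean both the signed and unsigned vmvnq builtins now expand to the same canonical (not ...) RTL instead of an opaque unspec, which is what lets the vectorizer reuse them. A minimal C sketch of the user-visible mapping, assuming the arm_mve.h intrinsics route through these patterns (function names here are illustrative):

#include <arm_mve.h>

/* Assumed mapping: the unsigned intrinsic matches the mve_vmvnq_u<mode>
   insn directly, the signed one goes through the new define_expand,
   and both should emit "vmvn qN, qM".  */
uint32x4_t not_u32 (uint32x4_t a) { return vmvnq_u32 (a); }
int16x8_t not_s16 (int16x8_t a) { return vmvnq_s16 (a); }
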
;;
;; [vdupq_n_u, vdupq_n_s])
[(set_attr "type" "neon_logic<q>")]
)
-(define_insn "one_cmpl<mode>2"
+(define_insn "one_cmpl<mode>2_neon"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
(not:VDQ (match_operand:VDQ 1 "s_register_operand" "w")))]
"TARGET_NEON"
(match_operand:VDQIW 1 "s_register_operand")]
"TARGET_NEON"
{
- emit_insn (gen_one_cmpl<mode>2 (operands[0], operands[1]));
+ emit_insn (gen_one_cmpl<mode>2_neon (operands[0], operands[1]));
DONE;
})
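
The rename above avoids a duplicate pattern name: the standard optab name one_cmpl<mode>2 now belongs to the expander (its opening lines are elided here), which under TARGET_NEON simply forwards to the renamed Neon insn, while on MVE the vectorizer matches the not:MVE_2 insn added earlier. A minimal sketch of the kind of loop this enables, assuming -O3 with either Neon or MVE enabled; the new test below generalizes it over both signs and all element sizes:

#include <stdint.h>

/* Illustrative only: with the expander above, this one's-complement
   loop is expected to vectorize to a vector "vmvn".  */
void not_s16x8 (int16_t * __restrict__ dest, int16_t *a)
{
  int i;
  for (i = 0; i < 8; i++)
    dest[i] = ~a[i];
}
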
--- /dev/null
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = OP a[i]; \
+ } \
+}
+
+/* The register form of vmvn used here is a plain bitwise NOT, so all
+   element sizes are vectorizable; only the immediate form of vmvnq is
+   restricted to 16-bit and 32-bit elements.  */
+/* 64-bit vectors. */
+FUNC(s, int, 32, 2, ~, vmvn)
+FUNC(u, uint, 32, 2, ~, vmvn)
+FUNC(s, int, 16, 4, ~, vmvn)
+FUNC(u, uint, 16, 4, ~, vmvn)
+FUNC(s, int, 8, 8, ~, vmvn)
+FUNC(u, uint, 8, 8, ~, vmvn)
+
+/* 128-bit vectors. */
+FUNC(s, int, 32, 4, ~, vmvn)
+FUNC(u, uint, 32, 4, ~, vmvn)
+FUNC(s, int, 16, 8, ~, vmvn)
+FUNC(u, uint, 16, 8, ~, vmvn)
+FUNC(s, int, 8, 16, ~, vmvn)
+FUNC(u, uint, 8, 16, ~, vmvn)
+
+/* MVE has only 128-bit vectors, so we can vectorize only half of the
+ functions above. */
+/* { dg-final { scan-assembler-times {vmvn\tq[0-9]+, q[0-9]+} 6 } } */