From: Richard Henderson
Date: Mon, 2 Jul 2018 15:29:16 +0000 (+0000)
Subject: aarch64: Add movprfx patterns alternatives
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=a08acce83a052a8b800eddaaae36e51e8de57643;p=gcc.git

aarch64: Add movprfx patterns alternatives

	* config/aarch64/aarch64-protos.h, config/aarch64/aarch64.c
	(aarch64_sve_prepare_conditional_op): Remove.
	* config/aarch64/aarch64-sve.md (cond_):
	Allow aarch64_simd_reg_or_zero as select operand; remove
	the aarch64_sve_prepare_conditional_op call.
	(cond_): Likewise.
	(cond_): Likewise.
	(*cond__z): New pattern.
	(*cond__z): New pattern.
	(*cond__z): New pattern.
	(*cond__any): New pattern.
	(*cond__any): New pattern.
	(*cond__any): New pattern
	and splitters to match all of the *_any patterns.
	* config/aarch64/predicates.md (aarch64_sve_any_binary_operator): New.

	* config/aarch64/iterators.md (SVE_INT_BINARY_REV): Remove.
	(SVE_COND_FP_BINARY_REV): Remove.
	(sve_int_op_rev, sve_fp_op_rev): New.
	* config/aarch64/aarch64-sve.md (*cond__0): New.
	(*cond__0): New.
	(*cond__0): New.
	(*cond__2): Rename, add movprfx alternative.
	(*cond__2): Similarly.
	(*cond__2): Similarly.
	(*cond__3): Similarly; use sve_int_op_rev.
	(*cond__3): Similarly.
	(*cond__3): Similarly; use sve_fp_op_rev.

	* config/aarch64/aarch64-sve.md (cond_):
	Remove match_dup 1 from the inner unspec.
	(*cond_): Likewise.

	* config/aarch64/aarch64.md (movprfx): New attr.
	(length): Default movprfx to 8.
	* config/aarch64/aarch64-sve.md (*mul3): Add movprfx alt.
	(*madd, *msubmul3_highpart): Likewise.
	(*3): Likewise.
	(*v3): Likewise.
	(*3): Likewise.
	(*3): Likewise.
	(*fma4, *fnma4): Likewise.
	(*fms4, *fnms4): Likewise.
	(*div4): Likewise.

From-SVN: r262312
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 78cb80f0329..0a8395949cf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,51 @@
+2018-07-02  Richard Henderson
+
+	* config/aarch64/aarch64-protos.h, config/aarch64/aarch64.c
+	(aarch64_sve_prepare_conditional_op): Remove.
+	* config/aarch64/aarch64-sve.md (cond_):
+	Allow aarch64_simd_reg_or_zero as select operand; remove
+	the aarch64_sve_prepare_conditional_op call.
+	(cond_): Likewise.
+	(cond_): Likewise.
+	(*cond__z): New pattern.
+	(*cond__z): New pattern.
+	(*cond__z): New pattern.
+	(*cond__any): New pattern.
+	(*cond__any): New pattern.
+	(*cond__any): New pattern
+	and splitters to match all of the *_any patterns.
+	* config/aarch64/predicates.md (aarch64_sve_any_binary_operator): New.
+
+	* config/aarch64/iterators.md (SVE_INT_BINARY_REV): Remove.
+	(SVE_COND_FP_BINARY_REV): Remove.
+	(sve_int_op_rev, sve_fp_op_rev): New.
+	* config/aarch64/aarch64-sve.md (*cond__0): New.
+	(*cond__0): New.
+	(*cond__0): New.
+	(*cond__2): Rename, add movprfx alternative.
+	(*cond__2): Similarly.
+	(*cond__2): Similarly.
+	(*cond__3): Similarly; use sve_int_op_rev.
+	(*cond__3): Similarly.
+	(*cond__3): Similarly; use sve_fp_op_rev.
+
+	* config/aarch64/aarch64-sve.md (cond_):
+	Remove match_dup 1 from the inner unspec.
+	(*cond_): Likewise.
+
+	* config/aarch64/aarch64.md (movprfx): New attr.
+	(length): Default movprfx to 8.
+	* config/aarch64/aarch64-sve.md (*mul3): Add movprfx alt.
+	(*madd, *msubmul3_highpart): Likewise.
+	(*3): Likewise.
+	(*v3): Likewise.
+	(*3): Likewise.
+	(*3): Likewise.
+	(*fma4, *fnma4): Likewise.
+	(*fms4, *fnms4): Likewise.
+	(*div4): Likewise.
+ 2018-07-02 Richard Sandiford * tree-vect-patterns.c (vect_recog_widen_shift_pattern): Fix typo diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h index 87c6ae20278..514ddc457ca 100644 --- a/gcc/config/aarch64/aarch64-protos.h +++ b/gcc/config/aarch64/aarch64-protos.h @@ -513,7 +513,6 @@ bool aarch64_gen_adjusted_ldpstp (rtx *, bool, scalar_mode, RTX_CODE); void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx); bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool); void aarch64_expand_sve_vcond (machine_mode, machine_mode, rtx *); -void aarch64_sve_prepare_conditional_op (rtx *, unsigned int, bool); #endif /* RTX_CODE */ void aarch64_init_builtins (void); diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md index 8e2433385a8..b16d0455159 100644 --- a/gcc/config/aarch64/aarch64-sve.md +++ b/gcc/config/aarch64/aarch64-sve.md @@ -937,47 +937,53 @@ ;; to gain much and would make the instruction seem less uniform to the ;; register allocator. (define_insn "*mul3" - [(set (match_operand:SVE_I 0 "register_operand" "=w, w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl, Upl") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (mult:SVE_I - (match_operand:SVE_I 2 "register_operand" "%0, 0") - (match_operand:SVE_I 3 "aarch64_sve_mul_operand" "vsm, w"))] + (match_operand:SVE_I 2 "register_operand" "%0, 0, w") + (match_operand:SVE_I 3 "aarch64_sve_mul_operand" "vsm, w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ mul\t%0., %0., #%3 - mul\t%0., %1/m, %0., %3." + mul\t%0., %1/m, %0., %3. + movprfx\t%0, %2\;mul\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,*,yes")] ) (define_insn "*madd" - [(set (match_operand:SVE_I 0 "register_operand" "=w, w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (plus:SVE_I (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl, Upl") - (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w") - (match_operand:SVE_I 3 "register_operand" "w, w"))] + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w") + (match_operand:SVE_I 3 "register_operand" "w, w, w"))] UNSPEC_MERGE_PTRUE) - (match_operand:SVE_I 4 "register_operand" "w, 0")))] + (match_operand:SVE_I 4 "register_operand" "w, 0, w")))] "TARGET_SVE" "@ mad\t%0., %1/m, %3., %4. - mla\t%0., %1/m, %2., %3." + mla\t%0., %1/m, %2., %3. + movprfx\t%0, %4\;mla\t%0., %1/m, %2., %3." + [(set_attr "movprfx" "*,*,yes")] ) (define_insn "*msub3" - [(set (match_operand:SVE_I 0 "register_operand" "=w, w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (minus:SVE_I - (match_operand:SVE_I 4 "register_operand" "w, 0") + (match_operand:SVE_I 4 "register_operand" "w, 0, w") (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl, Upl") - (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w") - (match_operand:SVE_I 3 "register_operand" "w, w"))] + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w") + (match_operand:SVE_I 3 "register_operand" "w, w, w"))] UNSPEC_MERGE_PTRUE)))] "TARGET_SVE" "@ msb\t%0., %1/m, %3., %4. - mls\t%0., %1/m, %2., %3." + mls\t%0., %1/m, %2., %3. + movprfx\t%0, %4\;mls\t%0., %1/m, %2., %3." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated highpart multiplication. @@ -997,15 +1003,18 @@ ;; Predicated highpart multiplication. 
(define_insn "*mul3_highpart" - [(set (match_operand:SVE_I 0 "register_operand" "=w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl") - (unspec:SVE_I [(match_operand:SVE_I 2 "register_operand" "%0") - (match_operand:SVE_I 3 "register_operand" "w")] + [(match_operand: 1 "register_operand" "Upl, Upl") + (unspec:SVE_I [(match_operand:SVE_I 2 "register_operand" "%0, w") + (match_operand:SVE_I 3 "register_operand" "w, w")] MUL_HIGHPART)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" - "mulh\t%0., %1/m, %0., %3." + "@ + mulh\t%0., %1/m, %0., %3. + movprfx\t%0, %2\;mulh\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] ) ;; Unpredicated division. @@ -1025,17 +1034,19 @@ ;; Division predicated with a PTRUE. (define_insn "*3" - [(set (match_operand:SVE_SDI 0 "register_operand" "=w, w") + [(set (match_operand:SVE_SDI 0 "register_operand" "=w, w, ?&w") (unspec:SVE_SDI - [(match_operand: 1 "register_operand" "Upl, Upl") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (SVE_INT_BINARY_SD:SVE_SDI - (match_operand:SVE_SDI 2 "register_operand" "0, w") - (match_operand:SVE_SDI 3 "aarch64_sve_mul_operand" "w, 0"))] + (match_operand:SVE_SDI 2 "register_operand" "0, w, w") + (match_operand:SVE_SDI 3 "aarch64_sve_mul_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ \t%0., %1/m, %0., %3. - r\t%0., %1/m, %0., %2." + r\t%0., %1/m, %0., %2. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated NEG, NOT and POPCOUNT. @@ -1222,17 +1233,19 @@ ;; or X isn't likely to gain much and would make the instruction seem ;; less uniform to the register allocator. (define_insn "*v3" - [(set (match_operand:SVE_I 0 "register_operand" "=w, w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w") (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl, Upl") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (ASHIFT:SVE_I - (match_operand:SVE_I 2 "register_operand" "w, 0") - (match_operand:SVE_I 3 "aarch64_sve_shift_operand" "D, w"))] + (match_operand:SVE_I 2 "register_operand" "w, 0, w") + (match_operand:SVE_I 3 "aarch64_sve_shift_operand" "D, w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ \t%0., %2., #%3 - \t%0., %1/m, %0., %3." + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,*,yes")] ) ;; LSL, LSR and ASR by a scalar, which expands into one of the vector @@ -1723,14 +1736,17 @@ ;; Integer MIN/MAX predicated with a PTRUE. (define_insn "*3" - [(set (match_operand:SVE_I 0 "register_operand" "=w") + [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") (unspec:SVE_I - [(match_operand: 1 "register_operand" "Upl") - (MAXMIN:SVE_I (match_operand:SVE_I 2 "register_operand" "%0") - (match_operand:SVE_I 3 "register_operand" "w"))] + [(match_operand: 1 "register_operand" "Upl, Upl") + (MAXMIN:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w") + (match_operand:SVE_I 3 "register_operand" "w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" - "\t%0., %1/m, %0., %3." + "@ + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] ) ;; Unpredicated floating-point MIN/MAX. @@ -1749,14 +1765,17 @@ ;; Floating-point MIN/MAX predicated with a PTRUE. 
(define_insn "*3" - [(set (match_operand:SVE_F 0 "register_operand" "=w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl") - (FMAXMIN:SVE_F (match_operand:SVE_F 2 "register_operand" "%0") - (match_operand:SVE_F 3 "register_operand" "w"))] + [(match_operand: 1 "register_operand" "Upl, Upl") + (FMAXMIN:SVE_F (match_operand:SVE_F 2 "register_operand" "%0, w") + (match_operand:SVE_F 3 "register_operand" "w, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" - "fnm\t%0., %1/m, %0., %3." + "@ + fnm\t%0., %1/m, %0., %3. + movprfx\t%0, %2\;fnm\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] ) ;; Unpredicated fmin/fmax. @@ -1776,15 +1795,18 @@ ;; fmin/fmax predicated with a PTRUE. (define_insn "*3" - [(set (match_operand:SVE_F 0 "register_operand" "=w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl") - (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "%0") - (match_operand:SVE_F 3 "register_operand" "w")] + [(match_operand: 1 "register_operand" "Upl, Upl") + (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "%0, w") + (match_operand:SVE_F 3 "register_operand" "w, w")] FMAXMIN_UNS)] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" - "\t%0., %1/m, %0., %3." + "@ + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] ) ;; Predicated integer operations with select. @@ -1795,13 +1817,10 @@ (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand") (match_operand:SVE_I 3 "register_operand")) - (match_operand:SVE_I 4 "register_operand")] + (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" -{ - bool commutative_p = (GET_RTX_CLASS () == RTX_COMM_ARITH); - aarch64_sve_prepare_conditional_op (operands, 5, commutative_p); -}) +) (define_expand "cond_" [(set (match_operand:SVE_SDI 0 "register_operand") @@ -1810,66 +1829,191 @@ (SVE_INT_BINARY_SD:SVE_SDI (match_operand:SVE_SDI 2 "register_operand") (match_operand:SVE_SDI 3 "register_operand")) - (match_operand:SVE_SDI 4 "register_operand")] + (match_operand:SVE_SDI 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" -{ - bool commutative_p = (GET_RTX_CLASS () == RTX_COMM_ARITH); - aarch64_sve_prepare_conditional_op (operands, 5, commutative_p); -}) +) -;; Predicated integer operations. -(define_insn "*cond_" - [(set (match_operand:SVE_I 0 "register_operand" "=w") +;; Predicated integer operations with select matching the output operand. +(define_insn "*cond__0" + [(set (match_operand:SVE_I 0 "register_operand" "+w, w, ?&w") + (unspec:SVE_I + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (SVE_INT_BINARY:SVE_I + (match_operand:SVE_I 2 "register_operand" "0, w, w") + (match_operand:SVE_I 3 "register_operand" "w, 0, w")) + (match_dup 0)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + \t%0., %1/m, %0., %2. + movprfx\t%0, %1/m, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,*,yes")] +) + +(define_insn "*cond__0" + [(set (match_operand:SVE_SDI 0 "register_operand" "+w, w, ?&w") + (unspec:SVE_SDI + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (SVE_INT_BINARY_SD:SVE_SDI + (match_operand:SVE_SDI 2 "register_operand" "0, w, w") + (match_operand:SVE_SDI 3 "register_operand" "w, 0, w")) + (match_dup 0)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + \t%0., %1/m, %0., %2. + movprfx\t%0, %1/m, %2\;\t%0., %1/m, %0., %3." 
+ [(set_attr "movprfx" "*,*,yes")] +) + +;; Predicated integer operations with select matching the first operand. +(define_insn "*cond__2" + [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") + (unspec:SVE_I + [(match_operand: 1 "register_operand" "Upl, Upl") + (SVE_INT_BINARY:SVE_I + (match_operand:SVE_I 2 "register_operand" "0, w") + (match_operand:SVE_I 3 "register_operand" "w, w")) + (match_dup 2)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] +) + +(define_insn "*cond__2" + [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w") + (unspec:SVE_SDI + [(match_operand: 1 "register_operand" "Upl, Upl") + (SVE_INT_BINARY_SD:SVE_SDI + (match_operand:SVE_SDI 2 "register_operand" "0, w") + (match_operand:SVE_SDI 3 "register_operand" "w, w")) + (match_dup 2)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] +) + +;; Predicated integer operations with select matching the second operand. +(define_insn "*cond__3" + [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") + (unspec:SVE_I + [(match_operand: 1 "register_operand" "Upl, Upl") + (SVE_INT_BINARY:SVE_I + (match_operand:SVE_I 2 "register_operand" "w, w") + (match_operand:SVE_I 3 "register_operand" "0, w")) + (match_dup 3)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %2. + movprfx\t%0, %3\;\t%0., %1/m, %0., %2." + [(set_attr "movprfx" "*,yes")] +) + +(define_insn "*cond__3" + [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w") + (unspec:SVE_SDI + [(match_operand: 1 "register_operand" "Upl, Upl") + (SVE_INT_BINARY_SD:SVE_SDI + (match_operand:SVE_SDI 2 "register_operand" "w, w") + (match_operand:SVE_SDI 3 "register_operand" "0, w")) + (match_dup 3)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %2. + movprfx\t%0, %3\;\t%0., %1/m, %0., %2." + [(set_attr "movprfx" "*,yes")] +) + +;; Predicated integer operations with select matching zero. +(define_insn "*cond__z" + [(set (match_operand:SVE_I 0 "register_operand" "=&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl") (SVE_INT_BINARY:SVE_I - (match_operand:SVE_I 2 "register_operand" "0") + (match_operand:SVE_I 2 "register_operand" "w") (match_operand:SVE_I 3 "register_operand" "w")) - (match_dup 2)] + (match_operand:SVE_I 4 "aarch64_simd_imm_zero")] UNSPEC_SEL))] "TARGET_SVE" - "\t%0., %1/m, %0., %3." + "movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "yes")] ) -(define_insn "*cond_" - [(set (match_operand:SVE_SDI 0 "register_operand" "=w") +(define_insn "*cond__z" + [(set (match_operand:SVE_SDI 0 "register_operand" "=&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl") (SVE_INT_BINARY_SD:SVE_SDI - (match_operand:SVE_SDI 2 "register_operand" "0") + (match_operand:SVE_SDI 2 "register_operand" "w") (match_operand:SVE_SDI 3 "register_operand" "w")) - (match_dup 2)] + (match_operand:SVE_SDI 4 "aarch64_simd_imm_zero")] UNSPEC_SEL))] "TARGET_SVE" - "\t%0., %1/m, %0., %3." + "movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "yes")] ) -;; Predicated integer operations with the operands reversed. -(define_insn "*cond_" - [(set (match_operand:SVE_I 0 "register_operand" "=w") +;; Synthetic predications with select unmatched. 
+(define_insn "*cond__any" + [(set (match_operand:SVE_I 0 "register_operand" "=&w") (unspec:SVE_I [(match_operand: 1 "register_operand" "Upl") - (SVE_INT_BINARY_REV:SVE_I + (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand" "w") - (match_operand:SVE_I 3 "register_operand" "0")) - (match_dup 3)] + (match_operand:SVE_I 3 "register_operand" "w")) + (match_operand:SVE_I 4 "register_operand" "w")] UNSPEC_SEL))] "TARGET_SVE" - "r\t%0., %1/m, %0., %2." + "#" ) -(define_insn "*cond_" - [(set (match_operand:SVE_SDI 0 "register_operand" "=w") +(define_insn "*cond__any" + [(set (match_operand:SVE_SDI 0 "register_operand" "=&w") (unspec:SVE_SDI [(match_operand: 1 "register_operand" "Upl") - (SVE_INT_BINARY_SD:SVE_SDI + (SVE_INT_BINARY_SD:SVE_I (match_operand:SVE_SDI 2 "register_operand" "w") - (match_operand:SVE_SDI 3 "register_operand" "0")) - (match_dup 3)] + (match_operand:SVE_SDI 3 "register_operand" "w")) + (match_operand:SVE_SDI 4 "register_operand" "w")] UNSPEC_SEL))] "TARGET_SVE" - "r\t%0., %1/m, %0., %2." + "#" +) + +(define_split + [(set (match_operand:SVE_I 0 "register_operand") + (unspec:SVE_I + [(match_operand: 1 "register_operand") + (match_operator:SVE_I 5 "aarch64_sve_any_binary_operator" + [(match_operand:SVE_I 2 "register_operand") + (match_operand:SVE_I 3 "register_operand")]) + (match_operand:SVE_I 4 "register_operand")] + UNSPEC_SEL))] + "TARGET_SVE && reload_completed + && !(rtx_equal_p (operands[0], operands[4]) + || rtx_equal_p (operands[2], operands[4]) + || rtx_equal_p (operands[3], operands[4]))" + ; Not matchable by any one insn or movprfx insn. We need a separate select. + [(set (match_dup 0) + (unspec:SVE_I [(match_dup 1) (match_dup 2) (match_dup 4)] + UNSPEC_SEL)) + (set (match_dup 0) + (unspec:SVE_I + [(match_dup 1) + (match_op_dup 5 [(match_dup 0) (match_dup 3)]) + (match_dup 0)] + UNSPEC_SEL))] ) ;; Set operand 0 to the last active element in operand 3, or to tied @@ -2146,17 +2290,19 @@ ;; fma predicated with a PTRUE. (define_insn "*fma4" - [(set (match_operand:SVE_F 0 "register_operand" "=w, w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl, Upl") - (fma:SVE_F (match_operand:SVE_F 3 "register_operand" "%0, w") - (match_operand:SVE_F 4 "register_operand" "w, w") - (match_operand:SVE_F 2 "register_operand" "w, 0"))] + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (fma:SVE_F (match_operand:SVE_F 3 "register_operand" "%0, w, w") + (match_operand:SVE_F 4 "register_operand" "w, w, w") + (match_operand:SVE_F 2 "register_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fmad\t%0., %1/m, %4., %2. - fmla\t%0., %1/m, %3., %4." + fmla\t%0., %1/m, %3., %4. + movprfx\t%0, %2\;fmla\t%0., %1/m, %3., %4." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated fnma (%0 = (-%1 * %2) + %3). @@ -2177,18 +2323,20 @@ ;; fnma predicated with a PTRUE. 
(define_insn "*fnma4" - [(set (match_operand:SVE_F 0 "register_operand" "=w, w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl, Upl") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (fma:SVE_F (neg:SVE_F - (match_operand:SVE_F 3 "register_operand" "%0, w")) - (match_operand:SVE_F 4 "register_operand" "w, w") - (match_operand:SVE_F 2 "register_operand" "w, 0"))] + (match_operand:SVE_F 3 "register_operand" "%0, w, w")) + (match_operand:SVE_F 4 "register_operand" "w, w, w") + (match_operand:SVE_F 2 "register_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fmsb\t%0., %1/m, %4., %2. - fmls\t%0., %1/m, %3., %4." + fmls\t%0., %1/m, %3., %4. + movprfx\t%0, %2\;fmls\t%0., %1/m, %3., %4." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated fms (%0 = (%1 * %2) - %3). @@ -2209,18 +2357,20 @@ ;; fms predicated with a PTRUE. (define_insn "*fms4" - [(set (match_operand:SVE_F 0 "register_operand" "=w, w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl, Upl") - (fma:SVE_F (match_operand:SVE_F 3 "register_operand" "%0, w") - (match_operand:SVE_F 4 "register_operand" "w, w") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (fma:SVE_F (match_operand:SVE_F 3 "register_operand" "%0, w, w") + (match_operand:SVE_F 4 "register_operand" "w, w, w") (neg:SVE_F - (match_operand:SVE_F 2 "register_operand" "w, 0")))] + (match_operand:SVE_F 2 "register_operand" "w, 0, w")))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fnmsb\t%0., %1/m, %4., %2. - fnmls\t%0., %1/m, %3., %4." + fnmls\t%0., %1/m, %3., %4. + movprfx\t%0, %2\;fnmls\t%0., %1/m, %3., %4." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated fnms (%0 = (-%1 * %2) - %3). @@ -2242,19 +2392,21 @@ ;; fnms predicated with a PTRUE. (define_insn "*fnms4" - [(set (match_operand:SVE_F 0 "register_operand" "=w, w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl, Upl") + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") (fma:SVE_F (neg:SVE_F - (match_operand:SVE_F 3 "register_operand" "%0, w")) - (match_operand:SVE_F 4 "register_operand" "w, w") + (match_operand:SVE_F 3 "register_operand" "%0, w, w")) + (match_operand:SVE_F 4 "register_operand" "w, w, w") (neg:SVE_F - (match_operand:SVE_F 2 "register_operand" "w, 0")))] + (match_operand:SVE_F 2 "register_operand" "w, 0, w")))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fnmad\t%0., %1/m, %4., %2. - fnmla\t%0., %1/m, %3., %4." + fnmla\t%0., %1/m, %3., %4. + movprfx\t%0, %2\;fnmla\t%0., %1/m, %3., %4." + [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated floating-point division. @@ -2273,16 +2425,18 @@ ;; Floating-point division predicated with a PTRUE. (define_insn "*div3" - [(set (match_operand:SVE_F 0 "register_operand" "=w, w") + [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w") (unspec:SVE_F - [(match_operand: 1 "register_operand" "Upl, Upl") - (div:SVE_F (match_operand:SVE_F 2 "register_operand" "0, w") - (match_operand:SVE_F 3 "register_operand" "w, 0"))] + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (div:SVE_F (match_operand:SVE_F 2 "register_operand" "0, w, w") + (match_operand:SVE_F 3 "register_operand" "w, 0, w"))] UNSPEC_MERGE_PTRUE))] "TARGET_SVE" "@ fdiv\t%0., %1/m, %0., %3. - fdivr\t%0., %1/m, %0., %2." + fdivr\t%0., %1/m, %0., %2. + movprfx\t%0, %2\;fdiv\t%0., %1/m, %0., %3." 
+ [(set_attr "movprfx" "*,*,yes")] ) ;; Unpredicated FNEG, FABS and FSQRT. @@ -2645,47 +2799,111 @@ (unspec:SVE_F [(match_operand: 1 "register_operand") (unspec:SVE_F - [(match_dup 1) - (match_operand:SVE_F 2 "register_operand") + [(match_operand:SVE_F 2 "register_operand") (match_operand:SVE_F 3 "register_operand")] SVE_COND_FP_BINARY) - (match_operand:SVE_F 4 "register_operand")] + (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" -{ - aarch64_sve_prepare_conditional_op (operands, 5, ); -}) +) -;; Predicated floating-point operations. -(define_insn "*cond_" - [(set (match_operand:SVE_F 0 "register_operand" "=w") +;; Predicated floating-point operations with select matching output. +(define_insn "*cond__0" + [(set (match_operand:SVE_F 0 "register_operand" "+w, w, ?&w") + (unspec:SVE_F + [(match_operand: 1 "register_operand" "Upl, Upl, Upl") + (unspec:SVE_F + [(match_operand:SVE_F 2 "register_operand" "0, w, w") + (match_operand:SVE_F 3 "register_operand" "w, 0, w")] + SVE_COND_FP_BINARY) + (match_dup 0)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + \t%0., %1/m, %0., %2. + movprfx\t%0, %1/m, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,*,yes")] +) + +;; Predicated floating-point operations with select matching first operand. +(define_insn "*cond__2" + [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") + (unspec:SVE_F + [(match_operand: 1 "register_operand" "Upl, Upl") + (unspec:SVE_F + [(match_operand:SVE_F 2 "register_operand" "0, w") + (match_operand:SVE_F 3 "register_operand" "w, w")] + SVE_COND_FP_BINARY) + (match_dup 2)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %3. + movprfx\t%0, %2\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "*,yes")] +) + +;; Predicated floating-point operations with select matching second operand. +(define_insn "*cond__3" + [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w") + (unspec:SVE_F + [(match_operand: 1 "register_operand" "Upl, Upl") + (unspec:SVE_F + [(match_operand:SVE_F 2 "register_operand" "w, w") + (match_operand:SVE_F 3 "register_operand" "0, w")] + SVE_COND_FP_BINARY) + (match_dup 3)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + \t%0., %1/m, %0., %2. + movprfx\t%0, %3\;\t%0., %1/m, %0., %2." + [(set_attr "movprfx" "*,yes")] +) + +;; Predicated floating-point operations with select matching zero. +(define_insn "*cond__z" + [(set (match_operand:SVE_F 0 "register_operand" "=&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl") (unspec:SVE_F - [(match_dup 1) - (match_operand:SVE_F 2 "register_operand" "0") + [(match_operand:SVE_F 2 "register_operand" "w") (match_operand:SVE_F 3 "register_operand" "w")] SVE_COND_FP_BINARY) - (match_dup 2)] + (match_operand:SVE_F 4 "aarch64_simd_imm_zero")] UNSPEC_SEL))] "TARGET_SVE" - "\t%0., %1/m, %0., %3." + "movprfx\t%0., %1/z, %2.\;\t%0., %1/m, %0., %3." + [(set_attr "movprfx" "yes")] ) -;; Predicated floating-point operations with the operands reversed. -(define_insn "*cond_" - [(set (match_operand:SVE_F 0 "register_operand" "=w") +;; Synthetic predication of floating-point operations with select unmatched. 
+(define_insn_and_split "*cond__any" + [(set (match_operand:SVE_F 0 "register_operand" "=&w") (unspec:SVE_F [(match_operand: 1 "register_operand" "Upl") (unspec:SVE_F - [(match_dup 1) - (match_operand:SVE_F 2 "register_operand" "w") - (match_operand:SVE_F 3 "register_operand" "0")] + [(match_operand:SVE_F 2 "register_operand" "w") + (match_operand:SVE_F 3 "register_operand" "w")] SVE_COND_FP_BINARY) - (match_dup 3)] + (match_operand:SVE_F 4 "register_operand" "w")] UNSPEC_SEL))] "TARGET_SVE" - "r\t%0., %1/m, %0., %2." + "#" + "&& reload_completed + && !(rtx_equal_p (operands[0], operands[4]) + || rtx_equal_p (operands[2], operands[4]) + || rtx_equal_p (operands[3], operands[4]))" + ; Not matchable by any one insn or movprfx insn. We need a separate select. + [(set (match_dup 0) + (unspec:SVE_F [(match_dup 1) (match_dup 2) (match_dup 4)] UNSPEC_SEL)) + (set (match_dup 0) + (unspec:SVE_F + [(match_dup 1) + (unspec:SVE_F [(match_dup 0) (match_dup 3)] SVE_COND_FP_BINARY) + (match_dup 0)] + UNSPEC_SEL))] ) ;; Shift an SVE vector left and insert a scalar into element 0. diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index b88e7cac27a..d75d45f4b8b 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -16058,54 +16058,6 @@ aarch64_expand_sve_vcond (machine_mode data_mode, machine_mode cmp_mode, emit_set_insn (ops[0], gen_rtx_UNSPEC (data_mode, vec, UNSPEC_SEL)); } -/* Prepare a cond_ operation that has the operands - given by OPERANDS, where: - - - operand 0 is the destination - - operand 1 is a predicate - - operands 2 to NOPS - 2 are the operands to an operation that is - performed for active lanes - - operand NOPS - 1 specifies the values to use for inactive lanes. - - COMMUTATIVE_P is true if operands 2 and 3 are commutative. In that case, - no pattern is provided for a tie between operands 3 and NOPS - 1. */ - -void -aarch64_sve_prepare_conditional_op (rtx *operands, unsigned int nops, - bool commutative_p) -{ - /* We can do the operation directly if the "else" value matches one - of the other inputs. */ - for (unsigned int i = 2; i < nops - 1; ++i) - if (rtx_equal_p (operands[i], operands[nops - 1])) - { - if (i == 3 && commutative_p) - std::swap (operands[2], operands[3]); - return; - } - - /* If the "else" value is different from the other operands, we have - the choice of doing a SEL on the output or a SEL on an input. - Neither choice is better in all cases, but one advantage of - selecting the input is that it can avoid a move when the output - needs to be distinct from the inputs. E.g. if operand N maps to - register N, selecting the output would give: - - MOVPRFX Z0.S, Z2.S - ADD Z0.S, P1/M, Z0.S, Z3.S - SEL Z0.S, P1, Z0.S, Z4.S - - whereas selecting the input avoids the MOVPRFX: - - SEL Z0.S, P1, Z2.S, Z4.S - ADD Z0.S, P1/M, Z0.S, Z3.S. */ - machine_mode mode = GET_MODE (operands[0]); - rtx temp = gen_reg_rtx (mode); - rtvec vec = gen_rtvec (3, operands[1], operands[2], operands[nops - 1]); - emit_set_insn (temp, gen_rtx_UNSPEC (mode, vec, UNSPEC_SEL)); - operands[2] = operands[nops - 1] = temp; -} - /* Implement TARGET_MODES_TIEABLE_P. In principle we should always return true. However due to issues with register allocation it is preferable to avoid tieing integer scalar and FP scalar modes. 
Executing integer diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index 4ac6332a200..a014a012cc1 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -251,9 +251,6 @@ ;; will be disabled when !TARGET_SVE. (define_attr "sve" "no,yes" (const_string "no")) -(define_attr "length" "" - (const_int 4)) - ;; Attribute that controls whether an alternative is enabled or not. ;; Currently it is only used to disable alternatives which touch fp or simd ;; registers when -mgeneral-regs-only is specified. @@ -277,6 +274,14 @@ ;; 1 :=: yes (define_attr "far_branch" "" (const_int 0)) +;; Attribute that specifies whether the alternative uses MOVPRFX. +(define_attr "movprfx" "no,yes" (const_string "no")) + +(define_attr "length" "" + (cond [(eq_attr "movprfx" "yes") + (const_int 8) + ] (const_int 4))) + ;; Strictly for compatibility with AArch32 in pipeline models, since AArch64 has ;; no predicated insns. (define_attr "predicated" "yes,no" (const_string "no")) diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index c5ef2eecf20..965dc6bf4f3 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -1207,11 +1207,11 @@ ;; SVE floating-point unary operations. (define_code_iterator SVE_FP_UNARY [neg abs sqrt]) +;; SVE integer binary operations. (define_code_iterator SVE_INT_BINARY [plus minus mult smax umax smin umin and ior xor]) -(define_code_iterator SVE_INT_BINARY_REV [minus]) - +;; SVE integer binary division operations. (define_code_iterator SVE_INT_BINARY_SD [div udiv]) ;; SVE integer comparisons. @@ -1402,6 +1402,19 @@ (not "not") (popcount "cnt")]) +(define_code_attr sve_int_op_rev [(plus "add") + (minus "subr") + (mult "mul") + (div "sdivr") + (udiv "udivr") + (smin "smin") + (smax "smax") + (umin "umin") + (umax "umax") + (and "and") + (ior "orr") + (xor "eor")]) + ;; The floating-point SVE instruction that implements an rtx code. (define_code_attr sve_fp_op [(plus "fadd") (neg "fneg") @@ -1550,8 +1563,6 @@ UNSPEC_COND_MUL UNSPEC_COND_DIV UNSPEC_COND_MAX UNSPEC_COND_MIN]) -(define_int_iterator SVE_COND_FP_BINARY_REV [UNSPEC_COND_SUB UNSPEC_COND_DIV]) - (define_int_iterator SVE_COND_FP_CMP [UNSPEC_COND_LT UNSPEC_COND_LE UNSPEC_COND_EQ UNSPEC_COND_NE UNSPEC_COND_GE UNSPEC_COND_GT]) @@ -1802,6 +1813,13 @@ (UNSPEC_COND_MAX "fmaxnm") (UNSPEC_COND_MIN "fminnm")]) +(define_int_attr sve_fp_op_rev [(UNSPEC_COND_ADD "fadd") + (UNSPEC_COND_SUB "fsubr") + (UNSPEC_COND_MUL "fmul") + (UNSPEC_COND_DIV "fdivr") + (UNSPEC_COND_MAX "fmaxnm") + (UNSPEC_COND_MIN "fminnm")]) + (define_int_attr commutative [(UNSPEC_COND_ADD "true") (UNSPEC_COND_SUB "false") (UNSPEC_COND_MUL "true") diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md index 7aec76d681f..4acbc218a8d 100644 --- a/gcc/config/aarch64/predicates.md +++ b/gcc/config/aarch64/predicates.md @@ -625,3 +625,6 @@ ;; A special predicate that doesn't match a particular mode. (define_special_predicate "aarch64_any_register_operand" (match_code "reg")) + +(define_predicate "aarch64_sve_any_binary_operator" + (match_code "plus,minus,mult,div,udiv,smax,umax,smin,umin,and,ior,xor"))
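
As background for the movprfx alternatives above: each new "?&w" alternative pairs a constructive MOVPRFX prefix with the otherwise destructive predicated instruction, which is why the new "length" attribute default charges movprfx alternatives 8 bytes (two instructions). A minimal sketch of the two shapes the predicated multiply template can emit, with illustrative register numbers rather than anything taken from compiler output:

    // destination tied to the first source: single destructive form
    mul     z0.s, p0/m, z0.s, z1.s

    // destination distinct from both sources: MOVPRFX copies z2 into z0,
    // then the predicated MUL updates z0 in place
    movprfx z0, z2
    mul     z0.s, p0/m, z0.s, z1.s

The new *cond_..._z patterns instead emit the zeroing form of the prefix (movprfx z0.s, p0/z, z2.s), so inactive lanes are cleared before the merging operation runs.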