From 9bfb28ed3c6fb702c2cab6798959679e1bbd7d09 Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Tue, 13 Mar 2018 15:13:37 +0000
Subject: [PATCH] [SLP/AArch64] Fix unpack handling for big-endian SVE

I hadn't realised that on big-endian targets, VEC_UNPACK*HI_EXPR unpacks
the low-numbered lanes and VEC_UNPACK*LO_EXPR unpacks the high-numbered
lanes.  This meant that both the SVE patterns and the handling of
fully-masked loops were wrong.

The patch deals with that by making sure that all vec_unpack* optabs
are define_expands, using BYTES_BIG_ENDIAN to choose the appropriate
define_insn.  This in turn means that we can get rid of the duplication
between the signed and unsigned patterns for predicates.  (We provide
implementations of both the signed and unsigned optabs because the sign
doesn't matter for predicates: every element contains only one
significant bit.)

Also, the float unpacks need to unpack one half of the input vector,
but the unpacked upper bits are "don't care".  There are two obvious
ways of handling that: use an unpack (filling with zeros) or use a ZIP
(filling with a duplicate of the low bits).  The code previously used
unpacks, but the sequence involved a subreg that is semantically an
element reverse on big-endian targets.  Using the ZIP patterns avoids
that, and at the moment there's no reason to prefer one over the other
for performance reasons, so the patch switches to ZIP unconditionally.
As the comment says, it would be easy to optimise this later if UUNPK
turns out to be better for some implementations.
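To make the trade-off concrete, here is a minimal C sketch of the two
fill strategies (illustrative only, not code from the patch; the lane
count and helper names are invented).  An unpack pairs each significant
lane with zeros, while a ZIP of the input with itself pairs each
significant lane with a duplicate; either way the significant lane
lands in its widened container and the other half is a don't-care.

#include <stdio.h>

/* Illustrative model: each two-slot "container" receives one
   significant lane; the other slot is a don't-care that an unpack
   fills with zero and a self-ZIP fills with a duplicate.  */

enum { LANES = 4 };

/* UUNPKLO-style: pair lane i with zero.  */
static void
unpack_lo (unsigned out[][2], const unsigned *in)
{
  for (int i = 0; i < LANES / 2; i++)
    {
      out[i][0] = in[i];	/* significant half */
      out[i][1] = 0;		/* zero fill */
    }
}

/* ZIP1-style: zip the input with itself, pairing lane i with itself.  */
static void
zip1_self (unsigned out[][2], const unsigned *in)
{
  for (int i = 0; i < LANES / 2; i++)
    {
      out[i][0] = in[i];	/* significant half */
      out[i][1] = in[i];	/* duplicate fill */
    }
}

int
main (void)
{
  unsigned in[LANES] = { 10, 11, 12, 13 };
  unsigned a[LANES / 2][2], b[LANES / 2][2];

  unpack_lo (a, in);
  zip1_self (b, in);
  for (int i = 0; i < LANES / 2; i++)
    printf ("container %d: unpack {%u, %u}, zip {%u, %u}\n",
	    i, a[i][0], a[i][1], b[i][0], b[i][1]);
  return 0;
}

The ZIP form is what the patch generates: because the intermediate
value keeps the mode of the input vector, no subreg (and hence no
implicit element reverse on big-endian) is needed before the final
conversion.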
2018-03-13  Richard Sandiford

gcc/
	* tree-vect-loop-manip.c (vect_maybe_permute_loop_masks):
	Reverse the choice between VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR
	for big-endian.
	* config/aarch64/iterators.md (hi_lanes_optab): New int attribute.
	* config/aarch64/aarch64-sve.md
	(*aarch64_sve_<perm_insn><perm_hilo><mode>): Rename to...
	(aarch64_sve_<perm_insn><perm_hilo><mode>): ...this.
	(*extend<mode><Vwide>2): Rename to...
	(aarch64_sve_extend<mode><Vwide>2): ...this.
	(vec_unpack<su>_<perm_hilo>_<mode>): Turn into a define_expand,
	renaming the old pattern to...
	(aarch64_sve_punpk<perm_hilo>_<mode>): ...this.  Only define
	unsigned packs.
	(vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>): Turn into a
	define_expand, renaming the old pattern to...
	(aarch64_sve_<su>unpk<perm_hilo>_<SVE_BHSI:mode>): ...this.
	(*vec_unpacku_<perm_hilo>_<mode>_no_convert): Delete.
	(vec_unpacks_<perm_hilo>_<mode>): Take BYTES_BIG_ENDIAN into
	account when deciding which SVE instruction the optab should use.
	(vec_unpack<su_optab>_float_<perm_hilo>_vnx4si): Likewise.

gcc/testsuite/
	* gcc.target/aarch64/sve/unpack_fcvt_signed_1.c: Expect zips rather
	than unpacks.
	* gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c: Likewise.
	* gcc.target/aarch64/sve/unpack_float_1.c: Likewise.

From-SVN: r258489
---
 gcc/ChangeLog                                 |  23 ++++
 gcc/config/aarch64/aarch64-sve.md             | 116 ++++++++++++------
 gcc/config/aarch64/iterators.md               |   9 ++
 gcc/testsuite/ChangeLog                       |   7 ++
 .../aarch64/sve/unpack_fcvt_signed_1.c        |   4 +-
 .../aarch64/sve/unpack_fcvt_unsigned_1.c      |   4 +-
 .../gcc.target/aarch64/sve/unpack_float_1.c   |   4 +-
 gcc/tree-vect-loop-manip.c                    |   3 +-
 8 files changed, 123 insertions(+), 47 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 8e150de2a57..4a6f4d47b60 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,26 @@
+2018-03-13  Richard Sandiford
+
+	* tree-vect-loop-manip.c (vect_maybe_permute_loop_masks):
+	Reverse the choice between VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR
+	for big-endian.
+	* config/aarch64/iterators.md (hi_lanes_optab): New int attribute.
+	* config/aarch64/aarch64-sve.md
+	(*aarch64_sve_<perm_insn><perm_hilo><mode>): Rename to...
+	(aarch64_sve_<perm_insn><perm_hilo><mode>): ...this.
+	(*extend<mode><Vwide>2): Rename to...
+	(aarch64_sve_extend<mode><Vwide>2): ...this.
+	(vec_unpack<su>_<perm_hilo>_<mode>): Turn into a define_expand,
+	renaming the old pattern to...
+	(aarch64_sve_punpk<perm_hilo>_<mode>): ...this.  Only define
+	unsigned packs.
+	(vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>): Turn into a
+	define_expand, renaming the old pattern to...
+	(aarch64_sve_<su>unpk<perm_hilo>_<SVE_BHSI:mode>): ...this.
+	(*vec_unpacku_<perm_hilo>_<mode>_no_convert): Delete.
+	(vec_unpacks_<perm_hilo>_<mode>): Take BYTES_BIG_ENDIAN into
+	account when deciding which SVE instruction the optab should use.
+	(vec_unpack<su_optab>_float_<perm_hilo>_vnx4si): Likewise.
+
 2018-03-13  Richard Sandiford
 
 	* config/aarch64/aarch64.md (V4_REGNUM, V8_REGNUM, V12_REGNUM)
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 2e7f0a45f79..d8855340509 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -817,7 +817,7 @@
   "<perm_insn><perm_hilo>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
 )
 
-(define_insn "*aarch64_sve_<perm_insn><perm_hilo><mode>"
+(define_insn "aarch64_sve_<perm_insn><perm_hilo><mode>"
   [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
 	(unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")
 			 (match_operand:SVE_ALL 2 "register_operand" "w")]
@@ -2184,7 +2184,7 @@
 )
 
 ;; Conversion of DI or SI to DF, predicated with a PTRUE.
-(define_insn "*<optab><mode>vnx2df2"
+(define_insn "aarch64_sve_<optab><mode>vnx2df2"
   [(set (match_operand:VNx2DF 0 "register_operand" "=w")
 	(unspec:VNx2DF
 	  [(match_operand:VNx2BI 1 "register_operand" "Upl")
@@ -2211,7 +2211,7 @@
 
 ;; Conversion of SFs to the same number of DFs, or HFs to the same number
 ;; of SFs.
-(define_insn "*extend<mode><Vwide>2"
+(define_insn "aarch64_sve_extend<mode><Vwide>2"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(unspec:<VWIDE>
 	  [(match_operand:<VWIDE_PRED> 1 "register_operand" "Upl")
@@ -2223,17 +2223,50 @@
   "fcvt\t%0.<Vewtype>, %1/m, %2.<Vetype>"
 )
 
+;; Unpack the low or high half of a predicate, where "high" refers to
+;; the low-numbered lanes for big-endian and the high-numbered lanes
+;; for little-endian.
+(define_expand "vec_unpack<su>_<perm_hilo>_<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (unspec:<VWIDE> [(match_operand:PRED_BHS 1 "register_operand")]
+		   UNPACK)]
+  "TARGET_SVE"
+  {
+    emit_insn ((<hi_lanes_optab>
+		? gen_aarch64_sve_punpkhi_<PRED_BHS:mode>
+		: gen_aarch64_sve_punpklo_<PRED_BHS:mode>)
+	       (operands[0], operands[1]));
+    DONE;
+  }
+)
+
 ;; PUNPKHI and PUNPKLO.
-(define_insn "vec_unpack<su>_<perm_hilo>_<mode>"
+(define_insn "aarch64_sve_punpk<perm_hilo>_<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=Upa")
 	(unspec:<VWIDE> [(match_operand:PRED_BHS 1 "register_operand" "Upa")]
-			UNPACK))]
+			UNPACK_UNSIGNED))]
   "TARGET_SVE"
   "punpk<perm_hilo>\t%0.h, %1.b"
 )
 
+;; Unpack the low or high half of a vector, where "high" refers to
+;; the low-numbered lanes for big-endian and the high-numbered lanes
+;; for little-endian.
+(define_expand "vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (unspec:<VWIDE> [(match_operand:SVE_BHSI 1 "register_operand")] UNPACK)]
+  "TARGET_SVE"
+  {
+    emit_insn ((<hi_lanes_optab>
+		? gen_aarch64_sve_<su>unpkhi_<SVE_BHSI:mode>
+		: gen_aarch64_sve_<su>unpklo_<SVE_BHSI:mode>)
+	       (operands[0], operands[1]));
+    DONE;
+  }
+)
+
 ;; SUNPKHI, UUNPKHI, SUNPKLO and UUNPKLO.
-(define_insn "vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>"
+(define_insn "aarch64_sve_<su>unpk<perm_hilo>_<SVE_BHSI:mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(unspec:<VWIDE> [(match_operand:SVE_BHSI 1 "register_operand" "w")]
 			UNPACK))]
@@ -2241,32 +2274,28 @@
   "<su>unpk<perm_hilo>\t%0.<Vewtype>, %1.<Vetype>"
 )
 
-;; Used by the vec_unpacks_<perm_hilo>_<mode> expander to unpack the bit
-;; representation of a VNx4SF or VNx8HF without conversion.  The choice
-;; between signed and unsigned isn't significant.
-(define_insn "*vec_unpacku_<perm_hilo>_<mode>_no_convert"
-  [(set (match_operand:SVE_HSF 0 "register_operand" "=w")
-	(unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand" "w")]
-			UNPACK_UNSIGNED))]
-  "TARGET_SVE"
-  "uunpk<perm_hilo>\t%0.<Vewtype>, %1.<Vetype>"
-)
-
 ;; Unpack one half of a VNx4SF to VNx2DF, or one half of a VNx8HF to VNx4SF.
 ;; First unpack the source without conversion, then float-convert the
 ;; unpacked source.
 (define_expand "vec_unpacks_<perm_hilo>_<mode>"
-  [(set (match_dup 2)
-	(unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand")]
-			UNPACK_UNSIGNED))
-   (set (match_operand:<VWIDE> 0 "register_operand")
-	(unspec:<VWIDE> [(match_dup 3)
-			 (unspec:<VWIDE> [(match_dup 2)] UNSPEC_FLOAT_CONVERT)]
-			UNSPEC_MERGE_PTRUE))]
-  "TARGET_SVE"
-  {
-    operands[2] = gen_reg_rtx (<MODE>mode);
-    operands[3] = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand")]
+		   UNPACK_UNSIGNED)]
+  "TARGET_SVE"
+  {
+    /* Use ZIP to do the unpack, since we don't care about the upper halves
+       and since it has the nice property of not needing any subregs.
+       If using UUNPK* turns out to be preferable, we could model it as
+       a ZIP whose first operand is zero.  */
+    rtx temp = gen_reg_rtx (<MODE>mode);
+    emit_insn ((<hi_lanes_optab>
+		? gen_aarch64_sve_zip2<mode>
+		: gen_aarch64_sve_zip1<mode>)
+	       (temp, operands[1], operands[1]));
+    rtx ptrue = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
+    emit_insn (gen_aarch64_sve_extend<mode><Vwide>2 (operands[0],
+						     ptrue, temp));
+    DONE;
   }
 )
 
@@ -2274,18 +2303,25 @@
 ;; to VNx2DI, reinterpret the VNx2DI as a VNx4SI, then convert the
 ;; unpacked VNx4SI to VNx2DF.
 (define_expand "vec_unpack<su_optab>_float_<perm_hilo>_vnx4si"
-  [(set (match_dup 2)
-	(unspec:VNx2DI [(match_operand:VNx4SI 1 "register_operand")]
-		       UNPACK_UNSIGNED))
-   (set (match_operand:VNx2DF 0 "register_operand")
-	(unspec:VNx2DF [(match_dup 3)
-			(FLOATUORS:VNx2DF (match_dup 4))]
-		       UNSPEC_MERGE_PTRUE))]
-  "TARGET_SVE"
-  {
-    operands[2] = gen_reg_rtx (VNx2DImode);
-    operands[3] = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
-    operands[4] = gen_rtx_SUBREG (VNx4SImode, operands[2], 0);
+  [(match_operand:VNx2DF 0 "register_operand")
+   (FLOATUORS:VNx2DF
+     (unspec:VNx2DI [(match_operand:VNx4SI 1 "register_operand")]
+		    UNPACK_UNSIGNED))]
+  "TARGET_SVE"
+  {
+    /* Use ZIP to do the unpack, since we don't care about the upper halves
+       and since it has the nice property of not needing any subregs.
+       If using UUNPK* turns out to be preferable, we could model it as
+       a ZIP whose first operand is zero.  */
+    rtx temp = gen_reg_rtx (VNx4SImode);
+    emit_insn ((<hi_lanes_optab>
+		? gen_aarch64_sve_zip2vnx4si
+		: gen_aarch64_sve_zip1vnx4si)
+	       (temp, operands[1], operands[1]));
+    rtx ptrue = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
+    emit_insn (gen_aarch64_sve_<FLOATUORS:optab>vnx4sivnx2df2 (operands[0],
+							       ptrue, temp));
+    DONE;
   }
 )
 
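The selection logic shared by the new define_expands above can be
summarised in ordinary C.  The sketch below is illustrative only (the
helper name and output strings are invented; the real selection is the
<hi_lanes_optab> attribute added to iterators.md below): an unpack
optab must extract the high-numbered lanes exactly when its hi/lo name
and the target's endianness disagree.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of <hi_lanes_optab>: true when the optab refers
   to the high-numbered lanes, which is what decides between the
   *HI and *LO forms of the underlying instruction.  */

static const char *
choose_unpack (bool optab_is_hi, bool bytes_big_endian)
{
  bool hi_lanes = optab_is_hi != bytes_big_endian;
  return hi_lanes ? "PUNPKHI/UUNPKHI/ZIP2 (high-numbered lanes)"
		  : "PUNPKLO/UUNPKLO/ZIP1 (low-numbered lanes)";
}

int
main (void)
{
  for (int big_endian = 0; big_endian < 2; big_endian++)
    for (int is_hi = 0; is_hi < 2; is_hi++)
      printf ("vec_unpack*_%s on %s-endian -> %s\n",
	      is_hi ? "hi" : "lo", big_endian ? "big" : "little",
	      choose_unpack (is_hi, big_endian));
  return 0;
}

Both endiannesses therefore share the same define_insns; only the
expand-time mapping from optab to instruction changes.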
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a2945a81848..fa181794392 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1680,6 +1680,15 @@
 			    (UNSPEC_UNPACKSHI "hi") (UNSPEC_UNPACKUHI "hi")
 			    (UNSPEC_UNPACKSLO "lo") (UNSPEC_UNPACKULO "lo")])
 
+;; Return true if the associated optab refers to the high-numbered lanes,
+;; false if it refers to the low-numbered lanes.  The convention is for
+;; "hi" to refer to the low-numbered lanes (the first ones in memory)
+;; for big-endian.
+(define_int_attr hi_lanes_optab [(UNSPEC_UNPACKSHI "!BYTES_BIG_ENDIAN")
+				 (UNSPEC_UNPACKUHI "!BYTES_BIG_ENDIAN")
+				 (UNSPEC_UNPACKSLO "BYTES_BIG_ENDIAN")
+				 (UNSPEC_UNPACKULO "BYTES_BIG_ENDIAN")])
+
 (define_int_attr frecp_suffix  [(UNSPEC_FRECPE "e") (UNSPEC_FRECPX "x")])
 
 (define_int_attr crc_variant [(UNSPEC_CRC32B "crc32b") (UNSPEC_CRC32H "crc32h")
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 84c64b2e77b..cee70dc0bc9 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,10 @@
+2018-03-13  Richard Sandiford
+
+	* gcc.target/aarch64/sve/unpack_fcvt_signed_1.c: Expect zips rather
+	than unpacks.
+	* gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c: Likewise.
+	* gcc.target/aarch64/sve/unpack_float_1.c: Likewise.
+
 2018-03-13  Richard Sandiford
 
 	* gcc.target/aarch64/sve/tls_1.c: New test.
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_signed_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_signed_1.c
index 83ffe8552c2..0f96dc2ff00 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_signed_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_signed_1.c
@@ -10,6 +10,6 @@ unpack_double_int_plus8 (double *d, int32_t *s, int size)
     d[i] = s[i] + 8;
 }
 
-/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tscvtf\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c
index e2f6b1a45ce..70465f91eba 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpack_fcvt_unsigned_1.c
@@ -10,6 +10,6 @@ unpack_double_int_plus9 (double *d, uint32_t *s, int size)
     d[i] = (double) (s[i] + 9);
 }
 
-/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tucvtf\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/unpack_float_1.c b/gcc/testsuite/gcc.target/aarch64/sve/unpack_float_1.c
index 14a636b5fda..deb4cf5e940 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/unpack_float_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/unpack_float_1.c
@@ -8,6 +8,6 @@ unpack_float_plus_7point9 (double *d, float *s, int size)
     d[i] = s[i] + 7.9;
 }
 
-/* { dg-final { scan-assembler-times {\tuunpklo\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tuunpkhi\tz[0-9]+\.d, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip1\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tzip2\tz[0-9]+\.s, z[0-9]+\.s, z[0-9]+\.s\n} 1 } } */
 /* { dg-final { scan-assembler-times {\tfcvt\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.s\n} 2 } } */
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 96d40c8c4b3..1c43ed0becd 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -334,7 +334,8 @@ vect_maybe_permute_loop_masks (gimple_seq *seq, rgroup_masks *dest_rgm,
     {
       tree src = src_rgm->masks[i / 2];
       tree dest = dest_rgm->masks[i];
-      tree_code code = (i & 1 ? VEC_UNPACK_HI_EXPR
+      tree_code code = ((i & 1) == (BYTES_BIG_ENDIAN ? 0 : 1)
+			? VEC_UNPACK_HI_EXPR
 			: VEC_UNPACK_LO_EXPR);
       gassign *stmt;
       if (dest_masktype == unpack_masktype)
-- 
2.30.2
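Finally, the tree-vect-loop-manip.c change can be sanity-checked with a
standalone model (illustrative C, not GCC code): destination mask i is
built from half of source mask i/2, and the parity test picks
VEC_UNPACK_HI_EXPR for odd i on little-endian but for even i on
big-endian, matching the reversed meaning of "hi" described above.

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the new parity test in
   vect_maybe_permute_loop_masks.  */

static const char *
unpack_code (unsigned i, bool bytes_big_endian)
{
  return ((i & 1) == (bytes_big_endian ? 0 : 1)
	  ? "VEC_UNPACK_HI_EXPR"
	  : "VEC_UNPACK_LO_EXPR");
}

int
main (void)
{
  for (unsigned i = 0; i < 4; i++)
    printf ("dest mask %u (from src mask %u): LE %s, BE %s\n",
	    i, i / 2, unpack_code (i, false), unpack_code (i, true));
  return 0;
}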