From b2f5b38042f6a8772237d37cdf20fc139ed0f8fe Mon Sep 17 00:00:00 2001
From: Richard Sandiford <richard.sandiford@arm.com>
Date: Fri, 10 Jan 2020 16:31:13 +0000
Subject: [PATCH] [AArch64] Fix reversed vcond_mask invocation in
 aarch64_evpc_sel

aarch64_evpc_sel (new in GCC 10) got the true and false vectors the
wrong way round, leading to execution failures with fixed-length
128-bit SVE.  Now that the ACLE types are in trunk, it's much easier
to match the exact asm sequence for a permute.

2020-01-10  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* config/aarch64/aarch64.c (aarch64_evpc_sel): Fix gen_vcond_mask
	invocation.

gcc/testsuite/
	* gcc.target/aarch64/sve/sel_1.c: Use SVE types for the arguments
	and return values.  Use check-function-bodies instead of
	scan-assembler.
	* gcc.target/aarch64/sve/sel_2.c: Likewise.
	* gcc.target/aarch64/sve/sel_3.c: Likewise.

From-SVN: r280121
---
 gcc/ChangeLog                                |  5 ++
 gcc/config/aarch64/aarch64.c                 |  4 +-
 gcc/testsuite/ChangeLog                      |  7 ++
 gcc/testsuite/gcc.target/aarch64/sve/sel_1.c | 22 +++---
 gcc/testsuite/gcc.target/aarch64/sve/sel_2.c | 52 +++++++++-----
 gcc/testsuite/gcc.target/aarch64/sve/sel_3.c | 74 +++++++++++++++-----
 6 files changed, 116 insertions(+), 48 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 126c6eba8c2..ab5131a7337 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2020-01-10  Richard Sandiford  <richard.sandiford@arm.com>
+
+	* config/aarch64/aarch64.c (aarch64_evpc_sel): Fix gen_vcond_mask
+	invocation.
+
 2020-01-10  Richard Sandiford  <richard.sandiford@arm.com>
 
 	* config/aarch64/aarch64-builtins.c
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index f83764fc420..190380b9a2e 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -19449,6 +19449,7 @@ aarch64_evpc_sel (struct expand_vec_perm_d *d)
 
   machine_mode pred_mode = aarch64_sve_pred_mode (vmode);
 
+  /* Build a predicate that is true when op0 elements should be used.  */
   rtx_vector_builder builder (pred_mode, n_patterns, 2);
   for (int i = 0; i < n_patterns * 2; i++)
     {
@@ -19459,7 +19460,8 @@ aarch64_evpc_sel (struct expand_vec_perm_d *d)
 
   rtx const_vec = builder.build ();
   rtx pred = force_reg (pred_mode, const_vec);
-  emit_insn (gen_vcond_mask (vmode, vmode, d->target, d->op1, d->op0, pred));
+  /* TARGET = PRED ? OP0 : OP1.  */
+  emit_insn (gen_vcond_mask (vmode, vmode, d->target, d->op0, d->op1, pred));
   return true;
 }
 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index d208b673dad..8dc47ea6c93 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,10 @@
+2020-01-10  Richard Sandiford  <richard.sandiford@arm.com>
+
+	* gcc.target/aarch64/sve/sel_1.c: Use SVE types for the arguments
+	and return values.  Use check-function-bodies instead of
+	scan-assembler.
+	* gcc.target/aarch64/sve/sel_2.c: Likewise.
+	* gcc.target/aarch64/sve/sel_3.c: Likewise.
+
 2020-01-10  Martin Jambor  <mjambor@suse.cz>
 
 	* gcc.dg/ipa/ipa-clone-3.c: Replace struct initializer with
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/sel_1.c b/gcc/testsuite/gcc.target/aarch64/sve/sel_1.c
index e651e5b93b6..9c581c52fde 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/sel_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/sel_1.c
@@ -1,5 +1,6 @@
 /* { dg-do assemble { target aarch64_asm_sve_ok } } */
 /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 
@@ -13,15 +14,14 @@ typedef int8_t vnx16qi __attribute__((vector_size (32)));
 
 #define INDEX_32 vnx16qi
 
-#define PERMUTE(type, nunits)					    \
-type permute_##type (type x, type y)				    \
-{								    \
-  return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \
+/*
+** permute:
+**	ptrue	(p[0-7])\.h, vl16
+**	sel	z0\.b, \1, z0\.b, z1\.b
+**	ret
+*/
+__SVInt8_t
+permute (__SVInt8_t x, __SVInt8_t y)
+{
+  return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32);
 }
-
-PERMUTE(vnx16qi, 32)
-
-/* { dg-final { scan-assembler-not {\ttbl\t} } } */
-
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.h, vl16\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/sel_2.c b/gcc/testsuite/gcc.target/aarch64/sve/sel_2.c
index 05391474a92..60aaa878534 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/sel_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/sel_2.c
@@ -1,14 +1,13 @@
 /* { dg-do assemble { target aarch64_asm_sve_ok } } */
 /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 
 typedef int8_t vnx16qi __attribute__((vector_size (32)));
 typedef int16_t vnx8hi __attribute__((vector_size (32)));
-typedef int32_t vnx4si __attribute__((vector_size (32)));
 typedef _Float16 vnx8hf __attribute__((vector_size (32)));
-typedef float vnx4sf __attribute__((vector_size (32)));
 
 /* Predicate vector: 1 0 0 0 ...
 */
@@ -20,22 +19,39 @@ typedef float vnx4sf __attribute__((vector_size (32)));
 
 #define MASK_16 {0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31}
 
-#define INDEX_32 vnx16qi
-#define INDEX_16 vnx8hi
-
-#define PERMUTE(type, nunits)					    \
-type permute_##type (type x, type y)				    \
-{								    \
-  return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \
+/*
+** permute_vnx16qi:
+**	ptrue	(p[0-7])\.s, vl8
+**	sel	z0\.b, \1, z0\.b, z1\.b
+**	ret
+*/
+__SVInt8_t
+permute_vnx16qi (__SVInt8_t x, __SVInt8_t y)
+{
+  return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32);
 }
 
-PERMUTE(vnx16qi, 32)
-PERMUTE(vnx8hi, 16)
-PERMUTE(vnx8hf, 16)
-
-/* { dg-final { scan-assembler-not {\ttbl\t} } } */
-
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.h, p[0-9]+, z[0-9]+\.h, z[0-9]+\.h\n} 2 } } */
+/*
+** permute_vnx8hi:
+**	ptrue	(p[0-7])\.s, vl8
+**	sel	z0\.h, \1, z0\.h, z1\.h
+**	ret
+*/
+__SVInt16_t
+permute_vnx8hi (__SVInt16_t x, __SVInt16_t y)
+{
+  return __builtin_shuffle ((vnx8hi) x, (vnx8hi) y, (vnx8hi) MASK_16);
+}
 
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.s, vl8\n} 3 } } */
+/*
+** permute_vnx8hf:
+**	ptrue	(p[0-7])\.s, vl8
+**	sel	z0\.h, \1, z0\.h, z1\.h
+**	ret
+*/
+__SVFloat16_t
+permute_vnx8hf (__SVFloat16_t x, __SVFloat16_t y)
+{
+  return (__SVFloat16_t) __builtin_shuffle ((vnx8hf) x, (vnx8hf) y,
+					    (vnx8hi) MASK_16);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/sel_3.c b/gcc/testsuite/gcc.target/aarch64/sve/sel_3.c
index a87492d9df1..0de1fae6d03 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/sel_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/sel_3.c
@@ -1,5 +1,6 @@
 /* { dg-do assemble { target aarch64_asm_sve_ok } } */
 /* { dg-options "-O2 -msve-vector-bits=256 --save-temps" } */
+/* { dg-final { check-function-bodies "**" "" } } */
 
 #include <stdint.h>
 
@@ -25,26 +26,63 @@ typedef float vnx4sf __attribute__((vector_size (32)));
 
 #define MASK_8 { 0, 9, 2, 11, 4, 13, 6, 15 }
 
-#define INDEX_32 vnx16qi
-#define INDEX_16 vnx8hi
-#define INDEX_8 vnx4si
-
-#define PERMUTE(type, nunits)					    \
-type permute_##type (type x, type y)				    \
-{								    \
-  return __builtin_shuffle (x, y, (INDEX_##nunits) MASK_##nunits); \
+/*
+** permute_vnx16qi:
+**	ptrue	(p[0-7])\.d, vl4
+**	sel	z0\.b, \1, z0\.b, z1\.b
+**	ret
+*/
+__SVInt8_t
+permute_vnx16qi (__SVInt8_t x, __SVInt8_t y)
+{
+  return __builtin_shuffle ((vnx16qi) x, (vnx16qi) y, (vnx16qi) MASK_32);
 }
 
-PERMUTE(vnx16qi, 32)
-PERMUTE(vnx8hi, 16)
-PERMUTE(vnx4si, 8)
-PERMUTE(vnx8hf, 16)
-PERMUTE(vnx4sf, 8)
+/*
+** permute_vnx8hi:
+**	ptrue	(p[0-7])\.d, vl4
+**	sel	z0\.h, \1, z0\.h, z1\.h
+**	ret
+*/
+__SVInt16_t
+permute_vnx8hi (__SVInt16_t x, __SVInt16_t y)
+{
+  return __builtin_shuffle ((vnx8hi) x, (vnx8hi) y, (vnx8hi) MASK_16);
+}
 
-/* { dg-final { scan-assembler-not {\ttbl\t} } } */
+/*
+** permute_vnx4si:
+**	ptrue	(p[0-7])\.d, vl4
+**	sel	z0\.s, \1, z0\.s, z1\.s
+**	ret
+*/
+__SVInt32_t
+permute_vnx4si (__SVInt32_t x, __SVInt32_t y)
+{
+  return __builtin_shuffle ((vnx4si) x, (vnx4si) y, (vnx4si) MASK_8);
+}
 
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.b, p[0-9]+, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.h, p[0-9]+, z[0-9]+\.h, z[0-9]+\.h\n} 2 } } */
-/* { dg-final { scan-assembler-times {\tsel\tz[0-9]+\.s, p[0-9]+, z[0-9]+\.s, z[0-9]+\.s\n} 2 } } */
+/*
+** permute_vnx8hf:
+**	ptrue	(p[0-7])\.d, vl4
+**	sel	z0\.h, \1, z0\.h, z1\.h
+**	ret
+*/
+__SVFloat16_t
+permute_vnx8hf (__SVFloat16_t x, __SVFloat16_t y)
+{
+  return (__SVFloat16_t) __builtin_shuffle ((vnx8hf) x, (vnx8hf) y,
+					    (vnx8hi) MASK_16);
+}
 
-/* { dg-final { scan-assembler-times {\tptrue\tp[0-9]+\.d, vl4\n} 5 } } */
+/*
+** permute_vnx4sf:
+**	ptrue	(p[0-7])\.d, vl4
+**	sel	z0\.s, \1, z0\.s, z1\.s
+**	ret
+*/
+__SVFloat32_t
+permute_vnx4sf (__SVFloat32_t x, __SVFloat32_t y)
+{
+  return __builtin_shuffle ((vnx4sf) x, (vnx4sf) y, (vnx4si) MASK_8);
+}
-- 
2.30.2
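
An aside for readers unfamiliar with the selection semantics being fixed:
in the two-operand form of __builtin_shuffle (x, y, mask), selector
elements smaller than the vector length pick elements of X, so the
predicate that aarch64_evpc_sel builds is true exactly where OP0 must be
chosen, and gen_vcond_mask therefore needs OP0 as its "true" operand.
The sketch below is illustrative only and is not part of the patch; it
uses GCC's generic vector extensions instead of SVE so it runs on any
target, and the values and file name are invented for the example.

/* permute_demo.c: shows which operand an interleaving shuffle mask
   selects from.  Build with: gcc -O2 permute_demo.c && ./a.out  */
#include <stdio.h>

typedef int v4si __attribute__ ((vector_size (16)));

int
main (void)
{
  v4si x = { 0, 1, 2, 3 };	/* plays the role of op0 */
  v4si y = { 4, 5, 6, 7 };	/* plays the role of op1 */
  v4si mask = { 0, 5, 2, 7 };	/* even elements from x, odd from y,
				   analogous to MASK_8 in sel_3.c */
  v4si r = __builtin_shuffle (x, y, mask);

  /* Prints "0 5 2 7": elements 0 and 2 come from x, the "true" vector.
     With the operands reversed, as before the fix, the equivalent SVE
     SEL would have produced 4 1 6 3 instead.  */
  for (int i = 0; i < 4; i++)
    printf ("%d ", r[i]);
  printf ("\n");
  return 0;
}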