From d113ece60450b2efb07e9057b6d2732b08fee2c4 Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandiford@arm.com> Date: Wed, 14 Aug 2019 11:00:45 +0000 Subject: [PATCH] [AArch64] Use SVE UXT[BHW] as a form of predicated AND UXTB, UXTH and UXTW are equivalent to predicated ANDs with the constants 0xff, 0xffff and 0xffffffff respectively. This patch uses them in the patterns for IFN_COND_AND. 2019-08-14 Richard Sandiford <richard.sandiford@arm.com> gcc/ * config/aarch64/aarch64.c (aarch64_print_operand): Allow %e to take the equivalent mask, as well as a bit count. * config/aarch64/predicates.md (aarch64_sve_uxtb_immediate) (aarch64_sve_uxth_immediate, aarch64_sve_uxt_immediate) (aarch64_sve_pred_and_operand): New predicates. * config/aarch64/iterators.md (sve_pred_int_rhs2_operand): New code attribute. * config/aarch64/aarch64-sve.md (cond_<optab><mode>): Use it. (*cond_uxt<mode>_2, *cond_uxt<mode>_any): New patterns. gcc/testsuite/ * gcc.target/aarch64/sve/cond_uxt_1.c: New test. * gcc.target/aarch64/sve/cond_uxt_1_run.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_2.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_2_run.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_3.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_3_run.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_4.c: Likewise. * gcc.target/aarch64/sve/cond_uxt_4_run.c: Likewise. 
From-SVN: r274479 --- gcc/ChangeLog | 13 +++++ gcc/config/aarch64/aarch64-sve.md | 55 ++++++++++++++++++- gcc/config/aarch64/aarch64.c | 28 ++++------ gcc/config/aarch64/iterators.md | 14 +++++ gcc/config/aarch64/predicates.md | 19 +++++++ gcc/testsuite/ChangeLog | 11 ++++ .../gcc.target/aarch64/sve/cond_uxt_1.c | 40 ++++++++++++++ .../gcc.target/aarch64/sve/cond_uxt_1_run.c | 27 +++++++++ .../gcc.target/aarch64/sve/cond_uxt_2.c | 40 ++++++++++++++ .../gcc.target/aarch64/sve/cond_uxt_2_run.c | 27 +++++++++ .../gcc.target/aarch64/sve/cond_uxt_3.c | 39 +++++++++++++ .../gcc.target/aarch64/sve/cond_uxt_3_run.c | 27 +++++++++ .../gcc.target/aarch64/sve/cond_uxt_4.c | 36 ++++++++++++ .../gcc.target/aarch64/sve/cond_uxt_4_run.c | 27 +++++++++ 14 files changed, 386 insertions(+), 17 deletions(-) create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1_run.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2_run.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3_run.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4.c create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4_run.c diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 3338cf3475d..3413b57db29 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,16 @@ +2019-08-14 Richard Sandiford <richard.sandiford@arm.com> + + * config/aarch64/aarch64.c (aarch64_print_operand): Allow %e to + take the equivalent mask, as well as a bit count. + * config/aarch64/predicates.md (aarch64_sve_uxtb_immediate) + (aarch64_sve_uxth_immediate, aarch64_sve_uxt_immediate) + (aarch64_sve_pred_and_operand): New predicates. + * config/aarch64/iterators.md (sve_pred_int_rhs2_operand): New + code attribute. + * config/aarch64/aarch64-sve.md + (cond_<optab><mode>): Use it. 
+ (*cond_uxt<mode>_2, *cond_uxt<mode>_any): New patterns. + 2019-08-14 Richard Sandiford <richard.sandiford@arm.com> * config/aarch64/aarch64-sve.md diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md index b1bec73a18b..a79757e9bf1 100644 --- a/gcc/config/aarch64/aarch64-sve.md +++ b/gcc/config/aarch64/aarch64-sve.md @@ -54,6 +54,7 @@ ;; ;; == Unary arithmetic ;; ---- [INT] General unary arithmetic corresponding to rtx codes +;; ---- [INT] Zero extension ;; ---- [INT] Logical inverse ;; ---- [FP] General unary arithmetic corresponding to unspecs ;; ---- [PRED] Inverse @@ -1493,6 +1494,58 @@ [(set_attr "movprfx" "*,yes,yes")] ) +;; ------------------------------------------------------------------------- +;; ---- [INT] Zero extension +;; ------------------------------------------------------------------------- +;; Includes: +;; - UXTB +;; - UXTH +;; - UXTW +;; ------------------------------------------------------------------------- + +;; Match UXT[BHW] as a conditional AND of a constant, merging with the +;; first input. +(define_insn "*cond_uxt<mode>_2" + [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w") + (unspec:SVE_I + [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl") + (and:SVE_I + (match_operand:SVE_I 2 "register_operand" "0, w") + (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate")) + (match_dup 2)] + UNSPEC_SEL))] + "TARGET_SVE" + "@ + uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype> + movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>" + [(set_attr "movprfx" "*,yes")] +) + +;; Match UXT[BHW] as a conditional AND of a constant, merging with an +;; independent value. +;; +;; The earlyclobber isn't needed for the first alternative, but omitting +;; it would only help the case in which operands 2 and 4 are the same, +;; which is handled above rather than here. Marking all the alternatives +;; as early-clobber helps to make the instruction more regular to the +;; register allocator. 
+(define_insn "*cond_uxt<mode>_any" + [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w") + (unspec:SVE_I + [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl") + (and:SVE_I + (match_operand:SVE_I 2 "register_operand" "w, w, w") + (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate")) + (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")] + UNSPEC_SEL))] + "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])" + "@ + uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype> + movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype> + movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>" + [(set_attr "movprfx" "*,yes,yes")] +) + ;; ------------------------------------------------------------------------- ;; ---- [INT] Logical inverse ;; ------------------------------------------------------------------------- @@ -1794,7 +1847,7 @@ [(match_operand:<VPRED> 1 "register_operand") (SVE_INT_BINARY:SVE_I (match_operand:SVE_I 2 "register_operand") - (match_operand:SVE_I 3 "register_operand")) + (match_operand:SVE_I 3 "<sve_pred_int_rhs2_operand>")) (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")] UNSPEC_SEL))] "TARGET_SVE" diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index aaf9e80aea5..8e392257be9 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -8328,7 +8328,8 @@ sizetochar (int size) 'D': Take the duplicated element in a vector constant and print it as an unsigned integer, in decimal. 'e': Print the sign/zero-extend size as a character 8->b, - 16->h, 32->w. + 16->h, 32->w. Can also be used for masks: + 0xff->b, 0xffff->h, 0xffffffff->w. 'I': If the operand is a duplicated vector constant, replace it with the duplicated scalar. 
If the operand is then a floating-point constant, replace @@ -8399,27 +8400,22 @@ aarch64_print_operand (FILE *f, rtx x, int code) case 'e': { - int n; - - if (!CONST_INT_P (x) - || (n = exact_log2 (INTVAL (x) & ~7)) <= 0) + x = unwrap_const_vec_duplicate (x); + if (!CONST_INT_P (x)) { output_operand_lossage ("invalid operand for '%%%c'", code); return; } - switch (n) + HOST_WIDE_INT val = INTVAL (x); + if ((val & ~7) == 8 || val == 0xff) + fputc ('b', f); + else if ((val & ~7) == 16 || val == 0xffff) + fputc ('h', f); + else if ((val & ~7) == 32 || val == 0xffffffff) + fputc ('w', f); + else { - case 3: - fputc ('b', f); - break; - case 4: - fputc ('h', f); - break; - case 5: - fputc ('w', f); - break; - default: output_operand_lossage ("invalid operand for '%%%c'", code); return; } diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index bc43f736d22..3642ba114a3 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -1525,6 +1525,20 @@ (umax "D") (umin "D")]) +;; The predicate to use for the second input operand in a cond_<optab><mode> +;; pattern. +(define_code_attr sve_pred_int_rhs2_operand + [(plus "register_operand") + (minus "register_operand") + (mult "register_operand") + (smax "register_operand") + (umax "register_operand") + (smin "register_operand") + (umin "register_operand") + (and "aarch64_sve_pred_and_operand") + (ior "register_operand") + (xor "register_operand")]) + ;; ------------------------------------------------------------------- ;; Int Iterators. 
;; ------------------------------------------------------------------- diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md index b7230ca32c6..1a47708c327 100644 --- a/gcc/config/aarch64/predicates.md +++ b/gcc/config/aarch64/predicates.md @@ -606,11 +606,26 @@ (and (match_code "const,const_vector") (match_test "aarch64_sve_inc_dec_immediate_p (op)"))) +(define_predicate "aarch64_sve_uxtb_immediate" + (and (match_code "const_vector") + (match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 8") + (match_test "aarch64_const_vec_all_same_int_p (op, 0xff)"))) + +(define_predicate "aarch64_sve_uxth_immediate" + (and (match_code "const_vector") + (match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 16") + (match_test "aarch64_const_vec_all_same_int_p (op, 0xffff)"))) + (define_predicate "aarch64_sve_uxtw_immediate" (and (match_code "const_vector") (match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 32") (match_test "aarch64_const_vec_all_same_int_p (op, 0xffffffff)"))) +(define_predicate "aarch64_sve_uxt_immediate" + (ior (match_operand 0 "aarch64_sve_uxtb_immediate") + (match_operand 0 "aarch64_sve_uxth_immediate") + (match_operand 0 "aarch64_sve_uxtw_immediate"))) + (define_predicate "aarch64_sve_logical_immediate" (and (match_code "const,const_vector") (match_test "aarch64_sve_bitmask_immediate_p (op)"))) @@ -670,6 +685,10 @@ (match_operand 0 "aarch64_sve_sub_arith_immediate") (match_operand 0 "aarch64_sve_inc_dec_immediate"))) +(define_predicate "aarch64_sve_pred_and_operand" + (ior (match_operand 0 "register_operand") + (match_operand 0 "aarch64_sve_uxt_immediate"))) + (define_predicate "aarch64_sve_logical_operand" (ior (match_operand 0 "register_operand") (match_operand 0 "aarch64_sve_logical_immediate"))) diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index a6c8c24f045..70f44b767bc 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,14 @@ +2019-08-14 Richard Sandiford + + * 
gcc.target/aarch64/sve/cond_uxt_1.c: New test. + * gcc.target/aarch64/sve/cond_uxt_1_run.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_2.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_2_run.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_3.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_3_run.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_4.c: Likewise. + * gcc.target/aarch64/sve/cond_uxt_4_run.c: Likewise. + 2019-08-14 Richard Sandiford <richard.sandiford@arm.com> * gcc.target/aarch64/sve/cond_convert_1.c: New test. diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1.c new file mode 100644 index 00000000000..05641199e15 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1.c @@ -0,0 +1,40 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include <stdint.h> + +#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE)) + +#define DEF_LOOP(TYPE, CONST) \ + void __attribute__ ((noipa)) \ + test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \ + TYPE *restrict b) \ + { \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + r[i] = a[i] > 20 ? 
b[i] & CONST : b[i]; \ + } + +#define TEST_ALL(T) \ + T (uint16_t, 0xff) \ + \ + T (uint32_t, 0xff) \ + T (uint32_t, 0xffff) \ + \ + T (uint64_t, 0xff) \ + T (uint64_t, 0xffff) \ + T (uint64_t, 0xffffffff) + +TEST_ALL (DEF_LOOP) + +/* { dg-final { scan-assembler {\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */ + +/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */ +/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \1\n} } } */ + +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */ +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \1\n} } } */ +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtw\t\1, p[0-7]/m, \1\n} } } */ + +/* { dg-final { scan-assembler-not {\tmov\tz} } } */ +/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */ +/* { dg-final { scan-assembler-not {\tsel\t} } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1_run.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1_run.c new file mode 100644 index 00000000000..685f39478d6 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_1_run.c @@ -0,0 +1,27 @@ +/* { dg-do run { target { aarch64_sve_hw } } } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include "cond_uxt_1.c" + +#define TEST_LOOP(TYPE, CONST) \ + { \ + TYPE r[NUM_ELEMS (TYPE)]; \ + TYPE a[NUM_ELEMS (TYPE)]; \ + TYPE b[NUM_ELEMS (TYPE)]; \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + { \ + a[i] = (i & 1 ? i : 3 * i); \ + b[i] = (i >> 4) << (i & 15); \ + asm volatile ("" ::: "memory"); \ + } \ + test_##CONST##_##TYPE (r, a, b); \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + if (r[i] != (a[i] > 20 ? 
b[i] & CONST : b[i])) \ + __builtin_abort (); \ + } + +int main () +{ + TEST_ALL (TEST_LOOP) + return 0; +} diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2.c new file mode 100644 index 00000000000..c900498a0df --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2.c @@ -0,0 +1,40 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include <stdint.h> + +#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE)) + +#define DEF_LOOP(TYPE, CONST) \ + void __attribute__ ((noipa)) \ + test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \ + TYPE *restrict b) \ + { \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + r[i] = a[i] > 20 ? b[i] & CONST : a[i]; \ + } + +#define TEST_ALL(T) \ + T (uint16_t, 0xff) \ + \ + T (uint32_t, 0xff) \ + T (uint32_t, 0xffff) \ + \ + T (uint64_t, 0xff) \ + T (uint64_t, 0xffff) \ + T (uint64_t, 0xffffffff) + +TEST_ALL (DEF_LOOP) + +/* { dg-final { scan-assembler {\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x1,[^L]*\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */ + +/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x1,[^L]*\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */ +/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x1,[^L]*\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \2\n} } } */ + +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */ +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \2\n} } } */ +/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtw\t\1, p[0-7]/m, \2\n} } } */ + +/* { dg-final { scan-assembler-not {\tmov\tz} } } */ +/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */ +/* { dg-final { 
scan-assembler-not {\tsel\t} } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2_run.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2_run.c new file mode 100644 index 00000000000..75679cdf9a3 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_2_run.c @@ -0,0 +1,27 @@ +/* { dg-do run { target { aarch64_sve_hw } } } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include "cond_uxt_2.c" + +#define TEST_LOOP(TYPE, CONST) \ + { \ + TYPE r[NUM_ELEMS (TYPE)]; \ + TYPE a[NUM_ELEMS (TYPE)]; \ + TYPE b[NUM_ELEMS (TYPE)]; \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + { \ + a[i] = (i & 1 ? i : 3 * i); \ + b[i] = (i >> 4) << (i & 15); \ + asm volatile ("" ::: "memory"); \ + } \ + test_##CONST##_##TYPE (r, a, b); \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + if (r[i] != (a[i] > 20 ? b[i] & CONST : a[i])) \ + __builtin_abort (); \ + } + +int main () +{ + TEST_ALL (TEST_LOOP) + return 0; +} diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3.c new file mode 100644 index 00000000000..cf1fd002915 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3.c @@ -0,0 +1,39 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include <stdint.h> + +#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE)) + +#define DEF_LOOP(TYPE, CONST) \ + void __attribute__ ((noipa)) \ + test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \ + TYPE *restrict b) \ + { \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + r[i] = a[i] > 20 ? 
b[i] & CONST : 127; \ + } + +#define TEST_ALL(T) \ + T (uint16_t, 0xff) \ + \ + T (uint32_t, 0xff) \ + T (uint32_t, 0xffff) \ + \ + T (uint64_t, 0xff) \ + T (uint64_t, 0xffff) \ + T (uint64_t, 0xffffffff) + +TEST_ALL (DEF_LOOP) + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.h, p[0-7]/m, z[0-9]+\.h\n} } } */ + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.s, p[0-7]/m, z[0-9]+\.s\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxth\t\1\.s, p[0-7]/m, z[0-9]+\.s\n} } } */ + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxth\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtw\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */ + +/* { dg-final { scan-assembler-not {\tmov\tz[^\n]*z} } } */ +/* { dg-final { scan-assembler-not {\tsel\t} } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3_run.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3_run.c new file mode 100644 index 00000000000..3d33d3a39c6 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_3_run.c @@ -0,0 +1,27 @@ +/* { dg-do run { target { aarch64_sve_hw } } } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include "cond_uxt_3.c" + +#define TEST_LOOP(TYPE, CONST) \ + { \ + TYPE r[NUM_ELEMS (TYPE)]; \ + TYPE a[NUM_ELEMS (TYPE)]; \ + TYPE b[NUM_ELEMS (TYPE)]; \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + { \ + a[i] = (i & 1 ? i : 3 * i); \ + b[i] = (i >> 4) << (i & 15); \ + asm volatile ("" ::: "memory"); \ + } \ + test_##CONST##_##TYPE (r, a, b); \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + if (r[i] != (a[i] > 20 ? 
b[i] & CONST : 127)) \ + __builtin_abort (); \ + } + +int main () +{ + TEST_ALL (TEST_LOOP) + return 0; +} diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4.c new file mode 100644 index 00000000000..25c664780cc --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4.c @@ -0,0 +1,36 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -ftree-vectorize" } */ + +#include <stdint.h> + +#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE)) + +#define DEF_LOOP(TYPE, CONST) \ + void __attribute__ ((noipa)) \ + test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \ + TYPE *restrict b) \ + { \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + r[i] = a[i] > 20 ? b[i] & CONST : 0; \ + } + +#define TEST_ALL(T) \ + T (uint16_t, 0xff) \ + \ + T (uint32_t, 0xff) \ + T (uint32_t, 0xffff) \ + \ + T (uint64_t, 0xff) \ + T (uint64_t, 0xffff) \ + T (uint64_t, 0xffffffff) + +TEST_ALL (DEF_LOOP) + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.h), (p[0-7])/z, z[0-9]+\.h\n\tuxtb\t\1, \2/m, z[0-9]+\.h\n} } } */ + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.s), (p[0-7])/z, z[0-9]+\.s\n\tuxtb\t\1, \2/m, z[0-9]+\.s\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.s), (p[0-7])/z, z[0-9]+\.s\n\tuxth\t\1, \2/m, z[0-9]+\.s\n} } } */ + +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxtb\t\1, \2/m, z[0-9]+\.d\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxth\t\1, \2/m, z[0-9]+\.d\n} } } */ +/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxtw\t\1, \2/m, z[0-9]+\.d\n} } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4_run.c b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4_run.c new file mode 100644 index 00000000000..f3c4374bab1 --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_uxt_4_run.c @@ -0,0 +1,27 @@ +/* { dg-do run { target { aarch64_sve_hw } } } */ +/* { 
dg-options "-O2 -ftree-vectorize" } */ + +#include "cond_uxt_4.c" + +#define TEST_LOOP(TYPE, CONST) \ + { \ + TYPE r[NUM_ELEMS (TYPE)]; \ + TYPE a[NUM_ELEMS (TYPE)]; \ + TYPE b[NUM_ELEMS (TYPE)]; \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + { \ + a[i] = (i & 1 ? i : 3 * i); \ + b[i] = (i >> 4) << (i & 15); \ + asm volatile ("" ::: "memory"); \ + } \ + test_##CONST##_##TYPE (r, a, b); \ + for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \ + if (r[i] != (a[i] > 20 ? b[i] & CONST : 0)) \ + __builtin_abort (); \ + } + +int main () +{ + TEST_ALL (TEST_LOOP) + return 0; +} -- 2.30.2