+2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
+            Kugan Vivekanandarajah  <kugan.vivekanandarajah@linaro.org>
+
+        * config/aarch64/aarch64-sve.md (*aarch64_cond_abd<SVE_F:mode>_2)
+        (*aarch64_cond_abd<SVE_F:mode>_3)
+        (*aarch64_cond_abd<SVE_F:mode>_any): New patterns.
+
2019-08-15 Richard Sandiford <richard.sandiford@arm.com>
Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
}
)
+;; Predicated floating-point absolute difference, merging with the first
+;; input.
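+;;
+;; For example, the double variant of the loop in the new test
+;; gcc.target/aarch64/sve/cond_fabd_1.c:
+;;
+;;   for (int i = 0; i < n; ++i)
+;;     r[i] = a[i] < 20 ? __builtin_fabs (b[i] - c[i]) : b[i];
+;;
+;; keeps b[i] in the inactive lanes, and b[i] is also the first input
+;; to the subtraction (operand 2), so the FABD can be performed in the
+;; register that already holds b, with no separate select.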
+(define_insn_and_rewrite "*aarch64_cond_abd<mode>_2"
+  [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
+        (unspec:SVE_F
+          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+           (unspec:SVE_F
+             [(match_operand 4)
+              (match_operand:SI 5 "aarch64_sve_gp_strictness")
+              (unspec:SVE_F
+                [(match_operand 6)
+                 (match_operand:SI 7 "aarch64_sve_gp_strictness")
+                 (match_operand:SVE_F 2 "register_operand" "0, w")
+                 (match_operand:SVE_F 3 "register_operand" "w, w")]
+                UNSPEC_COND_FSUB)]
+             UNSPEC_COND_FABS)
+           (match_dup 2)]
+          UNSPEC_SEL))]
+  "TARGET_SVE
+   && aarch64_sve_pred_dominates_p (&operands[4], operands[1])
+   && aarch64_sve_pred_dominates_p (&operands[6], operands[1])"
+  "@
+   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0, %2\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  "&& (!rtx_equal_p (operands[1], operands[4])
+       || !rtx_equal_p (operands[1], operands[6]))"
+  {
+    operands[4] = copy_rtx (operands[1]);
+    operands[6] = copy_rtx (operands[1]);
+  }
+  [(set_attr "movprfx" "*,yes")]
+)
+
+;; Predicated floating-point absolute difference, merging with the second
+;; input.
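+;;
+;; For example, the double variant of the loop in the new test
+;; gcc.target/aarch64/sve/cond_fabd_2.c:
+;;
+;;   for (int i = 0; i < n; ++i)
+;;     r[i] = a[i] < 20 ? __builtin_fabs (b[i] - c[i]) : c[i];
+;;
+;; keeps c[i] in the inactive lanes, and c[i] is also the second input
+;; to the subtraction (operand 3), so the FABD can be performed in the
+;; register that already holds c.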
+(define_insn_and_rewrite "*aarch64_cond_abd<mode>_3"
+  [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
+        (unspec:SVE_F
+          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+           (unspec:SVE_F
+             [(match_operand 4)
+              (match_operand:SI 5 "aarch64_sve_gp_strictness")
+              (unspec:SVE_F
+                [(match_operand 6)
+                 (match_operand:SI 7 "aarch64_sve_gp_strictness")
+                 (match_operand:SVE_F 2 "register_operand" "w, w")
+                 (match_operand:SVE_F 3 "register_operand" "0, w")]
+                UNSPEC_COND_FSUB)]
+             UNSPEC_COND_FABS)
+           (match_dup 3)]
+          UNSPEC_SEL))]
+  "TARGET_SVE
+   && aarch64_sve_pred_dominates_p (&operands[4], operands[1])
+   && aarch64_sve_pred_dominates_p (&operands[6], operands[1])"
+  "@
+   fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+   movprfx\t%0, %3\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>"
+  "&& (!rtx_equal_p (operands[1], operands[4])
+       || !rtx_equal_p (operands[1], operands[6]))"
+  {
+    operands[4] = copy_rtx (operands[1]);
+    operands[6] = copy_rtx (operands[1]);
+  }
+  [(set_attr "movprfx" "*,yes")]
+)
+
+;; Predicated floating-point absolute difference, merging with an
+;; independent value.
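+;;
+;; For example, the double variant of the loop in the new test
+;; gcc.target/aarch64/sve/cond_fabd_3.c:
+;;
+;;   for (int i = 0; i < n; ++i)
+;;     r[i] = a[i] < 20 ? __builtin_fabs (b[i] - c[i]) : a[i];
+;;
+;; keeps a[i] in the inactive lanes, which is independent of both
+;; subtraction inputs, so the destination must first be set up with a
+;; MOVPRFX (or, for the "#" alternative, with the SEL that the rewrite
+;; emits after reload).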
+(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any"
+  [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+        (unspec:SVE_F
+          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
+           (unspec:SVE_F
+             [(match_operand 5)
+              (match_operand:SI 6 "aarch64_sve_gp_strictness")
+              (unspec:SVE_F
+                [(match_operand 7)
+                 (match_operand:SI 8 "aarch64_sve_gp_strictness")
+                 (match_operand:SVE_F 2 "register_operand" "0, w, w, w, w")
+                 (match_operand:SVE_F 3 "register_operand" "w, 0, w, w, w")]
+                UNSPEC_COND_FSUB)]
+             UNSPEC_COND_FABS)
+           (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+          UNSPEC_SEL))]
+  "TARGET_SVE
+   && !rtx_equal_p (operands[2], operands[4])
+   && !rtx_equal_p (operands[3], operands[4])
+   && aarch64_sve_pred_dominates_p (&operands[5], operands[1])
+   && aarch64_sve_pred_dominates_p (&operands[7], operands[1])"
+  "@
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0.<Vetype>, %1/m, %2.<Vetype>\;fabd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   #"
+  "&& 1"
+  {
+    if (reload_completed
+        && register_operand (operands[4], <MODE>mode)
+        && !rtx_equal_p (operands[0], operands[4]))
+      {
+        emit_insn (gen_vcond_mask_<mode><vpred> (operands[0], operands[3],
+                                                 operands[4], operands[1]));
+        operands[4] = operands[3] = operands[0];
+      }
+    else if (!rtx_equal_p (operands[1], operands[5])
+             || !rtx_equal_p (operands[1], operands[7]))
+      {
+        operands[5] = copy_rtx (operands[1]);
+        operands[7] = copy_rtx (operands[1]);
+      }
+    else
+      FAIL;
+  }
+  [(set_attr "movprfx" "yes")]
+)
+
;; -------------------------------------------------------------------------
;; ---- [FP] Multiplication
;; -------------------------------------------------------------------------
+2019-08-15  Richard Sandiford  <richard.sandiford@arm.com>
+            Kugan Vivekanandarajah  <kugan.vivekanandarajah@linaro.org>
+
+        * gcc.target/aarch64/sve/cond_fabd_1.c: New test.
+        * gcc.target/aarch64/sve/cond_fabd_1_run.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_2.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_2_run.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_3.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_3_run.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_4.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_4_run.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_5.c: Likewise.
+        * gcc.target/aarch64/sve/cond_fabd_5_run.c: Likewise.
+
2019-08-15 Richard Sandiford <richard.sandiford@arm.com>
Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_1.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define DEF_LOOP(TYPE, ABS) \
+  void __attribute__ ((noinline, noclone)) \
+  test_##TYPE (TYPE *__restrict r, TYPE *__restrict a, \
+               TYPE *__restrict b, TYPE *__restrict c, \
+               int n) \
+  { \
+    for (int i = 0; i < n; ++i) \
+      r[i] = a[i] < 20 ? ABS (b[i] - c[i]) : b[i]; \
+  }
+
+#define TEST_ALL(T) \
+  T (_Float16, __builtin_fabsf16) \
+  T (float, __builtin_fabsf) \
+  T (double, __builtin_fabs)
+
+TEST_ALL (DEF_LOOP)
+
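+/* Each false lane keeps b[i], which is also the first input to the
+   subtraction, so the FABD should be able to operate directly on the
+   register that holds b: no MOVPRFX, MOV or SEL is expected.  */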
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.h, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.s, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.d, p[0-7]/m,} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^,]*z} } } */
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_1_run.c
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include "cond_fabd_1.c"
+
+#define N 99
+
+#define TEST_LOOP(TYPE, ABS) \
+  { \
+    TYPE r[N], a[N], b[N], c[N]; \
+    for (int i = 0; i < N; ++i) \
+      { \
+        a[i] = (i & 1 ? i : 3 * i); \
+        b[i] = (i >> 4) << (i & 15); \
+        c[i] = ((i + 2) % 3) * (i + 1); \
+        asm volatile ("" ::: "memory"); \
+      } \
+    test_##TYPE (r, a, b, c, N); \
+    for (int i = 0; i < N; ++i) \
+      { \
+        TYPE expected = a[i] < 20 ? ABS (b[i] - c[i]) : b[i]; \
+        if (r[i] != expected) \
+          __builtin_abort (); \
+        asm volatile ("" ::: "memory"); \
+      } \
+  }
+
+int
+main (void)
+{
+  TEST_ALL (TEST_LOOP)
+  return 0;
+}
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_2.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define DEF_LOOP(TYPE, ABS) \
+  void __attribute__ ((noinline, noclone)) \
+  test_##TYPE (TYPE *__restrict r, TYPE *__restrict a, \
+               TYPE *__restrict b, TYPE *__restrict c, \
+               int n) \
+  { \
+    for (int i = 0; i < n; ++i) \
+      r[i] = a[i] < 20 ? ABS (b[i] - c[i]) : c[i]; \
+  }
+
+#define TEST_ALL(T) \
+  T (_Float16, __builtin_fabsf16) \
+  T (float, __builtin_fabsf) \
+  T (double, __builtin_fabs)
+
+TEST_ALL (DEF_LOOP)
+
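+/* Each false lane keeps c[i], which is also the second input to the
+   subtraction, so the FABD should be able to operate directly on the
+   register that holds c: no MOVPRFX, MOV or SEL is expected.  */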
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.h, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.s, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.d, p[0-7]/m,} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^,]*z} } } */
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_2_run.c
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include "cond_fabd_2.c"
+
+#define N 99
+
+#define TEST_LOOP(TYPE, ABS) \
+  { \
+    TYPE r[N], a[N], b[N], c[N]; \
+    for (int i = 0; i < N; ++i) \
+      { \
+        a[i] = (i & 1 ? i : 3 * i); \
+        b[i] = (i >> 4) << (i & 15); \
+        c[i] = ((i + 2) % 3) * (i + 1); \
+        asm volatile ("" ::: "memory"); \
+      } \
+    test_##TYPE (r, a, b, c, N); \
+    for (int i = 0; i < N; ++i) \
+      { \
+        TYPE expected = a[i] < 20 ? ABS (b[i] - c[i]) : c[i]; \
+        if (r[i] != expected) \
+          __builtin_abort (); \
+        asm volatile ("" ::: "memory"); \
+      } \
+  }
+
+int
+main (void)
+{
+  TEST_ALL (TEST_LOOP)
+  return 0;
+}
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_3.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define DEF_LOOP(TYPE, ABS) \
+  void __attribute__ ((noinline, noclone)) \
+  test_##TYPE (TYPE *__restrict r, TYPE *__restrict a, \
+               TYPE *__restrict b, TYPE *__restrict c, \
+               int n) \
+  { \
+    for (int i = 0; i < n; ++i) \
+      r[i] = a[i] < 20 ? ABS (b[i] - c[i]) : a[i]; \
+  }
+
+#define TEST_ALL(T) \
+  T (_Float16, __builtin_fabsf16) \
+  T (float, __builtin_fabsf) \
+  T (double, __builtin_fabs)
+
+TEST_ALL (DEF_LOOP)
+
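+/* Each false lane keeps a[i], which is independent of both subtraction
+   inputs, so a predicated MOVPRFX /M is expected to set up the
+   destination before each FABD, with no plain MOV or SEL.  */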
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.h, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.s, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.d, p[0-7]/m,} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^,]*z} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_3_run.c
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include "cond_fabd_3.c"
+
+#define N 99
+
+#define TEST_LOOP(TYPE, ABS) \
+  { \
+    TYPE r[N], a[N], b[N], c[N]; \
+    for (int i = 0; i < N; ++i) \
+      { \
+        a[i] = (i & 1 ? i : 3 * i); \
+        b[i] = (i >> 4) << (i & 15); \
+        c[i] = ((i + 2) % 3) * (i + 1); \
+        asm volatile ("" ::: "memory"); \
+      } \
+    test_##TYPE (r, a, b, c, N); \
+    for (int i = 0; i < N; ++i) \
+      { \
+        TYPE expected = a[i] < 20 ? ABS (b[i] - c[i]) : a[i]; \
+        if (r[i] != expected) \
+          __builtin_abort (); \
+        asm volatile ("" ::: "memory"); \
+      } \
+  }
+
+int
+main (void)
+{
+  TEST_ALL (TEST_LOOP)
+  return 0;
+}
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_4.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define DEF_LOOP(TYPE, ABS) \
+  void __attribute__ ((noinline, noclone)) \
+  test_##TYPE (TYPE *__restrict r, TYPE *__restrict a, \
+               TYPE *__restrict b, TYPE *__restrict c, \
+               int n) \
+  { \
+    for (int i = 0; i < n; ++i) \
+      r[i] = a[i] < 20 ? ABS (b[i] - c[i]) : 8.0; \
+  }
+
+#define TEST_ALL(T) \
+  T (_Float16, __builtin_fabsf16) \
+  T (float, __builtin_fabsf) \
+  T (double, __builtin_fabs)
+
+TEST_ALL (DEF_LOOP)
+
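+/* Each false lane keeps the constant 8.0; the selection of that constant
+   is not merged into the FABD, so a separate SEL is expected for each
+   type, with no MOVPRFX.  */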
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.h, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.s, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.d, p[0-7]/m,} 1 } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^,]*z} } } */
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
+/* { dg-final { scan-assembler-times {\tsel\t} 3 } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_4_run.c
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include "cond_fabd_4.c"
+
+#define N 99
+
+#define TEST_LOOP(TYPE, ABS) \
+  { \
+    TYPE r[N], a[N], b[N], c[N]; \
+    for (int i = 0; i < N; ++i) \
+      { \
+        a[i] = (i & 1 ? i : 3 * i); \
+        b[i] = (i >> 4) << (i & 15); \
+        c[i] = ((i + 2) % 3) * (i + 1); \
+        asm volatile ("" ::: "memory"); \
+      } \
+    test_##TYPE (r, a, b, c, N); \
+    for (int i = 0; i < N; ++i) \
+      { \
+        TYPE expected = a[i] < 20 ? ABS (b[i] - c[i]) : 8; \
+        if (r[i] != expected) \
+          __builtin_abort (); \
+        asm volatile ("" ::: "memory"); \
+      } \
+  }
+
+int
+main (void)
+{
+  TEST_ALL (TEST_LOOP)
+  return 0;
+}
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_5.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include <stdint.h>
+
+#define DEF_LOOP(TYPE, ABS) \
+  void __attribute__ ((noinline, noclone)) \
+  test_##TYPE (TYPE *__restrict r, TYPE *__restrict a, \
+               TYPE *__restrict b, TYPE *__restrict c, \
+               int n) \
+  { \
+    for (int i = 0; i < n; ++i) \
+      r[i] = a[i] < 20 ? ABS (b[i] - c[i]) : 0.0; \
+  }
+
+#define TEST_ALL(T) \
+  T (_Float16, __builtin_fabsf16) \
+  T (float, __builtin_fabsf) \
+  T (double, __builtin_fabs)
+
+TEST_ALL (DEF_LOOP)
+
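+/* Each false lane is zero, so ideally a zeroing MOVPRFX would set up the
+   destination before each FABD; see the note below for why that doesn't
+   happen yet and the corresponding scans are xfailed.  */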
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.h, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.s, p[0-7]/m,} 1 } } */
+/* { dg-final { scan-assembler-times {\tfabd\tz[0-9]+\.d, p[0-7]/m,} 1 } } */
+
+/* Really we should be able to use MOVPRFX /Z here, but at the moment
+ we're relying on combine to merge a SEL and an arithmetic operation,
+ and the SEL doesn't allow zero operands. */
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.h, p[0-7]/z, z[0-9]+\.h\n} 1 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.s, p[0-7]/z, z[0-9]+\.s\n} 1 { xfail *-*-* } } } */
+/* { dg-final { scan-assembler-times {\tmovprfx\tz[0-9]+\.d, p[0-7]/z, z[0-9]+\.d\n} 1 { xfail *-*-* } } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^,]*z} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} { xfail *-*-* } } } */
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/cond_fabd_5_run.c
+/* { dg-do run { target aarch64_sve_hw } } */
+/* { dg-options "-O2 -ftree-vectorize -fno-trapping-math" } */
+
+#include "cond_fabd_5.c"
+
+#define N 99
+
+#define TEST_LOOP(TYPE, ABS) \
+  { \
+    TYPE r[N], a[N], b[N], c[N]; \
+    for (int i = 0; i < N; ++i) \
+      { \
+        a[i] = (i & 1 ? i : 3 * i); \
+        b[i] = (i >> 4) << (i & 15); \
+        c[i] = ((i + 2) % 3) * (i + 1); \
+        asm volatile ("" ::: "memory"); \
+      } \
+    test_##TYPE (r, a, b, c, N); \
+    for (int i = 0; i < N; ++i) \
+      { \
+        TYPE expected = a[i] < 20 ? ABS (b[i] - c[i]) : 0; \
+        if (r[i] != expected) \
+          __builtin_abort (); \
+        asm volatile ("" ::: "memory"); \
+      } \
+  }
+
+int
+main (void)
+{
+  TEST_ALL (TEST_LOOP)
+  return 0;
+}