+2019-05-07 Alejandro Martinez <alejandro.martinezvicente@arm.com>
+
+ * config/aarch64/aarch64-sve.md (<su>abd<mode>_3): New define_expand.
+ (aarch64_<su>abd<mode>_3): Likewise.
+ (*aarch64_<su>abd<mode>_3): New define_insn.
+ (<sur>sad<vsi2qi>): New define_expand.
+ * config/aarch64/iterators.md: Add MAX_OPP attribute.
+ * tree-vect-loop.c (use_mask_by_cond_expr_p): Add SAD_EXPR.
+ (build_vect_cond_expr): Likewise.
+
2019-05-07 Uroš Bizjak <ubizjak@gmail.com>
* cfgexpand.c (asm_clobber_reg_is_valid): Reject
movprfx\t%0, %3\;<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>"
[(set_attr "movprfx" "*,yes")]
)
+
+;; Helper expander for aarch64_<su>abd<mode>_3 to save the callers
+;; the hassle of constructing the other arm of the MINUS.
+;; It forces an all-true predicate into a register and builds the
+;; opposing arm from the same two operands via gen_rtx_<MAX_OPP>
+;; (MAX_OPP is defined in iterators.md, not visible here -- presumably
+;; it maps each max code to the matching min code; confirm there),
+;; then defers to the predicated ABD insn below.
+(define_expand "<su>abd<mode>_3"
+ [(use (match_operand:SVE_I 0 "register_operand"))
+ (USMAX:SVE_I (match_operand:SVE_I 1 "register_operand")
+ (match_operand:SVE_I 2 "register_operand"))]
+ "TARGET_SVE"
+ {
+ rtx pred = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ rtx other_arm = gen_rtx_<MAX_OPP> (<MODE>mode, operands[1], operands[2]);
+ emit_insn (gen_aarch64_<su>abd<mode>_3 (operands[0], pred, operands[1],
+ operands[2], other_arm));
+ DONE;
+ }
+)
+
+;; Predicated integer absolute difference.
+;; The absolute difference is expressed as max (x, y) - min (x, y);
+;; the min arm is matched through the "aarch64_<max_opp>" operator
+;; predicate over duplicates of the same two operands, so the whole
+;; MINUS collapses to a single [SU]ABD instruction.
+;; Alternative 0 ties operand 2 to the destination; alternative 1
+;; copies it in first with MOVPRFX (see the movprfx attribute).
+(define_insn "aarch64_<su>abd<mode>_3"
+ [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ (minus:SVE_I
+ (USMAX:SVE_I
+ (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 3 "register_operand" "w, w"))
+ (match_operator 4 "aarch64_<max_opp>"
+ [(match_dup 2)
+ (match_dup 3)]))]
+ UNSPEC_MERGE_PTRUE))]
+ "TARGET_SVE"
+ "@
+ <su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ movprfx\t%0, %2\;<su>abd\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ [(set_attr "movprfx" "*,yes")]
+)
+
+;; Emit a sequence to produce a sum-of-absolute-differences of the inputs in
+;; operands 1 and 2. The sequence also has to perform a widening reduction of
+;; the difference into a vector and accumulate that into operand 3 before
+;; copying that into the result operand 0.
+;; Perform that with a sequence of:
+;; MOV ones.b, #1
+;; [SU]ABD diff.b, p0/m, op1.b, op2.b
+;; MOVPRFX op0, op3 // If necessary
+;; UDOT op0.s, diff.b, ones.b
+;;
+;; Note that UDOT is used for both the signed and the unsigned variant:
+;; the ABD result is an absolute difference and therefore non-negative,
+;; so an unsigned dot product against an all-ones vector sums it
+;; correctly regardless of the input signedness.
+
+(define_expand "<sur>sad<vsi2qi>"
+ [(use (match_operand:SVE_SDI 0 "register_operand"))
+ (unspec:<VSI2QI> [(use (match_operand:<VSI2QI> 1 "register_operand"))
+ (use (match_operand:<VSI2QI> 2 "register_operand"))] ABAL)
+ (use (match_operand:SVE_SDI 3 "register_operand"))]
+ "TARGET_SVE"
+ {
+ rtx ones = force_reg (<VSI2QI>mode, CONST1_RTX (<VSI2QI>mode));
+ rtx diff = gen_reg_rtx (<VSI2QI>mode);
+ emit_insn (gen_<sur>abd<vsi2qi>_3 (diff, operands[1], operands[2]));
+ emit_insn (gen_udot_prod<vsi2qi> (operands[0], diff, ones, operands[3]));
+ DONE;
+ }
+)
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+/* Define a sum-of-absolute-differences kernel accumulating TYPE2
+   element differences into a TYPE1 sum; the vectorizer is expected to
+   turn the loop body into [SU]ABD + UDOT for SVE.  */
+#define DEF_SAD(TYPE1, TYPE2) \
+TYPE1 __attribute__ ((noinline, noclone)) \
+sum_abs_##TYPE1##_##TYPE2 (TYPE2 *restrict x, TYPE2 *restrict y, int n) \
+{ \
+ TYPE1 sum = 0; \
+ for (int i = 0; i < n; i++) \
+ { \
+ sum += __builtin_abs (x[i] - y[i]); \
+ } \
+ return sum; \
+}
+
+/* One unsigned and one signed variant per element width.  */
+DEF_SAD(int32_t, uint8_t)
+DEF_SAD(int32_t, int8_t)
+DEF_SAD(int64_t, uint16_t)
+DEF_SAD(int64_t, int16_t)
+
+/* Each element width should produce one UABD and one SABD, and both
+   feed the same unsigned dot-product accumulation -- hence two UDOTs
+   per width and no SDOT.  */
+/* { dg-final { scan-assembler-times {\tuabd\tz[0-9]+\.b, p[0-7]/m, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsabd\tz[0-9]+\.b, p[0-7]/m, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tudot\tz[0-9]+\.s, z[0-9]+\.b, z[0-9]+\.b\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuabd\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsabd\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h, z[0-9]+\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tudot\tz[0-9]+\.d, z[0-9]+\.h, z[0-9]+\.h\n} 2 } } */