+2019-08-14 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_print_operand): Allow %e to
+ take the equivalent mask, as well as a bit count.
+ * config/aarch64/predicates.md (aarch64_sve_uxtb_immediate)
+ (aarch64_sve_uxth_immediate, aarch64_sve_uxt_immediate)
+ (aarch64_sve_pred_and_operand): New predicates.
+ * config/aarch64/iterators.md (sve_pred_int_rhs2_operand): New
+ code attribute.
+ * config/aarch64/aarch64-sve.md
+ (cond_<SVE_INT_BINARY:optab><SVE_I:mode>): Use it.
+ (*cond_uxt<mode>_2, *cond_uxt<mode>_any): New patterns.
+
2019-08-14 Richard Sandiford <richard.sandiford@arm.com>
* config/aarch64/aarch64-sve.md
;;
;; == Unary arithmetic
;; ---- [INT] General unary arithmetic corresponding to rtx codes
+;; ---- [INT] Zero extension
;; ---- [INT] Logical inverse
;; ---- [FP] General unary arithmetic corresponding to unspecs
;; ---- [PRED] Inverse
[(set_attr "movprfx" "*,yes,yes")]
)
+;; -------------------------------------------------------------------------
+;; ---- [INT] Zero extension
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - UXTB
+;; - UXTH
+;; - UXTW
+;; -------------------------------------------------------------------------
+
+;; Match UXT[BHW] as a conditional AND of a constant, merging with the
+;; first input.
+;;
+;; Operand 3 is a 0xff/0xffff/0xffffffff mask (aarch64_sve_uxt_immediate);
+;; the %e3 output modifier prints it as the matching b/h/w size suffix.
+;; The "else" value of the UNSPEC_SEL is operand 2 itself (match_dup 2),
+;; so inactive lanes keep the unextended input and a single predicated
+;; UXT implements the whole select.  When operand 0 cannot be tied to
+;; operand 2 (second alternative), a MOVPRFX copies it first.
+(define_insn "*cond_uxt<mode>_2"
+ [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ (and:SVE_I
+ (match_operand:SVE_I 2 "register_operand" "0, w")
+ (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
+ (match_dup 2)]
+ UNSPEC_SEL))]
+ "TARGET_SVE"
+ "@
+ uxt%e3\t%0.<Vetype>, %1/m, %0.<Vetype>
+ movprfx\t%0, %2\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>"
+ [(set_attr "movprfx" "*,yes")]
+)
+
+;; Match UXT[BHW] as a conditional AND of a constant, merging with an
+;; independent value.
+;;
+;; Operand 3 is a 0xff/0xffff/0xffffffff mask, printed as the b/h/w size
+;; suffix by %e3.  Operand 4 supplies the inactive lanes: tied to
+;; operand 0 (first alternative), zero (Dz, handled with a zeroing
+;; MOVPRFX), or another register (copied with MOVPRFX).  The
+;; !rtx_equal_p (operands[2], operands[4]) condition leaves the
+;; merging-with-first-input case to *cond_uxt<mode>_2.
+;;
+;; The earlyclobber isn't needed for the first alternative, but omitting
+;; it would only help the case in which operands 2 and 4 are the same,
+;; which is handled above rather than here.  Marking all the alternatives
+;; as early-clobber helps to make the instruction more regular to the
+;; register allocator.
+(define_insn "*cond_uxt<mode>_any"
+ [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (and:SVE_I
+ (match_operand:SVE_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ UNSPEC_SEL))]
+ "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
+ "@
+ uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ movprfx\t%0, %4\;uxt%e3\t%0.<Vetype>, %1/m, %2.<Vetype>
+ [(set_attr "movprfx" "*,yes,yes")]
+)
+
;; -------------------------------------------------------------------------
;; ---- [INT] Logical inverse
;; -------------------------------------------------------------------------
[(match_operand:<VPRED> 1 "register_operand")
(SVE_INT_BINARY:SVE_I
(match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "register_operand"))
+ (match_operand:SVE_I 3 "<sve_pred_int_rhs2_operand>"))
(match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
'D': Take the duplicated element in a vector constant
and print it as an unsigned integer, in decimal.
'e': Print the sign/zero-extend size as a character 8->b,
- 16->h, 32->w.
+ 16->h, 32->w. Can also be used for masks:
+ 0xff->b, 0xffff->h, 0xffffffff->w.
'I': If the operand is a duplicated vector constant,
replace it with the duplicated scalar. If the
operand is then a floating-point constant, replace
case 'e':
{
- int n;
-
- if (!CONST_INT_P (x)
- || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
+ x = unwrap_const_vec_duplicate (x);
+ if (!CONST_INT_P (x))
{
output_operand_lossage ("invalid operand for '%%%c'", code);
return;
}
- switch (n)
+ HOST_WIDE_INT val = INTVAL (x);
+ if ((val & ~7) == 8 || val == 0xff)
+ fputc ('b', f);
+ else if ((val & ~7) == 16 || val == 0xffff)
+ fputc ('h', f);
+ else if ((val & ~7) == 32 || val == 0xffffffff)
+ fputc ('w', f);
+ else
{
- case 3:
- fputc ('b', f);
- break;
- case 4:
- fputc ('h', f);
- break;
- case 5:
- fputc ('w', f);
- break;
- default:
output_operand_lossage ("invalid operand for '%%%c'", code);
return;
}
(umax "D")
(umin "D")])
+;; The predicate to use for the second input operand in a cond_<optab><mode>
+;; pattern.  Only AND also accepts the UXT[BHW]-style extension masks
+;; (through aarch64_sve_pred_and_operand); every other code requires a
+;; register.
+(define_code_attr sve_pred_int_rhs2_operand
+ [(plus "register_operand")
+ (minus "register_operand")
+ (mult "register_operand")
+ (smax "register_operand")
+ (umax "register_operand")
+ (smin "register_operand")
+ (umin "register_operand")
+ (and "aarch64_sve_pred_and_operand")
+ (ior "register_operand")
+ (xor "register_operand")])
+
;; -------------------------------------------------------------------
;; Int Iterators.
;; -------------------------------------------------------------------
(and (match_code "const,const_vector")
(match_test "aarch64_sve_inc_dec_immediate_p (op)")))
+;; A vector constant in which every element is 0xff, usable as the AND
+;; mask of a predicated UXTB.  The element must be wider than the mask
+;; (an 8-bit element ANDed with 0xff is unchanged).
+(define_predicate "aarch64_sve_uxtb_immediate"
+ (and (match_code "const_vector")
+ (match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 8")
+ (match_test "aarch64_const_vec_all_same_int_p (op, 0xff)")))
+
+;; A vector constant in which every element is 0xffff, usable as the AND
+;; mask of a predicated UXTH.  The element must be wider than the mask
+;; (a 16-bit element ANDed with 0xffff is unchanged).
+(define_predicate "aarch64_sve_uxth_immediate"
+ (and (match_code "const_vector")
+ (match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 16")
+ (match_test "aarch64_const_vec_all_same_int_p (op, 0xffff)")))
+
(define_predicate "aarch64_sve_uxtw_immediate"
(and (match_code "const_vector")
(match_test "GET_MODE_UNIT_BITSIZE (GET_MODE (op)) > 32")
(match_test "aarch64_const_vec_all_same_int_p (op, 0xffffffff)")))
+;; Any AND mask that corresponds to a UXTB, UXTH or UXTW.
+(define_predicate "aarch64_sve_uxt_immediate"
+ (ior (match_operand 0 "aarch64_sve_uxtb_immediate")
+ (match_operand 0 "aarch64_sve_uxth_immediate")
+ (match_operand 0 "aarch64_sve_uxtw_immediate")))
+
(define_predicate "aarch64_sve_logical_immediate"
(and (match_code "const,const_vector")
(match_test "aarch64_sve_bitmask_immediate_p (op)")))
(match_operand 0 "aarch64_sve_sub_arith_immediate")
(match_operand 0 "aarch64_sve_inc_dec_immediate")))
+;; The operands that a predicated AND can take as its second input:
+;; a register, or a UXT[BHW]-style extension mask.
+(define_predicate "aarch64_sve_pred_and_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_sve_uxt_immediate")))
+
(define_predicate "aarch64_sve_logical_operand"
(ior (match_operand 0 "register_operand")
(match_operand 0 "aarch64_sve_logical_immediate")))
+2019-08-14 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/cond_uxt_1.c: New test.
+ * gcc.target/aarch64/sve/cond_uxt_1_run.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_2.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_2_run.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_3.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_3_run.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_4.c: Likewise.
+ * gcc.target/aarch64/sve/cond_uxt_4_run.c: Likewise.
+
2019-08-14 Richard Sandiford <richard.sandiford@arm.com>
* gcc.target/aarch64/sve/cond_convert_1.c: New test.
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+/* The "else" value of each conditional AND is the ANDed input B[I]
+   itself, so each loop should vectorize to a single merging UXT[BHW]
+   on the loaded register, with no separate MOV, MOVPRFX or SEL
+   (checked by the scans below).  */
+
+#include <stdint.h>
+
+#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE))
+
+#define DEF_LOOP(TYPE, CONST) \
+ void __attribute__ ((noipa)) \
+ test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \
+ TYPE *restrict b) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ r[i] = a[i] > 20 ? b[i] & CONST : b[i]; \
+ }
+
+/* Every mask that is narrower than the element type, for each unsigned
+   element type from 16 to 64 bits.  */
+#define TEST_ALL(T) \
+ T (uint16_t, 0xff) \
+ \
+ T (uint32_t, 0xff) \
+ T (uint32_t, 0xffff) \
+ \
+ T (uint64_t, 0xff) \
+ T (uint64_t, 0xffff) \
+ T (uint64_t, 0xffffffff)
+
+TEST_ALL (DEF_LOOP)
+
+/* { dg-final { scan-assembler {\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */
+
+/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */
+/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \1\n} } } */
+
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \1\n} } } */
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \1\n} } } */
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtw\t\1, p[0-7]/m, \1\n} } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz} } } */
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include "cond_uxt_1.c"
+
+/* Run each loop from cond_uxt_1.c on SVE hardware and check every
+   element of the result against the equivalent scalar computation.
+   The empty volatile asm acts as an optimization barrier so that the
+   initialization of A and B cannot be folded into the code under
+   test.  */
+#define TEST_LOOP(TYPE, CONST) \
+ { \
+ TYPE r[NUM_ELEMS (TYPE)]; \
+ TYPE a[NUM_ELEMS (TYPE)]; \
+ TYPE b[NUM_ELEMS (TYPE)]; \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ { \
+ a[i] = (i & 1 ? i : 3 * i); \
+ b[i] = (i >> 4) << (i & 15); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ test_##CONST##_##TYPE (r, a, b); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ if (r[i] != (a[i] > 20 ? b[i] & CONST : b[i])) \
+ __builtin_abort (); \
+ }
+
+int main ()
+{
+ TEST_ALL (TEST_LOOP)
+ return 0;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+/* The "else" value of each conditional AND is the independent input
+   A[I], so the UXT[BHW] should write into the register that holds A,
+   merging the inactive lanes for free; no MOV, MOVPRFX or SEL should
+   be needed (checked by the scans below).  */
+
+#include <stdint.h>
+
+#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE))
+
+#define DEF_LOOP(TYPE, CONST) \
+ void __attribute__ ((noipa)) \
+ test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \
+ TYPE *restrict b) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ r[i] = a[i] > 20 ? b[i] & CONST : a[i]; \
+ }
+
+/* Every mask that is narrower than the element type, for each unsigned
+   element type from 16 to 64 bits.  */
+#define TEST_ALL(T) \
+ T (uint16_t, 0xff) \
+ \
+ T (uint32_t, 0xff) \
+ T (uint32_t, 0xffff) \
+ \
+ T (uint64_t, 0xff) \
+ T (uint64_t, 0xffff) \
+ T (uint64_t, 0xffffffff)
+
+TEST_ALL (DEF_LOOP)
+
+/* { dg-final { scan-assembler {\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x1,[^L]*\tld1h\t(z[0-9]+\.h), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */
+
+/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x1,[^L]*\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */
+/* { dg-final { scan-assembler {\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x1,[^L]*\tld1w\t(z[0-9]+\.s), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \2\n} } } */
+
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtb\t\1, p[0-7]/m, \2\n} } } */
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxth\t\1, p[0-7]/m, \2\n} } } */
+/* { dg-final { scan-assembler {\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x1,[^L]*\tld1d\t(z[0-9]+\.d), p[0-7]/z, \[x2,[^L]*\tuxtw\t\1, p[0-7]/m, \2\n} } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz} } } */
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include "cond_uxt_2.c"
+
+/* Run each loop from cond_uxt_2.c on SVE hardware and check every
+   element of the result against the equivalent scalar computation.
+   The empty volatile asm acts as an optimization barrier so that the
+   initialization of A and B cannot be folded into the code under
+   test.  */
+#define TEST_LOOP(TYPE, CONST) \
+ { \
+ TYPE r[NUM_ELEMS (TYPE)]; \
+ TYPE a[NUM_ELEMS (TYPE)]; \
+ TYPE b[NUM_ELEMS (TYPE)]; \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ { \
+ a[i] = (i & 1 ? i : 3 * i); \
+ b[i] = (i >> 4) << (i & 15); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ test_##CONST##_##TYPE (r, a, b); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ if (r[i] != (a[i] > 20 ? b[i] & CONST : a[i])) \
+ __builtin_abort (); \
+ }
+
+int main ()
+{
+ TEST_ALL (TEST_LOOP)
+ return 0;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+/* The "else" value of each conditional AND is the constant 127, so the
+   expected code is a MOVPRFX from the register holding the 127 vector
+   followed by a merging UXT[BHW]; no extra vector MOV or SEL should be
+   needed (checked by the scans below).  */
+
+#include <stdint.h>
+
+#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE))
+
+#define DEF_LOOP(TYPE, CONST) \
+ void __attribute__ ((noipa)) \
+ test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \
+ TYPE *restrict b) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ r[i] = a[i] > 20 ? b[i] & CONST : 127; \
+ }
+
+/* Every mask that is narrower than the element type, for each unsigned
+   element type from 16 to 64 bits.  */
+#define TEST_ALL(T) \
+ T (uint16_t, 0xff) \
+ \
+ T (uint32_t, 0xff) \
+ T (uint32_t, 0xffff) \
+ \
+ T (uint64_t, 0xff) \
+ T (uint64_t, 0xffff) \
+ T (uint64_t, 0xffffffff)
+
+TEST_ALL (DEF_LOOP)
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.h, p[0-7]/m, z[0-9]+\.h\n} } } */
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.s, p[0-7]/m, z[0-9]+\.s\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxth\t\1\.s, p[0-7]/m, z[0-9]+\.s\n} } } */
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtb\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxth\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+), z[0-9]+\n\tuxtw\t\1\.d, p[0-7]/m, z[0-9]+\.d\n} } } */
+
+/* { dg-final { scan-assembler-not {\tmov\tz[^\n]*z} } } */
+/* { dg-final { scan-assembler-not {\tsel\t} } } */
--- /dev/null
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include "cond_uxt_3.c"
+
+/* Run each loop from cond_uxt_3.c on SVE hardware and check every
+   element of the result against the equivalent scalar computation.
+   The empty volatile asm acts as an optimization barrier so that the
+   initialization of A and B cannot be folded into the code under
+   test.  */
+#define TEST_LOOP(TYPE, CONST) \
+ { \
+ TYPE r[NUM_ELEMS (TYPE)]; \
+ TYPE a[NUM_ELEMS (TYPE)]; \
+ TYPE b[NUM_ELEMS (TYPE)]; \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ { \
+ a[i] = (i & 1 ? i : 3 * i); \
+ b[i] = (i >> 4) << (i & 15); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ test_##CONST##_##TYPE (r, a, b); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ if (r[i] != (a[i] > 20 ? b[i] & CONST : 127)) \
+ __builtin_abort (); \
+ }
+
+int main ()
+{
+ TEST_ALL (TEST_LOOP)
+ return 0;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+/* The "else" value of each conditional AND is zero, so the expected
+   code is a zeroing (/z) MOVPRFX followed by a merging UXT[BHW]
+   (checked by the scans below).  */
+
+#include <stdint.h>
+
+#define NUM_ELEMS(TYPE) (320 / sizeof (TYPE))
+
+#define DEF_LOOP(TYPE, CONST) \
+ void __attribute__ ((noipa)) \
+ test_##CONST##_##TYPE (TYPE *restrict r, TYPE *restrict a, \
+ TYPE *restrict b) \
+ { \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ r[i] = a[i] > 20 ? b[i] & CONST : 0; \
+ }
+
+/* Every mask that is narrower than the element type, for each unsigned
+   element type from 16 to 64 bits.  */
+#define TEST_ALL(T) \
+ T (uint16_t, 0xff) \
+ \
+ T (uint32_t, 0xff) \
+ T (uint32_t, 0xffff) \
+ \
+ T (uint64_t, 0xff) \
+ T (uint64_t, 0xffff) \
+ T (uint64_t, 0xffffffff)
+
+TEST_ALL (DEF_LOOP)
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.h), (p[0-7])/z, z[0-9]+\.h\n\tuxtb\t\1, \2/m, z[0-9]+\.h\n} } } */
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.s), (p[0-7])/z, z[0-9]+\.s\n\tuxtb\t\1, \2/m, z[0-9]+\.s\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.s), (p[0-7])/z, z[0-9]+\.s\n\tuxth\t\1, \2/m, z[0-9]+\.s\n} } } */
+
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxtb\t\1, \2/m, z[0-9]+\.d\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxth\t\1, \2/m, z[0-9]+\.d\n} } } */
+/* { dg-final { scan-assembler {\tmovprfx\t(z[0-9]+\.d), (p[0-7])/z, z[0-9]+\.d\n\tuxtw\t\1, \2/m, z[0-9]+\.d\n} } } */
--- /dev/null
+/* { dg-do run { target { aarch64_sve_hw } } } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include "cond_uxt_4.c"
+
+/* Run each loop from cond_uxt_4.c on SVE hardware and check every
+   element of the result against the equivalent scalar computation.
+   The empty volatile asm acts as an optimization barrier so that the
+   initialization of A and B cannot be folded into the code under
+   test.  */
+#define TEST_LOOP(TYPE, CONST) \
+ { \
+ TYPE r[NUM_ELEMS (TYPE)]; \
+ TYPE a[NUM_ELEMS (TYPE)]; \
+ TYPE b[NUM_ELEMS (TYPE)]; \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ { \
+ a[i] = (i & 1 ? i : 3 * i); \
+ b[i] = (i >> 4) << (i & 15); \
+ asm volatile ("" ::: "memory"); \
+ } \
+ test_##CONST##_##TYPE (r, a, b); \
+ for (int i = 0; i < NUM_ELEMS (TYPE); ++i) \
+ if (r[i] != (a[i] > 20 ? b[i] & CONST : 0)) \
+ __builtin_abort (); \
+ }
+
+int main ()
+{
+ TEST_ALL (TEST_LOOP)
+ return 0;
+}