From 1aeffdce2dfe718e1337d75eb4f22c3c300df9bb Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Mon, 18 Nov 2019 15:26:07 +0000
Subject: [PATCH] LRA: handle memory constraints that accept more than "m"

LRA allows address constraints that are more relaxed than "p":

  /* Target hooks sometimes don't treat extra-constraint addresses as
     legitimate address_operands, so handle them specially.  */
  if (insn_extra_address_constraint (cn)
      && satisfies_address_constraint_p (&ad, cn))
    return change_p;

For SVE it's useful to allow the same thing for memory constraints.
The particular use case is LD1RQ, which is an SVE instruction that
addresses Advanced SIMD vector modes and that accepts some addresses
that normal Advanced SIMD moves don't.

Normally we require every memory to satisfy at least "m", which is
defined to be a memory "with any kind of address that the machine
supports in general".  However, LD1RQ is very much special-purpose:
it doesn't really have any relation to normal operations on these
modes.  Adding its addressing modes to "m" would lead to bad Advanced
SIMD optimisation decisions in passes like ivopts.  LD1RQ therefore
has a memory constraint that accepts things "m" doesn't.

2019-11-18  Richard Sandiford

gcc/
        * lra-constraints.c (valid_address_p): Take the operand and a
        constraint as argument.  If the operand is a MEM and the constraint
        is a memory constraint, check whether the eliminated form of the
        MEM already satisfies the constraint.
        (process_address_1): Update calls accordingly.

gcc/testsuite/
        * gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c: Remove XFAIL.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c: Likewise.
        * gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c: Likewise.

From-SVN: r278408
---
 gcc/ChangeLog                        |  8 ++++++
 gcc/lra-constraints.c                | 25 ++++++++++++++-----
 gcc/testsuite/ChangeLog              | 12 +++++++
 .../aarch64/sve/acle/asm/ld1rq_f16.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_f32.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_f64.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_s16.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_s32.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_s64.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_u16.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_u32.c |  2 +-
 .../aarch64/sve/acle/asm/ld1rq_u64.c |  2 +-
 12 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 6dbe6856472..eac041016ac 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,11 @@
+2019-11-18  Richard Sandiford
+
+        * lra-constraints.c (valid_address_p): Take the operand and a
+        constraint as argument.  If the operand is a MEM and the constraint
+        is a memory constraint, check whether the eliminated form of the
+        MEM already satisfies the constraint.
+        (process_address_1): Update calls accordingly.
+
 2019-11-18  Tom Tromey
 
         * doc/tm.texi: Rebuild.
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index 54b5ae5cffe..39d2d2c5421 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -388,11 +388,24 @@ address_eliminator::~address_eliminator ()
     *m_index_loc = m_index_reg;
 }
 
-/* Return true if the eliminated form of AD is a legitimate target address.  */
+/* Return true if the eliminated form of AD is a legitimate target address.
+   If OP is a MEM, AD is the address within OP, otherwise OP should be
+   ignored.  CONSTRAINT is one constraint that the operand may need
+   to meet.  */
 static bool
-valid_address_p (struct address_info *ad)
+valid_address_p (rtx op, struct address_info *ad,
+                 enum constraint_num constraint)
 {
   address_eliminator eliminator (ad);
+
+  /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more
+     forgiving than "m".  */
+  if (MEM_P (op)
+      && (insn_extra_memory_constraint (constraint)
+          || insn_extra_special_memory_constraint (constraint))
+      && constraint_satisfied_p (op, constraint))
+    return true;
+
   return valid_address_p (ad->mode, *ad->outer, ad->as);
 }
 
@@ -3397,7 +3410,7 @@ process_address_1 (int nop, bool check_only_p,
 
      All these cases involve a non-autoinc address, so there is no
     point revalidating other types.  */
-  if (ad.autoinc_p || valid_address_p (&ad))
+  if (ad.autoinc_p || valid_address_p (op, &ad, cn))
     return change_p;
 
   /* Any index existed before LRA started, so we can assume that the
@@ -3426,7 +3439,7 @@ process_address_1 (int nop, bool check_only_p,
       if (code >= 0)
        {
          *ad.inner = gen_rtx_LO_SUM (Pmode, new_reg, addr);
-         if (! valid_address_p (ad.mode, *ad.outer, ad.as))
+         if (!valid_address_p (op, &ad, cn))
            {
              /* Try to put lo_sum into register.  */
              insn = emit_insn (gen_rtx_SET
@@ -3436,7 +3449,7 @@ process_address_1 (int nop, bool check_only_p,
              if (code >= 0)
                {
                  *ad.inner = new_reg;
-                 if (! valid_address_p (ad.mode, *ad.outer, ad.as))
+                 if (!valid_address_p (op, &ad, cn))
                    {
                      *ad.inner = addr;
                      code = -1;
@@ -3531,7 +3544,7 @@ process_address_1 (int nop, bool check_only_p,
          && CONSTANT_P (XEXP (SET_SRC (set), 1)))
        {
          *ad.inner = SET_SRC (set);
-         if (valid_address_p (ad.mode, *ad.outer, ad.as))
+         if (valid_address_p (op, &ad, cn))
            {
              *ad.base_term = XEXP (SET_SRC (set), 0);
              *ad.disp_term = XEXP (SET_SRC (set), 1);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 5053583d149..163ea21842f 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,15 @@
+2019-11-18  Richard Sandiford
+
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c: Remove XFAIL.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c: Likewise.
+        * gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c: Likewise.
+
 2019-11-18  Richard Biener
 
        PR tree-optimization/92516
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c
index 4071b6d1ba3..b98a381fd1b 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f16.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_f16_base, svfloat16_t, float16_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_f16_index: { xfail *-*-* }
+** ld1rq_f16_index:
 ** ld1rqh z0\.h, p0/z, \[x0, x1, lsl 1\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c
index 25013fcf0c5..1845aa81d71 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f32.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_f32_base, svfloat32_t, float32_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_f32_index: { xfail *-*-* }
+** ld1rq_f32_index:
 ** ld1rqw z0\.s, p0/z, \[x0, x1, lsl 2\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c
index 49f8da0803f..c88f3bd23ae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_f64.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_f64_base, svfloat64_t, float64_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_f64_index: { xfail *-*-* }
+** ld1rq_f64_index:
 ** ld1rqd z0\.d, p0/z, \[x0, x1, lsl 3\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c
index c12b659c20e..bfbbff95fbd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s16.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_s16_base, svint16_t, int16_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_s16_index: { xfail *-*-* }
+** ld1rq_s16_index:
 ** ld1rqh z0\.h, p0/z, \[x0, x1, lsl 1\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c
index 8184ab80011..d31225c7e7d 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s32.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_s32_base, svint32_t, int32_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_s32_index: { xfail *-*-* }
+** ld1rq_s32_index:
 ** ld1rqw z0\.s, p0/z, \[x0, x1, lsl 2\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c
index 616ce0bfa80..c87486dfd80 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_s64.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_s64_base, svint64_t, int64_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_s64_index: { xfail *-*-* }
+** ld1rq_s64_index:
 ** ld1rqd z0\.d, p0/z, \[x0, x1, lsl 3\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c
index 1f543006c38..f7bd4480236 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u16.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_u16_base, svuint16_t, uint16_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_u16_index: { xfail *-*-* }
+** ld1rq_u16_index:
 ** ld1rqh z0\.h, p0/z, \[x0, x1, lsl 1\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c
index e2a348d00ac..d815c483f94 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u32.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_u32_base, svuint32_t, uint32_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_u32_index: { xfail *-*-* }
+** ld1rq_u32_index:
 ** ld1rqw z0\.s, p0/z, \[x0, x1, lsl 2\]
 ** ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c
index bf9d7201677..ef7b61aa54e 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ld1rq_u64.c
@@ -12,7 +12,7 @@ TEST_LOAD (ld1rq_u64_base, svuint64_t, uint64_t,
            z0 = svld1rq (p0, x0))
 
 /*
-** ld1rq_u64_index: { xfail *-*-* }
+** ld1rq_u64_index:
 ** ld1rqd z0\.d, p0/z, \[x0, x1, lsl 3\]
 ** ret
 */
-- 
2.30.2
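
For readers who want the acceptance rule in isolation: the minimal,
self-contained C sketch below models the check that valid_address_p now
performs.  The enum, struct, field and function names here are stand-ins
invented purely for illustration; the real code operates on rtx operands
and constraint_num values, as in the hunks above.

/* Toy model of the new rule: a MEM operand is accepted either because
   its address is valid for the machine "in general" (the "m" case) or
   because its own (special) memory constraint is already satisfied.
   Everything here is a stand-in, not the GCC/LRA API.  */

#include <stdbool.h>
#include <stdio.h>

enum constraint_kind { CN_REGISTER, CN_MEMORY, CN_SPECIAL_MEMORY };

struct operand
{
  bool is_mem;               /* stand-in for MEM_P (op) */
  bool address_valid_for_m;  /* stand-in for the generic address check */
  bool constraint_satisfied; /* stand-in for constraint_satisfied_p (op, cn) */
};

static bool
operand_address_ok (const struct operand *op, enum constraint_kind cn)
{
  /* New path: a MEM matching a (special) memory constraint is accepted
     even if its address is not valid "in general".  */
  if (op->is_mem
      && (cn == CN_MEMORY || cn == CN_SPECIAL_MEMORY)
      && op->constraint_satisfied)
    return true;

  /* Old path: fall back to the generic legitimate-address test.  */
  return op->address_valid_for_m;
}

int
main (void)
{
  /* An LD1RQ-like operand: its address form is rejected by the generic
     check but allowed by the instruction's own memory constraint.  */
  struct operand ld1rq_like = { true, false, true };

  printf ("accepted: %s\n",
          operand_address_ok (&ld1rq_like, CN_SPECIAL_MEMORY) ? "yes" : "no");
  return 0;
}

The design point, as the log message says, is that LD1RQ's extra
addressing modes stay out of "m", so generic passes such as ivopts never
treat them as generally available, while LRA still accepts them for the
one constraint that asks for them.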