+2019-10-29 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_sve_cmp_immediate_p)
+ (aarch64_simd_shift_imm_p): Accept scalars as well as vectors.
+ * config/aarch64/predicates.md (aarch64_sve_cmp_vsc_immediate)
+ (aarch64_sve_cmp_vsd_immediate): Accept "const_int", but don't
+ accept "const".
+
2019-10-29 Richard Sandiford <richard.sandiford@arm.com>

* coretypes.h (string_int_pair): New typedef.
bool
aarch64_sve_cmp_immediate_p (rtx x, bool signed_p)
{
- rtx elt;
-
- return (const_vec_duplicate_p (x, &elt)
- && CONST_INT_P (elt)
+ x = unwrap_const_vec_duplicate (x);
+ return (CONST_INT_P (x)
&& (signed_p
- ? IN_RANGE (INTVAL (elt), -16, 15)
- : IN_RANGE (INTVAL (elt), 0, 127)));
+ ? IN_RANGE (INTVAL (x), -16, 15)
+ : IN_RANGE (INTVAL (x), 0, 127)));
}
/* Return true if X is a valid immediate operand for an SVE FADD or FSUB
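For context, unwrap_const_vec_duplicate returns the duplicated element when its argument is a constant vector duplicate and returns the rtx unchanged otherwise, so the rewritten check applies a single range test to both scalar and vector immediates. A minimal usage sketch (the values and mode below are illustrative, not taken from the patch):

    rtx scalar = GEN_INT (-16);
    rtx vector = gen_const_vec_duplicate (VNx4SImode, scalar);
    bool a = aarch64_sve_cmp_immediate_p (scalar, true);          /* true: -16 is in [-16, 15].  */
    bool b = aarch64_sve_cmp_immediate_p (vector, true);          /* true: the element is unwrapped first.  */
    bool c = aarch64_sve_cmp_immediate_p (GEN_INT (128), false);  /* false: 128 is above 127.  */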
bool
aarch64_simd_shift_imm_p (rtx x, machine_mode mode, bool left)
{
+ x = unwrap_const_vec_duplicate (x);
+ if (!CONST_INT_P (x))
+ return false;
int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
if (left)
- return aarch64_const_vec_all_same_in_range_p (x, 0, bit_width - 1);
+ return IN_RANGE (INTVAL (x), 0, bit_width - 1);
else
- return aarch64_const_vec_all_same_in_range_p (x, 1, bit_width);
+ return IN_RANGE (INTVAL (x), 1, bit_width);
}
/* Return the bitmask CONST_INT to select the bits required by a zero extract
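The shift-immediate query follows the same unwrap-then-range-check pattern; note the asymmetric bounds, since a left shift by the full element width is invalid while an immediate right shift by the element width is allowed. A rough sketch, assuming 32-bit elements (the mode and shift counts are illustrative only):

    bool l_ok  = aarch64_simd_shift_imm_p (GEN_INT (31), V4SImode, /*left=*/true);   /* true: left shifts allow 0..31.  */
    bool l_bad = aarch64_simd_shift_imm_p (GEN_INT (32), V4SImode, /*left=*/true);   /* false: out of range.  */
    bool r_ok  = aarch64_simd_shift_imm_p (GEN_INT (32), V4SImode, /*left=*/false);  /* true: right shifts allow 1..32.  */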
(match_test "aarch64_float_const_representable_p (op)"))))
(define_predicate "aarch64_sve_cmp_vsc_immediate"
- (and (match_code "const,const_vector")
+ (and (match_code "const_int,const_vector")
(match_test "aarch64_sve_cmp_immediate_p (op, true)")))
(define_predicate "aarch64_sve_cmp_vsd_immediate"
- (and (match_code "const,const_vector")
+ (and (match_code "const_int,const_vector")
(match_test "aarch64_sve_cmp_immediate_p (op, false)")))
(define_predicate "aarch64_sve_index_immediate"