Add a predicate to check whether the constant offset is valid.
For the 32-bit variant: -256 <= offset <= 252
For the 64-bit variant: -512 <= offset <= 504
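For illustration only, here is a minimal standalone C sketch of the range check the
new predicate performs.  It passes the access size (4 or 8 bytes) explicitly instead
of using GCC's machine_mode, and the helper and test driver below are hypothetical,
not part of the patch; as the "scaled" in the predicate's name suggests, the sketch
also assumes the offset must be a multiple of the access size.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical standalone model of the 7-bit signed scaled check:
       the offset must be a multiple of the access size and lie in
       [-64 * size, 63 * size], i.e. -256..252 for 4-byte accesses and
       -512..504 for 8-byte accesses.  */
    static bool
    offset_7bit_signed_scaled (long long offset, int size)
    {
      return (offset >= -64 * size
              && offset < 64 * size
              && offset % size == 0);
    }

    int
    main (void)
    {
      /* 32-bit (4-byte) variant: -256 and 252 are accepted, 256 is not.  */
      printf ("%d %d %d\n",
              offset_7bit_signed_scaled (-256, 4),
              offset_7bit_signed_scaled (252, 4),
              offset_7bit_signed_scaled (256, 4));
      /* 64-bit (8-byte) variant: -512 and 504 are accepted, 508 is not
         (not a multiple of 8).  */
      printf ("%d %d %d\n",
              offset_7bit_signed_scaled (-512, 8),
              offset_7bit_signed_scaled (504, 8),
              offset_7bit_signed_scaled (508, 8));
      return 0;
    }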
2014-07-29 Renlin Li <renlin.li@arm.com>
2014-07-29 Jiong Wang <jiong.wang@arm.com>
gcc/
* config/aarch64/aarch64.c (offset_7bit_signed_scaled_p): Rename to
aarch64_offset_7bit_signed_scaled_p, remove static and use it.
* config/aarch64/aarch64-protos.h (aarch64_offset_7bit_signed_scaled_p):
Declaration.
* config/aarch64/predicates.md (aarch64_mem_pair_offset): Define new
predicate.
* config/aarch64/aarch64.md (loadwb_pair, storewb_pair): Use
aarch64_mem_pair_offset.
From-SVN: r213487
+2014-08-01 Renlin Li <renlin.li@arm.com>
+2014-08-01 Jiong Wang <jiong.wang@arm.com>
+
+ * config/aarch64/aarch64.c (offset_7bit_signed_scaled_p): Rename to
+ aarch64_offset_7bit_signed_scaled_p, remove static and use it.
+ * config/aarch64/aarch64-protos.h (aarch64_offset_7bit_signed_scaled_p):
+ Declaration.
+ * config/aarch64/predicates.md (aarch64_mem_pair_offset): Define new
+ predicate.
+ * config/aarch64/aarch64.md (loadwb_pair, storewb_pair): Use
+ aarch64_mem_pair_offset.
+
2014-08-01 Jiong Wang <jiong.wang@arm.com>
* config/aarch64/aarch64.md (loadwb_pair<GPI:mode>_<P:mode>): Fix
bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context,
enum machine_mode);
+bool aarch64_offset_7bit_signed_scaled_p (enum machine_mode, HOST_WIDE_INT);
char *aarch64_output_scalar_simd_mov_immediate (rtx, enum machine_mode);
char *aarch64_output_simd_mov_immediate (rtx, enum machine_mode, unsigned);
bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
return false;
}
-static inline bool
-offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+bool
+aarch64_offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
{
return (offset >= -64 * GET_MODE_SIZE (mode)
&& offset < 64 * GET_MODE_SIZE (mode)
We conservatively require an offset representable in either mode.
*/
if (mode == TImode || mode == TFmode)
- return (offset_7bit_signed_scaled_p (mode, offset)
+ return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
&& offset_9bit_signed_unscaled_p (mode, offset));
if (outer_code == PARALLEL)
return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
- && offset_7bit_signed_scaled_p (mode, offset));
+ && aarch64_offset_7bit_signed_scaled_p (mode, offset));
else
return (offset_9bit_signed_unscaled_p (mode, offset)
|| offset_12bit_unsigned_scaled_p (mode, offset));
We conservatively require an offset representable in either mode.
*/
if (mode == TImode || mode == TFmode)
- return (offset_7bit_signed_scaled_p (mode, offset)
+ return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
&& offset_9bit_signed_unscaled_p (mode, offset));
if (outer_code == PARALLEL)
return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
- && offset_7bit_signed_scaled_p (mode, offset));
+ && aarch64_offset_7bit_signed_scaled_p (mode, offset));
else
return offset_9bit_signed_unscaled_p (mode, offset);
}
[(parallel
[(set (match_operand:P 0 "register_operand" "=k")
(plus:P (match_operand:P 1 "register_operand" "0")
- (match_operand:P 4 "const_int_operand" "n")))
+ (match_operand:P 4 "aarch64_mem_pair_offset" "n")))
(set (match_operand:GPI 2 "register_operand" "=r")
(mem:GPI (match_dup 1)))
(set (match_operand:GPI 3 "register_operand" "=r")
[(parallel
[(set (match_operand:P 0 "register_operand" "=k")
(plus:P (match_operand:P 1 "register_operand" "0")
- (match_operand:P 4 "const_int_operand" "n")))
+ (match_operand:P 4 "aarch64_mem_pair_offset" "n")))
(set (match_operand:GPF 2 "register_operand" "=w")
(mem:GPF (match_dup 1)))
(set (match_operand:GPF 3 "register_operand" "=w")
[(parallel
[(set (match_operand:P 0 "register_operand" "=&k")
(plus:P (match_operand:P 1 "register_operand" "0")
- (match_operand:P 4 "const_int_operand" "n")))
+ (match_operand:P 4 "aarch64_mem_pair_offset" "n")))
(set (mem:GPI (plus:P (match_dup 0)
(match_dup 4)))
(match_operand:GPI 2 "register_operand" "r"))
[(parallel
[(set (match_operand:P 0 "register_operand" "=&k")
(plus:P (match_operand:P 1 "register_operand" "0")
- (match_operand:P 4 "const_int_operand" "n")))
+ (match_operand:P 4 "aarch64_mem_pair_offset" "n")))
(set (mem:GPF (plus:P (match_dup 0)
(match_dup 4)))
(match_operand:GPF 2 "register_operand" "w"))
(match_test "INTVAL (op) != 0
&& (unsigned) exact_log2 (INTVAL (op)) < 64")))
+(define_predicate "aarch64_mem_pair_offset"
+ (and (match_code "const_int")
+ (match_test "aarch64_offset_7bit_signed_scaled_p (mode, INTVAL (op))")))
+
(define_predicate "aarch64_mem_pair_operand"
(and (match_code "mem")
(match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), PARALLEL,