+2014-09-25 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_simd_const_bounds): Delete.
+ * config/aarch64/aarch64-simd.md (aarch64_<sur>q<r>shl<mode>): Use
+ new predicates.
+ (aarch64_<sur>shll_n<mode>): Likewise.
+ (aarch64_<sur>shll2_n<mode>): Likewise.
+ (aarch64_<sur>shr_n<mode>): Likewise.
+ (aarch64_<sur>sra_n<mode>): Likewise.
+ (aarch64_<sur>s<lr>i_n<mode>): Likewise.
+ (aarch64_<sur>qshl<u>_n<mode>): Likewise.
+ (aarch64_<sur>q<r>shr<u>n_n<mode>): Likewise.
+ * config/aarch64/aarch64.c (aarch64_simd_const_bounds): Delete.
+ * config/aarch64/iterators.md (ve_mode): New.
+ (offsetlr): Remap to infix text for use in new predicates.
+ * config/aarch64/predicates.md (aarch64_simd_shift_imm_qi): New.
+ (aarch64_simd_shift_imm_hi): Likewise.
+ (aarch64_simd_shift_imm_si): Likewise.
+ (aarch64_simd_shift_imm_di): Likewise.
+ (aarch64_simd_shift_imm_offset_qi): Likewise.
+ (aarch64_simd_shift_imm_offset_hi): Likewise.
+ (aarch64_simd_shift_imm_offset_si): Likewise.
+ (aarch64_simd_shift_imm_offset_di): Likewise.
+ (aarch64_simd_shift_imm_bitsize_qi): Likewise.
+ (aarch64_simd_shift_imm_bitsize_hi): Likewise.
+ (aarch64_simd_shift_imm_bitsize_si): Likewise.
+ (aarch64_simd_shift_imm_bitsize_di): Likewise.
+
2014-09-25 Jiong Wang <jiong.wang@arm.com>
* shrink-wrap.c (move_insn_for_shrink_wrap): Initialize the live-in of
/* Initialize builtins for SIMD intrinsics. */
void init_aarch64_simd_builtins (void);
-void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
/* Emit code to place a AdvSIMD pair result in memory locations (with equal
(define_insn "aarch64_<sur>shll_n<mode>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
(unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (match_operand:SI 2
+ "aarch64_simd_shift_imm_bitsize_<ve_mode>" "i")]
VSHLL))]
"TARGET_SIMD"
"*
int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
if (INTVAL (operands[2]) == bit_width)
{
return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
"TARGET_SIMD"
"*
int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
if (INTVAL (operands[2]) == bit_width)
{
return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
(define_insn "aarch64_<sur>shr_n<mode>"
[(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
(unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (match_operand:SI 2
+ "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
VRSHR_N))]
"TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
- return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ "<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
[(set_attr "type" "neon_sat_shift_imm<q>")]
)
[(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
(unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
(match_operand:VSDQ_I_DI 2 "register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:SI 3
+ "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
VSRA))]
"TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
- return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ "<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
[(set_attr "type" "neon_shift_acc<q>")]
)
[(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
(unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
(match_operand:VSDQ_I_DI 2 "register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
+ (match_operand:SI 3
+ "aarch64_simd_shift_imm_<offsetlr><ve_mode>" "i")]
VSLRI))]
"TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
- bit_width - <VSLRI:offsetlr> + 1);
- return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ "s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
[(set_attr "type" "neon_shift_imm<q>")]
)
(define_insn "aarch64_<sur>qshl<u>_n<mode>"
[(set (match_operand:VSDQ_I 0 "register_operand" "=w")
(unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (match_operand:SI 2
+ "aarch64_simd_shift_imm_<ve_mode>" "i")]
VQSHL_N))]
"TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width);
- return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ "<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
[(set_attr "type" "neon_sat_shift_imm<q>")]
)
(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
[(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
(unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
+ (match_operand:SI 2
+ "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
VQSHRN_N))]
"TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
- return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+ "<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
)
error ("lane out of range");
}
-void
-aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
-{
- gcc_assert (CONST_INT_P (operand));
- HOST_WIDE_INT lane = INTVAL (operand);
-
- if (lane < low || lane >= high)
- error ("constant out of range");
-}
-
/* Emit code to place a AdvSIMD pair result in memory locations (with equal
registers). */
void
(V2DF "v2di") (DF "di")
(SF "si")])
+;; Lower case element modes (as used in shift immediate patterns).
+(define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi")
+ (V4HI "hi") (V8HI "hi")
+ (V2SI "si") (V4SI "si")
+ (DI "di") (V2DI "di")
+ (QI "qi") (HI "hi")
+ (SI "si")])
+
;; Vm for lane instructions is restricted to FP_LO_REGS.
(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
(V2SI "w") (V4SI "w") (SI "w")])
(UNSPEC_RADDHN2 "add")
(UNSPEC_RSUBHN2 "sub")])
-(define_int_attr offsetlr [(UNSPEC_SSLI "1") (UNSPEC_USLI "1")
- (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
+(define_int_attr offsetlr [(UNSPEC_SSLI "") (UNSPEC_USLI "")
+ (UNSPEC_SSRI "offset_")
+ (UNSPEC_USRI "offset_")])
;; Standard pattern names for floating-point rounding instructions.
(define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc")
{
return aarch64_const_vec_all_same_int_p (op, -1);
})
+
+;; Predicates used by the various SIMD shift operations. These
+;; fall into 3 categories.
+;; Shifts with a range 0-(bit_size - 1) (aarch64_simd_shift_imm)
+;; Shifts with a range 1-bit_size (aarch64_simd_shift_imm_offset)
+;; Shifts with a range 0-bit_size (aarch64_simd_shift_imm_bitsize)
+(define_predicate "aarch64_simd_shift_imm_qi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+
+(define_predicate "aarch64_simd_shift_imm_hi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
+
+(define_predicate "aarch64_simd_shift_imm_si"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+
+(define_predicate "aarch64_simd_shift_imm_di"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 63)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_qi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_hi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_si"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_di"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 64)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_qi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_hi"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_si"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_di"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 64)")))
+2014-09-25 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * gcc.target/aarch64/simd/vqshlb_1.c: New.
+
2014-09-25 Jiong Wang <jiong.wang@arm.com>
* gcc.target/i386/shrink_wrap_1.c: New test.
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
+extern void abort ();
+
+int
+main (int argc, char **argv)
+{
+ int8_t arg1 = -1;
+ int8_t arg2 = 127;
+ int8_t exp = -128;
+ int8_t got = vqshlb_s8 (arg1, arg2);
+
+ if (exp != got)
+ abort ();
+
+ return 0;
+}
+