[AArch64] Tighten predicates on SIMD shift intrinsics
author     James Greenhalgh <james.greenhalgh@arm.com>
           Thu, 25 Sep 2014 16:54:38 +0000 (16:54 +0000)
committer  James Greenhalgh <jgreenhalgh@gcc.gnu.org>
           Thu, 25 Sep 2014 16:54:38 +0000 (16:54 +0000)
gcc/

* config/aarch64/aarch64-protos.h (aarch64_simd_const_bounds): Delete.
* config/aarch64/aarch64-simd.md (aarch64_<sur>q<r>shl<mode>): Use
new predicates.
(aarch64_<sur>shll2_n<mode>): Likewise.
(aarch64_<sur>shr_n<mode>): Likewise.
(aarch64_<sur>sra_n<mode>): Likewise.
(aarch64_<sur>s<lr>i_n<mode>): Likewise.
(aarch64_<sur>qshl<u>_n<mode>): Likewise.
* config/aarch64/aarch64.c (aarch64_simd_const_bounds): Delete.
* config/aarch64/iterators.md (ve_mode): New.
(offsetlr): Remap to infix text for use in new predicates.
* config/aarch64/predicates.md (aarch64_simd_shift_imm_qi): New.
(aarch64_simd_shift_imm_hi): Likewise.
(aarch64_simd_shift_imm_si): Likewise.
(aarch64_simd_shift_imm_di): Likewise.
(aarch64_simd_shift_imm_offset_qi): Likewise.
(aarch64_simd_shift_imm_offset_hi): Likewise.
(aarch64_simd_shift_imm_offset_si): Likewise.
(aarch64_simd_shift_imm_offset_di): Likewise.
(aarch64_simd_shift_imm_bitsize_qi): Likewise.
(aarch64_simd_shift_imm_bitsize_hi): Likewise.
(aarch64_simd_shift_imm_bitsize_si): Likewise.
(aarch64_simd_shift_imm_bitsize_di): Likewise.

gcc/testsuite/

* gcc.target/aarch64/simd/vqshlb_1.c: New.

From-SVN: r215612
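As a quick illustration of what the tightened predicates mean at the source level (a sketch, not part of the commit; it assumes the usual arm_neon.h mapping of vrshr_n_s8 onto the rounding shift-right pattern changed below): the shift immediate must now satisfy a per-element-size predicate, so for byte elements a rounding shift right accepts counts 1 through 8, and any other constant simply fails to match the insn pattern instead of reaching an output-time bounds check.

#include <arm_neon.h>

int8x8_t
round_shift_bytes (int8x8_t x)
{
  /* 8 is the largest count aarch64_simd_shift_imm_offset_qi (1..8) accepts;
     0 or 9 would not match the operand predicate.  */
  return vrshr_n_s8 (x, 8);
}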

gcc/ChangeLog
gcc/config/aarch64/aarch64-protos.h
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64.c
gcc/config/aarch64/iterators.md
gcc/config/aarch64/predicates.md
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c [new file with mode: 0644]

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c3533ef2c15153c4296892eef38bc264af71961e..8d2c5dd1210dc127b19548c2c2e6ec822a9a9bc5 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,29 @@
+2014-09-25  James Greenhalgh  <james.greenhalgh@arm.com>
+
+       * config/aarch64/aarch64-protos.h (aarch64_simd_const_bounds): Delete.
+       * config/aarch64/aarch64-simd.md (aarch64_<sur>q<r>shl<mode>): Use
+       new predicates.
+       (aarch64_<sur>shll2_n<mode>): Likewise.
+       (aarch64_<sur>shr_n<mode>): Likewise.
+       (aarch64_<sur>sra_n<mode>): Likewise.
+       (aarch64_<sur>s<lr>i_n<mode>): Likewise.
+       (aarch64_<sur>qshl<u>_n<mode>): Likewise.
+       * config/aarch64/aarch64.c (aarch64_simd_const_bounds): Delete.
+       * config/aarch64/iterators.md (ve_mode): New.
+       (offsetlr): Remap to infix text for use in new predicates.
+       * config/aarch64/predicates.md (aarch64_simd_shift_imm_qi): New.
+       (aarch64_simd_shift_imm_hi): Likewise.
+       (aarch64_simd_shift_imm_si): Likewise.
+       (aarch64_simd_shift_imm_di): Likewise.
+       (aarch64_simd_shift_imm_offset_qi): Likewise.
+       (aarch64_simd_shift_imm_offset_hi): Likewise.
+       (aarch64_simd_shift_imm_offset_si): Likewise.
+       (aarch64_simd_shift_imm_offset_di): Likewise.
+       (aarch64_simd_shift_imm_bitsize_qi): Likewise.
+       (aarch64_simd_shift_imm_bitsize_hi): Likewise.
+       (aarch64_simd_shift_imm_bitsize_si): Likewise.
+       (aarch64_simd_shift_imm_bitsize_di): Likewise.
+
 2014-09-25  Jiong Wang  <jiong.wang@arm.com>
 
        * shrink-wrap.c (move_insn_for_shrink_wrap): Initialize the live-in of
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index e32ef645b68d4f367cc92f3953967c5a8ff81a25..b5f53d21cdf3bc7672338b9e27dbe56242369a24 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -256,7 +256,6 @@ void aarch64_emit_call_insn (rtx);
 /* Initialize builtins for SIMD intrinsics.  */
 void init_aarch64_simd_builtins (void);
 
-void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
 void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
 
 /* Emit code to place a AdvSIMD pair result in memory locations (with equal
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 493e88628c2a7ef2c4f87031d86d1a5edcbca06b..8b7923e4f39210ce19e4a54e936922a3c579bd44 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
 (define_insn "aarch64_<sur>shll_n<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
        (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
-                        (match_operand:SI 2 "immediate_operand" "i")]
+                        (match_operand:SI 2
+                          "aarch64_simd_shift_imm_bitsize_<ve_mode>" "i")]
                          VSHLL))]
   "TARGET_SIMD"
   "*
   int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
   if (INTVAL (operands[2]) == bit_width)
   {
     return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
   "TARGET_SIMD"
   "*
   int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
   if (INTVAL (operands[2]) == bit_width)
   {
     return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
 (define_insn "aarch64_<sur>shr_n<mode>"
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
         (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
-                          (match_operand:SI 2 "immediate_operand" "i")]
+                          (match_operand:SI 2
+                            "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
                          VRSHR_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-  return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  "<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm<q>")]
 )
 
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
        (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
                       (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-                       (match_operand:SI 3 "immediate_operand" "i")]
+                       (match_operand:SI 3
+                        "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
                       VSRA))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
-  return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  "<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
   [(set_attr "type" "neon_shift_acc<q>")]
 )
 
   [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
        (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
                       (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-                       (match_operand:SI 3 "immediate_operand" "i")]
+                       (match_operand:SI 3
+                        "aarch64_simd_shift_imm_<offsetlr><ve_mode>" "i")]
                       VSLRI))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
-                             bit_width - <VSLRI:offsetlr> + 1);
-  return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+  "s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3"
   [(set_attr "type" "neon_shift_imm<q>")]
 )
 
 (define_insn "aarch64_<sur>qshl<u>_n<mode>"
   [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
        (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
-                      (match_operand:SI 2 "immediate_operand" "i")]
+                      (match_operand:SI 2
+                        "aarch64_simd_shift_imm_<ve_mode>" "i")]
                       VQSHL_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 0, bit_width);
-  return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+  "<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm<q>")]
 )
 
 (define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
   [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
         (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
-                           (match_operand:SI 2 "immediate_operand" "i")]
+                           (match_operand:SI 2
+                             "aarch64_simd_shift_imm_offset_<ve_mode>" "i")]
                           VQSHRN_N))]
   "TARGET_SIMD"
-  "*
-  int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-  aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-  return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+  "<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2"
   [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
 )
 
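To make the three immediate-range families used above concrete, here is a small C sketch (not from the commit, and assuming the usual arm_neon.h intrinsic-to-builtin mapping) exercising one pattern from each family for byte elements:

#include <arm_neon.h>

int16x8_t
shift_imm_ranges (int8x8_t a, int8x8_t b)
{
  int8x8_t q = vqshl_n_s8 (a, 7);     /* qshl<u>_n: counts 0..7, aarch64_simd_shift_imm_qi.  */
  int8x8_t r = vrsra_n_s8 (q, b, 8);  /* sra_n:     counts 1..8, aarch64_simd_shift_imm_offset_qi.  */
  return vshll_n_s8 (r, 8);           /* shll_n:    counts 0..8, aarch64_simd_shift_imm_bitsize_qi;
                                         a count equal to the element width emits SHLL.  */
}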
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 4e0cba8da740ea2f64ee3ed31354fd29c093032c..68c78f6430600f1c11c9a3ad88552f297bd0d17b 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -7988,16 +7988,6 @@ aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
     error ("lane out of range");
 }
 
-void
-aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
-{
-  gcc_assert (CONST_INT_P (operand));
-  HOST_WIDE_INT lane = INTVAL (operand);
-
-  if (lane < low || lane >= high)
-    error ("constant out of range");
-}
-
 /* Emit code to place a AdvSIMD pair result in memory locations (with equal
    registers).  */
 void
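The deleted helper enforced a half-open interval [low, high) at output time, whereas the new predicates express the same bounds as inclusive IN_RANGE tests at match time; for instance, the old call with (1, bit_width + 1) corresponds to IN_RANGE (INTVAL (op), 1, bit_width). A plain-C restatement of that correspondence (illustrative only, not GCC code):

#include <stdbool.h>

/* Old style: valid if inside the half-open interval [low, high).  */
static bool
old_const_bounds_ok (long val, long low, long high)
{
  return val >= low && val < high;
}

/* New style: IN_RANGE is inclusive at both ends, so the old
   (1, bit_width + 1) bounds become IN_RANGE (val, 1, bit_width).  */
static bool
new_in_range_ok (long val, long min, long max)
{
  return val >= min && val <= max;
}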
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index daa5d9f70963208bec31f749e760b7324f579513..efd006f83619405190400ddd0c89834208e15480 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
                                (V2DF "v2di") (DF    "di")
                                (SF   "si")])
 
+;; Lower case element modes (as used in shift immediate patterns).
+(define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi")
+                          (V4HI "hi") (V8HI  "hi")
+                          (V2SI "si") (V4SI  "si")
+                          (DI   "di") (V2DI  "di")
+                          (QI   "qi") (HI    "hi")
+                          (SI   "si")])
+
 ;; Vm for lane instructions is restricted to FP_LO_REGS.
 (define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
                       (V2SI "w") (V4SI "w") (SI "w")])
                         (UNSPEC_RADDHN2 "add")
                         (UNSPEC_RSUBHN2 "sub")])
 
-(define_int_attr offsetlr [(UNSPEC_SSLI        "1") (UNSPEC_USLI "1")
-                          (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
+(define_int_attr offsetlr [(UNSPEC_SSLI "") (UNSPEC_USLI "")
+                          (UNSPEC_SSRI "offset_")
+                          (UNSPEC_USRI "offset_")])
 
 ;; Standard pattern names for floating-point rounding instructions.
 (define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc")
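With offsetlr remapped to an infix, aarch64_simd_shift_imm_<offsetlr><ve_mode> resolves to aarch64_simd_shift_imm_<ve_mode> for SLI (counts 0 to bits-1) and to aarch64_simd_shift_imm_offset_<ve_mode> for SRI (counts 1 to bits), matching the old per-unspec offsets. A small usage sketch of the corresponding intrinsics at their boundary counts for byte elements (illustrative, assuming the standard arm_neon.h definitions):

#include <arm_neon.h>

int8x16_t
insert_shifts (int8x16_t a, int8x16_t b)
{
  int8x16_t l = vsliq_n_s8 (a, b, 7);   /* SLI: immediate must be in 0..7.  */
  int8x16_t r = vsriq_n_s8 (a, b, 8);   /* SRI: immediate must be in 1..8.  */
  return veorq_s8 (l, r);
}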
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index 8191169e89b1eaf04c00ea709af70412d2cee361..d5b0b2a9d8dd8215a193e7fd8f4addb319f2f2a6 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
 {
   return aarch64_const_vec_all_same_int_p (op, -1);
 })
+
+;; Predicates used by the various SIMD shift operations.  These
+;; fall in to 3 categories.
+;;   Shifts with a range 0-(bit_size - 1) (aarch64_simd_shift_imm)
+;;   Shifts with a range 1-bit_size (aarch64_simd_shift_imm_offset)
+;;   Shifts with a range 0-bit_size (aarch64_simd_shift_imm_bitsize)
+(define_predicate "aarch64_simd_shift_imm_qi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+
+(define_predicate "aarch64_simd_shift_imm_hi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
+
+(define_predicate "aarch64_simd_shift_imm_si"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+
+(define_predicate "aarch64_simd_shift_imm_di"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_qi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 1, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_hi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 1, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_si"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 1, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_offset_di"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 1, 64)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_qi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 8)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_hi"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 16)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_si"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 32)")))
+
+(define_predicate "aarch64_simd_shift_imm_bitsize_di"
+  (and (match_code "const_int")
+       (match_test "IN_RANGE (INTVAL (op), 0, 64)")))
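Restated in plain C for byte (QI) elements, with the pattern family that uses each range noted alongside (helper names here are descriptive, not GCC internals); the hi/si/di variants follow the element bit width in the same way:

#include <stdbool.h>

static bool shift_imm_qi (long n)         { return n >= 0 && n <= 7; } /* 0..bits-1: qshl<u>_n, sli_n.  */
static bool shift_imm_offset_qi (long n)  { return n >= 1 && n <= 8; } /* 1..bits:   shr_n, sra_n, sri_n, q<r>shr<u>n_n.  */
static bool shift_imm_bitsize_qi (long n) { return n >= 0 && n <= 8; } /* 0..bits:   shll_n, shll2_n.  */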
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 9c7c3c9169d7a497f0485a83782676560ea7eff0..1ac6f0e03b1bb4f916f2ea0603315433cba54120 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2014-09-25  James Greenhalgh  <james.greenhalgh@arm.com>
+
+       * gcc.target/aarch64/simd/vqshlb_1.c: New.
+
 2014-09-25  Jiong Wang  <jiong.wang@arm.com>
 
        * gcc.target/i386/shrink_wrap_1.c: New test.
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c
new file mode 100644
index 0000000..ae741de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vqshlb_1.c
@@ -0,0 +1,21 @@
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
+extern void abort ();
+
+int
+main (int argc, char **argv)
+{
+  int8_t arg1 = -1;
+  int8_t arg2 = 127;
+  int8_t exp = -128;
+  int8_t got = vqshlb_s8 (arg1, arg2);
+
+  if (exp != got)
+    abort ();
+
+  return 0;
+}
+
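For reference, the expected value in the test follows from the saturating semantics of SQSHL: shifting -1 left by 127 is far outside the int8_t range, so the result clamps to the type minimum, -128. A rough C model of the non-negative-shift case (a sketch, not the architectural pseudocode; negative counts, which SQSHL treats as right shifts, are not modelled):

#include <stdint.h>

static int8_t
sqshlb_model (int8_t val, int8_t shift)
{
  if (shift <= 0)
    return val;                 /* Zero shift; negative counts not modelled.  */
  if (shift >= 8)               /* Any nonzero value overflows the byte range.  */
    return val < 0 ? INT8_MIN : (val > 0 ? INT8_MAX : 0);
  int32_t wide = (int32_t) val * (1 << shift);  /* Widen, then clamp.  */
  if (wide > INT8_MAX)
    return INT8_MAX;
  if (wide < INT8_MIN)
    return INT8_MIN;
  return (int8_t) wide;
}

/* sqshlb_model (-1, 127) == -128, matching the value checked above.  */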