AVX-512. Add widening pmov.
Author:     Alexander Ivchenko <alexander.ivchenko@intel.com>
AuthorDate: Wed, 24 Sep 2014 08:00:35 +0000
Commit:     Kirill Yukhin <kyukhin@gcc.gnu.org>
CommitDate: Wed, 24 Sep 2014 08:00:35 +0000
gcc/
* config/i386/sse.md
(define_insn "avx2_<code>v16qiv16hi2<mask_name>"): Add masking.
(define_insn "avx512bw_<code>v32qiv32hi2<mask_name>"): New.
(define_insn "sse4_1_<code>v8qiv8hi2<mask_name>"): Add masking.
(define_insn "avx2_<code>v8qiv8si2<mask_name>"): Ditto.
(define_insn "sse4_1_<code>v4qiv4si2<mask_name>"): Ditto.
(define_insn "avx2_<code>v8hiv8si2<mask_name>"): Ditto.
(define_insn "sse4_1_<code>v4hiv4si2<mask_name>"): Ditto.
(define_insn "avx2_<code>v4qiv4di2<mask_name>"): Ditto.
(define_insn "sse4_1_<code>v2qiv2di2<mask_name>"): Ditto.
(define_insn "avx2_<code>v4hiv4di2<mask_name>"): Ditto.
(define_insn "sse4_1_<code>v2hiv2di2<mask_name>"): Ditto.
(define_insn "avx2_<code>v4siv4di2<mask_name>"): Ditto.
(define_insn "sse4_1_<code>v2siv2di2<mask_name>"): Ditto.

Co-Authored-By: Andrey Turetskiy <andrey.turetskiy@intel.com>
Co-Authored-By: Anna Tikhonova <anna.tikhonova@intel.com>
Co-Authored-By: Ilya Tocar <ilya.tocar@intel.com>
Co-Authored-By: Ilya Verbin <ilya.verbin@intel.com>
Co-Authored-By: Kirill Yukhin <kirill.yukhin@intel.com>
Co-Authored-By: Maxim Kuznetsov <maxim.kuznetsov@intel.com>
Co-Authored-By: Michael Zolotukhin <michael.v.zolotukhin@intel.com>
From-SVN: r215541
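
For illustration only (not part of the commit): a minimal C sketch of the masked
widening conversions these patterns now emit.  The intrinsics are the standard
<immintrin.h> ones; the function names are made up for the example.  Compiling
with -O2 -mavx512bw -mavx512vl should produce the masked vpmovsx forms that the
patterns above now accept.

    #include <immintrin.h>

    /* 32 bytes -> 32 words: avx512bw_<code>v32qiv32hi2 with zero-masking.  */
    __m512i
    widen_b2w (__m256i b, __mmask32 k)
    {
      return _mm512_maskz_cvtepi8_epi16 (k, b);   /* vpmovsxbw zmm{k}{z}, ymm */
    }

    /* Low 8 bytes -> 8 dwords: the AVX2 pattern used with an AVX-512VL mask.  */
    __m256i
    widen_b2d (__m128i b, __mmask8 k)
    {
      return _mm256_maskz_cvtepi8_epi32 (k, b);   /* vpmovsxbd ymm{k}{z}, xmm */
    }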

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 10dbc523fb4c227af6d02b03ee1666f09aba89ab..05e14112421c847803e57c07b129bb55ae45d3e4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,27 @@
+2014-09-24  Alexander Ivchenko  <alexander.ivchenko@intel.com>
+           Maxim Kuznetsov  <maxim.kuznetsov@intel.com>
+           Anna Tikhonova  <anna.tikhonova@intel.com>
+           Ilya Tocar  <ilya.tocar@intel.com>
+           Andrey Turetskiy  <andrey.turetskiy@intel.com>
+           Ilya Verbin  <ilya.verbin@intel.com>
+           Kirill Yukhin  <kirill.yukhin@intel.com>
+           Michael Zolotukhin  <michael.v.zolotukhin@intel.com>
+
+       * config/i386/sse.md
+       (define_insn "avx2_<code>v16qiv16hi2<mask_name>"): Add masking.
+       (define_insn "avx512bw_<code>v32qiv32hi2<mask_name>"): New.
+       (define_insn "sse4_1_<code>v8qiv8hi2<mask_name>"): Add masking.
+       (define_insn "avx2_<code>v8qiv8si2<mask_name>"): Ditto.
+       (define_insn "sse4_1_<code>v4qiv4si2<mask_name>"): Ditto.
+       (define_insn "avx2_<code>v8hiv8si2<mask_name>"): Ditto.
+       (define_insn "sse4_1_<code>v4hiv4si2<mask_name>"): Ditto.
+       (define_insn "avx2_<code>v4qiv4di2<mask_name>"): Ditto.
+       (define_insn "sse4_1_<code>v2qiv2di2<mask_name>"): Ditto.
+       (define_insn "avx2_<code>v4hiv4di2<mask_name>"): Ditto.
+       (define_insn "sse4_1_<code>v2hiv2di2<mask_name>"): Ditto.
+       (define_insn "avx2_<code>v4siv4di2<mask_name>"): Ditto.
+       (define_insn "sse4_1_<code>v2siv2di2<mask_name>"): Ditto.
+
 2014-09-24  Zhenqiang Chen  <zhenqiang.chen@arm.com>
 
        PR rtl-optimization/63210
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 9e0c0e82b433aa0da0fc376b6d1e5f02c9079a31..a7cc5adfa8875bf8ee70fb076f8006d924fecf46 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
    (set_attr "prefix" "maybe_vex")
    (set_attr "mode" "TI")])
 
-(define_insn "avx2_<code>v16qiv16hi2"
-  [(set (match_operand:V16HI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v16qiv16hi2<mask_name>"
+  [(set (match_operand:V16HI 0 "register_operand" "=v")
        (any_extend:V16HI
-         (match_operand:V16QI 1 "nonimmediate_operand" "xm")))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>bw\t{%1, %0|%0, %1}"
+         (match_operand:V16QI 1 "nonimmediate_operand" "vm")))]
+  "TARGET_AVX2 && <mask_avx512bw_condition> && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix_extra" "1")
-   (set_attr "prefix" "vex")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v8qiv8hi2"
-  [(set (match_operand:V8HI 0 "register_operand" "=x")
+(define_insn "avx512bw_<code>v32qiv32hi2<mask_name>"
+  [(set (match_operand:V32HI 0 "register_operand" "=v")
+       (any_extend:V32HI
+         (match_operand:V32QI 1 "nonimmediate_operand" "vm")))]
+  "TARGET_AVX512BW"
+  "vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+  [(set_attr "type" "ssemov")
+   (set_attr "prefix_extra" "1")
+   (set_attr "prefix" "evex")
+   (set_attr "mode" "XI")])
+
+(define_insn "sse4_1_<code>v8qiv8hi2<mask_name>"
+  [(set (match_operand:V8HI 0 "register_operand" "=v")
        (any_extend:V8HI
          (vec_select:V8QI
-           (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+           (match_operand:V16QI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)
                       (const_int 4) (const_int 5)
                       (const_int 6) (const_int 7)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>bw\t{%1, %0|%0, %q1}"
+  "TARGET_SSE4_1 && <mask_avx512bw_condition> && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "64")
    (set_attr "prefix_extra" "1")
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])
 
-(define_insn "avx2_<code>v8qiv8si2"
-  [(set (match_operand:V8SI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v8qiv8si2<mask_name>"
+  [(set (match_operand:V8SI 0 "register_operand" "=v")
        (any_extend:V8SI
          (vec_select:V8QI
-           (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+           (match_operand:V16QI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)
                       (const_int 4) (const_int 5)
                       (const_int 6) (const_int 7)]))))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>bd\t{%1, %0|%0, %q1}"
+  "TARGET_AVX2 && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>bd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix_extra" "1")
-   (set_attr "prefix" "vex")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v4qiv4si2"
-  [(set (match_operand:V4SI 0 "register_operand" "=x")
+(define_insn "sse4_1_<code>v4qiv4si2<mask_name>"
+  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (any_extend:V4SI
          (vec_select:V4QI
-           (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+           (match_operand:V16QI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>bd\t{%1, %0|%0, %k1}"
+  "TARGET_SSE4_1 && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>bd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %k1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "32")
    (set_attr "prefix_extra" "1")
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])
 
-(define_insn "avx2_<code>v8hiv8si2"
-  [(set (match_operand:V8SI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v8hiv8si2<mask_name>"
+  [(set (match_operand:V8SI 0 "register_operand" "=v")
        (any_extend:V8SI
-           (match_operand:V8HI 1 "nonimmediate_operand" "xm")))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>wd\t{%1, %0|%0, %1}"
+           (match_operand:V8HI 1 "nonimmediate_operand" "vm")))]
+  "TARGET_AVX2 && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>wd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix_extra" "1")
-   (set_attr "prefix" "vex")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v4hiv4si2"
-  [(set (match_operand:V4SI 0 "register_operand" "=x")
+(define_insn "sse4_1_<code>v4hiv4si2<mask_name>"
+  [(set (match_operand:V4SI 0 "register_operand" "=v")
        (any_extend:V4SI
          (vec_select:V4HI
-           (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+           (match_operand:V8HI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>wd\t{%1, %0|%0, %q1}"
+  "TARGET_SSE4_1 && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>wd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "64")
    (set_attr "prefix_extra" "1")
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])
 
-(define_insn "avx2_<code>v4qiv4di2"
-  [(set (match_operand:V4DI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v4qiv4di2<mask_name>"
+  [(set (match_operand:V4DI 0 "register_operand" "=v")
        (any_extend:V4DI
          (vec_select:V4QI
-           (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+           (match_operand:V16QI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)]))))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>bq\t{%1, %0|%0, %k1}"
+  "TARGET_AVX2 && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %k1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix_extra" "1")
-   (set_attr "prefix" "vex")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v2qiv2di2"
-  [(set (match_operand:V2DI 0 "register_operand" "=x")
+(define_insn "sse4_1_<code>v2qiv2di2<mask_name>"
+  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (any_extend:V2DI
          (vec_select:V2QI
-           (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+           (match_operand:V16QI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>bq\t{%1, %0|%0, %w1}"
+  "TARGET_SSE4_1 && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %w1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "16")
    (set_attr "prefix_extra" "1")
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])
 
-(define_insn "avx2_<code>v4hiv4di2"
-  [(set (match_operand:V4DI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v4hiv4di2<mask_name>"
+  [(set (match_operand:V4DI 0 "register_operand" "=v")
        (any_extend:V4DI
          (vec_select:V4HI
-           (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+           (match_operand:V8HI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)
                       (const_int 2) (const_int 3)]))))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>wq\t{%1, %0|%0, %q1}"
+  "TARGET_AVX2 && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>wq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix_extra" "1")
-   (set_attr "prefix" "vex")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v2hiv2di2"
-  [(set (match_operand:V2DI 0 "register_operand" "=x")
+(define_insn "sse4_1_<code>v2hiv2di2<mask_name>"
+  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (any_extend:V2DI
          (vec_select:V2HI
-           (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+           (match_operand:V8HI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>wq\t{%1, %0|%0, %k1}"
+  "TARGET_SSE4_1 && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>wq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %k1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "32")
    (set_attr "prefix_extra" "1")
    (set_attr "prefix" "evex")
    (set_attr "mode" "XI")])
 
-(define_insn "avx2_<code>v4siv4di2"
-  [(set (match_operand:V4DI 0 "register_operand" "=x")
+(define_insn "avx2_<code>v4siv4di2<mask_name>"
+  [(set (match_operand:V4DI 0 "register_operand" "=v")
        (any_extend:V4DI
-           (match_operand:V4SI 1 "nonimmediate_operand" "xm")))]
-  "TARGET_AVX2"
-  "vpmov<extsuffix>dq\t{%1, %0|%0, %1}"
+           (match_operand:V4SI 1 "nonimmediate_operand" "vm")))]
+  "TARGET_AVX2 && <mask_avx512vl_condition>"
+  "vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
   [(set_attr "type" "ssemov")
+   (set_attr "prefix" "maybe_evex")
    (set_attr "prefix_extra" "1")
    (set_attr "mode" "OI")])
 
-(define_insn "sse4_1_<code>v2siv2di2"
-  [(set (match_operand:V2DI 0 "register_operand" "=x")
+(define_insn "sse4_1_<code>v2siv2di2<mask_name>"
+  [(set (match_operand:V2DI 0 "register_operand" "=v")
        (any_extend:V2DI
          (vec_select:V2SI
-           (match_operand:V4SI 1 "nonimmediate_operand" "xm")
+           (match_operand:V4SI 1 "nonimmediate_operand" "vm")
            (parallel [(const_int 0) (const_int 1)]))))]
-  "TARGET_SSE4_1"
-  "%vpmov<extsuffix>dq\t{%1, %0|%0, %q1}"
+  "TARGET_SSE4_1 && <mask_avx512vl_condition>"
+  "%vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "ssememalign" "64")
    (set_attr "prefix_extra" "1")