i386: Improve avx* vector concatenation [PR93594]
author     Jakub Jelinek <jakub@redhat.com>
           Thu, 6 Feb 2020 10:08:59 +0000 (11:08 +0100)
committer  Jakub Jelinek <jakub@redhat.com>
           Thu, 6 Feb 2020 10:08:59 +0000 (11:08 +0100)
The following testcase shows that for the _mm256_set*_m128i and similar
intrinsics, we sometimes generate bad code.  All 4 routines express the
same thing, a 128-bit vector zero padded to a 256-bit vector, but only
the 3rd one actually emits the desired
  vmovdqa     %xmm0, %xmm0
insn; the others emit
  vpxor       %xmm1, %xmm1, %xmm1
  vinserti128 $0x1, %xmm1, %ymm0, %ymm0
instead.
The problem is that the cast builtins use UNSPEC_CAST, which is
simplified after reload using a splitter, but during combine it prevents
optimizations.
We do have avx_vec_concat* patterns that generate efficient code, both
for this low part + zero concatenation special case and for other cases
too, so the following define_insn_and_split just recognizes an
avx_vec_concat made of the low half of a cast and some other register.
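
For illustration, a minimal reproducer in the spirit of the new testcase
(the exact register allocation may differ):

    #include <x86intrin.h>

    __m256i
    foo (__m128i x)
    {
      /* Low 128 bits of x, zero padded to 256 bits; this used to emit
         the vpxor + vinserti128 pair, now a single vmovdqa.  */
      return _mm256_setr_m128i (x, _mm_setzero_si128 ());
    }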

2020-02-06  Jakub Jelinek  <jakub@redhat.com>

PR target/93594
* config/i386/predicates.md (avx_identity_operand): New predicate.
* config/i386/sse.md (*avx_vec_concat<mode>_1): New
define_insn_and_split.

* gcc.target/i386/avx2-pr93594.c: New test.

gcc/ChangeLog
gcc/config/i386/predicates.md
gcc/config/i386/sse.md
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/i386/avx2-pr93594.c [new file with mode: 0644]

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b5b465a922d6c4a9a21995d8bf13e3901a5ddc10..382e31368dc11822707dbc9d190e9f93eeb08d05 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
 2020-02-06  Jakub Jelinek  <jakub@redhat.com>
 
+       PR target/93594
+       * config/i386/predicates.md (avx_identity_operand): New predicate.
+       * config/i386/sse.md (*avx_vec_concat<mode>_1): New
+       define_insn_and_split.
+
        PR libgomp/93515
        * omp-low.c (use_pointer_for_field): For nested constructs, also
        look for map clauses on target construct.
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 1119366d54eaaf5d374ffe40029837fc8eb5c943..3ab9da45ffbba3c4fc1e03212397081d59e813fc 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
   return true;
 })
 
+;; Return true if OP is a parallel for identity permute.
+(define_predicate "avx_identity_operand"
+  (and (match_code "parallel")
+       (match_code "const_int" "a"))
+{
+  int i, nelt = XVECLEN (op, 0);
+
+  for (i = 0; i < nelt; ++i)
+    if (INTVAL (XVECEXP (op, 0, i)) != i)
+      return false;
+  return true;
+})
+
 ;; Return true if OP is a proper third operand to vpblendw256.
 (define_predicate "avx2_pblendw_operand"
   (match_code "const_int")
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index ac4cf5be686b2e82eaa01eab522a0ce4c9d6161b..cfd79a8354469097af79eab539ea72d35bb9f2b9 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
    (set_attr "prefix" "maybe_evex")
    (set_attr "mode" "<sseinsnmode>")])
 
+(define_insn_and_split "*avx_vec_concat<mode>_1"
+  [(set (match_operand:V_256_512 0 "register_operand")
+       (vec_concat:V_256_512
+         (vec_select:<ssehalfvecmode>
+           (unspec:V_256_512
+             [(match_operand:<ssehalfvecmode> 1 "nonimmediate_operand")]
+             UNSPEC_CAST)
+           (match_parallel 3 "avx_identity_operand"
+             [(match_operand 4 "const_int_operand")]))
+         (match_operand:<ssehalfvecmode> 2 "nonimm_or_0_operand")))]
+  "TARGET_AVX
+   && (operands[2] == CONST0_RTX (<ssehalfvecmode>mode)
+       || !MEM_P (operands[1]))
+   && ix86_pre_reload_split ()"
+  "#"
+  "&& 1"
+  [(set (match_dup 0) (vec_concat:V_256_512 (match_dup 1) (match_dup 2)))])
+
 (define_insn "vcvtph2ps<mask_name>"
   [(set (match_operand:V4SF 0 "register_operand" "=v")
        (vec_select:V4SF
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 5802f0d7c601048f108c8f8628afd67eb4dde06f..7b0b9c2c2421c7c2ee6076e1f9929f89be481c85 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2020-02-06  Jakub Jelinek  <jakub@redhat.com>
+
+       PR target/93594
+       * gcc.target/i386/avx2-pr93594.c: New test.
+
 2020-02-05  Martin Sebor  <msebor@redhat.com>
 
        PR tree-optimization/92765
diff --git a/gcc/testsuite/gcc.target/i386/avx2-pr93594.c b/gcc/testsuite/gcc.target/i386/avx2-pr93594.c
new file mode 100644
index 0000000..963c8de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx2-pr93594.c
@@ -0,0 +1,32 @@
+/* PR target/93594 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx2 -masm=att" } */
+/* { dg-final { scan-assembler-times "vmovdqa\t%xmm0, %xmm0" 4 } } */
+/* { dg-final { scan-assembler-not "vpxor\t%" } } */
+/* { dg-final { scan-assembler-not "vinserti128\t\\\$" } } */
+
+#include <x86intrin.h>
+
+__m256i
+foo (__m128i x)
+{
+  return _mm256_setr_m128i (x, _mm_setzero_si128 ());
+}
+
+__m256i
+bar (__m128i x)
+{
+  return _mm256_set_m128i (_mm_setzero_si128 (), x);
+}
+
+__m256i
+baz (__m128i x)
+{
+  return _mm256_insertf128_si256 (_mm256_setzero_si256 (), x, 0);
+}
+
+__m256i
+qux (__m128i x)
+{
+  return _mm256_insertf128_si256 (_mm256_castsi128_si256 (x), _mm_setzero_si128 (), 1);
+}