Since the x86 backend enabled V2SImode vectorization (with
TARGET_MMX_WITH_SSE), slp vectorization can kick in and emit
movq (%rdi), %xmm1
pshufd $225, %xmm1, %xmm0
movq %xmm0, (%rdi)
instead of
rolq $32, (%rdi)
which we used to emit (and still emit when SLP vectorization is disabled).
I think the rotate is both smaller and faster, so this patch adds
a combiner splitter to optimize that back.
2021-02-13 Jakub Jelinek <jakub@redhat.com>
PR target/96166
* config/i386/mmx.md (*mmx_pshufd_1): Add a combine splitter for
swap of V2SImode elements in memory into DImode memory rotate by 32.
* gcc.target/i386/pr96166.c: New test.
(set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;; Optimize a V2SImode load from memory whose two elements are swapped
+;; and stored back into the same memory location into a single DImode
+;; rotate of that memory by 32 bits (rolq $32, mem) -- smaller and
+;; faster than the movq/pshufd/movq sequence SLP vectorization emits.
+(define_split
+ [(set (match_operand:V2SI 0 "memory_operand")
+ (vec_select:V2SI (match_dup 0)
+ (parallel [(const_int 1) (const_int 0)])))]
+ ;; 64-bit only (the replacement is a DImode rotate); require either a
+ ;; tuning that allows read-modify-write memory operands or that we are
+ ;; optimizing this insn for size.
+ "TARGET_64BIT && (TARGET_READ_MODIFY_WRITE || optimize_insn_for_size_p ())"
+ [(set (match_dup 0)
+ (rotate:DI (match_dup 0) (const_int 32)))]
+ ;; Reinterpret the V2SImode memory reference as DImode for the rotate.
+ "operands[0] = adjust_address (operands[0], DImode, 0);")
+
(define_insn "mmx_pswapdv2si2"
[(set (match_operand:V2SI 0 "register_operand" "=y,Yv")
(vec_select:V2SI
--- /dev/null
+/* PR target/96166 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O3 -mtune=generic -masm=att" } */
+/* { dg-final { scan-assembler "rolq\\s\\\$32, \\\(%\[re]di\\\)" } } */
+
+/* Exchange the values pointed to by X and Y.  Kept trivially inlinable
+   so the element swap is visible to the vectorizer in the caller.  */
+static inline void
+swap (int *x, int *y)
+{
+  int tmp = *x;
+  *x = *y;
+  *y = tmp;
+}
+
+/* Load the two-element array *X, swap its elements and store it back.
+   SLP vectorization turns this into a V2SImode load/vec_select/store,
+   which the new combine splitter should collapse into a single
+   rolq $32 on memory (checked by the scan-assembler directive).  */
+void
+bar (int (*x)[2])
+{
+  int y[2];
+  __builtin_memcpy (&y, x, sizeof *x);
+  swap (&y[0], &y[1]);
+  __builtin_memcpy (x, &y, sizeof *x);
+}