i965/tiled_memcpy: Provide SSE2 for RGBA8 <-> BGRA8 swizzle.
author Roland Scheidegger <sroland@vmware.com>
Fri, 29 Jan 2016 02:18:36 +0000 (03:18 +0100)
committer Matt Turner <mattst88@gmail.com>
Tue, 12 Apr 2016 21:37:01 +0000 (14:37 -0700)
The existing code uses SSSE3, and since it isn't built in a separate
file compiled with SSSE3 enabled, it is usually not used (that, of
course, could be fixed...), whereas SSE2 is always present in 64-bit
builds.  This should be pretty much as fast as the pshufb version,
albeit these code paths aren't really used on chips without llc in any
case.

v2: fix andnot argument order, add comments
v3: use pshuflw/hw instead of shifts (suggested by Matt Turner), cut comments
v4: [mattst88] Rebase
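
For reference, the swizzle both SIMD paths implement is just an R/B
byte swap within each 32-bit pixel; a minimal scalar sketch
(hypothetical helper, not part of this patch):

   #include <stddef.h>
   #include <stdint.h>

   static void
   rgba8_swizzle_scalar(void *dst, const void *src, size_t bytes)
   {
      const uint8_t *s = src;
      uint8_t *d = dst;
      /* bytes 1 (G) and 3 (A) stay put, bytes 0 and 2 (R, B) swap */
      for (size_t i = 0; i < bytes; i += 4) {
         d[i + 0] = s[i + 2];
         d[i + 1] = s[i + 1];
         d[i + 2] = s[i + 0];
         d[i + 3] = s[i + 3];
      }
   }

The SSE2 functions below do the same 16 bytes (four pixels) at a time.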

Reviewed-by: Matt Turner <mattst88@gmail.com>
src/mesa/drivers/dri/i965/intel_tiled_memcpy.c

index 5d585302288a3dfe5232e9b8ff410e79d87a3a80..04a348ace187a85706f1a2d060fe85bff9007f93 100644
 #include "brw_context.h"
 #include "intel_tiled_memcpy.h"
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__)
 #include <tmmintrin.h>
+#elif defined(__SSE2__)
+#include <emmintrin.h>
 #endif
 
 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
@@ -101,6 +103,41 @@ rgba8_copy_16_aligned_src(void *dst, const void *src)
                     _mm_shuffle_epi8(_mm_load_si128(src),
                                      *(__m128i *)rgba8_permutation));
 }
+
+#elif defined(__SSE2__)
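+/* dst is assumed 16-byte aligned; src may be unaligned. */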
+static inline void
+rgba8_copy_16_aligned_dst(void *dst, const void *src)
+{
+   __m128i srcreg, dstreg, agmask, ag, rb, br;
+
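+   /* 0xFF00FF00 selects the G and A bytes of each pixel, which stay put. */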
+   agmask = _mm_set1_epi32(0xFF00FF00);
+   srcreg = _mm_loadu_si128((__m128i *)src);
+
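+   /* Isolate R and B, swap the two 16-bit words within each pixel so
+    * they trade places, then merge G and A back in.
+    */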
+   rb = _mm_andnot_si128(agmask, srcreg);
+   ag = _mm_and_si128(agmask, srcreg);
+   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+                            _MM_SHUFFLE(2, 3, 0, 1));
+   dstreg = _mm_or_si128(ag, br);
+
+   _mm_store_si128((__m128i *)dst, dstreg);
+}
+
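+/* Same swizzle as above; here src is aligned and dst may be unaligned. */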
+static inline void
+rgba8_copy_16_aligned_src(void *dst, const void *src)
+{
+   __m128i srcreg, dstreg, agmask, ag, rb, br;
+
+   agmask = _mm_set1_epi32(0xFF00FF00);
+   srcreg = _mm_load_si128((__m128i *)src);
+
+   rb = _mm_andnot_si128(agmask, srcreg);
+   ag = _mm_and_si128(agmask, srcreg);
+   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+                            _MM_SHUFFLE(2, 3, 0, 1));
+   dstreg = _mm_or_si128(ag, br);
+
+   _mm_storeu_si128((__m128i *)dst, dstreg);
+}
 #endif
 
 /**
@@ -111,7 +148,7 @@ rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
 {
    assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__) || defined(__SSE2__)
    while (bytes >= 16) {
       rgba8_copy_16_aligned_dst(dst, src);
       src += 16;
@@ -133,7 +170,7 @@ rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
 {
    assert(bytes == 0 || !(((uintptr_t)src) & 0xf));
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__) || defined(__SSE2__)
    while (bytes >= 16) {
       rgba8_copy_16_aligned_src(dst, src);
       src += 16;