From 0e605d9b3af68b67dceb8eeaeabebe91bc9cf31c Mon Sep 17 00:00:00 2001
From: Roland Scheidegger
Date: Fri, 29 Jan 2016 03:18:36 +0100
Subject: [PATCH] i965/tiled_memcpy: Provide SSE2 path for RGBA8 <-> BGRA8
 swizzle.

The existing code uses SSSE3, and because it isn't placed in a separate
file compiled with that flag, it is usually not used (that, of course,
could be fixed...), whereas SSE2 is always present in 64-bit builds.
This should be pretty much as fast as the pshufb version, albeit those
code paths aren't really used on chips without llc in any case.

v2: fix andnot argument order, add comments
v3: use pshuflw/hw instead of shifts (suggested by Matt Turner), cut comments
v4: [mattst88] Rebase

Reviewed-by: Matt Turner
---
 .../drivers/dri/i965/intel_tiled_memcpy.c     | 43 +++++++++++++++++--
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/intel_tiled_memcpy.c b/src/mesa/drivers/dri/i965/intel_tiled_memcpy.c
index 5d585302288..04a348ace18 100644
--- a/src/mesa/drivers/dri/i965/intel_tiled_memcpy.c
+++ b/src/mesa/drivers/dri/i965/intel_tiled_memcpy.c
@@ -36,8 +36,10 @@
 #include "brw_context.h"
 #include "intel_tiled_memcpy.h"
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__)
 #include <tmmintrin.h>
+#elif defined(__SSE2__)
+#include <emmintrin.h>
 #endif
 
 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
@@ -101,6 +103,41 @@ rgba8_copy_16_aligned_src(void *dst, const void *src)
                     _mm_shuffle_epi8(_mm_load_si128(src),
                                      *(__m128i *)rgba8_permutation));
 }
+
+#elif defined(__SSE2__)
+static inline void
+rgba8_copy_16_aligned_dst(void *dst, const void *src)
+{
+   __m128i srcreg, dstreg, agmask, ag, rb, br;
+
+   agmask = _mm_set1_epi32(0xFF00FF00);
+   srcreg = _mm_loadu_si128((__m128i *)src);
+
+   rb = _mm_andnot_si128(agmask, srcreg);
+   ag = _mm_and_si128(agmask, srcreg);
+   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+                            _MM_SHUFFLE(2, 3, 0, 1));
+   dstreg = _mm_or_si128(ag, br);
+
+   _mm_store_si128((__m128i *)dst, dstreg);
+}
+
+static inline void
+rgba8_copy_16_aligned_src(void *dst, const void *src)
+{
+   __m128i srcreg, dstreg, agmask, ag, rb, br;
+
+   agmask = _mm_set1_epi32(0xFF00FF00);
+   srcreg = _mm_load_si128((__m128i *)src);
+
+   rb = _mm_andnot_si128(agmask, srcreg);
+   ag = _mm_and_si128(agmask, srcreg);
+   br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+                            _MM_SHUFFLE(2, 3, 0, 1));
+   dstreg = _mm_or_si128(ag, br);
+
+   _mm_storeu_si128((__m128i *)dst, dstreg);
+}
 #endif
 
 /**
@@ -111,7 +148,7 @@ rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
 {
    assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__) || defined(__SSE2__)
    while (bytes >= 16) {
       rgba8_copy_16_aligned_dst(dst, src);
       src += 16;
@@ -133,7 +170,7 @@ rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
 {
    assert(bytes == 0 || !(((uintptr_t)src) & 0xf));
 
-#ifdef __SSSE3__
+#if defined(__SSSE3__) || defined(__SSE2__)
    while (bytes >= 16) {
       rgba8_copy_16_aligned_src(dst, src);
       src += 16;
-- 
2.30.2
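
For readers who want to sanity-check the swizzle outside of Mesa, the SSE2
sequence above can be exercised standalone. Below is a minimal sketch (not
part of the patch; the scalar reference and test harness are illustrative
assumptions) that compares the andnot/and + pshuflw/pshufhw + por sequence
against a plain per-pixel byte swap:

/* Standalone check of the SSE2 RGBA8 <-> BGRA8 swizzle from the patch.
 * Build e.g. with: gcc -msse2 -O2 swizzle_check.c
 * The scalar reference and main() are illustrative, not Mesa code. */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same sequence as rgba8_copy_16_aligned_dst above (unaligned load and
 * store here so the harness needs no aligned buffers). */
static void
sse2_swizzle_16(void *dst, const void *src)
{
   __m128i srcreg = _mm_loadu_si128((const __m128i *)src);
   __m128i agmask = _mm_set1_epi32(0xFF00FF00);

   /* rb holds the red/blue bytes, ag the alpha/green bytes. */
   __m128i rb = _mm_andnot_si128(agmask, srcreg);
   __m128i ag = _mm_and_si128(agmask, srcreg);

   /* pshuflw/pshufhw swap the two 16-bit words of each 32-bit pixel,
    * moving R and B into each other's byte positions. */
   __m128i br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
                                    _MM_SHUFFLE(2, 3, 0, 1));

   _mm_storeu_si128((__m128i *)dst, _mm_or_si128(ag, br));
}

/* Scalar reference: swap bytes 0 (R) and 2 (B) of every 32-bit pixel. */
static void
scalar_swizzle_16(uint8_t *dst, const uint8_t *src)
{
   for (int p = 0; p < 16; p += 4) {
      dst[p + 0] = src[p + 2];
      dst[p + 1] = src[p + 1];
      dst[p + 2] = src[p + 0];
      dst[p + 3] = src[p + 3];
   }
}

int
main(void)
{
   uint8_t src[16], ref[16], out[16];

   for (int i = 0; i < 16; i++)
      src[i] = (uint8_t)(i * 37 + 11);   /* arbitrary test pattern */

   scalar_swizzle_16(ref, src);
   sse2_swizzle_16(out, src);

   printf("%s\n", memcmp(ref, out, 16) == 0 ? "match" : "MISMATCH");
   return 0;
}

This should print "match": the 0xFF00FF00 mask isolates the bytes that stay
put, and the word swap performs exactly the R <-> B exchange that pshufb
does in a single instruction on SSSE3 hardware.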