* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
+ * distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
 *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#include "brw_context.h"
#include "intel_tiled_memcpy.h"
-#ifdef __SSSE3__
+#if defined(__SSSE3__)
#include <tmmintrin.h>
+#elif defined(__SSE2__)
+#include <emmintrin.h>
#endif
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static const uint32_t ytile_height = 32;
static const uint32_t ytile_span = 16;
-#ifdef __SSSE3__
-static const uint8_t rgba8_permutation[16] =
- { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 };
+static inline uint32_t
+ror(uint32_t n, uint32_t d)
+{
+ return (n >> d) | (n << (32 - d));
+}
-/* NOTE: dst must be 16 byte aligned */
-#define rgba8_copy_16(dst, src) \
- *(__m128i *)(dst) = _mm_shuffle_epi8( \
- (__m128i) _mm_loadu_ps((float *)(src)), \
- *(__m128i *) rgba8_permutation \
- )
+static inline uint32_t
+bswap32(uint32_t n)
+{
+#if defined(HAVE___BUILTIN_BSWAP32)
+ return __builtin_bswap32(n);
+#else
+ return (n >> 24) |
+ ((n >> 8) & 0x0000ff00) |
+ ((n << 8) & 0x00ff0000) |
+ (n << 24);
#endif
+}
/**
 * Copy RGBA to BGRA - swap R and B.
 */
static inline void *
rgba8_copy(void *dst, const void *src, size_t bytes)
{
- uint8_t *d = dst;
- uint8_t const *s = src;
+ uint32_t *d = dst;
+ uint32_t const *s = src;
+
+ assert(bytes % 4 == 0);
+
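+   /* On little-endian x86 (the only platform this driver targets), the
+    * pixel bytes R,G,B,A load as the uint32_t 0xAABBGGRR; bswap32() gives
+    * 0xRRGGBBAA and ror(..., 8) rotates alpha back to the top byte,
+    * 0xAARRGGBB, which stores back as the bytes B,G,R,A - the desired
+    * RGBA -> BGRA swap.
+    */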
+ while (bytes >= 4) {
+ *d = ror(bswap32(*s), 8);
+ d += 1;
+ s += 1;
+ bytes -= 4;
+ }
+ return dst;
+}
#ifdef __SSSE3__
- /* Fast copying for tile spans.
- *
- * As long as the destination texture is 16 aligned,
- * any 16 or 64 spans we get here should also be 16 aligned.
- */
+static const uint8_t rgba8_permutation[16] =
+ { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 };
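+/* Each 4-byte group above is {2,1,0,3}: for every pixel, _mm_shuffle_epi8
+ * (pshufb) pulls byte 2 (B) down into byte 0, leaves G and A in place, and
+ * pulls byte 0 (R) up into byte 2, swapping R and B in all four pixels at
+ * once.
+ */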
+
+static inline void
+rgba8_copy_16_aligned_dst(void *dst, const void *src)
+{
+ _mm_store_si128(dst,
+ _mm_shuffle_epi8(_mm_loadu_si128(src),
+ *(__m128i *)rgba8_permutation));
+}
+
+static inline void
+rgba8_copy_16_aligned_src(void *dst, const void *src)
+{
+ _mm_storeu_si128(dst,
+ _mm_shuffle_epi8(_mm_load_si128(src),
+ *(__m128i *)rgba8_permutation));
+}
+
+#elif defined(__SSE2__)
+static inline void
+rgba8_copy_16_aligned_dst(void *dst, const void *src)
+{
+ __m128i srcreg, dstreg, agmask, ag, rb, br;
+
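+   /* Keep alpha and green in place (ag), isolate red and blue in their own
+    * 16-bit lanes (rb), swap each adjacent pair of 16-bit words so red and
+    * blue trade places (br), then merge the halves back together.
+    * rgba8_copy_16_aligned_src below differs only in which side gets the
+    * aligned load/store.
+    */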
+ agmask = _mm_set1_epi32(0xFF00FF00);
+ srcreg = _mm_loadu_si128((__m128i *)src);
+
+ rb = _mm_andnot_si128(agmask, srcreg);
+ ag = _mm_and_si128(agmask, srcreg);
+ br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+ _MM_SHUFFLE(2, 3, 0, 1));
+ dstreg = _mm_or_si128(ag, br);
+
+ _mm_store_si128((__m128i *)dst, dstreg);
+}
- if (bytes == 16) {
- assert(!(((uintptr_t)dst) & 0xf));
- rgba8_copy_16(d+ 0, s+ 0);
+static inline void
+rgba8_copy_16_aligned_src(void *dst, const void *src)
+{
+ __m128i srcreg, dstreg, agmask, ag, rb, br;
+
+ agmask = _mm_set1_epi32(0xFF00FF00);
+ srcreg = _mm_load_si128((__m128i *)src);
+
+ rb = _mm_andnot_si128(agmask, srcreg);
+ ag = _mm_and_si128(agmask, srcreg);
+ br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)),
+ _MM_SHUFFLE(2, 3, 0, 1));
+ dstreg = _mm_or_si128(ag, br);
+
+ _mm_storeu_si128((__m128i *)dst, dstreg);
+}
+#endif
+
+/**
+ * Copy RGBA to BGRA - swap R and B, with the destination 16-byte aligned.
+ */
+static inline void *
+rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
+{
+ assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));
+
+#if defined(__SSSE3__) || defined(__SSE2__)
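+   /* A whole X-tile span (xtile_span, defined earlier in this file) is 64
+    * bytes and a Y-tile span is 16, so the unrolled 64-byte case covers the
+    * common X-tiling path in one shot and Y-tile spans take a single pass
+    * through the loop below; any remainder shorter than 16 bytes falls
+    * through to the scalar rgba8_copy.
+    */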
+ if (bytes == 64) {
+ rgba8_copy_16_aligned_dst(dst + 0, src + 0);
+ rgba8_copy_16_aligned_dst(dst + 16, src + 16);
+ rgba8_copy_16_aligned_dst(dst + 32, src + 32);
+ rgba8_copy_16_aligned_dst(dst + 48, src + 48);
return dst;
}
+ while (bytes >= 16) {
+ rgba8_copy_16_aligned_dst(dst, src);
+ src += 16;
+ dst += 16;
+ bytes -= 16;
+ }
+#endif
+
+ rgba8_copy(dst, src, bytes);
+
+ return dst;
+}
+
+/**
+ * Copy RGBA to BGRA - swap R and B, with the source 16-byte aligned.
+ */
+static inline void *
+rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
+{
+ assert(bytes == 0 || !(((uintptr_t)src) & 0xf));
+
+#if defined(__SSSE3__) || defined(__SSE2__)
if (bytes == 64) {
- assert(!(((uintptr_t)dst) & 0xf));
- rgba8_copy_16(d+ 0, s+ 0);
- rgba8_copy_16(d+16, s+16);
- rgba8_copy_16(d+32, s+32);
- rgba8_copy_16(d+48, s+48);
+ rgba8_copy_16_aligned_src(dst + 0, src + 0);
+ rgba8_copy_16_aligned_src(dst + 16, src + 16);
+ rgba8_copy_16_aligned_src(dst + 32, src + 32);
+ rgba8_copy_16_aligned_src(dst + 48, src + 48);
return dst;
}
-#endif
- while (bytes >= 4) {
- d[0] = s[2];
- d[1] = s[1];
- d[2] = s[0];
- d[3] = s[3];
- d += 4;
- s += 4;
- bytes -= 4;
+ while (bytes >= 16) {
+ rgba8_copy_16_aligned_src(dst, src);
+ src += 16;
+ dst += 16;
+ bytes -= 16;
}
+#endif
+
+ rgba8_copy(dst, src, bytes);
+
return dst;
}
typedef void (*tile_copy_fn)(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t src_pitch,
+ int32_t linear_pitch,
uint32_t swizzle_bit,
mem_copy_fn mem_copy);
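+
+/* The linear-side pitch is signed so a caller can pass a negative pitch and
+ * walk the linear surface bottom-up (e.g. for y-flipped copies); the
+ * (ptrdiff_t) casts below keep the y0 * pitch offset from being evaluated
+ * as wrapping unsigned arithmetic.
+ */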
* Copy texture data from linear to X tile layout.
*
* \copydoc tile_copy_fn
+ *
+ * The mem_copy parameters allow the caller to specify an alternative
+ * mem_copy function that, for instance, may do RGBA -> BGRA swizzling. The
+ * first function must handle any memory alignment, while the second only
+ * needs to handle 16-byte alignment on whichever side (source or
+ * destination) is tiled.
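+ *
+ * For example, each row [x0, x3) is copied as an unaligned left edge
+ * [x0, x1), a run of whole spans covering [x1, x2), and a final partial
+ * span [x2, x3). Only the left edge needs the fully general mem_copy; the
+ * partial span still starts 16-byte aligned on the tiled side, so it can
+ * use mem_copy_align16 as well.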
*/
static inline void
linear_to_xtiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t src_pitch,
+ int32_t src_pitch,
uint32_t swizzle_bit,
- mem_copy_fn mem_copy)
+ mem_copy_fn mem_copy,
+ mem_copy_fn mem_copy_align16)
{
/* The copy destination offset for each range copied is the sum of
* an X offset 'x0' or 'xo' and a Y offset 'yo.'
*/
uint32_t xo, yo;
- src += y0 * src_pitch;
+ src += (ptrdiff_t)y0 * src_pitch;
for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
/* Bits 9 and 10 of the copy destination offset control swizzling.
mem_copy(dst + ((x0 + yo) ^ swizzle), src + x0, x1 - x0);
for (xo = x1; xo < x2; xo += xtile_span) {
- mem_copy(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span);
+ mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span);
}
- mem_copy(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
+ mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
src += src_pitch;
}
linear_to_ytiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t src_pitch,
+ int32_t src_pitch,
uint32_t swizzle_bit,
- mem_copy_fn mem_copy)
+ mem_copy_fn mem_copy,
+ mem_copy_fn mem_copy_align16)
{
/* Y tiles consist of columns that are 'ytile_span' wide (and the same height
* as the tile). Thus the destination offset for (x,y) is the sum of:
uint32_t x, yo;
- src += y0 * src_pitch;
+ src += (ptrdiff_t)y0 * src_pitch;
for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
uint32_t xo = xo1;
* at each step so we don't need to calculate it explicitly.
*/
for (x = x1; x < x2; x += ytile_span) {
- mem_copy(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
+ mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span);
xo += bytes_per_column;
swizzle ^= swizzle_bit;
}
- mem_copy(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
+ mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2);
src += src_pitch;
}
xtiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t dst_pitch,
+ int32_t dst_pitch,
uint32_t swizzle_bit,
- mem_copy_fn mem_copy)
+ mem_copy_fn mem_copy,
+ mem_copy_fn mem_copy_align16)
{
/* The copy destination offset for each range copied is the sum of
* an X offset 'x0' or 'xo' and a Y offset 'yo.'
*/
uint32_t xo, yo;
- dst += y0 * dst_pitch;
+ dst += (ptrdiff_t)y0 * dst_pitch;
for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) {
/* Bits 9 and 10 of the copy destination offset control swizzling.
mem_copy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0);
for (xo = x1; xo < x2; xo += xtile_span) {
- mem_copy(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span);
+ mem_copy_align16(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span);
}
- mem_copy(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
+ mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
dst += dst_pitch;
}
ytiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t dst_pitch,
+ int32_t dst_pitch,
uint32_t swizzle_bit,
- mem_copy_fn mem_copy)
+ mem_copy_fn mem_copy,
+ mem_copy_fn mem_copy_align16)
{
/* Y tiles consist of columns that are 'ytile_span' wide (and the same height
* as the tile). Thus the destination offset for (x,y) is the sum of:
uint32_t x, yo;
- dst += y0 * dst_pitch;
+ dst += (ptrdiff_t)y0 * dst_pitch;
for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) {
uint32_t xo = xo1;
* at each step so we don't need to calculate it explicitly.
*/
for (x = x1; x < x2; x += ytile_span) {
- mem_copy(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
+ mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span);
xo += bytes_per_column;
swizzle ^= swizzle_bit;
}
- mem_copy(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
+ mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2);
dst += dst_pitch;
}
linear_to_xtiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t src_pitch,
+ int32_t src_pitch,
uint32_t swizzle_bit,
mem_copy_fn mem_copy)
{
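+   /* Comparing mem_copy against the known copy functions lets each call
+    * below pass compile-time-constant function pointers, so the compiler
+    * can inline a copy loop specialized for every case; the other *_faster
+    * variants use the same trick.
+    */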
if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
if (mem_copy == memcpy)
return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
- dst, src, src_pitch, swizzle_bit, memcpy);
+ dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height,
- dst, src, src_pitch, swizzle_bit, rgba8_copy);
+ dst, src, src_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_dst);
+ else
+ unreachable("not reached");
} else {
if (mem_copy == memcpy)
return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, memcpy);
+ dst, src, src_pitch, swizzle_bit,
+ memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return linear_to_xtiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, rgba8_copy);
+ dst, src, src_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_dst);
+ else
+ unreachable("not reached");
}
linear_to_xtiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, mem_copy);
+ dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
linear_to_ytiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t src_pitch,
+ int32_t src_pitch,
uint32_t swizzle_bit,
mem_copy_fn mem_copy)
{
if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
if (mem_copy == memcpy)
return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
- dst, src, src_pitch, swizzle_bit, memcpy);
+ dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height,
- dst, src, src_pitch, swizzle_bit, rgba8_copy);
+ dst, src, src_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_dst);
+ else
+ unreachable("not reached");
} else {
if (mem_copy == memcpy)
return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, memcpy);
+ dst, src, src_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return linear_to_ytiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, rgba8_copy);
+ dst, src, src_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_dst);
+ else
+ unreachable("not reached");
}
linear_to_ytiled(x0, x1, x2, x3, y0, y1,
- dst, src, src_pitch, swizzle_bit, mem_copy);
+ dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
xtiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t dst_pitch,
+ int32_t dst_pitch,
uint32_t swizzle_bit,
mem_copy_fn mem_copy)
{
if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) {
if (mem_copy == memcpy)
return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
- dst, src, dst_pitch, swizzle_bit, memcpy);
+ dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height,
- dst, src, dst_pitch, swizzle_bit, rgba8_copy);
+ dst, src, dst_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_src);
+ else
+ unreachable("not reached");
} else {
if (mem_copy == memcpy)
return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, memcpy);
+ dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return xtiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, rgba8_copy);
+ dst, src, dst_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_src);
+ else
+ unreachable("not reached");
}
xtiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, mem_copy);
+ dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
ytiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
uint32_t y0, uint32_t y1,
char *dst, const char *src,
- uint32_t dst_pitch,
+ int32_t dst_pitch,
uint32_t swizzle_bit,
mem_copy_fn mem_copy)
{
if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) {
if (mem_copy == memcpy)
return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
- dst, src, dst_pitch, swizzle_bit, memcpy);
+ dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height,
- dst, src, dst_pitch, swizzle_bit, rgba8_copy);
+ dst, src, dst_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_src);
+ else
+ unreachable("not reached");
} else {
if (mem_copy == memcpy)
return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, memcpy);
+ dst, src, dst_pitch, swizzle_bit, memcpy, memcpy);
else if (mem_copy == rgba8_copy)
return ytiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, rgba8_copy);
+ dst, src, dst_pitch, swizzle_bit,
+ rgba8_copy, rgba8_copy_aligned_src);
+ else
+ unreachable("not reached");
}
ytiled_to_linear(x0, x1, x2, x3, y0, y1,
- dst, src, dst_pitch, swizzle_bit, mem_copy);
+ dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy);
}
/**
linear_to_tiled(uint32_t xt1, uint32_t xt2,
uint32_t yt1, uint32_t yt2,
char *dst, const char *src,
- uint32_t dst_pitch, uint32_t src_pitch,
+ uint32_t dst_pitch, int32_t src_pitch,
bool has_swizzling,
uint32_t tiling,
mem_copy_fn mem_copy)
tiled_to_linear(uint32_t xt1, uint32_t xt2,
uint32_t yt1, uint32_t yt2,
char *dst, const char *src,
- uint32_t dst_pitch, uint32_t src_pitch,
+ int32_t dst_pitch, uint32_t src_pitch,
bool has_swizzling,
uint32_t tiling,
mem_copy_fn mem_copy)
 * \return true if the format and type combination is valid
*/
bool intel_get_memcpy(mesa_format tiledFormat, GLenum format,
- GLenum type, mem_copy_fn* mem_copy, uint32_t* cpp)
+ GLenum type, mem_copy_fn *mem_copy, uint32_t *cpp)
{
if (type == GL_UNSIGNED_INT_8_8_8_8_REV &&
!(format == GL_RGBA || format == GL_BGRA))
*cpp = 1;
*mem_copy = memcpy;
} else if ((tiledFormat == MESA_FORMAT_B8G8R8A8_UNORM) ||
- (tiledFormat == MESA_FORMAT_B8G8R8X8_UNORM)) {
+ (tiledFormat == MESA_FORMAT_B8G8R8X8_UNORM) ||
+ (tiledFormat == MESA_FORMAT_B8G8R8A8_SRGB) ||
+ (tiledFormat == MESA_FORMAT_B8G8R8X8_SRGB)) {
*cpp = 4;
if (format == GL_BGRA) {
*mem_copy = memcpy;
*mem_copy = rgba8_copy;
}
} else if ((tiledFormat == MESA_FORMAT_R8G8B8A8_UNORM) ||
- (tiledFormat == MESA_FORMAT_R8G8B8X8_UNORM)) {
+ (tiledFormat == MESA_FORMAT_R8G8B8X8_UNORM) ||
+ (tiledFormat == MESA_FORMAT_R8G8B8A8_SRGB) ||
+ (tiledFormat == MESA_FORMAT_R8G8B8X8_SRGB)) {
*cpp = 4;
if (format == GL_BGRA) {
/* Copying from RGBA to BGRA is the same as BGRA to RGBA so we can