/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * SSE intrinsics portability header.
 *
 * Although the SSE intrinsics are supported by all modern x86 and x86-64
 * compilers, there are some intrinsics missing in some implementations
 * (especially older MSVC versions). This header abstracts that away.
 */

#ifndef U_SSE_H_
#define U_SSE_H_

#include "pipe/p_config.h"
#include "pipe/p_compiler.h"   /* ubyte, ushort, uint */
#include "util/u_debug.h"      /* debug_printf() */

#if defined(PIPE_ARCH_SSE)

#include <emmintrin.h>

static inline void u_print_epi8(const char *name, __m128i r)
{
   union { __m128i m; ubyte ub[16]; } u;
   u.m = r;

   debug_printf("%s: "
                "%02x/%02x/%02x/%02x/%02x/%02x/%02x/%02x/"
                "%02x/%02x/%02x/%02x/%02x/%02x/%02x/%02x\n",
                name,
                u.ub[0],  u.ub[1],  u.ub[2],  u.ub[3],
                u.ub[4],  u.ub[5],  u.ub[6],  u.ub[7],
                u.ub[8],  u.ub[9],  u.ub[10], u.ub[11],
                u.ub[12], u.ub[13], u.ub[14], u.ub[15]);
}

static inline void u_print_epi16(const char *name, __m128i r)
{
   union { __m128i m; ushort us[8]; } u;
   u.m = r;

   debug_printf("%s: "
                "%04x/%04x/%04x/%04x/%04x/%04x/%04x/%04x\n",
                name,
                u.us[0], u.us[1], u.us[2], u.us[3],
                u.us[4], u.us[5], u.us[6], u.us[7]);
}

static inline void u_print_epi32(const char *name, __m128i r)
{
   union { __m128i m; uint ui[4]; } u;
   u.m = r;

   debug_printf("%s: %08x/%08x/%08x/%08x\n",
                name,
                u.ui[0], u.ui[1], u.ui[2], u.ui[3]);
}

static inline void u_print_ps(const char *name, __m128 r)
{
   union { __m128 m; float f[4]; } u;
   u.m = r;

   debug_printf("%s: %f/%f/%f/%f\n",
                name,
                u.f[0], u.f[1], u.f[2], u.f[3]);
}

#define U_DUMP_EPI32(a) u_print_epi32(#a, a)
#define U_DUMP_EPI16(a) u_print_epi16(#a, a)
#define U_DUMP_EPI8(a)  u_print_epi8(#a, a)
#define U_DUMP_PS(a)    u_print_ps(#a, a)
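
/*
 * Example (illustrative sketch; the helper name below is hypothetical and
 * exists only to demonstrate usage): the U_DUMP_* macros stringify their
 * argument, so a register can be traced by name with a single call.
 */
static inline void
u_sse_dump_example(void)
{
   __m128i v = _mm_set_epi32(3, 2, 1, 0);
   U_DUMP_EPI32(v);   /* prints the four dwords of v, labelled "v" */
}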

#if defined(PIPE_ARCH_SSSE3)

#include <tmmintrin.h>

#else /* !PIPE_ARCH_SSSE3 */

/**
 * Provide _mm_shuffle_epi8() via gcc extended inline assembly, for cases
 * where -mssse3 is not supported/enabled.
 *
 * MSVC will never get in here as its intrinsics support does not rely on
 * compiler command line options.
 */
static __inline __m128i
#ifdef __clang__
   __attribute__((__always_inline__, __nodebug__))
#else
   __attribute__((__gnu_inline__, __always_inline__, __artificial__))
#endif
_mm_shuffle_epi8(__m128i a, __m128i mask)
{
   __m128i result;
   __asm__("pshufb %1, %0"
           : "=x" (result)
           : "xm" (mask), "0" (a));
   return result;
}

#endif /* !PIPE_ARCH_SSSE3 */
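
/*
 * Example (illustrative sketch; the helper name below is hypothetical):
 * whichever definition of _mm_shuffle_epi8() is in effect, a mask byte's
 * low 4 bits select the source byte and a set high bit zeroes the result
 * byte, so an all-zero mask broadcasts byte 0 of the source.
 */
static inline __m128i
u_sse_broadcast_byte0_example(__m128i v)
{
   return _mm_shuffle_epi8(v, _mm_setzero_si128());
}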

/*
 * Provide an SSE2 implementation of _mm_mul_epi32() in terms of
 * _mm_mul_epu32().
 *
 * Basically, albeit surprising at first (and second, and third...) look,
 * to get a * b signed instead of unsigned one can just subtract b from
 * the high 32 bits of the unsigned result when a is negative (and
 * likewise subtract a when b is negative). Modular arithmetic at its best!
 *
 * So for int32 a, b, in crude pseudo-code ("*" here denoting a widening mul):
 *    fixupb = (signmask(b) & a) << 32ULL
 *    fixupa = (signmask(a) & b) << 32ULL
 *    a * b  = (unsigned)a * (unsigned)b - fixupb - fixupa
 *           = (unsigned)a * (unsigned)b - (fixupb + fixupa)
 *
 * This does both the lo parts (dwords 0/2) and the hi parts (dwords 1/3)
 * at the same time, which exposes some optimization potential.
 */
static inline __m128i
mm_mullohi_epi32(const __m128i a, const __m128i b, __m128i *res13)
{
   __m128i a13, b13, mul02, mul13;
   __m128i anegmask, bnegmask, fixup, fixup02, fixup13;
   a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(2,3,0,1));
   b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(2,3,0,1));
   anegmask = _mm_srai_epi32(a, 31);
   bnegmask = _mm_srai_epi32(b, 31);
   fixup = _mm_add_epi32(_mm_and_si128(anegmask, b),
                         _mm_and_si128(bnegmask, a));
   mul02 = _mm_mul_epu32(a, b);
   mul13 = _mm_mul_epu32(a13, b13);
   fixup02 = _mm_slli_epi64(fixup, 32);
   fixup13 = _mm_and_si128(fixup, _mm_set_epi32(-1,0,-1,0));
   *res13 = _mm_sub_epi64(mul13, fixup13);
   return _mm_sub_epi64(mul02, fixup02);
}
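
/*
 * Scalar reference for the fixup above (illustrative sketch; the helper
 * name is hypothetical and the fixed-width types are assumed to come from
 * <stdint.h> via p_compiler.h): modulo 2^64, the unsigned widening product
 * differs from the signed one by b << 32 when a is negative and by
 * a << 32 when b is negative.
 */
static inline int64_t
u_sse_mul_epi32_scalar_example(int32_t a, int32_t b)
{
   uint64_t prod   = (uint64_t)(uint32_t)a * (uint32_t)b;
   uint64_t fixupa = a < 0 ? (uint64_t)(uint32_t)b << 32 : 0;
   uint64_t fixupb = b < 0 ? (uint64_t)(uint32_t)a << 32 : 0;
   /* Two's complement wrap-around yields the exact signed product. */
   return (int64_t)(prod - fixupa - fixupb);
}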

/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
 * _mm_mul_epu32().
 *
 * This always works regardless of the signs of the operands, since
 * the high bits (which would be different) aren't used.
 *
 * This seems close enough to the speed of SSE4 and the real
 * _mm_mullo_epi32() intrinsic as to not justify adding an SSE4
 * dependency at this point.
 */
static inline __m128i
mm_mullo_epi32(const __m128i a, const __m128i b)
{
   __m128i a4   = _mm_srli_epi64(a, 32);  /* shift by one dword */
   __m128i b4   = _mm_srli_epi64(b, 32);  /* shift by one dword */
   __m128i ba   = _mm_mul_epu32(b, a);    /* multiply dwords 0, 2 */
   __m128i b4a4 = _mm_mul_epu32(b4, a4);  /* multiply dwords 1, 3 */

   /* Interleave the results, either with shuffles or (slightly
    * faster) direct bit operations:
    * XXX: this might only be true for some cpus (in particular 65nm
    * Core 2). On most cpus (including that Core 2, but not Nehalem...)
    * using _mm_shuffle_ps/_mm_shuffle_epi32 might also be faster
    * than using the 3 instructions below. But logic should be fine
    * as well, we can't have an optimal solution for all cpus (if anything,
    * should just use _mm_mullo_epi32() if sse41 is available...).
    */
#if 0
   __m128i ba8             = _mm_shuffle_epi32(ba, 8);
   __m128i b4a48           = _mm_shuffle_epi32(b4a4, 8);
   __m128i result          = _mm_unpacklo_epi32(ba8, b4a48);
#else
   __m128i mask            = _mm_setr_epi32(~0,0,~0,0);
   __m128i ba_mask         = _mm_and_si128(ba, mask);
   __m128i b4a4_mask_shift = _mm_slli_epi64(b4a4, 32);
   __m128i result          = _mm_or_si128(ba_mask, b4a4_mask_shift);
#endif

   return result;
}
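
/*
 * Example (illustrative sketch; the helper name is hypothetical):
 * mm_mullo_epi32() returns the low 32 bits of each 32x32 product, matching
 * what the SSE4.1 _mm_mullo_epi32() intrinsic would return.
 */
static inline __m128i
u_sse_scale_by_3_example(__m128i v)
{
   return mm_mullo_epi32(v, _mm_set1_epi32(3));
}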

static inline void
transpose4_epi32(const __m128i * restrict a,
                 const __m128i * restrict b,
                 const __m128i * restrict c,
                 const __m128i * restrict d,
                 __m128i * restrict o,
                 __m128i * restrict p,
                 __m128i * restrict q,
                 __m128i * restrict r)
{
   __m128i t0 = _mm_unpacklo_epi32(*a, *b);
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = _mm_unpackhi_epi32(*a, *b);
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}
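
/*
 * Example (illustrative sketch; the helper name is hypothetical):
 * transposing four xyzw vertices stored as rows into x/y/z/w component
 * vectors (AoS -> SoA) with transpose4_epi32().
 */
static inline void
u_sse_aos_to_soa_example(const __m128i v[4],
                         __m128i *x, __m128i *y, __m128i *z, __m128i *w)
{
   transpose4_epi32(&v[0], &v[1], &v[2], &v[3], x, y, z, w);
}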

/*
 * Same as above, except the first two values are already interleaved
 * (i.e. contain 64bit values).
 */
static inline void
transpose2_64_2_32(const __m128i * restrict a01,
                   const __m128i * restrict a23,
                   const __m128i * restrict c,
                   const __m128i * restrict d,
                   __m128i * restrict o,
                   __m128i * restrict p,
                   __m128i * restrict q,
                   __m128i * restrict r)
{
   __m128i t0 = *a01;
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = *a23;
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}

#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))
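
/*
 * Example (illustrative sketch; the helper name is hypothetical):
 * SCALAR_EPI32() broadcasts the selected dword across all four lanes,
 * e.g. to splat one lane of a vector before a per-lane multiply.
 */
static inline __m128i
u_sse_splat_lane2_example(__m128i v)
{
   return SCALAR_EPI32(v, 2);   /* every lane becomes lane 2 of v */
}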

#endif /* PIPE_ARCH_SSE */

#endif /* U_SSE_H_ */