gcc/config/i386/smmintrin.h
/* Copyright (C) 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING. If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA. */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License. This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License. */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 10.0. */

#ifndef _SMMINTRIN_H_INCLUDED
#define _SMMINTRIN_H_INCLUDED

#ifndef __SSE4_1__
# error "SSE4.1 instruction set not enabled"
#else

/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
   files. */
#include <tmmintrin.h>

/* SSE4.1 */

/* Rounding mode macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF     0x01
#define _MM_FROUND_TO_POS_INF     0x02
#define _MM_FROUND_TO_ZERO        0x03
#define _MM_FROUND_CUR_DIRECTION  0x04

#define _MM_FROUND_RAISE_EXC      0x00
#define _MM_FROUND_NO_EXC         0x08

#define _MM_FROUND_NINT \
  (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_FLOOR \
  (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_CEIL \
  (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_TRUNC \
  (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_RINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
#define _MM_FROUND_NEARBYINT \
  (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)

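/* Usage sketch (illustrative): each composite rounding macro is the
   bitwise OR of a direction and an exception control, e.g.
   _MM_FROUND_FLOOR == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
   == (0x01 | 0x00) == 0x01.  With the rounding intrinsics defined
   later in this file:

     __m128 v = _mm_set_ps (1.5f, -1.5f, 2.5f, -2.5f);
     __m128 f = _mm_round_ps (v, _MM_FROUND_FLOOR);   // 1.0, -2.0, 2.0, -3.0
     __m128 t = _mm_round_ps (v, _MM_FROUND_TRUNC);   // 1.0, -1.0, 2.0, -2.0

   (results listed in the same element order as the _mm_set_ps
   arguments; _mm_set_ps comes from <xmmintrin.h>, pulled in via
   <tmmintrin.h> above). */
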
/* Integer blend instructions - select data from 2 sources using
   constant/variable mask. */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
                                              (__v8hi)__Y,
                                              __M);
}
#else
#define _mm_blend_epi16(X, Y, M) \
  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
#endif

static __inline __m128i __attribute__((__always_inline__))
_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
                                               (__v16qi)__Y,
                                               (__v16qi)__M);
}

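/* Usage sketch (illustrative): _mm_blend_epi16 takes word I of the
   result from __Y when bit I of the immediate mask is set, otherwise
   from __X; _mm_blendv_epi8 does the same per byte, driven by the most
   significant bit of each mask byte.  For example:

     __m128i a = _mm_set1_epi16 (1);
     __m128i b = _mm_set1_epi16 (2);
     __m128i r = _mm_blend_epi16 (a, b, 0x0F);   // words 0-3 are 2, words 4-7 are 1

   (_mm_set1_epi16 comes from <emmintrin.h>). */
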
/* Single precision floating point blend instructions - select data
   from 2 sources using constant/variable mask. */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__))
_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
                                          (__v4sf)__Y,
                                          __M);
}
#else
#define _mm_blend_ps(X, Y, M) \
  ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
#endif

static __inline __m128 __attribute__((__always_inline__))
_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
                                           (__v4sf)__Y,
                                           (__v4sf)__M);
}

/* Double precision floating point blend instructions - select data
   from 2 sources using constant/variable mask. */

#ifdef __OPTIMIZE__
static __inline __m128d __attribute__((__always_inline__))
_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
                                           (__v2df)__Y,
                                           __M);
}
#else
#define _mm_blend_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
                                            (__v2df)__Y,
                                            (__v2df)__M);
}

/* Dot product instructions with mask-defined summing and zeroing parts
   of result. */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__))
_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
{
  return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
                                       (__v4sf)__Y,
                                       __M);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
{
  return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
                                        (__v2df)__Y,
                                        __M);
}
#else
#define _mm_dp_ps(X, Y, M) \
  ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))

#define _mm_dp_pd(X, Y, M) \
  ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
#endif

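/* Usage sketch (illustrative): in the 8-bit mask of _mm_dp_ps, bits
   [7:4] select which element pairs enter the multiply-add and bits
   [3:0] select which result elements receive the sum (the rest are
   zeroed).  A four-element dot product kept in the lowest lane:

     float dot;
     _mm_store_ss (&dot, _mm_dp_ps (x, y, 0xF1));

   (_mm_store_ss comes from <xmmintrin.h>). */
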
/* Packed integer 64-bit comparison, zeroing or filling with ones the
   corresponding parts of the result. */
static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
}

/* Min/max packed integer instructions. */

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epu16 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epu32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication with truncation of upper
   halves of results. */
static __inline __m128i __attribute__((__always_inline__))
_mm_mullo_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
}

/* Packed integer 32-bit multiplication of 2 pairs of operands
   with two 64-bit results. */
static __inline __m128i __attribute__((__always_inline__))
_mm_mul_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}

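/* Usage sketch (illustrative): _mm_mullo_epi32 keeps the low 32 bits
   of each 32x32 product, while _mm_mul_epi32 multiplies only elements
   0 and 2 of its operands and widens the two products to signed
   64 bits:

     __m128i a = _mm_set_epi32 (4, 3, 2, 1);    // elements 3..0
     __m128i b = _mm_set_epi32 (8, 7, 6, 5);
     __m128i lo = _mm_mullo_epi32 (a, b);       // 32, 21, 12, 5
     __m128i wide = _mm_mul_epi32 (a, b);       // 64-bit 3*7 == 21 and 1*5 == 5

   (_mm_set_epi32 comes from <emmintrin.h>). */
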
/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & __M) == 0. */
static __inline int __attribute__((__always_inline__))
_mm_testz_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & ~__M) == 0. */
static __inline int __attribute__((__always_inline__))
_mm_testc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
}

/* Packed integer 128-bit bitwise comparison. Return 1 if
   (__V & __M) != 0 && (__V & ~__M) != 0. */
static __inline int __attribute__((__always_inline__))
_mm_testnzc_si128 (__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
}

/* Macros for packed integer 128-bit comparison intrinsics. */
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))

#define _mm_test_all_ones(V) \
  _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))

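/* Usage sketch (illustrative): because (v & v) == 0 exactly when v is
   zero, a whole-vector zero test is simply

     if (_mm_testz_si128 (v, v))
       ;   // every bit of v is zero

   and _mm_test_all_ones (v) returns 1 when every bit of v is set. */
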
/* Insert single precision float into packed single precision array
   element selected by index N. The bits [7-6] of N define S
   index, the bits [5-4] define D index, and bits [3-0] define
   zeroing mask for D. */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__))
_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
{
  return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
                                              (__v4sf)__S,
                                              __N);
}
#else
#define _mm_insert_ps(D, S, N) \
  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
#endif

/* Helper macro to create the N value for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))

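/* Usage sketch (illustrative): _MM_MK_INSERTPS_NDX (2, 0, 0) evaluates
   to (2 << 6) | (0 << 4) | 0 == 0x80, so

     __m128 r = _mm_insert_ps (d, s, _MM_MK_INSERTPS_NDX (2, 0, 0));

   copies element 2 of s into element 0 of d and zeroes nothing. */
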
/* Extract binary representation of single precision float from packed
   single precision array element of X selected by index N. */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__))
_mm_extract_ps (__m128 __X, const int __N)
{
  union { int i; float f; } __tmp;
  __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
  return __tmp.i;
}
#else
#define _mm_extract_ps(X, N) \
  (__extension__ \
   ({ \
     union { int i; float f; } __tmp; \
     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N)); \
     __tmp.i; \
   }) \
  )
#endif

/* Extract binary representation of single precision float into
   D from packed single precision array element of S selected
   by index N. */
#define _MM_EXTRACT_FLOAT(D, S, N) \
  { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }

/* Extract specified single precision float element into the lower
   part of __m128. */
#define _MM_PICK_OUT_PS(X, N) \
  _mm_insert_ps (_mm_setzero_ps (), (X), \
                 _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))

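/* Usage sketch (illustrative): for some __m128 value v, _mm_extract_ps
   returns the bit pattern of the selected element as an int, whereas
   _MM_EXTRACT_FLOAT stores the element as a float:

     float f;
     _MM_EXTRACT_FLOAT (f, v, 2);            // f = element 2 of v
     int bits = _mm_extract_ps (v, 2);       // same element, raw bits  */
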
/* Insert integer, S, into packed integer array element of D
   selected by index N. */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi8 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
                                                 __S, __N);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi32 (__m128i __D, int __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
                                                __S, __N);
}

#ifdef __x86_64__
static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
{
  return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
                                                __S, __N);
}
#endif
#else
#define _mm_insert_epi8(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))

#define _mm_insert_epi32(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))

#ifdef __x86_64__
#define _mm_insert_epi64(D, S, N) \
  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
#endif
#endif

/* Extract integer from packed integer array element of X selected by
   index N. */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
}

static __inline int __attribute__((__always_inline__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
}
#endif
#else
#define _mm_extract_epi8(X, N) \
  __builtin_ia32_vec_ext_v16qi ((__v16qi)(X), (N))
#define _mm_extract_epi32(X, N) \
  __builtin_ia32_vec_ext_v4si ((__v4si)(X), (N))

#ifdef __x86_64__
#define _mm_extract_epi64(X, N) \
  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
#endif
#endif

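/* Usage sketch (illustrative): the integer insert/extract intrinsics
   are inverses of one another for a given index:

     __m128i v = _mm_setzero_si128 ();
     v = _mm_insert_epi32 (v, 42, 3);        // write element 3
     int e = _mm_extract_epi32 (v, 3);       // e == 42

   (_mm_setzero_si128 comes from <emmintrin.h>). */
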
/* Return horizontal packed word minimum and its index in bits [15:0]
   and bits [18:16] respectively. */
static __inline __m128i __attribute__((__always_inline__))
_mm_minpos_epu16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}

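/* Usage sketch (illustrative): the minimum and its index can be pulled
   out of the result with an SSE2 move:

     int r = _mm_cvtsi128_si32 (_mm_minpos_epu16 (x));
     unsigned int min = r & 0xffff;
     unsigned int idx = (r >> 16) & 0x7;

   (_mm_cvtsi128_si32 comes from <emmintrin.h>). */
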
/* Packed/scalar double precision floating point rounding. */

#ifdef __OPTIMIZE__
static __inline __m128d __attribute__((__always_inline__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_round_sd (__m128d __D, __m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
                                           (__v2df)__V,
                                           __M);
}
#else
#define _mm_round_pd(V, M) \
  ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))

#define _mm_round_sd(D, V, M) \
  ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
#endif

/* Packed/scalar single precision floating point rounding. */

#ifdef __OPTIMIZE__
static __inline __m128 __attribute__((__always_inline__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_round_ss (__m128 __D, __m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
                                          (__v4sf)__V,
                                          __M);
}
#else
#define _mm_round_ps(V, M) \
  ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))

#define _mm_round_ss(D, V, M) \
  ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
#endif

/* Macros for ceil/floor intrinsics. */
#define _mm_ceil_pd(V)     _mm_round_pd ((V), _MM_FROUND_CEIL)
#define _mm_ceil_sd(D, V)  _mm_round_sd ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_pd(V)    _mm_round_pd ((V), _MM_FROUND_FLOOR)
#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)

#define _mm_ceil_ps(V)     _mm_round_ps ((V), _MM_FROUND_CEIL)
#define _mm_ceil_ss(D, V)  _mm_round_ss ((D), (V), _MM_FROUND_CEIL)

#define _mm_floor_ps(V)    _mm_round_ps ((V), _MM_FROUND_FLOOR)
#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)

/* Packed integer sign-extension. */

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepi8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
}

/* Packed integer zero-extension. */

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu16_epi32 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu32_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu16_epi64 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtepu8_epi16 (__m128i __X)
{
  return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
}

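/* Usage sketch (illustrative): the extension intrinsics widen the low
   elements of their source; e.g. _mm_cvtepu8_epi32 takes the four
   lowest bytes of __X and zero-extends them into the four 32-bit
   lanes of the result:

     __m128i bytes = _mm_cvtsi32_si128 (0x04030201);
     __m128i dwords = _mm_cvtepu8_epi32 (bytes);   // lanes 0..3: 1, 2, 3, 4

   (_mm_cvtsi32_si128 comes from <emmintrin.h>). */
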
/* Pack 8 double words from 2 operands into 8 words of result with
   unsigned saturation. */
static __inline __m128i __attribute__((__always_inline__))
_mm_packus_epi32 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
}

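/* Usage sketch (illustrative): each signed 32-bit input is clamped to
   the range [0, 65535] before narrowing, so negative values become 0
   and large values become 65535:

     __m128i x = _mm_set_epi32 (70000, 65535, -5, 1);
     __m128i p = _mm_packus_epi32 (x, x);
     // low four words of p: 1, 0, 65535, 65535  */
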
/* Sum absolute 8-bit integer difference of adjacent groups of 4
   byte integers in the first 2 operands. Starting offsets within
   operands are determined by the 3rd mask operand. */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
                                              (__v16qi)__Y, __M);
}
#else
#define _mm_mpsadbw_epu8(X, Y, M) \
  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#endif

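/* Usage sketch (illustrative): with a mask of 0, result word J is the
   sum of absolute differences between bytes [J .. J+3] of __X and
   bytes [0 .. 3] of __Y; bits [1:0] of the mask move the 4-byte block
   taken from __Y, and bit [2] moves the window taken from __X, each in
   steps of 4 bytes:

     __m128i sad = _mm_mpsadbw_epu8 (x, y, 0);  */
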
/* Load double quadword using non-temporal aligned hint. */
static __inline __m128i __attribute__((__always_inline__))
_mm_stream_load_si128 (__m128i *__X)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
}

#ifdef __SSE4_2__

/* These macros specify the source data format. */
#define SIDD_UBYTE_OPS                  0x00
#define SIDD_UWORD_OPS                  0x01
#define SIDD_SBYTE_OPS                  0x02
#define SIDD_SWORD_OPS                  0x03

/* These macros specify the comparison operation. */
#define SIDD_CMP_EQUAL_ANY              0x00
#define SIDD_CMP_RANGES                 0x04
#define SIDD_CMP_EQUAL_EACH             0x08
#define SIDD_CMP_EQUAL_ORDERED          0x0c

/* These macros specify the polarity. */
#define SIDD_POSITIVE_POLARITY          0x00
#define SIDD_NEGATIVE_POLARITY          0x10
#define SIDD_MASKED_POSITIVE_POLARITY   0x20
#define SIDD_MASKED_NEGATIVE_POLARITY   0x30

/* These macros specify the output selection in _mm_cmpXstri (). */
#define SIDD_LEAST_SIGNIFICANT          0x00
#define SIDD_MOST_SIGNIFICANT           0x40

/* These macros specify the output selection in _mm_cmpXstrm (). */
#define SIDD_BIT_MASK                   0x00
#define SIDD_UNIT_MASK                  0x40

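/* Usage sketch (illustrative): a complete control byte for the string
   intrinsics is the OR of one choice from each group, e.g.

     (SIDD_UBYTE_OPS | SIDD_CMP_RANGES
      | SIDD_NEGATIVE_POLARITY | SIDD_MOST_SIGNIFICANT)
       == (0x00 | 0x04 | 0x10 | 0x40) == 0x54

   treats both operands as unsigned bytes, compares against ranges,
   negates the intermediate result and asks for the most significant
   matching index. */
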
/* Intrinsics for text/string processing. */

#ifdef __OPTIMIZE__
static __inline __m128i __attribute__((__always_inline__))
_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
                                                (__v16qi)__Y,
                                                __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
                                      (__v16qi)__Y,
                                      __M);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
                                                (__v16qi)__Y, __LY,
                                                __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
                                      (__v16qi)__Y, __LY,
                                      __M);
}
#else
#define _mm_cmpistrm(X, Y, M) \
  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M)))
#define _mm_cmpistri(X, Y, M) \
  __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestrm(X, LX, Y, LY, M) \
  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \
                                          (__v16qi)(Y), (int)(LY), (M)))
#define _mm_cmpestri(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \
                               (__v16qi)(Y), (int)(LY), (M))
#endif

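/* Usage sketch (illustrative): locating the first vowel in a 16-byte
   chunk of text; an index of 16 means "no match in this chunk":

     static const char set[16] = "aeiou";            // zero-padded
     __m128i needles = _mm_loadu_si128 ((__m128i const *) set);
     __m128i chunk   = _mm_loadu_si128 ((__m128i const *) text);
     int idx = _mm_cmpistri (needles, chunk,
                             SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY);

   (_mm_loadu_si128 comes from <emmintrin.h>; text is assumed to point
   at at least 16 readable bytes). */
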
/* Intrinsics for text/string processing and reading values of
   EFlags. */

#ifdef __OPTIMIZE__
static __inline int __attribute__((__always_inline__))
_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
{
  return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
                                       (__v16qi)__Y,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}

static __inline int __attribute__((__always_inline__))
_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
{
  return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
                                       (__v16qi)__Y, __LY,
                                       __M);
}
#else
#define _mm_cmpistra(X, Y, M) \
  __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrc(X, Y, M) \
  __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistro(X, Y, M) \
  __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrs(X, Y, M) \
  __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M))
#define _mm_cmpistrz(X, Y, M) \
  __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M))

#define _mm_cmpestra(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \
                                (__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrc(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \
                                (__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestro(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \
                                (__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrs(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \
                                (__v16qi)(Y), (int)(LY), (M))
#define _mm_cmpestrz(X, LX, Y, LY, M) \
  __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \
                                (__v16qi)(Y), (int)(LY), (M))
#endif

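/* Usage sketch (illustrative): the EFlags readers pair naturally with
   the index/mask forms when scanning implicit-length strings; for
   example, _mm_cmpistrz reports whether the 16-byte chunk in the
   second operand contained a terminating zero byte, the usual signal
   to stop a search loop:

     int last_chunk = _mm_cmpistrz (needles, chunk,
                                    SIDD_UBYTE_OPS | SIDD_CMP_EQUAL_ANY);  */
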
/* Packed integer 64-bit comparison, zeroing or filling with ones the
   corresponding parts of the result. */
static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
}

/* Calculate the number of bits set to 1. */
static __inline int __attribute__((__always_inline__))
_mm_popcnt_u32 (unsigned int __X)
{
  return __builtin_popcount (__X);
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__))
_mm_popcnt_u64 (unsigned long long __X)
{
  return __builtin_popcountll (__X);
}
#endif

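/* Usage sketch (illustrative): _mm_popcnt_u32 (0xf0f0) evaluates to 8,
   the number of set bits in its argument. */
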
/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */
static __inline unsigned int __attribute__((__always_inline__))
_mm_crc32_u8 (unsigned int __C, unsigned char __V)
{
  return __builtin_ia32_crc32qi (__C, __V);
}

static __inline unsigned int __attribute__((__always_inline__))
_mm_crc32_u16 (unsigned int __C, unsigned short __V)
{
  return __builtin_ia32_crc32hi (__C, __V);
}

static __inline unsigned int __attribute__((__always_inline__))
_mm_crc32_u32 (unsigned int __C, unsigned int __V)
{
  return __builtin_ia32_crc32si (__C, __V);
}

#ifdef __x86_64__
static __inline unsigned long long __attribute__((__always_inline__))
_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
{
  return __builtin_ia32_crc32di (__C, __V);
}
#endif

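/* Usage sketch (illustrative): the CRC32 intrinsics are chained, each
   call folding the next chunk of data into the running value:

     unsigned int crc = 0xffffffff;
     for (unsigned int i = 0; i < len; i++)
       crc = _mm_crc32_u8 (crc, buf[i]);

   (buf, len and the initial value are assumptions of the sketch; the
   instruction uses the CRC-32C polynomial noted above). */
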
#endif /* __SSE4_2__ */

#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H_INCLUDED */