/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _EMMINTRIN_H_INCLUDED
#define _EMMINTRIN_H_INCLUDED

#ifndef __SSE2__
# error "SSE2 instruction set not enabled"
#else

/* We need definitions from the SSE header files.  */
#include <xmmintrin.h>

/* SSE2 */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
typedef long long __v2di __attribute__ ((__vector_size__ (16)));
typedef int __v4si __attribute__ ((__vector_size__ (16)));
typedef short __v8hi __attribute__ ((__vector_size__ (16)));
typedef char __v16qi __attribute__ ((__vector_size__ (16)));

typedef __v2di __m128i;
typedef __v2df __m128d;

/* Create a selector for use with the SHUFPD instruction.  */
#define _MM_SHUFFLE2(fp1,fp0) \
 (((fp1) << 1) | (fp0))
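
/* Illustrative usage (not part of the original header): with
   _mm_shuffle_pd below, the selector's low bit picks the element taken
   from the first operand and the next bit the element taken from the
   second.  For hypothetical __m128d values __a and __b,

     __m128d __r = _mm_shuffle_pd (__a, __b, _MM_SHUFFLE2 (0, 1));

   yields { __a[1], __b[0] }.  */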

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_sd (double __F)
{
  return __extension__ (__m128d){ __F, 0 };
}

/* Create a vector with both elements equal to F.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set1_pd (double __F)
{
  return __extension__ (__m128d){ __F, __F };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd1 (double __F)
{
  return _mm_set1_pd (__F);
}

/* Create a vector with the lower value X and upper value W.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_set_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __X, __W };
}

/* Create a vector with the lower value W and upper value X.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setr_pd (double __W, double __X)
{
  return __extension__ (__m128d){ __W, __X };
}
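
/* Illustrative note (not part of the original header): _mm_set_pd takes
   its arguments high element first and _mm_setr_pd low element first,
   so for hypothetical doubles __x and __y the two calls

     _mm_set_pd (__y, __x)
     _mm_setr_pd (__x, __y)

   build the same vector, with __x in element 0.  */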

/* Create a vector of zeros.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_setzero_pd (void)
{
  return __extension__ (__m128d){ 0.0, 0.0 };
}

/* Sets the low DPFP value of A from the low value of B.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_move_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
}

/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd (double const *__P)
{
  return *(__m128d *)__P;
}

/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadu_pd (double const *__P)
{
  return __builtin_ia32_loadupd (__P);
}

/* Create a vector with both elements equal to *P.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load1_pd (double const *__P)
{
  return _mm_set1_pd (*__P);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_load_sd (double const *__P)
{
  return _mm_set_sd (*__P);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_load_pd1 (double const *__P)
{
  return _mm_load1_pd (__P);
}

/* Load two DPFP values in reverse order.  The address must be aligned.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_loadr_pd (double const *__P)
{
  __m128d __tmp = _mm_load_pd (__P);
  return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
}
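
/* Illustrative usage (not part of the original header), assuming a
   hypothetical 16-byte aligned buffer __buf:

     double __buf[2] __attribute__ ((aligned (16))) = { 1.0, 2.0 };
     __m128d __v = _mm_load_pd (__buf);    (requires the alignment)
     __m128d __u = _mm_loadu_pd (__buf);   (works for any address)
     __m128d __w = _mm_loadr_pd (__buf);   (yields { 2.0, 1.0 })

   Calling _mm_load_pd on a misaligned address faults at run time.  */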

/* Store two DPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_pd (double *__P, __m128d __A)
{
  *(__m128d *)__P = __A;
}

/* Store two DPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeupd (__P, __A);
}

/* Stores the lower DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_sd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
}

static __inline double __attribute__((__always_inline__))
_mm_cvtsd_f64 (__m128d __A)
{
  return __builtin_ia32_vec_ext_v2df (__A, 0);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_pd (double *__P, __m128d __A)
{
  _mm_store_sd (__P, __A);
}

/* Stores the upper DPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pd (double *__P, __m128d __A)
{
  *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
}
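
/* Illustrative usage (not part of the original header): the two stores
   above split a vector into scalars.  For a hypothetical __m128d __v:

     double __lo, __hi;
     _mm_storel_pd (&__lo, __v);   (writes element 0)
     _mm_storeh_pd (&__hi, __v);   (writes element 1)  */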

/* Store the lower DPFP value across two words.
   The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
}

static __inline void __attribute__((__always_inline__))
_mm_store_pd1 (double *__P, __m128d __A)
{
  _mm_store1_pd (__P, __A);
}

/* Store two DPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_pd (double *__P, __m128d __A)
{
  _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
}

static __inline int __attribute__((__always_inline__))
_mm_cvtsi128_si32 (__m128i __A)
{
  return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsi128_si64 (__m128i __A)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsi128_si64x (__m128i __A)
{
  return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_add_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_add_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sub_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_mul_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_div_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_pd (__m128d __A)
{
  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
}

/* Return pair {sqrt (B[0]), A[1]}.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_sqrt_sd (__m128d __A, __m128d __B)
{
  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
  return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
}
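
/* Illustrative note (not part of the original header): the _sd
   arithmetic above operates on the low elements only and passes the
   upper element of the first operand through, e.g.

     __m128d __r = _mm_add_sd (__a, __b);

   yields { __a[0] + __b[0], __a[1] } for hypothetical __a and __b.  */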

static __inline __m128d __attribute__((__always_inline__))
_mm_min_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_min_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_max_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_and_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_andnot_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_or_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_xor_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
}
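
/* Illustrative usage (not part of the original header): the packed
   compares return all-ones or all-zeros per element, which combines
   with the logical operations above into a branchless select.  For
   hypothetical __m128d values __a and __b:

     __m128d __mask = _mm_cmplt_pd (__a, __b);
     __m128d __min  = _mm_or_pd (_mm_and_pd (__mask, __a),
                                 _mm_andnot_pd (__mask, __b));

   picks the smaller element of each pair (NaN handling aside).  */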

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpeq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmplt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmple_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpgt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
                                         (__v2df)
                                         __builtin_ia32_cmpltsd ((__v2df) __B,
                                                                 (__v2df)
                                                                 __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
                                         (__v2df)
                                         __builtin_ia32_cmplesd ((__v2df) __B,
                                                                 (__v2df)
                                                                 __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpneq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnlt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnle_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpngt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
                                         (__v2df)
                                         __builtin_ia32_cmpnltsd ((__v2df) __B,
                                                                  (__v2df)
                                                                  __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpnge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
                                         (__v2df)
                                         __builtin_ia32_cmpnlesd ((__v2df) __B,
                                                                  (__v2df)
                                                                  __A));
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cmpunord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
}

/* Create a vector of Qi, where i is the element number.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64x (long long __q1, long long __q0)
{
  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi64 (__m64 __q1, __m64 __q0)
{
  return _mm_set_epi64x ((long long)__q1, (long long)__q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
{
  return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
               short __q3, short __q2, short __q1, short __q0)
{
  return __extension__ (__m128i)(__v8hi){
    __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
              char __q11, char __q10, char __q09, char __q08,
              char __q07, char __q06, char __q05, char __q04,
              char __q03, char __q02, char __q01, char __q00)
{
  return __extension__ (__m128i)(__v16qi){
    __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
    __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
  };
}

/* Set all of the elements of the vector to A.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64x (long long __A)
{
  return _mm_set_epi64x (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi64 (__m64 __A)
{
  return _mm_set_epi64 (__A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi32 (int __A)
{
  return _mm_set_epi32 (__A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi16 (short __A)
{
  return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_set1_epi8 (char __A)
{
  return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
                       __A, __A, __A, __A, __A, __A, __A, __A);
}

/* Create a vector of Qi, where i is the element number.
   The parameter order is reversed from the _mm_set_epi* functions.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi64 (__m64 __q0, __m64 __q1)
{
  return _mm_set_epi64 (__q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
{
  return _mm_set_epi32 (__q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
                short __q4, short __q5, short __q6, short __q7)
{
  return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
               char __q04, char __q05, char __q06, char __q07,
               char __q08, char __q09, char __q10, char __q11,
               char __q12, char __q13, char __q14, char __q15)
{
  return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
                       __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
}
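
/* Illustrative note (not part of the original header): as with the _pd
   setters, _mm_set_epi32 lists elements high to low and _mm_setr_epi32
   low to high, so

     _mm_set_epi32 (3, 2, 1, 0)
     _mm_setr_epi32 (0, 1, 2, 3)

   build the same vector, with 0 in element 0.  */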

/* Load 128 bits of integer data.  _mm_load_si128 requires a 16-byte
   aligned address, _mm_loadu_si128 does not, and _mm_loadl_epi64 loads
   the low 64 bits and zeroes the rest.  */

static __inline __m128i __attribute__((__always_inline__))
_mm_load_si128 (__m128i const *__P)
{
  return *__P;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_loadu_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_loadl_epi64 (__m128i const *__P)
{
  return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
}

static __inline void __attribute__((__always_inline__))
_mm_store_si128 (__m128i *__P, __m128i __B)
{
  *__P = __B;
}

static __inline void __attribute__((__always_inline__))
_mm_storeu_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_storel_epi64 (__m128i *__P, __m128i __B)
{
  *(long long *)__P = __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_movepi64_pi64 (__m128i __B)
{
  return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_movpi64_epi64 (__m64 __A)
{
  return _mm_set_epi64 ((__m64)0LL, __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_move_epi64 (__m128i __A)
{
  return _mm_set_epi64 ((__m64)0LL, _mm_movepi64_pi64 (__A));
}

/* Create a vector of zeros.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_setzero_si128 (void)
{
  return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtepi32_pd (__m128i __A)
{
  return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtepi32_ps (__m128i __A)
{
  return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpd_ps (__m128d __A)
{
  return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttpd_epi32 (__m128d __A)
{
  return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvttpd_pi32 (__m128d __A)
{
  return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtpi32_pd (__m64 __A)
{
  return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cvttps_epi32 (__m128 __A)
{
  return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtps_pd (__m128 __A)
{
  return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si ((__v2df) __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsd_si64 (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
}
#endif

static __inline int __attribute__((__always_inline__))
_mm_cvttsd_si32 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si ((__v2df) __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttsd_si64 (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttsd_si64x (__m128d __A)
{
  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
}
#endif
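
/* Illustrative note (not part of the original header): the cvt
   conversions round according to the current MXCSR rounding mode
   (round-to-nearest by default), while the cvtt variants always
   truncate toward zero:

     _mm_cvtsd_si32 (_mm_set_sd (2.7))    returns 3,
     _mm_cvttsd_si32 (_mm_set_sd (2.7))   returns 2.  */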

static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsd_ss (__m128 __A, __m128d __B)
{
  return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi32_sd (__m128d __A, int __B)
{
  return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi64_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128d __attribute__((__always_inline__))
_mm_cvtsi64x_sd (__m128d __A, long long __B)
{
  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
}
#endif

static __inline __m128d __attribute__((__always_inline__))
_mm_cvtss_sd (__m128d __A, __m128 __B)
{
  return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
}

#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))

static __inline __m128d __attribute__((__always_inline__))
_mm_unpackhi_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_unpacklo_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadh_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
}

static __inline __m128d __attribute__((__always_inline__))
_mm_loadl_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_pd (__m128d __A)
{
  return __builtin_ia32_movmskpd ((__v2df)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packs_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_packus_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
}
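
/* Illustrative usage (not part of the original header): interleaving
   with a zero vector is the classic SSE2 way to widen unsigned
   elements.  For a hypothetical __m128i __bytes holding 16 unsigned
   chars:

     __m128i __zero = _mm_setzero_si128 ();
     __m128i __lo_words = _mm_unpacklo_epi8 (__bytes, __zero);
     __m128i __hi_words = _mm_unpackhi_epi8 (__bytes, __zero);

   zero-extends the bytes to 16 unsigned shorts.  */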

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_add_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_adds_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sub_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_subs_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_madd_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mullo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_mul_su32 (__m64 __A, __m64 __B)
{
  return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_mul_epu32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srai_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
}

#if 0
static __inline __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
  return (__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8);
}
#else
#define _mm_srli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_psrldqi128 (__A, (__B) * 8))
#define _mm_slli_si128(__A, __B) \
  ((__m128i)__builtin_ia32_pslldqi128 (__A, (__B) * 8))
#endif
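
/* Illustrative note (not part of the original header): the si128 shifts
   above move the whole 128-bit value by a BYTE count, unlike the
   element-wise bit shifts.  For a hypothetical __m128i __x:

     _mm_srli_si128 (__x, 2)   shifts the register right by 16 bits,
     _mm_srli_epi16 (__x, 2)   shifts each 16-bit element right 2 bits.  */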

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi16 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi32 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sll_epi64 (__m128i __A, __m128i __B)
{
  return _mm_slli_epi64 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi16 (__m128i __A, __m128i __B)
{
  return _mm_srai_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sra_epi32 (__m128i __A, __m128i __B)
{
  return _mm_srai_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi16 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi16 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi32 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi32 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_srl_epi64 (__m128i __A, __m128i __B)
{
  return _mm_srli_epi64 (__A, _mm_cvtsi128_si32 (__B));
}

static __inline __m128i __attribute__((__always_inline__))
_mm_and_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_andnot_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_or_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_xor_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmplt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}

#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_epi16 (__m128i const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
{
  return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
}
#else
#define _mm_extract_epi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
#define _mm_insert_epi16(A, D, N) \
  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
#endif
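
/* Illustrative usage (not part of the original header): N selects one
   of the eight 16-bit lanes and must be a compile-time constant.  For a
   hypothetical __m128i __v:

     int __lane3 = _mm_extract_epi16 (__v, 3);
     __m128i __w = _mm_insert_epi16 (__v, 42, 3);

   reads lane 3, then writes 42 back into it.  */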

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_max_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_min_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_movemask_epi8 (__m128i __A)
{
  return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
}
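
/* Illustrative usage (not part of the original header): combining a
   byte compare with the mask above gives a memchr-style scan.  For a
   hypothetical __m128i __chunk holding 16 input bytes:

     __m128i __hits = _mm_cmpeq_epi8 (__chunk, _mm_set1_epi8 ('\n'));
     int __mask = _mm_movemask_epi8 (__hits);

   __mask has bit i set where byte i matched; 0 means no match.  */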

static __inline __m128i __attribute__((__always_inline__))
_mm_mulhi_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}

#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))

static __inline void __attribute__((__always_inline__))
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
{
  __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_avg_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i __attribute__((__always_inline__))
_mm_sad_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_si32 (int *__A, int __B)
{
  __builtin_ia32_movnti (__A, __B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_si128 (__m128i *__A, __m128i __B)
{
  __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_stream_pd (double *__A, __m128d __B)
{
  __builtin_ia32_movntpd (__A, (__v2df)__B);
}

static __inline void __attribute__((__always_inline__))
_mm_clflush (void const *__A)
{
  __builtin_ia32_clflush (__A);
}

static __inline void __attribute__((__always_inline__))
_mm_lfence (void)
{
  __builtin_ia32_lfence ();
}

static __inline void __attribute__((__always_inline__))
_mm_mfence (void)
{
  __builtin_ia32_mfence ();
}
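
/* Illustrative usage (not part of the original header): the _mm_stream
   stores above are non-temporal and weakly ordered, so a fence is
   needed before the data may be safely observed by another thread.
   For a hypothetical 16-byte aligned __m128i *__dst and value __v:

     _mm_stream_si128 (__dst, __v);
     _mm_mfence ();   (or _mm_sfence from xmmintrin.h)  */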

static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi32_si128 (int __A)
{
  return _mm_set_epi32 (0, 0, 0, __A);
}

#ifdef __x86_64__
/* Intel intrinsic.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi64_si128 (long long __A)
{
  return _mm_set_epi64x (0, __A);
}

/* Microsoft intrinsic.  */
static __inline __m128i __attribute__((__always_inline__))
_mm_cvtsi64x_si128 (long long __A)
{
  return _mm_set_epi64x (0, __A);
}
#endif

/* Casts between various SP, DP, INT vector types.  Note that these do no
   conversion of values, they just change the type.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_castpd_ps(__m128d __A)
{
  return (__m128) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castpd_si128(__m128d __A)
{
  return (__m128i) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castps_pd(__m128 __A)
{
  return (__m128d) __A;
}

static __inline __m128i __attribute__((__always_inline__))
_mm_castps_si128(__m128 __A)
{
  return (__m128i) __A;
}

static __inline __m128 __attribute__((__always_inline__))
_mm_castsi128_ps(__m128i __A)
{
  return (__m128) __A;
}

static __inline __m128d __attribute__((__always_inline__))
_mm_castsi128_pd(__m128i __A)
{
  return (__m128d) __A;
}
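
/* Illustrative usage (not part of the original header): because the
   casts are free reinterpretations of the bit pattern, they enable
   integer bit tricks on floating-point data.  For a hypothetical
   __m128d __x:

     __m128i __sign = _mm_set1_epi64x (0x8000000000000000ULL);
     __m128d __abs  = _mm_andnot_pd (_mm_castsi128_pd (__sign), __x);

   clears each element's sign bit, i.e. computes fabs element-wise.  */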

#endif /* __SSE2__ */

#endif /* _EMMINTRIN_H_INCLUDED */