/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* The data type intended for user use.  */
typedef float __m128 __attribute__ ((vector_size (16)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((vector_size (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
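
/* Usage sketch (illustrative): each two-bit field of the selector picks
   one source element, so _MM_SHUFFLE (3,2,1,0) is 0xE4, the identity
   permutation, and _MM_SHUFFLE (0,1,2,3) is 0x1B, a full reversal.
   With the _mm_shuffle_ps intrinsic defined below (__v hypothetical):

     __m128 __rev = _mm_shuffle_ps (__v, __v, _MM_SHUFFLE (0,1,2,3));
*/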

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
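
/* Usage sketch (illustrative): only element 0 is combined; elements 1-3
   of the first operand pass through unchanged.  Note _mm_set_ps (defined
   below) takes its arguments high element first:

     __m128 __a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);    elements {1,2,3,4}
     __m128 __b = _mm_set_ps (8.0f, 7.0f, 6.0f, 5.0f);    elements {5,6,7,8}
     __m128 __r = _mm_add_ss (__a, __b);                  elements {6,2,3,4}
*/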

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}
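
/* Usage sketch (illustrative): all four elements are combined
   independently, in contrast to the _ss forms above:

     __m128 __r = _mm_add_ps (__a, __b);    r[i] = a[i] + b[i]
*/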

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
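
/* Usage sketch (illustrative): these operate on raw bit patterns, so
   _mm_andnot_ps with a sign-bit mask yields a four-wide absolute value
   (__x hypothetical; _mm_set1_ps is defined below):

     __m128 __sign = _mm_set1_ps (-0.0f);            only sign bits set
     __m128 __abs  = _mm_andnot_ps (__sign, __x);    clears each sign bit
*/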

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

static __inline __m128
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

static __inline __m128
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

static __inline __m128
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

static __inline __m128
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
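
/* Usage sketch (illustrative): the all-ones/all-zeros masks combine with
   the bit-wise operations above into a branchless element-wise select,
   here r[i] = a[i] > b[i] ? a[i] : b[i] (__a, __b hypothetical):

     __m128 __m = _mm_cmpgt_ps (__a, __b);
     __m128 __r = _mm_or_ps (_mm_and_ps (__m, __a),
                             _mm_andnot_ps (__m, __b));
*/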

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
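
/* Usage sketch (illustrative): unlike the mask-producing comparisons
   above, these return a plain int and so can drive ordinary control
   flow; the ucomi forms differ only in raising the invalid-operation
   exception for fewer NaN operand cases (__a, __b hypothetical):

     if (_mm_comilt_ss (__a, __b))
       ...lower element of __a is less than that of __b...
*/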

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the current
   rounding mode.  */
static __inline long long
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */
static __inline long long
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
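
/* Usage sketch (illustrative): the cvtt forms always truncate toward
   zero, while the cvt forms honor the current MXCSR rounding mode
   (round-to-nearest by default; _mm_set_ss is defined below):

     __m128 __v = _mm_set_ss (1.7f);
     int __r = _mm_cvtss_si32 (__v);     2 under round-to-nearest
     int __t = _mm_cvttss_si32 (__v);    1 in any rounding mode
*/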

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v4hi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu16_ps (__m64 __A)
{
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v8qi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps (__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu8_ps (__m64 __A)
{
  __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero ();
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero);
  return _mm_cvtpu16_ps (__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128
_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) __builtin_ia32_setzerops ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64
_mm_cvtps_pi16 (__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64
_mm_cvtps_pi8 (__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  return (__m64) __builtin_ia32_packsswb (__tmp, __zero);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
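
/* Usage sketch (illustrative): the two low result elements are selected
   from A and the two high ones from B, each by one _MM_SHUFFLE field
   (elements shown low to high):

     __m128 __a = _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f);    {0,1,2,3}
     __m128 __b = _mm_set_ps (7.0f, 6.0f, 5.0f, 4.0f);    {4,5,6,7}
     __m128 __r = _mm_shuffle_ps (__a, __b,
                                  _MM_SHUFFLE (3,2,1,0)); {0,1,6,7}
*/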


/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void
_MM_SET_EXCEPTION_STATE (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
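
/* Usage sketch (illustrative): these helpers read-modify-write the
   MXCSR, so temporarily switching the rounding mode looks like:

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);
*/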

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128
_mm_load_ss (float const *__P)
{
  return (__m128) __builtin_ia32_loadss (__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128
_mm_load1_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadss (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128
_mm_load_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadaps (__P);
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadaps (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128
_mm_set_ss (float __F)
{
  return (__m128) __builtin_ia32_loadss (&__F);
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128
_mm_set1_ps (float __F)
{
  __v4sf __tmp = __builtin_ia32_loadss (&__F);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create the vector [Z Y X W].  */
static __inline __m128
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return (__v4sf) {__W, __X, __Y, __Z};
}

/* Create the vector [W X Y Z].  */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return _mm_set_ps (__W, __X, __Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128
_mm_setzero_ps (void)
{
  return (__m128) __builtin_ia32_setzerops ();
}

/* Stores the lower SPFP value.  */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
  __builtin_ia32_storess (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  __builtin_ia32_storeaps (__P, __tmp);
}

static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeaps (__P, (__v4sf)__A);
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  __builtin_ia32_storeaps (__P, __tmp);
}
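
/* Usage sketch (illustrative): _mm_load_ps/_mm_store_ps compile to
   movaps, which faults on a misaligned address; use the u-forms for
   arbitrary pointers (__v and __ptr hypothetical):

     float __buf[4] __attribute__ ((aligned (16)));
     _mm_store_ps (__buf, __v);      requires 16-byte alignment
     _mm_storeu_ps (__ptr, __v);     any address
*/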

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
  return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}

static __inline int
_m_pextrw (__m64 __A, int __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  __builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
  return (__m64) __builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}

static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}
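
/* Usage sketch (illustrative): psadbw reduces eight byte differences to
   a single scalar, so a block difference for e.g. motion estimation
   costs one accumulation per eight pixels (__row_a, __row_b and total
   hypothetical; _mm_cvtsi64_si32 comes from <mmintrin.h>):

     __m64 __d = _mm_sad_pu8 (__row_a, __row_b);
     total += _mm_cvtsi64_si32 (__d);
*/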

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
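
/* Usage sketch (illustrative): non-temporal stores are weakly ordered,
   so a producer publishing data through _mm_stream_ps should fence
   before setting a ready flag (__dst, __v and __ready hypothetical):

     _mm_stream_ps (__dst, __v);
     _mm_sfence ();
     *__ready = 1;
*/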

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                       \
do {                                                                    \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);    \
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);               \
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);               \
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);               \
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);               \
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);                    \
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);                    \
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);                    \
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);                    \
} while (0)
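
/* Usage sketch (illustrative): transposing a 16-byte-aligned 4x4 matrix
   stored row-major in __m (hypothetical float [16]):

     __m128 __r0 = _mm_load_ps (__m + 0);
     __m128 __r1 = _mm_load_ps (__m + 4);
     __m128 __r2 = _mm_load_ps (__m + 8);
     __m128 __r3 = _mm_load_ps (__m + 12);
     _MM_TRANSPOSE4_PS (__r0, __r1, __r2, __r3);
*/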

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */