re PR target/34435 (SSE2 intrinsics - emmintrin with optimisations off and type conve...
authorUros Bizjak <uros@gcc.gnu.org>
Thu, 13 Dec 2007 18:19:38 +0000 (19:19 +0100)
committerUros Bizjak <uros@gcc.gnu.org>
Thu, 13 Dec 2007 18:19:38 +0000 (19:19 +0100)
        PR target/34435
        * config/i386/emmintrin.h (_mm_shuffle_pd, _mm_extract_epi16,
        _mm_insert_epi16, _mm_shufflehi_epi16, _mm_shufflelo_epi16,
        _mm_shuffle_epi32): Cast non-constant input values to either __m64,
        __m128, __m128i or __m128d in a macro version of the intrinsic.
        Cast constant input values to int.
        * config/i386/ammintrin.h (_mm_extracti_si64, _mm_inserti_si64): Ditto.
        * config/i386/bmmintrin.h (_mm_roti_epi8, _mm_roti_epi16,
        _mm_roti_epi32, _mm_roti_epi64): Ditto.
        * config/i386/smmintrin.h (_mm_blend_epi16, _mm_blend_ps, _mm_blend_pd,
        _mm_dp_ps, _mm_dp_pd, _mm_insert_ps, _mm_extract_ps, _mm_insert_epi8,
        _mm_insert_epi32, _mm_insert_epi64, _mm_extract_epi8, _mm_extract_epi32,
        _mm_extract_epi64, _mm_mpsadbw_epu8, _mm_cmpistrm, _mm_cmpistri,
        _mm_cmpestrm, _mm_cmpestri, _mm_cmpistra, _mm_cmpistrc, _mm_cmpistro,
        _mm_cmpistrs, _mm_cmpistrz, _mm_cmpestra, _mm_cmpestrc, _mm_cmpestro,
        _mm_cmpestrs, _mm_cmpestrz): Ditto.
        * config/i386/tmmintrin.h (_mm_alignr_epi8, _mm_alignr_pi8): Ditto.
        * config/i386/xmmintrin.h (_mm_shuffle_ps, _mm_extract_pi16, _m_pextrw,
        _mm_insert_pi16, _m_pinsrw, _mm_shuffle_pi16, _m_pshufw): Ditto.
        * config/i386/mmintrin-common.h (_mm_round_pd, _mm_round_sd,
        _mm_round_ps, _mm_round_ss): Ditto.

testsuite/ChangeLog:

        PR target/34435
        * g++.dg/other/pr34435.C: New testcase.

From-SVN: r130904

gcc/ChangeLog
gcc/config/i386/ammintrin.h
gcc/config/i386/bmmintrin.h
gcc/config/i386/emmintrin.h
gcc/config/i386/mmintrin-common.h
gcc/config/i386/smmintrin.h
gcc/config/i386/tmmintrin.h
gcc/config/i386/xmmintrin.h
gcc/testsuite/ChangeLog
gcc/testsuite/g++.dg/other/pr34435.C [new file with mode: 0644]

index 96a5d642d13483da178d49ea7d1e37985c61112c..1a5c210f4dfde313d1255f58c5a262cb3d0aabe7 100644 (file)
@@ -1,3 +1,28 @@
+2007-12-13  Uros Bizjak  <ubizjak@gmail.com>
+           Richard Guenther  <rguenther@suse.de>
+
+       PR target/34435
+       * config/i386/emmintrin.h (_mm_shuffle_pd, _mm_extract_epi16,
+       _mm_insert_epi16, _mm_shufflehi_epi16, _mm_shufflelo_epi16,
+       _mm_shuffle_epi32): Cast non-constant input values to either __m64,
+       __m128, __m128i or __m128d in a macro version of the intrinsic.
+       Cast constant input values to int.
+       * config/i386/ammintrin.h (_mm_extracti_si64, _mm_inserti_si64): Ditto.
+       * config/i386/bmmintrin.h (_mm_roti_epi8, _mm_roti_epi16,
+       _mm_roti_epi32, _mm_roti_epi64): Ditto.
+       * config/i386/smmintrin.h (_mm_blend_epi16, _mm_blend_ps, _mm_blend_pd,
+       _mm_dp_ps, _mm_dp_pd, _mm_insert_ps, _mm_extract_ps, _mm_insert_epi8,
+       _mm_insert_epi32, _mm_insert_epi64, _mm_extract_epi8, _mm_extract_epi32,
+       _mm_extract_epi64, _mm_mpsadbw_epu8, _mm_cmpistrm, _mm_cmpistri,
+       _mm_cmpestrm, _mm_cmpestri, _mm_cmpistra, _mm_cmpistrc, _mm_cmpistro,
+       _mm_cmpistrs, _mm_cmpistrz, _mm_cmpestra, _mm_cmpestrc, _mm_cmpestro,
+       _mm_cmpestrs, _mm_cmpestrz): Ditto.
+       * config/i386/tmmintrin.h (_mm_alignr_epi8, _mm_alignr_pi8): Ditto.
+       * config/i386/xmmintrin.h (_mm_shuffle_ps, _mm_extract_pi16, _m_pextrw,
+       _mm_insert_pi16, _m_pinsrw, _mm_shuffle_pi16, _m_pshufw): Ditto.
+       * config/i386/mmintrin-common.h (_mm_round_pd, _mm_round_sd,
+       _mm_round_ps, _mm_round_ss): Ditto.
+
 2007-12-13  Richard Guenther  <rguenther@suse.de>
 
        PR tree-optimization/34450
@@ -29,7 +54,7 @@
        (dump_new_types): Print a number of new types to replace 
        an original structure type.
        
-2007-12-13  Golovanevsky Olga  <olga@il.ibm.com>
+2007-12-13  Olga Golovanevsky  <olga@il.ibm.com>
 
        * doc/invoke.texi (Optimiza Options): Document new -fipa-struct-reorg
        option and struct-reorg-cold-struct-ratio parameter.
        * stormy16/stormy16.h (EXPAND_BUILTIN_VA_START): Remove.
        * stormy16/stormy16-protos.h (xstormy16_expand_builtin_va_start):
        Remove prototype.
-       * stormy16/stormy16.c (xstormy16_expand_builtin_va_start): Made
-       static.
+       * stormy16/stormy16.c (xstormy16_expand_builtin_va_start): Made static.
        (TARGET_EXPAND_BUILTIN_VA_START): Define.
        * s390/s390-protos.h (s390_va_start): Remove prototype.
        * s390/s390.c (s390_va_start): Made static.
index c3f73b72bc4c450f37f8b0b5381b2333c90d49b1..1351ebd915501b671819e62ba3864727beee94f0 100644 (file)
@@ -62,8 +62,9 @@ _mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
   return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
 }
 #else
-#define _mm_extracti_si64(X, I, L) \
-  ((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L))
+#define _mm_extracti_si64(X, I, L)                                     \
+  ((__m128i) __builtin_ia32_extrqi ((__v2di)(__m128i)(X),              \
+                                   (unsigned int)(I), (unsigned int)(L)))
 #endif
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
@@ -79,8 +80,10 @@ _mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned cons
   return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L);
 }
 #else
-#define _mm_inserti_si64(X, Y, I, L) \
-  ((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L))
+#define _mm_inserti_si64(X, Y, I, L)                                   \
+  ((__m128i) __builtin_ia32_insertqi ((__v2di)(__m128i)(X),            \
+                                     (__v2di)(__m128i)(Y),             \
+                                     (unsigned int)(I), (unsigned int)(L)))
 #endif
 
 #endif /* __SSE4A__ */
index cfd113dc68939b0689d581f500873800230c7fa1..48830f39fb2a31d4a338ddfe27ccfc46e90ad38e 100644 (file)
@@ -352,33 +352,37 @@ _mm_rot_epi64(__m128i __A,  __m128i __B)
 /* Rotates - Immediate form */
 #ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
-_mm_roti_epi8(__m128i __A,  int __B)
+_mm_roti_epi8(__m128i __A, const int __B)
 {
   return  (__m128i) __builtin_ia32_protbi ((__v16qi)__A, __B);
 }
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
-_mm_roti_epi16(__m128i __A, int __B)
+_mm_roti_epi16(__m128i __A, const int __B)
 {
   return  (__m128i) __builtin_ia32_protwi ((__v8hi)__A, __B);
 }
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
-_mm_roti_epi32(__m128i __A, int __B)
+_mm_roti_epi32(__m128i __A, const int __B)
 {
   return  (__m128i) __builtin_ia32_protdi ((__v4si)__A, __B);
 }
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
-_mm_roti_epi64(__m128i __A, int __B)
+_mm_roti_epi64(__m128i __A, const int __B)
 {
   return  (__m128i) __builtin_ia32_protqi ((__v2di)__A, __B);
 }
 #else
-#define _mm_roti_epi8(A, B) ((_m128i) __builtin_ia32_protbi ((__v16qi)(A), B)
-#define _mm_roti_epi16(A, B) ((_m128i) __builtin_ia32_protwi ((__v8hi)(A), B)
-#define _mm_roti_epi32(A, B) ((_m128i) __builtin_ia32_protdi ((__v4si)(A), B)
-#define _mm_roti_epi64(A, B) ((_m128i) __builtin_ia32_protqi ((__v2di)(A), B)
+#define _mm_roti_epi8(A, B) \
+  ((__m128i) __builtin_ia32_protbi ((__v16qi)(__m128i)(A), (int)(B)))
+#define _mm_roti_epi16(A, B) \
+  ((__m128i) __builtin_ia32_protwi ((__v8hi)(__m128i)(A), (int)(B)))
+#define _mm_roti_epi32(A, B) \
+  ((__m128i) __builtin_ia32_protdi ((__v4si)(__m128i)(A), (int)(B)))
+#define _mm_roti_epi64(A, B) \
+  ((__m128i) __builtin_ia32_protqi ((__v2di)(__m128i)(A), (int)(B)))
 #endif
 
 /* pshl */
index 0451ed7deffb7091247646aceb68342de68cea20..1a0affc775517a3a6a0cf685adeff00c52b9d9e7 100644 (file)
@@ -887,8 +887,9 @@ _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
   return (__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, __mask);
 }
 #else
-#define _mm_shuffle_pd(__A, __B, __C) \
-  ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
+#define _mm_shuffle_pd(__A, __B, __C)                                  \
+  ((__m128d)__builtin_ia32_shufpd ((__v2df)(__m128d)__A,               \
+                                  (__v2df)(__m128d)__B, (int)(__C)))
 #endif
 
 static __inline __m128d __attribute__((__always_inline__, __artificial__))
@@ -1320,9 +1321,10 @@ _mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
 }
 #else
 #define _mm_extract_epi16(A, N) \
-  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
-#define _mm_insert_epi16(A, D, N) \
-  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
+  ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(__m128i)(A), (int)(N)))
+#define _mm_insert_epi16(A, D, N)                              \
+  ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(__m128i)(A),        \
+                                         (int)(D), (int)(N)))
 #endif
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
@@ -1381,11 +1383,11 @@ _mm_shuffle_epi32 (__m128i __A, const int __mask)
 }
 #else
 #define _mm_shufflehi_epi16(__A, __B) \
-  ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
+  ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__m128i)__A, (int)__B))
 #define _mm_shufflelo_epi16(__A, __B) \
-  ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
+  ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__m128i)__A, (int)__B))
 #define _mm_shuffle_epi32(__A, __B) \
-  ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))
+  ((__m128i)__builtin_ia32_pshufd ((__v4si)(__m128i)__A, (int)__B))
 #endif
 
 static __inline void __attribute__((__always_inline__, __artificial__))
index 4d0f7513459d83a43ad7eb8a97d8feffd6f860f7..3be83335f296eb1ad73db600241a4af0e21281e6 100644 (file)
@@ -108,10 +108,11 @@ _mm_round_sd(__m128d __D, __m128d __V, const int __M)
 }
 #else
 #define _mm_round_pd(V, M) \
-  ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))
+  ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M)))
 
-#define _mm_round_sd(D, V, M) \
-  ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
+#define _mm_round_sd(D, V, M)                                          \
+  ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D),             \
+                                    (__v2df)(__m128d)(V), (int)(M)))
 #endif
 
 /* Packed/scalar single precision floating point rounding.  */
@@ -132,10 +133,11 @@ _mm_round_ss (__m128 __D, __m128 __V, const int __M)
 }
 #else
 #define _mm_round_ps(V, M) \
-  ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))
+  ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M)))
 
-#define _mm_round_ss(D, V, M) \
-  ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
+#define _mm_round_ss(D, V, M)                                          \
+  ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D),               \
+                                   (__v4sf)(__m128)(V), (int)(M)))
 #endif
 
 /* Macros for ceil/floor intrinsics.  */
index 693ebf4d3aa41b0491ebb22f599bee1a356e5252..3989773e5739019f69605c54810aa90f45ab6e64 100644 (file)
@@ -53,8 +53,9 @@ _mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
                                              __M);
 }
 #else
-#define _mm_blend_epi16(X, Y, M) \
-  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
+#define _mm_blend_epi16(X, Y, M)                                       \
+  ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X),          \
+                                       (__v8hi)(__m128i)(Y), (int)(M)))
 #endif
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
@@ -77,8 +78,9 @@ _mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
                                          __M);
 }
 #else
-#define _mm_blend_ps(X, Y, M) \
-  ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
+#define _mm_blend_ps(X, Y, M)                                          \
+  ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X),               \
+                                   (__v4sf)(__m128)(Y), (int)(M)))
 #endif
 
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
@@ -101,8 +103,9 @@ _mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
                                           __M);
 }
 #else
-#define _mm_blend_pd(X, Y, M) \
-  ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
+#define _mm_blend_pd(X, Y, M)                                          \
+  ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X),             \
+                                    (__v2df)(__m128d)(Y), (int)(M)))
 #endif
 
 static __inline __m128d __attribute__((__always_inline__, __artificial__))
@@ -133,11 +136,13 @@ _mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
                                        __M);
 }
 #else
-#define _mm_dp_ps(X, Y, M) \
-  ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))
+#define _mm_dp_ps(X, Y, M)                                             \
+  ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X),                  \
+                                (__v4sf)(__m128)(Y), (int)(M)))
 
-#define _mm_dp_pd(X, Y, M) \
-  ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
+#define _mm_dp_pd(X, Y, M)                                             \
+  ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X),                        \
+                                 (__v2df)(__m128d)(Y), (int)(M)))
 #endif
 
 /* Packed integer 64-bit comparison, zeroing or filling with ones
@@ -228,8 +233,9 @@ _mm_insert_ps (__m128 __D, __m128 __S, const int __N)
                                              __N);
 }
 #else
-#define _mm_insert_ps(D, S, N) \
-  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
+#define _mm_insert_ps(D, S, N)                                         \
+  ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D),           \
+                                       (__v4sf)(__m128)(S), (int)(N)))
 #endif
 
 /* Helper macro to create the N value for _mm_insert_ps.  */
@@ -247,14 +253,13 @@ _mm_extract_ps (__m128 __X, const int __N)
   return __tmp.i;
 }
 #else
-#define _mm_extract_ps(X, N) \
-  (__extension__                                               \
-   ({                                                          \
-      union { int i; float f; } __tmp;                         \
-      __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N));        \
-      __tmp.i;                                                 \
-    })                                                         \
-   )
+#define _mm_extract_ps(X, N)                                           \
+  (__extension__                                                       \
+   ({                                                                  \
+     union { int i; float f; } __tmp;                                  \
+     __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), (int)(N)); \
+     __tmp.i;                                                          \
+   }))
 #endif
 
 /* Extract binary representation of single precision float into
@@ -296,15 +301,18 @@ _mm_insert_epi64 (__m128i __D, long long __S, const int __N)
 }
 #endif
 #else
-#define _mm_insert_epi8(D, S, N) \
-  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))
+#define _mm_insert_epi8(D, S, N)                                       \
+  ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D),      \
+                                          (int)(S), (int)(N)))
 
-#define _mm_insert_epi32(D, S, N) \
-  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))
+#define _mm_insert_epi32(D, S, N)                              \
+  ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D),        \
+                                         (int)(S), (int)(N)))
 
 #ifdef __x86_64__
-#define _mm_insert_epi64(D, S, N) \
-  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
+#define _mm_insert_epi64(D, S, N)                                      \
+  ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D),                \
+                                         (long long)(S), (int)(N)))
 #endif
 #endif
 
@@ -333,13 +341,13 @@ _mm_extract_epi64 (__m128i __X, const int __N)
 #endif
 #else
 #define _mm_extract_epi8(X, N) \
-  __builtin_ia32_vec_ext_v16qi ((__v16qi) X, (N))
+  __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N))
 #define _mm_extract_epi32(X, N) \
-  __builtin_ia32_vec_ext_v4si ((__v4si) X, (N))
+  __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N))
 
 #ifdef __x86_64__
 #define _mm_extract_epi64(X, N) \
-  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
+  ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N)))
 #endif
 #endif
 
@@ -447,8 +455,9 @@ _mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
                                              (__v16qi)__Y, __M);
 }
 #else
-#define _mm_mpsadbw_epu8(X, Y, M) \
-  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
+#define _mm_mpsadbw_epu8(X, Y, M)                                      \
+  ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X),         \
+                                       (__v16qi)(__m128i)(Y), (int)(M)))
 #endif
 
 /* Load double quadword using non-temporal aligned hint.  */
@@ -521,17 +530,21 @@ _mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
                                      __M);
 }
 #else
-#define _mm_cmpistrm(X, Y, M) \
-  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M)))
-#define _mm_cmpistri(X, Y, M) \
-  __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M))
-
-#define _mm_cmpestrm(X, LX, Y, LY, M) \
-  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \
-                                         (__v16qi)(Y), (int)(LY), (M)))
-#define _mm_cmpestri(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \
-                              (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpistrm(X, Y, M)                                          \
+  ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X),       \
+                                         (__v16qi)(__m128i)(Y), (int)(M)))
+#define _mm_cmpistri(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X),           \
+                                     (__v16qi)(__m128i)(Y), (int)(M)))
+
+#define _mm_cmpestrm(X, LX, Y, LY, M)                                  \
+  ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X),       \
+                                         (int)(LX), (__v16qi)(__m128i)(Y), \
+                                         (int)(LY), (int)(M)))
+#define _mm_cmpestri(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX),        \
+                                     (__v16qi)(__m128i)(Y), (int)(LY), \
+                                     (int)(M)))
 #endif
 
 /* Intrinsics for text/string processing and reading values of
@@ -618,32 +631,42 @@ _mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
                                       __M);
 }
 #else
-#define _mm_cmpistra(X, Y, M) \
-  __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M))
-#define _mm_cmpistrc(X, Y, M) \
-  __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M))
-#define _mm_cmpistro(X, Y, M) \
-  __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M))
-#define _mm_cmpistrs(X, Y, M) \
-  __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M))
-#define _mm_cmpistrz(X, Y, M) \
-  __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M))
-
-#define _mm_cmpestra(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \
-                               (__v16qi)(Y), (int)(LY), (M))
-#define _mm_cmpestrc(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \
-                               (__v16qi)(Y), (int)(LY), (M))
-#define _mm_cmpestro(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \
-                               (__v16qi)(Y), (int)(LY), (M))
-#define _mm_cmpestrs(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \
-                               (__v16qi)(Y), (int)(LY), (M))
-#define _mm_cmpestrz(X, LX, Y, LY, M) \
-  __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \
-                               (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpistra(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X),          \
+                                      (__v16qi)(__m128i)(Y), (int)(M)))
+#define _mm_cmpistrc(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X),          \
+                                      (__v16qi)(__m128i)(Y), (int)(M)))
+#define _mm_cmpistro(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X),          \
+                                      (__v16qi)(__m128i)(Y), (int)(M)))
+#define _mm_cmpistrs(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X),          \
+                                      (__v16qi)(__m128i)(Y), (int)(M)))
+#define _mm_cmpistrz(X, Y, M)                                          \
+  ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X),          \
+                                      (__v16qi)(__m128i)(Y), (int)(M)))
+
+#define _mm_cmpestra(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \
+                                      (__v16qi)(__m128i)(Y), (int)(LY), \
+                                      (int)(M)))
+#define _mm_cmpestrc(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \
+                                      (__v16qi)(__m128i)(Y), (int)(LY), \
+                                      (int)(M)))
+#define _mm_cmpestro(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \
+                                      (__v16qi)(__m128i)(Y), (int)(LY), \
+                                      (int)(M)))
+#define _mm_cmpestrs(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \
+                                      (__v16qi)(__m128i)(Y), (int)(LY), \
+                                      (int)(M)))
+#define _mm_cmpestrz(X, LX, Y, LY, M)                                  \
+  ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \
+                                      (__v16qi)(__m128i)(Y), (int)(LY), \
+                                      (int)(M)))
 #endif
 
 /* Packed integer 64-bit comparison, zeroing or filling with ones
index 900bfbfec08d79e5c3f3809229dab399fb914c1a..6b389133079e2608169d7df0c096c0bb1afb5609 100644 (file)
@@ -185,18 +185,25 @@ _mm_sign_pi32 (__m64 __X, __m64 __Y)
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N)
 {
-  return (__m128i)__builtin_ia32_palignr128 ((__v2di)__X, (__v2di)__Y, __N * 8);}
+  return (__m128i) __builtin_ia32_palignr128 ((__v2di)__X,
+                                             (__v2di)__Y, __N * 8);
+}
 
 static __inline __m64 __attribute__((__always_inline__, __artificial__))
 _mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N)
 {
-  return (__m64)__builtin_ia32_palignr ((long long)__X, (long long)__Y, __N * 8);
+  return (__m64) __builtin_ia32_palignr ((long long)__X,
+                                        (long long)__Y, __N * 8);
 }
 #else
-#define _mm_alignr_epi8(__X, __Y, __N) \
-  ((__m128i)__builtin_ia32_palignr128 ((__v2di) __X, (__v2di) __Y, (__N) * 8))
-#define _mm_alignr_pi8(__X, __Y, __N) \
-  ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8))
+#define _mm_alignr_epi8(X, Y, N)                                       \
+  ((__m128i) __builtin_ia32_palignr128 ((__v2di)(__m128i)(X),          \
+                                       (__v2di)(__m128i)(Y),           \
+                                       (int)(N) * 8))
+#define _mm_alignr_pi8(X, Y, N)                                                \
+  ((__m64) __builtin_ia32_palignr ((long long)(__m64)(X),              \
+                                  (long long)(__m64)(Y),               \
+                                  (int)(N) * 8))
 #endif
 
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
index c06b5acbd2316d0d7dba032c36b9accbaa25f9ea..ab3acebd3a36f043f25aab808002775879e65716 100644 (file)
@@ -723,8 +723,9 @@ _mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
   return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
 }
 #else
-#define _mm_shuffle_ps(A, B, MASK) \
- ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
+#define _mm_shuffle_ps(A, B, MASK)                                     \
+  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A),                        \
+                                  (__v4sf)(__m128)(B), (int)(MASK)))
 #endif
 
 /* Selects and interleaves the upper two SPFP values from A and B.  */
@@ -1004,8 +1005,10 @@ _m_pextrw (__m64 const __A, int const __N)
   return _mm_extract_pi16 (__A, __N);
 }
 #else
-#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
-#define _m_pextrw(A, N)                _mm_extract_pi16((A), (N))
+#define _mm_extract_pi16(A, N) \
+  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))
+#define _m_pextrw(A, N) \
+  ((int) _mm_extract_pi16((__m64)(A),(int)(N)))
 #endif
 
 /* Inserts word D into one of four words of A.  The selector N must be
@@ -1023,9 +1026,11 @@ _m_pinsrw (__m64 const __A, int const __D, int const __N)
   return _mm_insert_pi16 (__A, __D, __N);
 }
 #else
-#define _mm_insert_pi16(A, D, N) \
-  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
-#define _m_pinsrw(A, D, N)      _mm_insert_pi16((A), (D), (N))
+#define _mm_insert_pi16(A, D, N)                               \
+  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A),    \
+                                       (int)(D), (int)(N)))
+#define _m_pinsrw(A, D, N) \
+  ((__m64) _mm_insert_pi16((__m64)(A), (int)(D), (int)(N)))
 #endif
 
 /* Compute the element-wise maximum of signed 16-bit values.  */
@@ -1123,8 +1128,9 @@ _m_pshufw (__m64 __A, int const __N)
 }
 #else
 #define _mm_shuffle_pi16(A, N) \
-  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
-#define _m_pshufw(A, N)                _mm_shuffle_pi16 ((A), (N))
+  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))
+#define _m_pshufw(A, N) \
+  ((__m64) _mm_shuffle_pi16 ((__m64)(A), (int)(N)))
 #endif
 
 /* Conditionally store byte elements of A into P.  The high bit of each
index 087a0c8c36fc4cb788d0ce5e8d4280be14efb2b7..234c425cb3446c9b57e96ac5175db93bfdabca9b 100644 (file)
@@ -1,4 +1,9 @@
-2007-11-26  Olga Golovanevsky  <olga@il.ibm.com>
+2007-12-13  Uros Bizjak  <ubizjak@gmail.com>
+
+       PR target/34435
+       * g++.dg/other/pr34435.C: New testcase.
+
+2007-12-13  Olga Golovanevsky  <olga@il.ibm.com>
 
        * gcc.dg/struct: New directory with tests 
        for struct-reorg optimizaion.
diff --git a/gcc/testsuite/g++.dg/other/pr34435.C b/gcc/testsuite/g++.dg/other/pr34435.C
new file mode 100644 (file)
index 0000000..a9c6878
--- /dev/null
@@ -0,0 +1,19 @@
+/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+/* { dg-options "-msse2" } */
+
+#include <emmintrin.h>
+
+class Vec {
+    __m128i vec;
+public:
+    Vec(int mm) {
+        vec = _mm_set1_epi16(mm);
+    }
+  operator __m128i() const {
+      return vec;
+    }
+};
+
+int main() {
+  _mm_shuffle_epi32(Vec(5), _MM_SHUFFLE(3,3,3,3));
+}