AVX-512. 85/n. Add intrinsics headers.
author     Alexander Ivchenko <alexander.ivchenko@intel.com>
           Tue, 28 Oct 2014 14:11:00 +0000 (14:11 +0000)
committer  Kirill Yukhin <kyukhin@gcc.gnu.org>
           Tue, 28 Oct 2014 14:11:00 +0000 (14:11 +0000)
gcc/
* config/i386/avx512bwintrin.h: New.
* config/i386/avx512dqintrin.h: Ditto.
* config/i386/avx512vlbwintrin.h: Ditto.
* config/i386/avx512vldqintrin.h: Ditto.
* config/i386/avx512vlintrin.h: Ditto.
* config/i386/immintrin.h: Include avx512vlintrin.h, avx512bwintrin.h,
avx512dqintrin.h, avx512vlbwintrin.h, avx512vldqintrin.h.

Co-Authored-By: Andrey Turetskiy <andrey.turetskiy@intel.com>
Co-Authored-By: Anna Tikhonova <anna.tikhonova@intel.com>
Co-Authored-By: Ilya Tocar <ilya.tocar@intel.com>
Co-Authored-By: Ilya Verbin <ilya.verbin@intel.com>
Co-Authored-By: Kirill Yukhin <kirill.yukhin@intel.com>
Co-Authored-By: Maxim Kuznetsov <maxim.kuznetsov@intel.com>
Co-Authored-By: Michael Zolotukhin <michael.v.zolotukhin@intel.com>
From-SVN: r216798

gcc/ChangeLog
gcc/config.gcc
gcc/config/i386/avx512bwintrin.h [new file with mode: 0644]
gcc/config/i386/avx512dqintrin.h [new file with mode: 0644]
gcc/config/i386/avx512vlbwintrin.h [new file with mode: 0644]
gcc/config/i386/avx512vldqintrin.h [new file with mode: 0644]
gcc/config/i386/avx512vlintrin.h [new file with mode: 0644]
gcc/config/i386/immintrin.h
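
For reference, a minimal usage sketch (hypothetical, not part of this
commit): the new intrinsics are reachable only through <immintrin.h>,
and each header wraps its definitions in #pragma GCC target("avx512bw"),
so a caller either builds with -mavx512bw or carries a matching target
attribute.  The function name below is made up for illustration.

    /* Hypothetical example; not part of this patch.  Including
       avx512bwintrin.h directly trips its #error guard, so go
       through the umbrella header.  */
    #include <immintrin.h>

    /* Masked 16-bit saturating add: lanes whose mask bit is clear
       are taken from SRC.  Compile with -mavx512bw, or rely on the
       target attribute when the TU is built without it.  */
    __attribute__ ((target ("avx512bw")))
    __m512i
    masked_adds16 (__m512i src, __mmask32 k, __m512i a, __m512i b)
    {
      return _mm512_mask_adds_epi16 (src, k, a, b);
    }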

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index a7da6e766d410da73ae5eb89467886ac279983fa..64ed03b87b27cad243637db4f22bbe61fc0fc2b6 100644
@@ -1,3 +1,20 @@
+2014-10-28  Alexander Ivchenko  <alexander.ivchenko@intel.com>
+           Maxim Kuznetsov  <maxim.kuznetsov@intel.com>
+           Anna Tikhonova  <anna.tikhonova@intel.com>
+           Ilya Tocar  <ilya.tocar@intel.com>
+           Andrey Turetskiy  <andrey.turetskiy@intel.com>
+           Ilya Verbin  <ilya.verbin@intel.com>
+           Kirill Yukhin  <kirill.yukhin@intel.com>
+           Michael Zolotukhin  <michael.v.zolotukhin@intel.com>
+
+       * config/i386/avx512bwintrin.h: New.
+       * config/i386/avx512dqintrin.h: Ditto.
+       * config/i386/avx512vlbwintrin.h: Ditto.
+       * config/i386/avx512vldqintrin.h: Ditto.
+       * config/i386/avx512vlintrin.h: Ditto.
+       * config/i386/immintrin.h: Include avx512vlintrin.h, avx512bwintrin.h,
+       avx512dqintrin.h, avx512vlbwintrin.h, avx512vldqintrin.h.
+
 2014-10-28  Alexander Ivchenko  <alexander.ivchenko@intel.com>
            Maxim Kuznetsov  <maxim.kuznetsov@intel.com>
            Anna Tikhonova  <anna.tikhonova@intel.com>
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 63f0b9c835fa8759bdfa69e06f7c4a07e7b416e2..137373ff552b1f04a087738a89e75b8606c60f79 100644
@@ -364,7 +364,8 @@ i[34567]86-*-*)
                       adxintrin.h fxsrintrin.h xsaveintrin.h xsaveoptintrin.h
                       avx512cdintrin.h avx512erintrin.h avx512pfintrin.h
                       shaintrin.h clflushoptintrin.h xsavecintrin.h
-                      xsavesintrin.h"
+                      xsavesintrin.h avx512dqintrin.h avx512bwintrin.h
+                      avx512vlintrin.h avx512vlbwintrin.h avx512vldqintrin.h"
        ;;
 x86_64-*-*)
        cpu_type=i386
@@ -382,7 +383,8 @@ x86_64-*-*)
                       adxintrin.h fxsrintrin.h xsaveintrin.h xsaveoptintrin.h
                       avx512cdintrin.h avx512erintrin.h avx512pfintrin.h
                       shaintrin.h clflushoptintrin.h xsavecintrin.h
-                      xsavesintrin.h"
+                      xsavesintrin.h avx512dqintrin.h avx512bwintrin.h
+                      avx512vlintrin.h avx512vlbwintrin.h avx512vldqintrin.h"
        ;;
 ia64-*-*)
        extra_headers=ia64intrin.h
diff --git a/gcc/config/i386/avx512bwintrin.h b/gcc/config/i386/avx512bwintrin.h
new file mode 100644
index 0000000..47b3f50
--- /dev/null
+++ b/gcc/config/i386/avx512bwintrin.h
@@ -0,0 +1,2656 @@
+/* Copyright (C) 2014
+   Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512BWINTRIN_H_INCLUDED
+#define _AVX512BWINTRIN_H_INCLUDED
+
+#ifndef __AVX512BW__
+#pragma GCC push_options
+#pragma GCC target("avx512bw")
+#define __DISABLE_AVX512BW__
+#endif /* __AVX512BW__ */
+
+/* Internal data types for implementing the intrinsics.  */
+typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+
+typedef unsigned long long __mmask64;
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setzero_qi (void)
+{
+  return __extension__ (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setzero_hi (void)
+{
+  return __extension__ (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0,
+                                          0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+                                                    (__v32hi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
+{
+  __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
+                                    (__v32hi) __A,
+                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A,
+                                                   (__v64qi) __W,
+                                                   (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A,
+                                                   (__v64qi)
+                                                   _mm512_setzero_qi (),
+                                                   (__mmask64) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
+{
+  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+                                             (__mmask32) __B);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
+{
+  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+                                             (__mmask64) __B);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+                                                    (__v64qi) __W,
+                                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+                                                    (__v64qi)
+                                                    _mm512_setzero_qi (),
+                                                    (__mmask64) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
+{
+  __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
+                                    (__v64qi) __A,
+                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sad_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
+                                            (__v64qi) __B);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepi16_epi8 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+                                                 (__v32qi) _mm256_undefined_si256(),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+                                                 (__v32qi) __O, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtsepi16_epi8 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+                                                  (__v32qi)_mm256_undefined_si256(),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+                                                  (__v32qi)__O,
+                                                  __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+                                                  (__v32qi)
+                                                  _mm256_setzero_si256 (),
+                                                  __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtusepi16_epi8 (__m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+                                                   (__v32qi)_mm256_undefined_si256(),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+                                                   (__v32qi) __O,
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A)
+{
+  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+                                                   (__v32qi)
+                                                   _mm256_setzero_si256 (),
+                                                   __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcastb_epi8 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A,
+                                                      (__v64qi)_mm512_undefined_si512(),
+                                                      (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A,
+                                                      (__v64qi) __O,
+                                                      __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A,
+                                                      (__v64qi)
+                                                      _mm512_setzero_qi(),
+                                                      __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+                                                          (__v64qi) __O,
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A,
+                                                          (__v64qi)
+                                                          _mm512_setzero_qi(),
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcastw_epi16 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_mask ((__v8hi) __A,
+                                                      (__v32hi)_mm512_undefined_si512(),
+                                                      (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_mask ((__v8hi) __A,
+                                                      (__v32hi) __O,
+                                                      __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_mask ((__v8hi) __A,
+                                                      (__v32hi)
+                                                      _mm512_setzero_hi(),
+                                                      __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+                                                          (__v32hi) __O,
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
+{
+  return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A,
+                                                          (__v32hi)
+                                                          _mm512_setzero_hi(),
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                         __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mullo_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepi8_epi16 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepi8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepi8_epi16 (__mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi(),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepu8_epi16 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepu8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi(),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                                                    (__v32hi) __A,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
+                               __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                                                    (__v32hi) __A,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi(),
+                                                    (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+                              __m512i __B)
+{
+  return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
+                                                    (__v32hi) __A,
+                                                    (__v32hi) __W,
+                                                    (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I
+                                                       /* idx */ ,
+                                                       (__v32hi) __A,
+                                                       (__v32hi) __B,
+                                                       (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U,
+                               __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I
+                                                       /* idx */ ,
+                                                       (__v32hi) __A,
+                                                       (__v32hi) __B,
+                                                       (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I,
+                                __mmask32 __U, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A,
+                                                       (__v32hi) __I
+                                                       /* idx */ ,
+                                                       (__v32hi) __B,
+                                                       (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
+                                __m512i __I, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I
+                                                        /* idx */ ,
+                                                        (__v32hi) __A,
+                                                        (__v32hi) __B,
+                                                        (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_avg_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi) __W,
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi(),
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_add_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi) __W,
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sub_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi) __W,
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_avg_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi(),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_subs_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_subs_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi)
+                                                  _mm512_setzero_qi (),
+                                                  (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi) __W,
+                                                  (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi)
+                                                  _mm512_setzero_qi (),
+                                                  (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_adds_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_adds_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi)
+                                                  _mm512_setzero_qi (),
+                                                  (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi) __W,
+                                                  (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
+                                                  (__v64qi) __B,
+                                                  (__v64qi)
+                                                  _mm512_setzero_qi (),
+                                                  (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sub_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_subs_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_subs_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_add_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_adds_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_adds_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_srl_epi16 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_srl_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_srl_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packs_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi)
+                                                   _mm512_setzero_qi (),
+                                                   (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sll_epi16 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sll_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maddubs_epi16 (__m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+                                                    (__v64qi) __Y,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
+                          __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+                                                    (__v64qi) __Y,
+                                                    (__v32hi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y)
+{
+  return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+                                                    (__v64qi) __Y,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_madd_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v16si)
+                                                  _mm512_setzero_si512 (),
+                                                  (__mmask16) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v16si) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+                                                  (__v32hi) __B,
+                                                  (__v16si)
+                                                  _mm512_setzero_si512 (),
+                                                  (__mmask16) __U);
+}
+
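+/* The unpack intrinsics interleave within each 128-bit lane, as in
+   SSE2/AVX2, not across the whole 512-bit register.  */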
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_unpackhi_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi)
+                                                    _mm512_setzero_qi (),
+                                                    (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                          __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi) __W,
+                                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi)
+                                                    _mm512_setzero_qi (),
+                                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_unpackhi_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                           __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_unpacklo_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi)
+                                                    _mm512_setzero_qi (),
+                                                    (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                          __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi) __W,
+                                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__v64qi)
+                                                    _mm512_setzero_qi (),
+                                                    (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_unpacklo_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                           __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__v32hi)
+                                                    _mm512_setzero_hi (),
+                                                    (__mmask32) __U);
+}
+
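+/* Comparisons yield a mask register value with one bit per element
+   rather than an all-ones/all-zeros vector.  Illustrative use (not
+   part of this patch): count the equal bytes of two 64-byte blocks:
+
+     __mmask64 __k = _mm512_cmpeq_epi8_mask (__a, __b);
+     int __n = __builtin_popcountll ((unsigned long long) __k);  */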
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmpeq_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_pcmpeqb512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__mmask64) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmpeq_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_pcmpeqb512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmpeq_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpeqw512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmpeq_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpeqw512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmpgt_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_pcmpgtb512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    (__mmask64) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmpgt_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_pcmpgtb512_mask ((__v64qi) __A,
+                                                    (__v64qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmpgt_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpgtw512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmpgt_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpgtw512_mask ((__v32hi) __A,
+                                                    (__v32hi) __B,
+                                                    __U);
+}
+
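+/* VPMOVB2M/VPMOVW2M copy the sign bit of each element into the
+   corresponding mask bit; VPMOVM2B/VPMOVM2W broadcast each mask bit
+   into an all-ones or all-zeros element.  */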
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movepi8_mask (__m512i __A)
+{
+  return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movepi16_mask (__m512i __A)
+{
+  return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movm_epi8 (__mmask64 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movm_epi16 (__mmask32 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
+}
+
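+/* VPTESTMB/VPTESTMW set a mask bit when the bitwise AND of the
+   corresponding elements is nonzero; the testn forms set it when the
+   AND is zero.  */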
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_test_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+                                               (__v64qi) __B,
+                                               (__mmask64) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A,
+                                               (__v64qi) __B, __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_test_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+                                               (__v32hi) __B,
+                                               (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A,
+                                               (__v32hi) __B, __U);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+                                                (__v64qi) __B,
+                                                (__mmask64) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A,
+                                                (__v64qi) __B, __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+                                                (__v32hi) __B,
+                                                (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A,
+                                                (__v32hi) __B, __U);
+}
+
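+/* VPSHUFB shuffles bytes within each 128-bit lane of __A using the
+   low nibble of each selector byte in __B; a selector byte with its
+   high bit set zeroes the destination byte.  */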
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_shuffle_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                         __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __U);
+}
+
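+/* Byte and word min/max, signed (epi) and unsigned (epu), each in
+   unmasked, merge-masked (_mask) and zero-masked (_maskz) forms.  */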
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_epu8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_min_epi8 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi)
+                                                 _mm512_setzero_qi (),
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
+                     __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
+                                                 (__v64qi) __B,
+                                                 (__v64qi) __W,
+                                                 (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_max_epu16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
+                      __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __M);
+}
+
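+/* Word shifts: the sra/sll forms shift every element by the count in
+   the low 64 bits of an XMM operand, while srav/srlv/sllv take a
+   per-element count from a second ZMM operand.  */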
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sra_epi16 (__m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sra_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                      __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sra_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+{
+  return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
+                                                (__v8hi) __B,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_srav_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_srav_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_srav_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_srlv_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_srlv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_srlv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_sllv_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_sllv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
+                                                 (__v32hi) __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
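+/* VPACKSSWB/VPACKUSWB narrow word elements to bytes with signed or
+   unsigned saturation, interleaving the two sources within each
+   128-bit lane.  */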
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packs_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi) __W,
+                                                   (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packs_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi)
+                                                   _mm512_setzero_qi (),
+                                                   __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packus_epi16 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi)
+                                                   _mm512_setzero_qi (),
+                                                   (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packus_epi16 (__m512i __W, __mmask64 __M, __m512i __A,
+                         __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi) __W,
+                                                   (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packus_epi16 (__mmask64 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
+                                                   (__v32hi) __B,
+                                                   (__v64qi)
+                                                   _mm512_setzero_qi (),
+                                                   (__mmask64) __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_abs_epi8 (__m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+                                                (__v64qi) __W,
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
+                                                (__v64qi)
+                                                _mm512_setzero_qi (),
+                                                (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_abs_epi16 (__m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+                                                (__v32hi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
+                                                (__v32hi)
+                                                _mm512_setzero_hi (),
+                                                (__mmask32) __U);
+}
+
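+/* Intrinsics taking a compile-time immediate are inline functions
+   only when optimizing; without optimization the argument would not
+   be seen as a constant, so the macro forms below are used instead.  */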
+#ifdef __OPTIMIZE__
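+/* VPALIGNR concatenates pairs of 128-bit lanes and shifts right by
+   __N bytes; the underlying builtin takes the count in bits, hence
+   the __N * 8 scaling.  */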
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_alignr_epi8 (__m512i __A, __m512i __B, const int __N)
+{
+  return (__m512i) __builtin_ia32_palignr512 ((__v8di) __A,
+                                             (__v8di) __B, __N * 8);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_alignr_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+                        __m512i __B, const int __N)
+{
+  return (__m512i) __builtin_ia32_palignr512_mask ((__v8di) __A,
+                                                  (__v8di) __B,
+                                                  __N * 8,
+                                                  (__v8di) __W,
+                                                  (__mmask64) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_alignr_epi8 (__mmask64 __U, __m512i __A, __m512i __B,
+                         const int __N)
+{
+  return (__m512i) __builtin_ia32_palignr512_mask ((__v8di) __A,
+                                                  (__v8di) __B,
+                                                  __N * 8,
+                                                  (__v8di)
+                                                  _mm512_setzero_si512 (),
+                                                  (__mmask64) __U);
+}
+
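+/* VDBPSADBW: sums of absolute differences of unsigned bytes, with
+   __imm selecting 32-bit blocks of __B within each 128-bit lane.  */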
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_dbsad_epu8 (__m512i __A, __m512i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A,
+                                                   (__v64qi) __B,
+                                                   __imm,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_dbsad_epu8 (__m512i __W, __mmask32 __U, __m512i __A,
+                       __m512i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A,
+                                                   (__v64qi) __B,
+                                                   __imm,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_dbsad_epu8 (__mmask32 __U, __m512i __A, __m512i __B,
+                        const int __imm)
+{
+  return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A,
+                                                   (__v64qi) __B,
+                                                   __imm,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_srli_epi16 (__m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_srli_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_srli_epi16 (__mmask32 __U, __m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_slli_epi16 (__m512i __A, const int __B)
+{
+  return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_slli_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       const int __B)
+{
+  return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_slli_epi16 (__mmask32 __U, __m512i __A, const int __B)
+{
+  return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_shufflehi_epi16 (__m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_shufflehi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                            const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_shufflehi_epi16 (__mmask32 __U, __m512i __A,
+                             const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_shufflelo_epi16 (__m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_shufflelo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                            const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_shufflelo_epi16 (__mmask32 __U, __m512i __A,
+                             const int __imm)
+{
+  return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A,
+                                                  __imm,
+                                                  (__v32hi)
+                                                  _mm512_setzero_hi (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_srai_epi16 (__m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_srai_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+                       const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_srai_epi16 (__mmask32 __U, __m512i __A, const int __imm)
+{
+  return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm,
+                                                 (__v32hi)
+                                                 _mm512_setzero_hi (),
+                                                 (__mmask32) __U);
+}
+
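+/* Blends select the element from __W where the mask bit is set and
+   from __A where it is clear.  */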
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
+{
+  return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A,
+                                                   (__v32hi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
+{
+  return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A,
+                                                   (__v64qi) __W,
+                                                   (__mmask64) __U);
+}
+
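+/* General compares: __P is one of the _MM_CMPINT_* predicates from
+   avx512fintrin.h (EQ = 0, LT = 1, LE = 2, NE = 4, NLT = 5, NLE = 6;
+   3 and 7 give constant false/true).  Illustrative use (not part of
+   this patch):
+
+     __mmask32 __k = _mm512_cmp_epi16_mask (__x, __y, _MM_CMPINT_LE);  */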
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_epi16_mask (__mmask32 __U, __m512i __X, __m512i __Y,
+                           const int __P)
+{
+  return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X,
+                                                 (__v32hi) __Y, __P,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_epi16_mask (__m512i __X, __m512i __Y, const int __P)
+{
+  return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X,
+                                                 (__v32hi) __Y, __P,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_epi8_mask (__mmask64 __U, __m512i __X, __m512i __Y,
+                          const int __P)
+{
+  return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X,
+                                                 (__v64qi) __Y, __P,
+                                                 (__mmask64) __U);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_epi8_mask (__m512i __X, __m512i __Y, const int __P)
+{
+  return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X,
+                                                 (__v64qi) __Y, __P,
+                                                 (__mmask64) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_epu16_mask (__mmask32 __U, __m512i __X, __m512i __Y,
+                           const int __P)
+{
+  return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X,
+                                                  (__v32hi) __Y, __P,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_epu16_mask (__m512i __X, __m512i __Y, const int __P)
+{
+  return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X,
+                                                  (__v32hi) __Y, __P,
+                                                  (__mmask32) -1);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cmp_epu8_mask (__mmask64 __U, __m512i __X, __m512i __Y,
+                          const int __P)
+{
+  return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X,
+                                                  (__v64qi) __Y, __P,
+                                                  (__mmask64) __U);
+}
+
+extern __inline __mmask64
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cmp_epu8_mask (__m512i __X, __m512i __Y, const int __P)
+{
+  return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X,
+                                                  (__v64qi) __Y, __P,
+                                                  (__mmask64) -1);
+}
+
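+/* VPACKSSDW/VPACKUSDW narrow doubleword elements to words with
+   saturation, again interleaving within each 128-bit lane.  */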
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packs_epi32 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi) __W,
+                                                   __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_packus_epi32 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   (__mmask32) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi)
+                                                   _mm512_setzero_hi (),
+                                                   __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A,
+                         __m512i __B)
+{
+  return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
+                                                   (__v16si) __B,
+                                                   (__v32hi) __W,
+                                                   __M);
+}
+
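+/* VPSLLDQ/VPSRLDQ shift each 128-bit lane left or right by __N
+   bytes; the count is scaled to bits for the builtin.  */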
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_bslli_epi128 (__m512i __A, const int __N)
+{
+  return (__m512i) __builtin_ia32_pslldq512 (__A, __N * 8);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_bsrli_epi128 (__m512i __A, const int __N)
+{
+  return (__m512i) __builtin_ia32_psrldq512 (__A, __N * 8);
+}
+
+#else
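+/* Macro forms of the immediate-operand intrinsics for compilation
+   without optimization.  */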
+#define _mm512_alignr_epi8(X, Y, N)                                                \
+  ((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X),                      \
+                                       (__v8di)(__m512i)(Y),                       \
+                                       (int)(N) * 8))
+
+#define _mm512_mask_alignr_epi8(W, U, X, Y, N)                                     \
+  ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X),                 \
+                                           (__v8di)(__m512i)(Y), (int)(N) * 8,    \
+                                           (__v8di)(__m512i)(W), (__mmask64)(U)))
+
+#define _mm512_maskz_alignr_epi8(U, X, Y, N)                                       \
+  ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X),                 \
+                                           (__v8di)(__m512i)(Y), (int)(N) * 8,    \
+                                           (__v8di)(__m512i)_mm512_setzero_si512 (),   \
+                                           (__mmask64)(U)))
+
+#define _mm512_dbsad_epu8(X, Y, C)                                                  \
+  ((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X),               \
+                                              (__v64qi)(__m512i) (Y), (int) (C),    \
+                                              (__v32hi)(__m512i)_mm512_setzero_si512 (),\
+                                              (__mmask32)-1))
+
+#define _mm512_mask_dbsad_epu8(W, U, X, Y, C)                                       \
+  ((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X),               \
+                                              (__v64qi)(__m512i) (Y), (int) (C),    \
+                                              (__v32hi)(__m512i)(W),                \
+                                              (__mmask32)(U)))
+
+#define _mm512_maskz_dbsad_epu8(U, X, Y, C)                                         \
+  ((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X),               \
+                                              (__v64qi)(__m512i) (Y), (int) (C),    \
+                                              (__v32hi)(__m512i)_mm512_setzero_si512 (),\
+                                              (__mmask32)(U)))
+
+#define _mm512_srli_epi16(A, B)                                         \
+  ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)_mm512_setzero_hi(), (__mmask32)-1))
+
+#define _mm512_mask_srli_epi16(W, U, A, B)                              \
+  ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U)))
+
+#define _mm512_maskz_srli_epi16(U, A, B)                                \
+  ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)_mm512_setzero_hi(), (__mmask32)(U)))
+
+#define _mm512_slli_epi16(X, C)                                                   \
+  ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\
+    (__v32hi)(__m512i)_mm512_setzero_hi(),\
+    (__mmask32)-1))
+
+#define _mm512_mask_slli_epi16(W, U, X, C)                                 \
+  ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\
+    (__v32hi)(__m512i)(W),\
+    (__mmask32)(U)))
+
+#define _mm512_maskz_slli_epi16(U, X, C)                                   \
+  ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\
+    (__v32hi)(__m512i)_mm512_setzero_hi(),\
+    (__mmask32)(U)))
+
+#define _mm512_shufflehi_epi16(A, B)                                                \
+  ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)_mm512_setzero_hi(), \
+                                             (__mmask32)-1))
+
+#define _mm512_mask_shufflehi_epi16(W, U, A, B)                                     \
+  ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)(W),                 \
+                                             (__mmask32)(U)))
+
+#define _mm512_maskz_shufflehi_epi16(U, A, B)                                       \
+  ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)_mm512_setzero_hi(), \
+                                             (__mmask32)(U)))
+
+#define _mm512_shufflelo_epi16(A, B)                                                \
+  ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)_mm512_setzero_hi(), \
+                                             (__mmask32)-1))
+
+#define _mm512_mask_shufflelo_epi16(W, U, A, B)                                     \
+  ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)(W),                 \
+                                             (__mmask32)(U)))
+
+#define _mm512_maskz_shufflelo_epi16(U, A, B)                                       \
+  ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B),       \
+                                             (__v32hi)(__m512i)_mm512_setzero_hi(), \
+                                             (__mmask32)(U)))
+
+#define _mm512_srai_epi16(A, B)                                         \
+  ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)_mm512_setzero_hi(), (__mmask32)-1))
+
+#define _mm512_mask_srai_epi16(W, U, A, B)                              \
+  ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U)))
+
+#define _mm512_maskz_srai_epi16(U, A, B)                                \
+  ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A),      \
+    (int)(B), (__v32hi)_mm512_setzero_hi(), (__mmask32)(U)))
+
+#define _mm512_mask_blend_epi16(__U, __A, __W)                       \
+  ((__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) (__A),       \
+                                                   (__v32hi) (__W),  \
+                                                   (__mmask32) (__U)))
+
+#define _mm512_mask_blend_epi8(__U, __A, __W)                        \
+  ((__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) (__A),       \
+                                                   (__v64qi) (__W),  \
+                                                   (__mmask64) (__U)))
+
+#define _mm512_cmp_epi16_mask(X, Y, P)                         \
+  ((__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi)(__m512i)(X),     \
+                                           (__v32hi)(__m512i)(Y), (int)(P),\
+                                           (__mmask32)(-1)))
+
+#define _mm512_cmp_epi8_mask(X, Y, P)                          \
+  ((__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi)(__m512i)(X),     \
+                                           (__v64qi)(__m512i)(Y), (int)(P),\
+                                           (__mmask64)(-1)))
+
+#define _mm512_cmp_epu16_mask(X, Y, P)                         \
+  ((__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi)(__m512i)(X),    \
+                                           (__v32hi)(__m512i)(Y), (int)(P),\
+                                           (__mmask32)(-1)))
+
+#define _mm512_cmp_epu8_mask(X, Y, P)                          \
+  ((__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi)(__m512i)(X),    \
+                                           (__v64qi)(__m512i)(Y), (int)(P),\
+                                           (__mmask64)(-1)))
+
+#define _mm512_mask_cmp_epi16_mask(M, X, Y, P)                         \
+  ((__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi)(__m512i)(X),     \
+                                           (__v32hi)(__m512i)(Y), (int)(P),\
+                                           (__mmask32)(M)))
+
+#define _mm512_mask_cmp_epi8_mask(M, X, Y, P)                          \
+  ((__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi)(__m512i)(X),     \
+                                           (__v64qi)(__m512i)(Y), (int)(P),\
+                                           (__mmask64)(M)))
+
+#define _mm512_mask_cmp_epu16_mask(M, X, Y, P)                         \
+  ((__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi)(__m512i)(X),    \
+                                           (__v32hi)(__m512i)(Y), (int)(P),\
+                                           (__mmask32)(M)))
+
+#define _mm512_mask_cmp_epu8_mask(M, X, Y, P)                          \
+  ((__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi)(__m512i)(X),    \
+                                           (__v64qi)(__m512i)(Y), (int)(P),\
+                                           (__mmask64)(M)))
+
+#define _mm512_bslli_epi128(A, N)                                         \
+  ((__m512i)__builtin_ia32_pslldq512 ((__m512i)(A), (int)(N) * 8))
+
+#define _mm512_bsrli_epi128(A, N)                                         \
+  ((__m512i)__builtin_ia32_psrldq512 ((__m512i)(A), (int)(N) * 8))
+
+#endif
+
+#ifdef __DISABLE_AVX512BW__
+#undef __DISABLE_AVX512BW__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512BW__ */
+
+#endif /* _AVX512BWINTRIN_H_INCLUDED */
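
A minimal usage sketch for the byte/word predicates defined in this header
(illustrative only, not part of the patch; assumes -mavx512bw and
<immintrin.h>; the helper name is ours):

#include <immintrin.h>

/* Illustrative helper: count the byte lanes in which x and y are equal.
   The comparison yields a 64-bit predicate, one bit per byte lane.  */
static inline int
count_equal_bytes (__m512i x, __m512i y)
{
  __mmask64 m = _mm512_cmp_epi8_mask (x, y, _MM_CMPINT_EQ);
  return __builtin_popcountll ((unsigned long long) m);
}
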
diff --git a/gcc/config/i386/avx512dqintrin.h b/gcc/config/i386/avx512dqintrin.h
new file mode 100644 (file)
index 0000000..6014a1c
--- /dev/null
@@ -0,0 +1,2306 @@
+/* Copyright (C) 2014
+   Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512DQINTRIN_H_INCLUDED
+#define _AVX512DQINTRIN_H_INCLUDED
+
+#ifndef __AVX512DQ__
+#pragma GCC push_options
+#pragma GCC target("avx512dq")
+#define __DISABLE_AVX512DQ__
+#endif /* __AVX512DQ__ */
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_f64x2 (__m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                                                          (__v8df)
+                                                          _mm512_undefined_pd (),
+                                                          (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df)
+                                                          __A,
+                                                          (__v8df)
+                                                          __O, __M);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+  return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A,
+                                                          (__v8df)
+                                                          _mm512_setzero_pd (),
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_i64x2 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A,
+                                                          (__v8di)
+                                                          _mm512_undefined_si512 (),
+                                                          (__mmask8) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di)
+                                                          __A,
+                                                          (__v8di)
+                                                          __O, __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di)
+                                                          __A,
+                                                          (__v8di)
+                                                          _mm512_setzero_si512 (),
+                                                          __M);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_f32x2 (__m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                                                         (__v16sf)
+                                                         _mm512_undefined_ps (),
+                                                         (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                                                         (__v16sf)
+                                                         __O, __M);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A,
+                                                         (__v16sf)
+                                                         _mm512_setzero_ps (),
+                                                         __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_i32x2 (__m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A,
+                                                          (__v16si)
+                                                          _mm512_undefined_si512 (),
+                                                          (__mmask16) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si)
+                                                          __A,
+                                                          (__v16si)
+                                                          __O, __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si)
+                                                          __A,
+                                                          (__v16si)
+                                                          _mm512_setzero_si512 (),
+                                                          __M);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_f32x8 (__m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                                                         (__v16sf)
+                                                         _mm512_undefined_ps (),
+                                                         (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                                                         (__v16sf)__O,
+                                                         __M);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A)
+{
+  return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A,
+                                                         (__v16sf)
+                                                         _mm512_setzero_ps (),
+                                                         __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_broadcast_i32x8 (__m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A,
+                                                          (__v16si)
+                                                          _mm512_undefined_si512 (),
+                                                          (__mmask16) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si)
+                                                          __A,
+                                                          (__v16si)__O,
+                                                          __M);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A)
+{
+  return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si)
+                                                          __A,
+                                                          (__v16si)
+                                                          _mm512_setzero_si512 (),
+                                                          __M);
+}
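
To illustrate the merge- and zero-masking conventions used by the broadcast
intrinsics above (a sketch, not part of the patch; assumes -mavx512dq and
<immintrin.h>; the helper name is ours):

#include <immintrin.h>

/* Lanes with a clear mask bit come from old (merge form) or are zeroed
   (maskz form); lanes with a set bit get the replicated 128-bit source.  */
static inline void
broadcast_f64x2_demo (__m128d lane, __m512d old, __mmask8 m,
                      __m512d *merged, __m512d *zeroed)
{
  *merged = _mm512_mask_broadcast_f64x2 (old, m, lane);
  *zeroed = _mm512_maskz_broadcast_f64x2 (m, lane);
}
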
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mullo_epi64 (__m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+                                                 (__v8di) __B,
+                                                 (__v8di)
+                                                 _mm512_setzero_si512 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A,
+                        __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+                                                 (__v8di) __B,
+                                                 (__v8di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+{
+  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+                                                 (__v8di) __B,
+                                                 (__v8di)
+                                                 _mm512_setzero_si512 (),
+                                                 (__mmask8) __U);
+}
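
VPMULLQ gives a true lane-wise 64-bit low multiply; a sketch of the masked
form (not part of the patch; -mavx512dq assumed, helper name is ours):

#include <immintrin.h>

/* acc lanes whose mask bit is set are replaced by the low 64 bits of
   a*b; the remaining lanes keep their old value (merge masking).  */
static inline __m512i
mul_lo_u64_sel (__m512i acc, __mmask8 m, __m512i a, __m512i b)
{
  return _mm512_mask_mullo_epi64 (acc, m, a, b);
}
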
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_xor_pd (__m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df)
+                                                _mm512_setzero_pd (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A,
+                   __m512d __B)
+{
+  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df)
+                                                _mm512_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_xor_ps (__m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf)
+                                               _mm512_setzero_ps (),
+                                               (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf) __W,
+                                               (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf)
+                                               _mm512_setzero_ps (),
+                                               (__mmask16) __U);
+}
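
XOR on packed floating point makes sign-bit manipulation a single
instruction; a sketch (not part of the patch; helper name is ours):

#include <immintrin.h>

/* Negate eight doubles by XORing their sign bits with -0.0.  */
static inline __m512d
negate_pd (__m512d x)
{
  return _mm512_xor_pd (x, _mm512_set1_pd (-0.0));
}
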
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_or_pd (__m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+                                               (__v8df) __B,
+                                               (__v8df)
+                                               _mm512_setzero_pd (),
+                                               (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+                                               (__v8df) __B,
+                                               (__v8df) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
+                                               (__v8df) __B,
+                                               (__v8df)
+                                               _mm512_setzero_pd (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_or_ps (__m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+                                              (__v16sf) __B,
+                                              (__v16sf)
+                                              _mm512_setzero_ps (),
+                                              (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+                                              (__v16sf) __B,
+                                              (__v16sf) __W,
+                                              (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
+                                              (__v16sf) __B,
+                                              (__v16sf)
+                                              _mm512_setzero_ps (),
+                                              (__mmask16) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_and_pd (__m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df)
+                                                _mm512_setzero_pd (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A,
+                   __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
+                                                (__v8df) __B,
+                                                (__v8df)
+                                                _mm512_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_and_ps (__m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf)
+                                               _mm512_setzero_ps (),
+                                               (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf) __W,
+                                               (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
+                                               (__v16sf) __B,
+                                               (__v16sf)
+                                               _mm512_setzero_ps (),
+                                               (__mmask16) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_andnot_pd (__m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+                                                 (__v8df) __B,
+                                                 (__v8df)
+                                                 _mm512_setzero_pd (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A,
+                      __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+                                                 (__v8df) __B,
+                                                 (__v8df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B)
+{
+  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
+                                                 (__v8df) __B,
+                                                 (__v8df)
+                                                 _mm512_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_andnot_ps (__m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+                                                (__v16sf) __B,
+                                                (__v16sf)
+                                                _mm512_setzero_ps (),
+                                                (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A,
+                      __m512 __B)
+{
+  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+                                                (__v16sf) __B,
+                                                (__v16sf) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B)
+{
+  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
+                                                (__v16sf) __B,
+                                                (__v16sf)
+                                                _mm512_setzero_ps (),
+                                                (__mmask16) __U);
+}
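
The and-not forms complement the XOR sketch earlier: the usual idiom is
clearing sign bits.  A sketch (not part of the patch; helper name is ours):

#include <immintrin.h>

/* |x| for eight doubles: andnot computes (~a) & b, so passing the sign
   mask as the first operand clears exactly the sign bits of x.  */
static inline __m512d
abs_pd (__m512d x)
{
  return _mm512_andnot_pd (_mm512_set1_pd (-0.0), x);
}
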
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movepi32_mask (__m512i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movepi64_mask (__m512i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movm_epi32 (__mmask16 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_movm_epi64 (__mmask8 __A)
+{
+  return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
+}
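
movm materializes a predicate as all-ones/all-zeros lanes and movepi reads it
back from the lane sign bits, so the round trip is the identity on masks.  A
sketch (not part of the patch; helper name is ours):

#include <immintrin.h>

/* Expand a 16-bit mask to a vector and recover it; returns m unchanged.  */
static inline __mmask16
mask_roundtrip (__mmask16 m)
{
  return _mm512_movepi32_mask (_mm512_movm_epi32 (m));
}
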
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvttpd_epi64 (__m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvttpd_epu64 (__m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) -1,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di) __W,
+                                                     (__mmask8) __U,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) __U,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvttps_epi64 (__m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvttps_epu64 (__m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) -1,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di) __W,
+                                                     (__mmask8) __U,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) __U,
+                                                     _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtpd_epi64 (__m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) -1,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di) __W,
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtpd_epu64 (__m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtps_epi64 (__m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) -1,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di) __W,
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtps_epu64 (__m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepi64_ps (__m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) -1,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepu64_ps (__m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepi64_pd (__m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) -1,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df) __W,
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) __U,
+                                                   _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtepu64_pd (__m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df)
+                                                    _mm512_setzero_pd (),
+                                                    (__mmask8) -1,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df) __W,
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df)
+                                                    _mm512_setzero_pd (),
+                                                    (__mmask8) __U,
+                                                    _MM_FROUND_CUR_DIRECTION);
+}
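
With AVX512DQ the 64-bit integer <-> double conversions above become single
instructions rather than scalar loops.  A sketch combining them (not part of
the patch; helper name is ours; _mm512_mul_pd and _mm512_set1_pd come from
AVX-512F):

#include <immintrin.h>

/* Halve eight uint64 lanes through the FP domain, truncating back.  */
static inline __m512i
halve_u64 (__m512i v)
{
  __m512d d = _mm512_cvtepu64_pd (v);
  return _mm512_cvttpd_epu64 (_mm512_mul_pd (d, _mm512_set1_pd (0.5)));
}
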
+
+#ifdef __OPTIMIZE__
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_range_pd (__m512d __A, __m512d __B, int __C)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df)
+                                                  _mm512_setzero_pd (),
+                                                  (__mmask8) -1,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_range_pd (__m512d __W, __mmask8 __U,
+                     __m512d __A, __m512d __B, int __C)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df) __W,
+                                                  (__mmask8) __U,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_range_pd (__mmask8 __U, __m512d __A, __m512d __B, int __C)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df)
+                                                  _mm512_setzero_pd (),
+                                                  (__mmask8) __U,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_range_ps (__m512 __A, __m512 __B, int __C)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf)
+                                                 _mm512_setzero_ps (),
+                                                 (__mmask16) -1,
+                                                 _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_range_ps (__m512 __W, __mmask16 __U,
+                     __m512 __A, __m512 __B, int __C)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf) __W,
+                                                 (__mmask16) __U,
+                                                 _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_range_ps (__mmask16 __U, __m512 __A, __m512 __B, int __C)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf)
+                                                 _mm512_setzero_ps (),
+                                                 (__mmask16) __U,
+                                                 _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_sd (__m128d __A, __m128d __B, int __C)
+{
+  return (__m128d) __builtin_ia32_reducesd ((__v2df) __A,
+                                            (__v2df) __B, __C);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_ss (__m128 __A, __m128 __B, int __C)
+{
+  return (__m128) __builtin_ia32_reducess ((__v4sf) __A,
+                                           (__v4sf) __B, __C);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_sd (__m128d __A, __m128d __B, int __C)
+{
+  return (__m128d) __builtin_ia32_rangesd128_round ((__v2df) __A,
+                                                  (__v2df) __B, __C,
+                                                  _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_ss (__m128 __A, __m128 __B, int __C)
+{
+  return (__m128) __builtin_ia32_rangess128_round ((__v4sf) __A,
+                                                 (__v4sf) __B, __C,
+                                                 _MM_FROUND_CUR_DIRECTION);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_round_sd (__m128d __A, __m128d __B, int __C, const int __R)
+{
+  return (__m128d) __builtin_ia32_rangesd128_round ((__v2df) __A,
+                                                  (__v2df) __B, __C,
+                                                  __R);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_round_ss (__m128 __A, __m128 __B, int __C, const int __R)
+{
+  return (__m128) __builtin_ia32_rangess128_round ((__v4sf) __A,
+                                                 (__v4sf) __B, __C,
+                                                 __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_ss_mask (__m128 __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclassss ((__v4sf) __A, __imm);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_sd_mask (__m128d __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasssd ((__v2df) __A, __imm);
+}
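
fpclass tests a scalar element against a set of categories selected by the
immediate.  A sketch (not part of the patch; helper name is ours, and the
category bits, 0x01 for QNaN and 0x80 for SNaN, follow our reading of the
AVX-512DQ specification):

#include <immintrin.h>

/* Nonzero iff the low double of a is a NaN, quiet or signaling.  */
static inline int
is_nan_sd (__m128d a)
{
  return (int) _mm_fpclass_sd_mask (a, 0x01 | 0x80);
}
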
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtt_roundpd_epi64 (__m512d __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtt_roundpd_epi64 (__mmask8 __U, __m512d __A,
+                                const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtt_roundpd_epu64 (__m512d __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) -1,
+                                                     __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di) __W,
+                                                     (__mmask8) __U,
+                                                     __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtt_roundpd_epu64 (__mmask8 __U, __m512d __A,
+                                const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) __U,
+                                                     __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtt_roundps_epi64 (__m256 __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m256 __A,
+                                const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvtt_roundps_epu64 (__m256 __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) -1,
+                                                     __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvtt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di) __W,
+                                                     (__mmask8) __U,
+                                                     __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m256 __A,
+                                const int __R)
+{
+  return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) __U,
+                                                     __R);
+}
+
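+/* Rounding conversions from eight double-precision elements to eight
+   signed/unsigned 64-bit integers (VCVTPD2QQ/VCVTPD2UQQ); __R selects
+   the rounding mode.  An illustrative call, sketch only, with a
+   hypothetical source vector V:
+
+     __m512i q = _mm512_cvt_roundpd_epi64 (V, _MM_FROUND_TO_NEAREST_INT
+                                              | _MM_FROUND_NO_EXC);  */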
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundpd_epi64 (__m512d __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) -1,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A,
+                              const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di) __W,
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundpd_epi64 (__mmask8 __U, __m512d __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundpd_epu64 (__m512d __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A,
+                              const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m512d __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
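+/* Rounding conversions from eight single-precision elements in a
+   256-bit source to eight signed/unsigned 64-bit integers
+   (VCVTPS2QQ/VCVTPS2UQQ).  */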
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundps_epi64 (__m256 __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) -1,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A,
+                              const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di) __W,
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundps_epi64 (__mmask8 __U, __m256 __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+                                                   (__v8di)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundps_epu64 (__m256 __A, const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) -1,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A,
+                              const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di) __W,
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundps_epu64 (__mmask8 __U, __m256 __A,
+                               const int __R)
+{
+  return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+                                                    (__v8di)
+                                                    _mm512_setzero_si512 (),
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
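+/* Conversions from eight signed/unsigned 64-bit integers to eight
+   single-precision elements in a 256-bit result
+   (VCVTQQ2PS/VCVTUQQ2PS).  */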
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundepi64_ps (__m512i __A, const int __R)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) -1,
+                                                  __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundepi64_ps (__m256 __W, __mmask8 __U, __m512i __A,
+                              const int __R)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U,
+                                                  __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundepi64_ps (__mmask8 __U, __m512i __A,
+                               const int __R)
+{
+  return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U,
+                                                  __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundepu64_ps (__m512i __A, const int __R)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1,
+                                                   __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundepu64_ps (__m256 __W, __mmask8 __U, __m512i __A,
+                              const int __R)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundepu64_ps (__mmask8 __U, __m512i __A,
+                               const int __R)
+{
+  return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
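+/* Conversions from eight signed/unsigned 64-bit integers to eight
+   double-precision elements (VCVTQQ2PD/VCVTUQQ2PD).  */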
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundepi64_pd (__m512i __A, const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) -1,
+                                                   __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundepi64_pd (__m512d __W, __mmask8 __U, __m512i __A,
+                              const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df) __W,
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundepi64_pd (__mmask8 __U, __m512i __A,
+                               const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) __U,
+                                                   __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_cvt_roundepu64_pd (__m512i __A, const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df)
+                                                    _mm512_setzero_pd (),
+                                                    (__mmask8) -1,
+                                                    __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_cvt_roundepu64_pd (__m512d __W, __mmask8 __U, __m512i __A,
+                              const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df) __W,
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_cvt_roundepu64_pd (__mmask8 __U, __m512i __A,
+                               const int __R)
+{
+  return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+                                                    (__v8df)
+                                                    _mm512_setzero_pd (),
+                                                    (__mmask8) __U,
+                                                    __R);
+}
+
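+/* VREDUCEPD/VREDUCEPS: replace each element by its reduced argument,
+   i.e. the element minus the element rounded to the precision that
+   the immediate __B selects.  */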
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_pd (__m512d __A, int __B)
+{
+  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_pd (__m512d __W, __mmask8 __U, __m512d __A, int __B)
+{
+  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
+                                                   (__v8df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_pd (__mmask8 __U, __m512d __A, int __B)
+{
+  return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B,
+                                                   (__v8df)
+                                                   _mm512_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_reduce_ps (__m512 __A, int __B)
+{
+  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
+                                                  (__v16sf)
+                                                  _mm512_setzero_ps (),
+                                                  (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_reduce_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B)
+{
+  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
+                                                  (__v16sf) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_reduce_ps (__mmask16 __U, __m512 __A, int __B)
+{
+  return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B,
+                                                  (__v16sf)
+                                                  _mm512_setzero_ps (),
+                                                  (__mmask16) __U);
+}
+
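+/* Lane extraction (VEXTRACTF32X8, VEXTRACTF64X2 and the integer
+   forms): return the 256-bit or 128-bit portion of __A that the
+   immediate selects.  */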
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_extractf32x8_ps (__m512 __A, const int __imm)
+{
+  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
+                                                   __imm,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_extractf32x8_ps (__m256 __W, __mmask8 __U, __m512 __A,
+                            const int __imm)
+{
+  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
+                                                   __imm,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_extractf32x8_ps (__mmask8 __U, __m512 __A,
+                             const int __imm)
+{
+  return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A,
+                                                   __imm,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_extractf64x2_pd (__m512d __A, const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
+                                                        __imm,
+                                                        (__v2df)
+                                                        _mm_setzero_pd (),
+                                                        (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_extractf64x2_pd (__m128d __W, __mmask8 __U, __m512d __A,
+                            const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
+                                                        __imm,
+                                                        (__v2df) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_extractf64x2_pd (__mmask8 __U, __m512d __A,
+                             const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A,
+                                                        __imm,
+                                                        (__v2df)
+                                                        _mm_setzero_pd (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_extracti32x8_epi32 (__m512i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
+                                                    __imm,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_extracti32x8_epi32 (__m256i __W, __mmask8 __U, __m512i __A,
+                               const int __imm)
+{
+  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
+                                                    __imm,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_extracti32x8_epi32 (__mmask8 __U, __m512i __A,
+                                const int __imm)
+{
+  return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A,
+                                                    __imm,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_extracti64x2_epi64 (__m512i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
+                                                        __imm,
+                                                        (__v2di)
+                                                        _mm_setzero_di (),
+                                                        (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_extracti64x2_epi64 (__m128i __W, __mmask8 __U, __m512i __A,
+                               const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
+                                                        __imm,
+                                                        (__v2di) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_extracti64x2_epi64 (__mmask8 __U, __m512i __A,
+                                const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A,
+                                                        __imm,
+                                                        (__v2di)
+                                                        _mm_setzero_di (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
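+/* VRANGEPD/VRANGEPS: per-element range operation on __A and __B; the
+   immediate __C selects minimum, maximum, absolute minimum or
+   absolute maximum plus a sign control.  Sketch only (assuming
+   imm 0 requests the minimum, per the VRANGE encoding):
+
+     __m512d lo = _mm512_range_round_pd (A, B, 0,
+                                         _MM_FROUND_CUR_DIRECTION);  */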
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_range_round_pd (__m512d __A, __m512d __B, int __C,
+                      const int __R)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df)
+                                                  _mm512_setzero_pd (),
+                                                  (__mmask8) -1,
+                                                  __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_range_round_pd (__m512d __W, __mmask8 __U,
+                           __m512d __A, __m512d __B, int __C,
+                           const int __R)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df) __W,
+                                                  (__mmask8) __U,
+                                                  __R);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_range_round_pd (__mmask8 __U, __m512d __A, __m512d __B,
+                            int __C, const int __R)
+{
+  return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A,
+                                                  (__v8df) __B, __C,
+                                                  (__v8df)
+                                                  _mm512_setzero_pd (),
+                                                  (__mmask8) __U,
+                                                  __R);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_range_round_ps (__m512 __A, __m512 __B, int __C, const int __R)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf)
+                                                 _mm512_setzero_ps (),
+                                                 (__mmask16) -1,
+                                                 __R);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_range_round_ps (__m512 __W, __mmask16 __U,
+                           __m512 __A, __m512 __B, int __C,
+                           const int __R)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf) __W,
+                                                 (__mmask16) __U,
+                                                 __R);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_range_round_ps (__mmask16 __U, __m512 __A, __m512 __B,
+                            int __C, const int __R)
+{
+  return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,
+                                                 (__v16sf) __B, __C,
+                                                 (__v16sf)
+                                                 _mm512_setzero_ps (),
+                                                 (__mmask16) __U,
+                                                 __R);
+}
+
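+/* Lane insertion (VINSERTI32X8, VINSERTF32X8 and the 64x2 forms):
+   place __B into the 256-bit or 128-bit position of __A that the
+   immediate selects.  */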
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_inserti32x8 (__m512i __A, __m256i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
+                                                   (__v8si) __B,
+                                                   __imm,
+                                                   (__v16si)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask16) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_inserti32x8 (__m512i __W, __mmask16 __U, __m512i __A,
+                        __m256i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
+                                                   (__v8si) __B,
+                                                   __imm,
+                                                   (__v16si) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_inserti32x8 (__mmask16 __U, __m512i __A, __m256i __B,
+                         const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A,
+                                                   (__v8si) __B,
+                                                   __imm,
+                                                   (__v16si)
+                                                   _mm512_setzero_si512 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_insertf32x8 (__m512 __A, __m256 __B, const int __imm)
+{
+  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
+                                                  (__v8sf) __B,
+                                                  __imm,
+                                                  (__v16sf)
+                                                  _mm512_setzero_ps (),
+                                                  (__mmask16) -1);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_insertf32x8 (__m512 __W, __mmask16 __U, __m512 __A,
+                        __m256 __B, const int __imm)
+{
+  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
+                                                  (__v8sf) __B,
+                                                  __imm,
+                                                  (__v16sf) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m512
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_insertf32x8 (__mmask16 __U, __m512 __A, __m256 __B,
+                         const int __imm)
+{
+  return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A,
+                                                  (__v8sf) __B,
+                                                  __imm,
+                                                  (__v16sf)
+                                                  _mm512_setzero_ps (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_inserti64x2 (__m512i __A, __m128i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v8di)
+                                                       _mm512_setzero_si512 (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_inserti64x2 (__m512i __W, __mmask8 __U, __m512i __A,
+                        __m128i __B, const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v8di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m512i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_inserti64x2 (__mmask8 __U, __m512i __A, __m128i __B,
+                         const int __imm)
+{
+  return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v8di)
+                                                       _mm512_setzero_si512 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_insertf64x2 (__m512d __A, __m128d __B, const int __imm)
+{
+  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v8df)
+                                                       _mm512_setzero_pd (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_insertf64x2 (__m512d __W, __mmask8 __U, __m512d __A,
+                        __m128d __B, const int __imm)
+{
+  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v8df) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m512d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_maskz_insertf64x2 (__mmask8 __U, __m512d __A, __m128d __B,
+                         const int __imm)
+{
+  return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v8df)
+                                                       _mm512_setzero_pd (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
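+/* VFPCLASSPD/VFPCLASSPS: test each element for the IEEE categories
+   enabled in the immediate (QNaN, +/-0, +/-Inf, denormal, negative,
+   SNaN) and return the per-element results as a mask.  Sketch only
+   (assuming 0x18 enables the +Inf and -Inf categories in the FPCLASS
+   encoding):
+
+     __mmask8 inf = _mm512_fpclass_pd_mask (V, 0x18);  */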
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_fpclass_pd_mask (__mmask8 __U, __m512d __A,
+                            const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A,
+                                                     __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_fpclass_pd_mask (__m512d __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_mask_fpclass_ps_mask (__mmask16 __U, __m512 __A,
+                            const int __imm)
+{
+  return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A,
+                                                      __imm, __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+{
+  return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A,
+                                                      __imm,
+                                                      (__mmask16) -1);
+}
+
+#else
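+/* Without __OPTIMIZE__ the intrinsics that take an immediate are
+   provided as macros, so the immediate arguments stay integer
+   constant expressions.  */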
+#define _mm_range_sd(A, B, C)                                          \
+  ((__m128d) __builtin_ia32_rangesd128_round ((__v2df)(__m128d)(A),    \
+    (__v2df)(__m128d)(B), (int)(C),                                    \
+    _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_range_ss(A, B, C)                                          \
+  ((__m128) __builtin_ia32_rangess128_round ((__v4sf)(__m128)(A),      \
+    (__v4sf)(__m128)(B), (int)(C),                                     \
+    _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_range_round_sd(A, B, C, R)                                 \
+  ((__m128d) __builtin_ia32_rangesd128_round ((__v2df)(__m128d)(A),    \
+    (__v2df)(__m128d)(B), (int)(C), (R)))
+
+#define _mm_range_round_ss(A, B, C, R)                                 \
+  ((__m128) __builtin_ia32_rangess128_round ((__v4sf)(__m128)(A),      \
+    (__v4sf)(__m128)(B), (int)(C), (R)))
+
+#define _mm512_cvtt_roundpd_epi64(A, B)                    \
+    ((__m512i)__builtin_ia32_cvttpd2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, B)  \
+    ((__m512i)__builtin_ia32_cvttpd2qq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvtt_roundpd_epi64(U, A, B)    \
+    ((__m512i)__builtin_ia32_cvttpd2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvtt_roundpd_epu64(A, B)                    \
+    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, B)  \
+    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvtt_roundpd_epu64(U, A, B)    \
+    ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvtt_roundps_epi64(A, B)                    \
+    ((__m512i)__builtin_ia32_cvttps2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvtt_roundps_epi64(W, U, A, B)  \
+    ((__m512i)__builtin_ia32_cvttps2qq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvtt_roundps_epi64(U, A, B)    \
+    ((__m512i)__builtin_ia32_cvttps2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvtt_roundps_epu64(A, B)                    \
+    ((__m512i)__builtin_ia32_cvttps2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvtt_roundps_epu64(W, U, A, B)  \
+    ((__m512i)__builtin_ia32_cvttps2uqq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvtt_roundps_epu64(U, A, B)    \
+    ((__m512i)__builtin_ia32_cvttps2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvt_roundpd_epi64(A, B)             \
+    ((__m512i)__builtin_ia32_cvtpd2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvt_roundpd_epi64(W, U, A, B)   \
+    ((__m512i)__builtin_ia32_cvtpd2qq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundpd_epi64(U, A, B)     \
+    ((__m512i)__builtin_ia32_cvtpd2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvt_roundpd_epu64(A, B)             \
+    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvt_roundpd_epu64(W, U, A, B)   \
+    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundpd_epu64(U, A, B)     \
+    ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvt_roundps_epi64(A, B)             \
+    ((__m512i)__builtin_ia32_cvtps2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvt_roundps_epi64(W, U, A, B)   \
+    ((__m512i)__builtin_ia32_cvtps2qq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundps_epi64(U, A, B)     \
+    ((__m512i)__builtin_ia32_cvtps2qq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvt_roundps_epu64(A, B)             \
+    ((__m512i)__builtin_ia32_cvtps2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), -1, (B)))
+
+#define _mm512_mask_cvt_roundps_epu64(W, U, A, B)   \
+    ((__m512i)__builtin_ia32_cvtps2uqq512_mask((A), (__v8di)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundps_epu64(U, A, B)     \
+    ((__m512i)__builtin_ia32_cvtps2uqq512_mask((A), (__v8di)_mm512_setzero_si512 (), (U), (B)))
+
+#define _mm512_cvt_roundepi64_ps(A, B)             \
+    ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(A), (__v8sf)_mm256_setzero_ps(), -1, (B)))
+
+#define _mm512_mask_cvt_roundepi64_ps(W, U, A, B)   \
+    ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(A), (__v8sf)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundepi64_ps(U, A, B)     \
+    ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(A), (__v8sf)_mm256_setzero_ps(), (U), (B)))
+
+#define _mm512_cvt_roundepu64_ps(A, B)             \
+    ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(A), (__v8sf)_mm256_setzero_ps(), -1, (B)))
+
+#define _mm512_mask_cvt_roundepu64_ps(W, U, A, B)   \
+    ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(A), (__v8sf)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundepu64_ps(U, A, B)     \
+    ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(A), (__v8sf)_mm256_setzero_ps(), (U), (B)))
+
+#define _mm512_cvt_roundepi64_pd(A, B)             \
+    ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(A), (__v8df)_mm512_setzero_pd(), -1, (B)))
+
+#define _mm512_mask_cvt_roundepi64_pd(W, U, A, B)   \
+    ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(A), (__v8df)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundepi64_pd(U, A, B)     \
+    ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(A), (__v8df)_mm512_setzero_pd(), (U), (B)))
+
+#define _mm512_cvt_roundepu64_pd(A, B)             \
+    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(A), (__v8df)_mm512_setzero_pd(), -1, (B)))
+
+#define _mm512_mask_cvt_roundepu64_pd(W, U, A, B)   \
+    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(A), (__v8df)(W), (U), (B)))
+
+#define _mm512_maskz_cvt_roundepu64_pd(U, A, B)     \
+    ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(A), (__v8df)_mm512_setzero_pd(), (U), (B)))
+
+#define _mm512_reduce_pd(A, B)                                         \
+  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),    \
+    (int)(B), (__v8df)_mm512_setzero_pd(), (__mmask8)-1))
+
+#define _mm512_mask_reduce_pd(W, U, A, B)                              \
+  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),    \
+    (int)(B), (__v8df)(__m512d)(W), (__mmask8)(U)))
+
+#define _mm512_maskz_reduce_pd(U, A, B)                                        \
+  ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A),    \
+    (int)(B), (__v8df)_mm512_setzero_pd(), (__mmask8)(U)))
+
+#define _mm512_reduce_ps(A, B)                                         \
+  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),     \
+    (int)(B), (__v16sf)_mm512_setzero_ps(), (__mmask16)-1))
+
+#define _mm512_mask_reduce_ps(W, U, A, B)                              \
+  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),     \
+    (int)(B), (__v16sf)(__m512)(W), (__mmask16)(U)))
+
+#define _mm512_maskz_reduce_ps(U, A, B)                                        \
+  ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A),     \
+    (int)(B), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U)))
+
+#define _mm512_extractf32x8_ps(X, C)                                    \
+  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
+    (int) (C), (__v8sf)(__m256) _mm256_setzero_ps(), (__mmask8)-1))
+
+#define _mm512_mask_extractf32x8_ps(W, U, X, C)                         \
+  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
+    (int) (C), (__v8sf)(__m256) (W), (__mmask8) (U)))
+
+#define _mm512_maskz_extractf32x8_ps(U, X, C)                           \
+  ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X),    \
+    (int) (C), (__v8sf)(__m256) _mm256_setzero_ps(), (__mmask8) (U)))
+
+#define _mm512_extractf64x2_pd(X, C)                                    \
+  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8)-1))
+
+#define _mm512_mask_extractf64x2_pd(W, U, X, C)                         \
+  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (int) (C), (__v2df)(__m128d) (W), (__mmask8) (U)))
+
+#define _mm512_maskz_extractf64x2_pd(U, X, C)                           \
+  ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8) (U)))
+
+#define _mm512_extracti32x8_epi32(X, C)                                 \
+  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
+    (int) (C), (__v8si)(__m256i) _mm256_setzero_si256(), (__mmask8)-1))
+
+#define _mm512_mask_extracti32x8_epi32(W, U, X, C)                      \
+  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
+    (int) (C), (__v8si)(__m256i) (W), (__mmask8) (U)))
+
+#define _mm512_maskz_extracti32x8_epi32(U, X, C)                        \
+  ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X),  \
+    (int) (C), (__v8si)(__m256i) _mm256_setzero_si256(), (__mmask8) (U)))
+
+#define _mm512_extracti64x2_epi64(X, C)                                 \
+  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (int) (C), (__v2di)(__m128i) _mm_setzero_di(), (__mmask8)-1))
+
+#define _mm512_mask_extracti64x2_epi64(W, U, X, C)                      \
+  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (int) (C), (__v2di)(__m128i) (W), (__mmask8) (U)))
+
+#define _mm512_maskz_extracti64x2_epi64(U, X, C)                        \
+  ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (int) (C), (__v2di)(__m128i) _mm_setzero_di(), (__mmask8) (U)))
+
+#define _mm512_range_pd(A, B, C)                                       \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)_mm512_setzero_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_range_pd(W, U, A, B, C)                            \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)(__m512d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_range_pd(U, A, B, C)                              \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)_mm512_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_range_ps(A, B, C)                                       \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_range_ps(W, U, A, B, C)                            \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)(__m512)(W), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_range_ps(U, A, B, C)                              \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_range_round_pd(A, B, C, R)                                      \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)_mm512_setzero_pd(), (__mmask8)-1, (R)))
+
+#define _mm512_mask_range_round_pd(W, U, A, B, C, R)                           \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)(__m512d)(W), (__mmask8)(U), (R)))
+
+#define _mm512_maskz_range_round_pd(U, A, B, C, R)                             \
+  ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A),     \
+    (__v8df)(__m512d)(B), (int)(C),                                    \
+    (__v8df)_mm512_setzero_pd(), (__mmask8)(U), (R)))
+
+#define _mm512_range_round_ps(A, B, C, R)                                      \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, (R)))
+
+#define _mm512_mask_range_round_ps(W, U, A, B, C, R)                           \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)(__m512)(W), (__mmask16)(U), (R)))
+
+#define _mm512_maskz_range_round_ps(U, A, B, C, R)                             \
+  ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A),      \
+    (__v16sf)(__m512)(B), (int)(C),                                    \
+    (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), (R)))
+
+#define _mm512_insertf64x2(X, Y, C)                                     \
+  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (X),            \
+    (__mmask8)-1))
+
+#define _mm512_mask_insertf64x2(W, U, X, Y, C)                          \
+  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (W),            \
+    (__mmask8) (U)))
+
+#define _mm512_maskz_insertf64x2(U, X, Y, C)                            \
+  ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C),                                   \
+    (__v8df)(__m512d) _mm512_setzero_pd(), (__mmask8) (U)))
+
+#define _mm512_inserti64x2(X, Y, C)                                     \
+  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (X), (__mmask8)-1))
+
+#define _mm512_mask_inserti64x2(W, U, X, Y, C)                          \
+  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (W),            \
+    (__mmask8) (U)))
+
+#define _mm512_maskz_inserti64x2(U, X, Y, C)                            \
+  ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C),                                   \
+    (__v8di)(__m512i) _mm512_setzero_si512 (), (__mmask8) (U)))
+
+#define _mm512_insertf32x8(X, Y, C)                                     \
+  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
+    (__v8sf)(__m256) (Y), (int) (C),\
+    (__v16sf)(__m512)_mm512_setzero_ps(),\
+    (__mmask16)-1))
+
+#define _mm512_mask_insertf32x8(W, U, X, Y, C)                          \
+  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
+    (__v8sf)(__m256) (Y), (int) (C),\
+    (__v16sf)(__m512)(W),\
+    (__mmask16)(U)))
+
+#define _mm512_maskz_insertf32x8(U, X, Y, C)                            \
+  ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X),     \
+    (__v8sf)(__m256) (Y), (int) (C),\
+    (__v16sf)(__m512)_mm512_setzero_ps(),\
+    (__mmask16)(U)))
+
+#define _mm512_inserti32x8(X, Y, C)                                     \
+  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
+    (__v8si)(__m256i) (Y), (int) (C),\
+    (__v16si)(__m512i)_mm512_setzero_si512 (),\
+    (__mmask16)-1))
+
+#define _mm512_mask_inserti32x8(W, U, X, Y, C)                          \
+  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
+    (__v8si)(__m256i) (Y), (int) (C),\
+    (__v16si)(__m512i)(W),\
+    (__mmask16)(U)))
+
+#define _mm512_maskz_inserti32x8(U, X, Y, C)                            \
+  ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X),   \
+    (__v8si)(__m256i) (Y), (int) (C),\
+    (__v16si)(__m512i)_mm512_setzero_si512 (),\
+    (__mmask16)(U)))
+
+#define _mm_fpclass_ss_mask(X, C)                                              \
+  ((__mmask8) __builtin_ia32_fpclassss ((__v4sf) (__m128) (X), (int) (C)))
+
+#define _mm_fpclass_sd_mask(X, C)                                              \
+  ((__mmask8) __builtin_ia32_fpclasssd ((__v2df) (__m128d) (X), (int) (C)))
+
+#define _mm512_mask_fpclass_pd_mask(U, X, C)                            \
+  ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \
+                                               (int) (C), (__mmask8) (U)))
+
+#define _mm512_mask_fpclass_ps_mask(U, X, C)                           \
+  ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (X),\
+                                                (int) (C), (__mmask16) (U)))
+
+#define _mm512_fpclass_pd_mask(X, C)                                    \
+  ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \
+                                               (int) (C), (__mmask8)-1))
+
+#define _mm512_fpclass_ps_mask(X, C)                                    \
+  ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (X),\
+                                                (int) (C), (__mmask16)-1))
+
+#define _mm_reduce_sd(A, B, C)                                         \
+  ((__m128d) __builtin_ia32_reducesd ((__v2df)(__m128d)(A),            \
+    (__v2df)(__m128d)(B), (int)(C)))
+
+#define _mm_reduce_ss(A, B, C)                                         \
+  ((__m128) __builtin_ia32_reducess ((__v4sf)(__m128)(A),              \
+    (__v4sf)(__m128)(B), (int)(C)))
+
+#endif
+
+#ifdef __DISABLE_AVX512DQ__
+#undef __DISABLE_AVX512DQ__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512DQ__ */
+
+#endif /* _AVX512DQINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512vlbwintrin.h b/gcc/config/i386/avx512vlbwintrin.h
new file mode 100644 (file)
index 0000000..1a4fe2c
--- /dev/null
@@ -0,0 +1,4224 @@
+/* Copyright (C) 2014
+   Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx512vlbwintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VLBWINTRIN_H_INCLUDED
+#define _AVX512VLBWINTRIN_H_INCLUDED
+
+#if !defined(__AVX512VL__) || !defined(__AVX512BW__)
+#pragma GCC push_options
+#pragma GCC target("avx512vl,avx512bw")
+#define __DISABLE_AVX512VLBW__
+#endif /* !__AVX512VL__ || !__AVX512BW__ */
+
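+/* 128/256-bit (VL) forms of the AVX-512BW byte and word operations.
+   The mask_ variants merge with __W and the maskz_ variants zero the
+   elements whose bit in the write-mask __U is clear.  Sketch only,
+   with hypothetical arguments:
+
+     __m256i r = _mm256_mask_mov_epi8 (w, m, a);
+
+   copies byte i of a where bit i of m is set, else byte i of w.  */
+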
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A,
+                                                   (__v32qi) __W,
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A,
+                                                   (__v32qi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A,
+                                                   (__v16qi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_hi (),
+                                                   (__mmask16) __U);
+}
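+
+/* A minimal usage sketch (AVX512VL + AVX512BW assumed): merging
+   vs. zeroing byte moves.  Lanes whose mask bit is clear keep the
+   old value in the merge form and become zero in the zeroing form:
+
+     __m256i src = _mm256_set1_epi8 (1);
+     __m256i old = _mm256_set1_epi8 (2);
+     __mmask32 k = 0x0000FFFF;
+     __m256i merged = _mm256_mask_mov_epi8 (old, k, src);
+     __m256i zeroed = _mm256_maskz_mov_epi8 (k, src);  */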
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
+{
+  __builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
+                                    (__v32qi) __A,
+                                    (__mmask32) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
+{
+  __builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
+                                    (__v16qi) __A,
+                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+                                                    (__v16hi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+                                                    (__v8hi)
+                                                    _mm_setzero_hi (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_hi (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+                                                    (__v32qi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+                                                    (__v32qi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+                                                    (__v16qi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+                                                    (__v16qi)
+                                                    _mm_setzero_hi (),
+                                                    (__mmask16) __U);
+}
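+
+/* A minimal round-trip sketch (AVX512VL + AVX512BW assumed): the
+   masked store writes only the bytes whose mask bit is set, and the
+   zero-masked load clears the remaining lanes on the way back; the
+   pointer need not be aligned:
+
+     unsigned char buf[16] = { 0 };
+     __mmask16 k = 0x00FF;
+     _mm_mask_storeu_epi8 (buf, k, _mm_set1_epi8 (7));
+     __m128i v = _mm_maskz_loadu_epi8 (k, buf);  */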
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi16_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+                                                 (__v16qi)_mm_undefined_si128(),
+                                                 (__mmask16) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi16_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+                                                  (__v16qi)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi16_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+                                                  (__v16qi)_mm_undefined_si128(),
+                                                  (__mmask16) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi16_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+                                                   (__v16qi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi16_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+                                                   (__v16qi)_mm_undefined_si128(),
+                                                   (__mmask16) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
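+
+/* A sketch contrasting the three 16-to-8 bit down-conversions above
+   (AVX512VL + AVX512BW assumed), applied to words holding 300:
+
+     __m256i w = _mm256_set1_epi16 (300);
+     __m128i t = _mm256_cvtepi16_epi8 (w);     truncation: bytes hold 44
+     __m128i s = _mm256_cvtsepi16_epi8 (w);    signed saturation: 127
+     __m128i u = _mm256_cvtusepi16_epi8 (w);   unsigned saturation: 255  */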
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_mask ((__v16qi) __A,
+                                                      (__v32qi) __O,
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_mask ((__v16qi) __A,
+                                                      (__v32qi)
+                                                      _mm256_setzero_si256 (),
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+                                                          (__v32qi) __O,
+                                                          __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A,
+                                                          (__v32qi)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_mask ((__v16qi) __A,
+                                                      (__v16qi) __O,
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_mask ((__v16qi) __A,
+                                                      (__v16qi)
+                                                      _mm_setzero_si128 (),
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+                                                          (__v16qi) __O,
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A,
+                                                          (__v16qi)
+                                                          _mm_setzero_si128 (),
+                                                          __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_mask ((__v8hi) __A,
+                                                      (__v16hi) __O,
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_mask ((__v8hi) __A,
+                                                      (__v16hi)
+                                                      _mm256_setzero_si256 (),
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+                                                          (__v16hi) __O,
+                                                          __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A,
+                                                          (__v16hi)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_mask ((__v8hi) __A,
+                                                      (__v8hi) __O,
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_mask ((__v8hi) __A,
+                                                      (__v8hi)
+                                                      _mm_setzero_si128 (),
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+                                                          (__v8hi) __O,
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A,
+                                                          (__v8hi)
+                                                          _mm_setzero_si128 (),
+                                                          __M);
+}
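+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): the masked set1
+   forms broadcast a scalar from a general-purpose register into the
+   selected lanes only.  With mask 0x5555, even-indexed bytes receive
+   the value and odd-indexed bytes are zeroed:
+
+     __m128i v = _mm_maskz_set1_epi8 ((__mmask16) 0x5555, 42);  */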
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                                                    (__v16hi) __A,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
+                               __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                                                    (__v16hi) __A,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+                              __m256i __B)
+{
+  return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
+                                                    (__v16hi) __A,
+                                                    (__v16hi) __W,
+                                                    (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                                                    (__v8hi) __A,
+                                                    (__v8hi)
+                                                    _mm_setzero_hi (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                                                    (__v8hi) __A,
+                                                    (__v8hi)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+                           __m128i __B)
+{
+  return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
+                                                    (__v8hi) __A,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __M);
+}
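+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): the first operand
+   supplies the word indices and the second the data, so reversing
+   the sixteen words of a vector looks like:
+
+     __m256i v   = _mm256_setr_epi16 (0, 1, 2, 3, 4, 5, 6, 7,
+                                      8, 9, 10, 11, 12, 13, 14, 15);
+     __m256i idx = _mm256_setr_epi16 (15, 14, 13, 12, 11, 10, 9, 8,
+                                      7, 6, 5, 4, 3, 2, 1, 0);
+     __m256i rev = _mm256_permutexvar_epi16 (idx, v);  */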
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I
+                                                       /* idx */ ,
+                                                       (__v16hi) __A,
+                                                       (__v16hi) __B,
+                                                       (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U,
+                               __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I
+                                                       /* idx */ ,
+                                                       (__v16hi) __A,
+                                                       (__v16hi) __B,
+                                                       (__mmask16)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I,
+                                __mmask16 __U, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A,
+                                                       (__v16hi) __I
+                                                       /* idx */ ,
+                                                       (__v16hi) __B,
+                                                       (__mmask16)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
+                                __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I
+                                                        /* idx */ ,
+                                                        (__v16hi) __A,
+                                                        (__v16hi) __B,
+                                                        (__mmask16)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I
+                                                       /* idx */ ,
+                                                       (__v8hi) __A,
+                                                       (__v8hi) __B,
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I,
+                            __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I
+                                                       /* idx */ ,
+                                                       (__v8hi) __A,
+                                                       (__v8hi) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A,
+                                                       (__v8hi) __I
+                                                       /* idx */ ,
+                                                       (__v8hi) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I
+                                                        /* idx */ ,
+                                                        (__v8hi) __A,
+                                                        (__v8hi) __B,
+                                                        (__mmask8)
+                                                        __U);
+}
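+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): permutex2var
+   indexes the concatenation of two vectors; in the 128-bit form,
+   index bits [2:0] pick a word and bit 3 selects __A (0) or __B (1).
+   Given __m128i a and b, this interleaves their low halves:
+
+     __m128i idx = _mm_setr_epi16 (0, 8, 1, 9, 2, 10, 3, 11);
+     __m128i r = _mm_permutex2var_epi16 (a, idx, b);  */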
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+                          __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+                                                    (__v32qi) __Y,
+                                                    (__v16hi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+                                                    (__v32qi) __Y,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X,
+                       __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+                                                    (__v16qi) __Y,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+                                                    (__v16qi) __Y,
+                                                    (__v8hi)
+                                                    _mm_setzero_hi (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v8si) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v8si)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v4si) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v4si)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask8) __U);
+}
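+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): vpmaddubsw
+   multiplies unsigned bytes of __X by signed bytes of __Y and adds
+   adjacent products into signed words; the masked forms then merge
+   or zero per word lane.  Given byte vectors x and y:
+
+     __m128i acc = _mm_maskz_maddubs_epi16 ((__mmask8) 0x0F, x, y);
+
+   keeps only the low four word sums.  _mm_mask_madd_epi16 is the
+   analogous masked word-to-dword multiply-add.  */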
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi8_mask (__m128i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movepi8_mask (__m256i __A)
+{
+  return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi16_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movepi16_mask (__m256i __A)
+{
+  return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movm_epi8 (__mmask16 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movm_epi8 (__mmask32 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movm_epi16 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movm_epi16 (__mmask16 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
+}
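+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): movepi packs each
+   lane's sign bit into a mask register, and movm expands each mask
+   bit back to an all-ones or all-zeros lane:
+
+     __mmask16 signs = _mm_movepi8_mask (v);
+     __m128i bcast = _mm_movm_epi8 (signs);  */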
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_test_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+                                               (__v16qi) __B,
+                                               (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A,
+                                               (__v16qi) __B, __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_test_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+                                               (__v32qi) __B,
+                                               (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A,
+                                               (__v32qi) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_test_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+                                              (__v8hi) __B,
+                                              (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A,
+                                              (__v8hi) __B, __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_test_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+                                               (__v16hi) __B,
+                                               (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A,
+                                               (__v16hi) __B, __U);
+}
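+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): vptestmb sets mask
+   bit i when byte i of (__A & __B) is nonzero, so testing a vector
+   against itself yields a mask of its nonzero lanes:
+
+     __mmask16 nonzero = _mm_test_epi8_mask (v, v);  */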
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __M);
+}
+
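+/* A minimal sketch (AVX512VL + AVX512BW assumed) of the masked
+   min/max forms above: lanes with a clear mask bit keep __W (merge
+   forms) or become zero (maskz forms); the rest receive the
+   element-wise extremum.  Given __m256i w, a and b:
+
+     __m256i m = _mm256_mask_max_epu8 (w, (__mmask32) 0xFFFF, a, b);  */
+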
+#ifdef __OPTIMIZE__
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_alignr_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                        __m256i __B, const int __N)
+{
+  return (__m256i) __builtin_ia32_palignr256_mask ((__v4di) __A,
+                                                  (__v4di) __B,
+                                                  __N * 8,
+                                                  (__v4di) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_alignr_epi8 (__mmask32 __U, __m256i __A, __m256i __B,
+                         const int __N)
+{
+  return (__m256i) __builtin_ia32_palignr256_mask ((__v4di) __A,
+                                                  (__v4di) __B,
+                                                  __N * 8,
+                                                  (__v4di)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_alignr_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                     __m128i __B, const int __N)
+{
+  return (__m128i) __builtin_ia32_palignr128_mask ((__v2di) __A,
+                                                  (__v2di) __B,
+                                                  __N * 8,
+                                                  (__v2di) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_alignr_epi8 (__mmask16 __U, __m128i __A, __m128i __B,
+                      const int __N)
+{
+  return (__m128i) __builtin_ia32_palignr128_mask ((__v2di) __A,
+                                                  (__v2di) __B,
+                                                  __N * 8,
+                                                  (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask16) __U);
+}
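+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): __N counts bytes,
+   as in PALIGNR; the builtin receives it scaled to bits.  Shifting
+   the concatenation a:b right by four bytes with no lanes masked
+   off:
+
+     __m128i r = _mm_maskz_alignr_epi8 ((__mmask16) -1, a, b, 4);  */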
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_dbsad_epu8 (__m256i __A, __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A,
+                                                   (__v32qi) __B,
+                                                   __imm,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_dbsad_epu8 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A,
+                                                   (__v32qi) __B,
+                                                   __imm,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_dbsad_epu8 (__mmask16 __U, __m256i __A, __m256i __B,
+                        const int __imm)
+{
+  return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A,
+                                                   (__v32qi) __B,
+                                                   __imm,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_dbsad_epu8 (__m128i __A, __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A,
+                                                   (__v16qi) __B,
+                                                   __imm,
+                                                   (__v8hi)
+                                                   _mm_setzero_hi (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_dbsad_epu8 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A,
+                                                   (__v16qi) __B,
+                                                   __imm,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_dbsad_epu8 (__mmask8 __U, __m128i __A, __m128i __B,
+                     const int __imm)
+{
+  return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A,
+                                                   (__v16qi) __B,
+                                                   __imm,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
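+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): vdbpsadbw first
+   shuffles the dwords of __B within each 128-bit lane according to
+   the immediate (0 keeps them in place), then accumulates sums of
+   absolute byte differences over sliding windows:
+
+     __m128i sad = _mm_dbsad_epu8 (a, b, 0);  */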
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+  return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
+{
+  return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A,
+                                                   (__v16qi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
+{
+  return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
+{
+  return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A,
+                                                   (__v32qi) __W,
+                                                   (__mmask32) __U);
+}
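+
+/* A minimal sketch (AVX512VL + AVX512BW assumed): note the blend
+   operand order; mask bits that are set select from the second
+   vector operand __W, clear bits select from __A:
+
+     __m128i r = _mm_mask_blend_epi16 (k, a, w);  */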
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epi16_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, __P,
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epi16_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, __P,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epi16_mask (__mmask16 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, __P,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epi16_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, __P,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epi8_mask (__mmask16 __U, __m128i __X, __m128i __Y,
+                       const int __P)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, __P,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epi8_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, __P,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epi8_mask (__mmask32 __U, __m256i __X, __m256i __Y,
+                          const int __P)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, __P,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epi8_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, __P,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epu16_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epu16_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epu16_mask (__mmask16 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, __P,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epu16_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, __P,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epu8_mask (__mmask16 __U, __m128i __X, __m128i __Y,
+                       const int __P)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, __P,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epu8_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, __P,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epu8_mask (__mmask32 __U, __m256i __X, __m256i __Y,
+                          const int __P)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, __P,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epu8_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, __P,
+                                                  (__mmask32) -1);
+}
+
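+/* The comparison predicate __P uses the VPCMP/VPCMPU encoding:
+   0 = eq, 1 = lt, 2 = le, 3 = false, 4 = ne, 5 = nlt (ge),
+   6 = nle (gt), 7 = true.  The cmp (epi) forms compare signed
+   elements and the ucmp (epu) forms unsigned ones (values below
+   illustrative):
+
+     __m128i x = _mm_set1_epi16 (-1), y = _mm_setzero_si128 ();
+     __mmask8 ks = _mm_cmp_epi16_mask (x, y, 1);
+     __mmask8 ku = _mm_cmp_epu16_mask (x, y, 1);
+
+   KS is 0xff (signed -1 < 0) while KU is 0x00 (unsigned 0xffff is
+   not below 0).  */
+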
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srli_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi) __A, __imm,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srli_epi16 (__mmask16 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi) __A, __imm,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srli_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi) __A, __imm,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi) __A, __imm,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
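+/* For the immediate shifts here and below, srli shifts in zeros from
+   the left, srai replicates the sign bit, and slli shifts in zeros
+   from the right, in every 16-bit element before masking.  Sketch
+   (values illustrative):
+
+     __m128i x = _mm_set1_epi16 (-16);
+     __m128i l = _mm_maskz_srli_epi16 (0xFF, x, 2);
+     __m128i a = _mm_maskz_srai_epi16 (0xFF, x, 2);
+
+   gives 0x3ffc (logical) in each word of L versus -4 (arithmetic) in
+   each word of A.  */
+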
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shufflehi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                            const int __imm)
+{
+  return (__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi) __A,
+                                                  __imm,
+                                                  (__v16hi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shufflehi_epi16 (__mmask16 __U, __m256i __A,
+                             const int __imm)
+{
+  return (__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi) __A,
+                                                  __imm,
+                                                  (__v16hi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shufflehi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                         const int __imm)
+{
+  return (__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi) __A, __imm,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shufflehi_epi16 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi) __A, __imm,
+                                                  (__v8hi)
+                                                  _mm_setzero_hi (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shufflelo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                            const int __imm)
+{
+  return (__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi) __A,
+                                                  __imm,
+                                                  (__v16hi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shufflelo_epi16 (__mmask16 __U, __m256i __A,
+                             const int __imm)
+{
+  return (__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi) __A,
+                                                  __imm,
+                                                  (__v16hi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shufflelo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                         const int __imm)
+{
+  return (__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi) __A, __imm,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shufflelo_epi16 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi) __A, __imm,
+                                                  (__v8hi)
+                                                  _mm_setzero_hi (),
+                                                  (__mmask8) __U);
+}
+
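+/* shufflehi/shufflelo reorder only the high (respectively low) four
+   words of each 128-bit lane; the other half passes through.  Each
+   2-bit field of __imm selects a source word as in PSHUFHW/PSHUFLW;
+   0x1B selects words 3, 2, 1, 0 and so reverses them.  Sketch
+   (values illustrative):
+
+     __m128i x = _mm_setr_epi16 (0, 1, 2, 3, 4, 5, 6, 7);
+     __m128i r = _mm_maskz_shufflelo_epi16 (0xFF, x, 0x1B);
+
+   leaves R holding 3, 2, 1, 0, 4, 5, 6, 7.  */
+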
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srai_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrawi256_mask ((__v16hi) __A, __imm,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srai_epi16 (__mmask16 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrawi256_mask ((__v16hi) __A, __imm,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srai_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrawi128_mask ((__v8hi) __A, __imm,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srai_epi16 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrawi128_mask ((__v8hi) __A, __imm,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_slli_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       int __B)
+{
+  return (__m256i) __builtin_ia32_psllwi256_mask ((__v16hi) __A, __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_slli_epi16 (__mmask16 __U, __m256i __A, int __B)
+{
+  return (__m256i) __builtin_ia32_psllwi256_mask ((__v16hi) __A, __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_slli_epi16 (__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_psllwi128_mask ((__v8hi) __A, __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_psllwi128_mask ((__v8hi) __A, __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+#else
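+/* The macros below mirror the inline forms above so that the
+   immediate operands remain integer constant expressions even when
+   inline expansion is not guaranteed (the usual arrangement for
+   these headers without __OPTIMIZE__); each expansion must match the
+   corresponding builtin call above exactly.  */
+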
+#define _mm256_mask_alignr_epi8(W, U, X, Y, N)                                     \
+  ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X),                 \
+                                           (__v4di)(__m256i)(Y), (int)((N) * 8),   \
+                                           (__v4di)(__m256i)(W), (__mmask32)(U)))
+
+#define _mm256_mask_srli_epi16(W, U, A, B)                              \
+  ((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A),      \
+    (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
+
+#define _mm256_maskz_srli_epi16(U, A, B)                                \
+  ((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A),      \
+    (int)(B), (__v16hi)_mm256_setzero_si256 (), (__mmask16)(U)))
+
+#define _mm_mask_srli_epi16(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi)(__m128i)(A),       \
+    (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srli_epi16(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi)(__m128i)(A),       \
+    (int)(B), (__v8hi)_mm_setzero_si128(), (__mmask8)(U)))
+
+#define _mm256_mask_srai_epi16(W, U, A, B)                              \
+  ((__m256i) __builtin_ia32_psrawi256_mask ((__v16hi)(__m256i)(A),      \
+    (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U)))
+
+#define _mm256_maskz_srai_epi16(U, A, B)                                \
+  ((__m256i) __builtin_ia32_psrawi256_mask ((__v16hi)(__m256i)(A),      \
+    (int)(B), (__v16hi)_mm256_setzero_si256 (), (__mmask16)(U)))
+
+#define _mm_mask_srai_epi16(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psrawi128_mask ((__v8hi)(__m128i)(A),       \
+    (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srai_epi16(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psrawi128_mask ((__v8hi)(__m128i)(A),       \
+    (int)(B), (__v8hi)_mm_setzero_si128(), (__mmask8)(U)))
+
+#define _mm256_mask_shufflehi_epi16(W, U, A, B)                                     \
+  ((__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi)(__m256i)(A), (int)(B),       \
+                                             (__v16hi)(__m256i)(W),                 \
+                                             (__mmask16)(U)))
+
+#define _mm256_maskz_shufflehi_epi16(U, A, B)                                       \
+  ((__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi)(__m256i)(A), (int)(B),       \
+                                             (__v16hi)(__m256i)_mm256_setzero_si256 (), \
+                                             (__mmask16)(U)))
+
+#define _mm_mask_shufflehi_epi16(W, U, A, B)                                        \
+  ((__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi)(__m128i)(A), (int)(B),        \
+                                             (__v8hi)(__m128i)(W),                  \
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_shufflehi_epi16(U, A, B)                                          \
+  ((__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi)(__m128i)(A), (int)(B),        \
+                                             (__v8hi)(__m128i)_mm_setzero_hi(),     \
+                                             (__mmask8)(U)))
+
+#define _mm256_mask_shufflelo_epi16(W, U, A, B)                                     \
+  ((__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi)(__m256i)(A), (int)(B),       \
+                                             (__v16hi)(__m256i)(W),                 \
+                                             (__mmask16)(U)))
+
+#define _mm256_maskz_shufflelo_epi16(U, A, B)                                       \
+  ((__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi)(__m256i)(A), (int)(B),       \
+                                             (__v16hi)(__m256i)_mm256_setzero_si256 (), \
+                                             (__mmask16)(U)))
+
+#define _mm_mask_shufflelo_epi16(W, U, A, B)                                        \
+  ((__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi)(__m128i)(A), (int)(B),        \
+                                             (__v8hi)(__m128i)(W),                  \
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_shufflelo_epi16(U, A, B)                                          \
+  ((__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi)(__m128i)(A), (int)(B),        \
+                                             (__v8hi)(__m128i)_mm_setzero_hi(),     \
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_alignr_epi8(U, X, Y, N)                                       \
+  ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X),                 \
+                                           (__v4di)(__m256i)(Y), (int)((N) * 8),   \
+                                           (__v4di)(__m256i)_mm256_setzero_si256 (),   \
+                                           (__mmask32)(U)))
+
+#define _mm_mask_alignr_epi8(W, U, X, Y, N)                                        \
+  ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),                 \
+                                           (__v2di)(__m128i)(Y), (int)((N) * 8),   \
+                                           (__v2di)(__m128i)(W), (__mmask16)(U)))
+
+#define _mm_maskz_alignr_epi8(U, X, Y, N)                                          \
+  ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),                 \
+                                           (__v2di)(__m128i)(Y), (int)((N) * 8),   \
+                                           (__v2di)(__m128i)_mm_setzero_di(),      \
+                                           (__mmask16)(U)))
+
+#define _mm_mask_slli_epi16(W, U, X, C)                                          \
+  ((__m128i)__builtin_ia32_psllwi128_mask ((__v8hi)(__m128i)(X), (int)(C),\
+    (__v8hi)(__m128i)(W),\
+    (__mmask8)(U)))
+
+#define _mm_maskz_slli_epi16(U, X, C)                                    \
+  ((__m128i)__builtin_ia32_psllwi128_mask ((__v8hi)(__m128i)(X), (int)(C),\
+    (__v8hi)(__m128i)_mm_setzero_hi(),\
+    (__mmask8)(U)))
+
+#define _mm256_dbsad_epu8(X, Y, C)                                                  \
+  ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X),               \
+                                              (__v32qi)(__m256i) (Y), (int) (C),    \
+                                              (__v16hi)(__m256i)_mm256_setzero_si256(),\
+                                              (__mmask16)-1))
+
+#define _mm256_mask_slli_epi16(W, U, X, C)                                 \
+  ((__m256i)__builtin_ia32_psllwi256_mask ((__v16hi)(__m256i)(X), (int)(C),\
+    (__v16hi)(__m256i)(W),\
+    (__mmask16)(U)))
+
+#define _mm256_maskz_slli_epi16(U, X, C)                                   \
+  ((__m256i)__builtin_ia32_psllwi256_mask ((__v16hi)(__m256i)(X), (int)(C),\
+    (__v16hi)(__m256i)_mm256_setzero_si256 (),\
+    (__mmask16)(U)))
+
+#define _mm256_mask_dbsad_epu8(W, U, X, Y, C)                                       \
+  ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X),               \
+                                              (__v32qi)(__m256i) (Y), (int) (C),    \
+                                              (__v16hi)(__m256i)(W),                \
+                                              (__mmask16)(U)))
+
+#define _mm256_maskz_dbsad_epu8(U, X, Y, C)                                         \
+  ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X),               \
+                                              (__v32qi)(__m256i) (Y), (int) (C),    \
+                                              (__v16hi)(__m256i)_mm256_setzero_si256(),\
+                                              (__mmask16)(U)))
+
+#define _mm_dbsad_epu8(X, Y, C)                                                     \
+  ((__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X),               \
+                                              (__v16qi)(__m128i) (Y), (int) (C),    \
+                                              (__v8hi)(__m128i)_mm_setzero_si128(), \
+                                              (__mmask8)-1))
+
+#define _mm_mask_dbsad_epu8(W, U, X, Y, C)                                          \
+  ((__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X),               \
+                                              (__v16qi)(__m128i) (Y), (int) (C),    \
+                                              (__v8hi)(__m128i)(W),                 \
+                                              (__mmask8)(U)))
+
+#define _mm_maskz_dbsad_epu8(U, X, Y, C)                                            \
+  ((__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X),               \
+                                              (__v16qi)(__m128i) (Y), (int) (C),    \
+                                              (__v8hi)(__m128i)_mm_setzero_si128(), \
+                                              (__mmask8)(U)))
+
+#define _mm_mask_blend_epi16(__U, __A, __W)                          \
+  ((__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) (__A),        \
+                                                   (__v8hi) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm_mask_blend_epi8(__U, __A, __W)                           \
+  ((__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) (__A),       \
+                                                   (__v16qi) (__W),  \
+                                                   (__mmask16) (__U)))
+
+#define _mm256_mask_blend_epi16(__U, __A, __W)                       \
+  ((__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) (__A),       \
+                                                   (__v16hi) (__W),  \
+                                                   (__mmask16) (__U)))
+
+#define _mm256_mask_blend_epi8(__U, __A, __W)                        \
+  ((__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) (__A),       \
+                                                   (__v32qi) (__W),  \
+                                                   (__mmask32) (__U)))
+
+#define _mm_cmp_epi16_mask(X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi)(__m128i)(X),       \
+                                           (__v8hi)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(-1)))
+
+#define _mm_cmp_epi8_mask(X, Y, P)                             \
+  ((__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi)(__m128i)(X),     \
+                                           (__v16qi)(__m128i)(Y), (int)(P),\
+                                           (__mmask16)(-1)))
+
+#define _mm256_cmp_epi16_mask(X, Y, P)                         \
+  ((__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi)(__m256i)(X),     \
+                                           (__v16hi)(__m256i)(Y), (int)(P),\
+                                           (__mmask16)(-1)))
+
+#define _mm256_cmp_epi8_mask(X, Y, P)                          \
+  ((__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi)(__m256i)(X),     \
+                                           (__v32qi)(__m256i)(Y), (int)(P),\
+                                           (__mmask32)(-1)))
+
+#define _mm_cmp_epu16_mask(X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi)(__m128i)(X),      \
+                                           (__v8hi)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(-1)))
+
+#define _mm_cmp_epu8_mask(X, Y, P)                             \
+  ((__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi)(__m128i)(X),    \
+                                           (__v16qi)(__m128i)(Y), (int)(P),\
+                                           (__mmask16)(-1)))
+
+#define _mm256_cmp_epu16_mask(X, Y, P)                         \
+  ((__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi)(__m256i)(X),    \
+                                           (__v16hi)(__m256i)(Y), (int)(P),\
+                                           (__mmask16)(-1)))
+
+#define _mm256_cmp_epu8_mask(X, Y, P)                          \
+  ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X),    \
+                                           (__v32qi)(__m256i)(Y), (int)(P),\
+                                           (__mmask32)(-1)))
+
+#define _mm_mask_cmp_epi16_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi)(__m128i)(X),       \
+                                           (__v8hi)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm_mask_cmp_epi8_mask(M, X, Y, P)                             \
+  ((__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi)(__m128i)(X),     \
+                                           (__v16qi)(__m128i)(Y), (int)(P),\
+                                           (__mmask16)(M)))
+
+#define _mm256_mask_cmp_epi16_mask(M, X, Y, P)                         \
+  ((__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi)(__m256i)(X),     \
+                                           (__v16hi)(__m256i)(Y), (int)(P),\
+                                           (__mmask16)(M)))
+
+#define _mm256_mask_cmp_epi8_mask(M, X, Y, P)                          \
+  ((__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi)(__m256i)(X),     \
+                                           (__v32qi)(__m256i)(Y), (int)(P),\
+                                           (__mmask32)(M)))
+
+#define _mm_mask_cmp_epu16_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi)(__m128i)(X),      \
+                                           (__v8hi)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm_mask_cmp_epu8_mask(M, X, Y, P)                             \
+  ((__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi)(__m128i)(X),    \
+                                           (__v16qi)(__m128i)(Y), (int)(P),\
+                                           (__mmask16)(M)))
+
+#define _mm256_mask_cmp_epu16_mask(M, X, Y, P)                         \
+  ((__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi)(__m256i)(X),    \
+                                           (__v16hi)(__m256i)(Y), (int)(P),\
+                                           (__mmask16)(M)))
+
+#define _mm256_mask_cmp_epu8_mask(M, X, Y, P)                          \
+  ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X),    \
+                                           (__v32qi)(__m256i)(Y), (int)(P),\
+                                           (__mmask32)(M)))
+#endif
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epi8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, 4,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epi8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, 1,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epi8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, 5,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epi8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X,
+                                                 (__v32qi) __Y, 2,
+                                                 (__mmask32) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epi16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, 4,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epi16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, 1,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epi16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, 5,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epi16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X,
+                                                 (__v16hi) __Y, 2,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epu8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, 4,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epu8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, 1,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epu8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, 5,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epu8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X,
+                                                  (__v16qi) __Y, 2,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epu16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, 4,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epu16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, 1,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epu16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, 5,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epu16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X,
+                                                 (__v8hi) __Y, 2,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epi8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, 4,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, 1,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epi8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, 5,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epi8_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X,
+                                                 (__v16qi) __Y, 2,
+                                                 (__mmask16) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epi16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, 4,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, 1,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epi16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, 5,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epi16_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X,
+                                                (__v8hi) __Y, 2,
+                                                (__mmask8) -1);
+}
+
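+/* The cmpneq/cmplt/cmpge/cmple forms above are fixed-predicate
+   wrappers around the same compare builtins, using the VPCMP
+   encodings noted earlier (4 = ne, 1 = lt, 5 = ge, 2 = le); e.g.
+   _mm_cmplt_epi16_mask (x, y) is equivalent to
+   _mm_cmp_epi16_mask (x, y, 1).  */
+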
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+                         __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+                                                   (__v16hi) __Y,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+                                                   (__v16hi) __Y,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_hi (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi)
+                                                  _mm_setzero_hi (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X,
+                      __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+                                                   (__v8hi) __Y,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+                                                   (__v8hi) __Y,
+                                                   (__v8hi)
+                                                   _mm_setzero_hi (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_hi (),
+                                                 (__mmask8) __U);
+}
+
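+/* The multiplies above differ only in which bits of each 32-bit
+   product are kept: mullo keeps bits 15:0, mulhi bits 31:16 (signed
+   for epi16, unsigned for epu16), and mulhrs computes the SSSE3-style
+   rounded scale (A * B + 0x4000) >> 15.  Sketch (values
+   illustrative):
+
+     __m128i a = _mm_set1_epi16 (0x4000), b = _mm_set1_epi16 (4);
+     __m128i lo = _mm_maskz_mullo_epi16 (0xFF, a, b);
+     __m128i hi = _mm_maskz_mulhi_epi16 (0xFF, a, b);
+
+   Every word of LO is 0 and every word of HI is 1, since
+   0x4000 * 4 == 0x10000.  */
+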
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi8_epi16 (__mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi8_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu8_epi16 (__m256i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+                                                   (__v16hi) __W,
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu8_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+                                                   (__v8hi) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu8_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
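+/* cvtepi8_epi16 sign-extends and cvtepu8_epi16 zero-extends each of
+   the low bytes of __A to a 16-bit element before the mask is
+   applied.  Sketch (values illustrative):
+
+     __m128i a = _mm_set1_epi8 (-1);
+     __m128i s = _mm_maskz_cvtepi8_epi16 (0xFF, a);
+     __m128i u = _mm_maskz_cvtepu8_epi16 (0xFF, a);
+
+   Every word of S is -1 (0xffff) and every word of U is 0x00ff.  */
+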
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
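+/* avg_epu8/avg_epu16 compute the unsigned rounded average
+   (A + B + 1) >> 1 per element, so ties round up.  Sketch (values
+   illustrative):
+
+     __m128i a = _mm_set1_epi8 (1), b = _mm_set1_epi8 (2);
+     __m128i r = _mm_maskz_avg_epu8 (0xFFFF, a, b);
+
+   Every byte of R is 2, since (1 + 2 + 1) >> 1 == 2.  */
+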
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+                                                  (__v32qi) __B,
+                                                  (__v32qi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
+                                                  (__v32qi) __B,
+                                                  (__v32qi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask16) __U);
+}
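+
+/* Editor's illustrative sketch, not part of the original header: each
+   masked intrinsic above has a merge form, which keeps the lane of the
+   passthru operand __W when its mask bit is clear, and a zeroing form
+   (_maskz_), which clears that lane instead.  Assuming a translation
+   unit built with -mavx512bw -mavx512vl:
+
+     __m256i a = _mm256_set1_epi8 (100);
+     __m256i b = _mm256_set1_epi8 (20);
+     __m256i w = _mm256_set1_epi8 (-1);
+     __mmask32 k = 0x0000FFFFu;
+     __m256i merged = _mm256_mask_add_epi8 (w, k, a, b);
+     __m256i zeroed = _mm256_maskz_add_epi8 (k, a, b);
+
+   merged holds 120 in its low 16 byte lanes and -1 elsewhere; zeroed
+   holds 120 in the low 16 lanes and 0 elsewhere.  */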
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__v32qi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+                                                  (__v32qi) __B,
+                                                  (__v32qi) __W,
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
+                                                  (__v32qi) __B,
+                                                  (__v32qi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
+                                                  (__v16hi) __B,
+                                                  (__v16hi)
+                                                  _mm256_setzero_si256 (),
+                                                  (__mmask16) __U);
+}
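+
+/* Editor's illustrative sketch: the _subs_ forms saturate instead of
+   wrapping, so an unsigned saturating subtract clamps at zero:
+
+     __m256i a = _mm256_set1_epi8 (10);
+     __m256i b = _mm256_set1_epi8 (42);
+     __m256i d = _mm256_maskz_subs_epu8 ((__mmask32) -1, a, b);
+
+   every lane of d is 0 (10 - 42 saturates), where the plain
+   _mm256_maskz_sub_epi8 would wrap to 224 (-32 as a signed byte).  */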
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                          __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__v32qi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__v32qi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                       __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__v16qi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__v16qi)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__v16hi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+                                                    (__v8hi) __B,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+                                                    (__v8hi) __B,
+                                                    (__v8hi)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                          __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__v32qi) __W,
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__v32qi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                       __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__v16qi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__v16qi)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__v16hi) __W,
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__v16hi)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+                                                    (__v8hi) __B,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+                                                    (__v8hi) __B,
+                                                    (__v8hi)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
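+
+/* Editor's illustrative sketch: like their unmasked AVX2 counterparts,
+   the 256-bit unpacks interleave within each 128-bit lane; the mask
+   then selects result lanes.  For the 128-bit byte form:
+
+     __m128i a = _mm_setr_epi8 (0, 1, 2, 3, 4, 5, 6, 7,
+                                8, 9, 10, 11, 12, 13, 14, 15);
+     __m128i b = _mm_add_epi8 (a, _mm_set1_epi8 (16));
+     __m128i lo = _mm_maskz_unpacklo_epi8 ((__mmask16) -1, a, b);
+
+   lo is 0,16,1,17,...,7,23; a clear mask bit zeroes the matching
+   result byte rather than suppressing the interleave.  */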
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpeqb128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpeq_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpeqb128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpeq_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpeqb256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpeq_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpeqb256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqw128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpeq_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqw128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B, __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpeq_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpeqw256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpeq_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpeqw256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpgtb128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpgt_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpgtb128_mask ((__v16qi) __A,
+                                                    (__v16qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpgt_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpgtb256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpgt_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_pcmpgtb256_mask ((__v32qi) __A,
+                                                    (__v32qi) __B,
+                                                    __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtw128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpgt_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtw128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B, __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpgt_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpgtw256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpgt_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_pcmpgtw256_mask ((__v16hi) __A,
+                                                    (__v16hi) __B,
+                                                    __U);
+}
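+
+/* Editor's illustrative sketch: these compares return a mask register
+   value rather than a vector of all-ones/all-zero lanes, so the result
+   feeds directly into any masked operation.  Given __m128i a and b:
+
+     __mmask16 k = _mm_cmpgt_epi8_mask (a, b);
+     __m128i r = _mm_mask_add_epi8 (b, k, a, a);
+
+   r is 2*a in the lanes where a > b and b elsewhere; the _mask_cmp*
+   variants simply AND the incoming mask __U into the result.  */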
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_testn_epi8_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A,
+                                                (__v16qi) __B, __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+                                                (__v32qi) __B,
+                                                (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A,
+                                                (__v32qi) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_testn_epi16_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+                                               (__v8hi) __B,
+                                               (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A,
+                                               (__v8hi) __B, __U);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+                                                (__v16hi) __B,
+                                                (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A,
+                                                (__v16hi) __B, __U);
+}
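+
+/* Editor's illustrative sketch: testn sets a mask bit when the AND of
+   the corresponding lanes is zero (VPTESTNMB/VPTESTNMW), giving a
+   cheap per-lane "bit clear" test:
+
+     __m128i v = _mm_setr_epi16 (0, 1, 2, 3, 4, 5, 6, 7);
+     __mmask8 even = _mm_testn_epi16_mask (v, _mm_set1_epi16 (1));
+
+   even is 0x55, the lanes whose low bit is clear.  */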
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+                         __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi) __W,
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
+                                                 (__v32qi) __B,
+                                                 (__v32qi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                      __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask16) __U);
+}
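+
+/* Editor's illustrative sketch: as with AVX2 VPSHUFB, each result byte
+   is selected from the first operand by the low nibble of the control
+   byte, per 128-bit lane, and a control byte with its top bit set
+   yields zero:
+
+     __m128i idx = _mm_setr_epi8 (15, 14, 13, 12, 11, 10, 9, 8,
+                                  7, 6, 5, 4, 3, 2, 1, 0);
+     __m128i rev = _mm_maskz_shuffle_epi8 ((__mmask16) -1, v, idx);
+
+   reverses the bytes of a given __m128i v; clear mask bits zero the
+   corresponding result bytes as well.  */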
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+                                                   (__v16hi) __B,
+                                                   (__v32qi)
+                                                   _mm256_setzero_si256 (),
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
+                                                   (__v16hi) __B,
+                                                   (__v32qi) __W,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__v16qi) __W,
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+                                                   (__v16hi) __B,
+                                                   (__v32qi)
+                                                   _mm256_setzero_si256 (),
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
+                         __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
+                                                   (__v16hi) __B,
+                                                   (__v32qi) __W,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
+                      __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
+                                                   (__v8hi) __B,
+                                                   (__v16qi) __W,
+                                                   __M);
+}
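+
+/* Editor's illustrative sketch: the masked packs keep the AVX2 lane
+   order (A then B within each 128-bit half) and apply the byte mask to
+   the packed result:
+
+     __m256i w16 = _mm256_set1_epi16 (300);
+     __m256i p = _mm256_maskz_packs_epi16 ((__mmask32) -1, w16, w16);
+
+   every byte of p is 127, since 300 exceeds the signed 8-bit range;
+   _mm256_maskz_packus_epi16 would give 255 instead.  */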
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+                                                (__v32qi) __W,
+                                                (__mmask32) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
+                                                (__v32qi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask32) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+                                                (__v16qi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
+                                                (__v16qi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epu8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, 4,
+                                                  (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epu8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, 1,
+                                                  (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epu8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, 5,
+                                                  (__mmask32) -1);
+}
+
+extern __inline __mmask32
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epu8_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X,
+                                                  (__v32qi) __Y, 2,
+                                                  (__mmask32) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epu16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, 4,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epu16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, 1,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epu16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, 5,
+                                                  (__mmask16) -1);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epu16_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X,
+                                                  (__v16hi) __Y, 2,
+                                                  (__mmask16) -1);
+}
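+
+/* Editorial note: the immediate passed to the __builtin_ia32_ucmp*
+   builtins above is the AVX-512 comparison predicate encoding
+   (0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT, 6 = NLE), which is why
+   cmpneq uses 4, cmplt 1, cmpge 5 and cmple 2; the comparisons are
+   unsigned because the ucmp form is used.  */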
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
+{
+  __builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
+                                    (__v16hi) __A,
+                                    (__mmask16) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
+                                    (__v8hi) __A,
+                                    (__mmask8) __U);
+}
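+
+/* Editor's illustrative sketch: the masked stores write only the
+   elements whose mask bit is set and leave the remaining memory
+   untouched, which is handy for loop tails.  Given a __m256i v:
+
+     short buf[16];
+     _mm256_mask_storeu_epi16 (buf, (__mmask16) 0x1F, v);
+
+   writes only v's five lowest 16-bit elements, into buf[0..4].  */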
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+                                                  (__v16qi) __B,
+                                                  (__v16qi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
+                                                  (__v16qi) __B,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srl_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srl_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srl_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sra_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sra_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sra_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sra_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
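+
+/* Editor's illustrative sketch: as in the legacy PSRLW/PSRAW forms,
+   the __m128i operand __B holds a single shift count (in its low
+   64 bits) applied to every element:
+
+     __m128i n = _mm_cvtsi32_si128 (3);
+     __m256i s = _mm256_maskz_srl_epi16 ((__mmask16) -1, v, n);
+
+   shifts all sixteen 16-bit lanes of a given __m256i v right by 3;
+   the sra forms shift in copies of the sign bit instead of zeros.  */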
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
+                                                 (__v8hi) __B,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+                                                  (__v16qi) __B,
+                                                  (__v16qi) __W,
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
+                                                  (__v16qi) __B,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
+                                                  (__v8hi) __B,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                  __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
+                                                (__v16qi) __B,
+                                                (__v16qi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
+                                                 (__v16qi) __B,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi16_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+                                                 (__v16qi)
+                                                 _mm_undefined_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
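+
+/* Editor's illustrative sketch: VPMOVWB truncates each 16-bit element
+   to its low byte (no saturation), so the eight results land in the
+   low half of the destination:
+
+     __m128i v = _mm_set1_epi16 (0x1234);
+     __m128i t = _mm_cvtepi16_epi8 (v);
+
+   the low eight bytes of t are each 0x34.  */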
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_srav_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srav_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srav_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srav_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srav_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srav_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_srlv_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srlv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srlv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srlv_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_hi (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srlv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srlv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
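+/* Variable logical left shift (VPSLLVW); masking as above, counts
+   above 15 give 0.  */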
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_sllv_epi16 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sllv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi) __W,
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sllv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
+                                                 (__v16hi) __B,
+                                                 (__v16hi)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask16) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sllv_epi16 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_hi (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sllv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sllv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
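+/* Uniform left shift (VPSLLW): all 16-bit lanes of __A are shifted by
+   the single count held in the low 64 bits of __B.  */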
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sll_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
+                                                (__v8hi) __B,
+                                                (__v8hi)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sll_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi) __W,
+                                                (__mmask16) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sll_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
+                                                (__v8hi) __B,
+                                                (__v16hi)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask16) __U);
+}
+
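+/* Masked forms of VPACKUSDW and VPACKSSDW: narrow signed 32-bit lanes
+   to 16 bits with unsigned resp. signed saturation, interleaving the
+   two sources within each 128-bit lane.  */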
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+                         __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__v16hi) __W,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_packus_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+                      __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__v8hi) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__v16hi)
+                                                   _mm256_setzero_si256 (),
+                                                   __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__v16hi) __W,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_packs_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__v8hi) __W, __M);
+}
+
+#ifdef __DISABLE_AVX512VLBW__
+#undef __DISABLE_AVX512VLBW__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512VLBW__ */
+
+#endif /* _AVX512VLBWINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512vldqintrin.h b/gcc/config/i386/avx512vldqintrin.h
new file mode 100644 (file)
index 0000000..43a7388
--- /dev/null
@@ -0,0 +1,2034 @@
+/* Copyright (C) 2014 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VLDQINTRIN_H_INCLUDED
+#define _AVX512VLDQINTRIN_H_INCLUDED
+
+#if !defined(__AVX512VL__) || !defined(__AVX512DQ__)
+#pragma GCC push_options
+#pragma GCC target("avx512vl,avx512dq")
+#define __DISABLE_AVX512VLDQ__
+#endif /* __AVX512VLDQ__ */
+
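+/* VCVTTPD2QQ and VCVTTPD2UQQ: packed double to signed resp. unsigned
+   64-bit integer conversion, truncating toward zero.  */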
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttpd_epi64 (__m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_epi64 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttpd_epu64 (__m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_epu64 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
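+/* VCVTPD2QQ and VCVTPD2UQQ: as above, but rounding according to the
+   current MXCSR rounding mode instead of truncating.  */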
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtpd_epi64 (__m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_epi64 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_di (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtpd_epu64 (__m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A)
+{
+  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_epu64 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
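+/* VCVTTPS2QQ and VCVTTPS2UQQ: truncating float to 64-bit integer
+   conversions; four floats widen to a 256-bit result, the two low
+   floats of __A to a 128-bit one.  */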
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttps_epi64 (__m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epi64 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttps_epu64 (__m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epu64 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) __U);
+}
+
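+/* VBROADCASTF64X2 and VBROADCASTI64X2 replicate the 128-bit source
+   into both lanes of a 256-bit vector; the F32X2/I32X2 forms
+   replicate the two low 32-bit elements across the destination.  */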
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_f64x2 (__m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                                                           (__v4df)
+                                                           _mm256_undefined_pd (),
+                                                           (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_f64x2 (__m256d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                                                           (__v4df) __O,
+                                                           __M);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A,
+                                                           (__v4df)
+                                                           _mm256_setzero_pd (),
+                                                           __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_i64x2 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                                                           (__v4di)
+                                                           _mm256_undefined_si256 (),
+                                                           (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_i64x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                                                           (__v4di) __O,
+                                                           __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A,
+                                                           (__v4di)
+                                                           _mm256_setzero_si256 (),
+                                                           __M);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_f32x2 (__m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                                                          (__v8sf)
+                                                          _mm256_undefined_ps (),
+                                                          (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                                                         (__v8sf) __O,
+                                                         __M);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A,
+                                                         (__v8sf)
+                                                         _mm256_setzero_ps (),
+                                                         __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_i32x2 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                                                           (__v8si)
+                                                           _mm256_undefined_si256 (),
+                                                           (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                                                           (__v8si) __O,
+                                                           __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A,
+                                                           (__v8si)
+                                                           _mm256_setzero_si256 (),
+                                                           __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_broadcast_i32x2 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                                                           (__v4si)
+                                                           _mm_undefined_si128 (),
+                                                           (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                                                           (__v4si) __O,
+                                                           __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A,
+                                                           (__v4si)
+                                                           _mm_setzero_si128 (),
+                                                           __M);
+}
+
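+/* VPMULLQ: element-wise 64 x 64 -> low-64-bit multiply.  For example,
+   _mm256_maskz_mullo_epi64 (0x5, __A, __B) keeps the products in
+   lanes 0 and 2 and zeroes lanes 1 and 3.  */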
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mullo_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mullo_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
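+/* Masked VANDNPD/VANDNPS: bitwise ~__A & __B on packed FP values.  */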
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                      __m256d __B)
+{
+  return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+                                                 (__v4df) __B,
+                                                 (__v4df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
+                                                 (__v4df) __B,
+                                                 (__v4df)
+                                                 _mm256_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                   __m128d __B)
+{
+  return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+                                                 (__v2df) __B,
+                                                 (__v2df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
+                                                 (__v2df) __B,
+                                                 (__v2df)
+                                                 _mm_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                      __m256 __B)
+{
+  return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+                                                (__v8sf) __B,
+                                                (__v8sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
+                                                (__v8sf) __B,
+                                                (__v8sf)
+                                                _mm256_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+                                                (__v4sf) __B,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
+                                                (__v4sf) __B,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
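+/* VCVTPS2QQ and VCVTPS2UQQ: float to 64-bit integer conversions using
+   the current rounding mode.  */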
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtps_epi64 (__m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epi64 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_di (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_di (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtps_epu64 (__m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epu64 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) __U);
+}
+
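+/* VCVTQQ2PS and VCVTUQQ2PS: narrow 64-bit integers to floats; both
+   source widths yield a 128-bit result, the 128-bit form zeroing the
+   upper two lanes.  */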
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi64_ps (__m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi64_ps (__m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepu64_ps (__m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepu64_ps (__m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
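+/* VCVTQQ2PD and VCVTUQQ2PD: convert 64-bit integers to doubles of the
+   same vector width.  */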
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi64_pd (__m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi64_pd (__m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepu64_pd (__m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A)
+{
+  return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
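+/* Masked VANDPD/VANDPS: bitwise AND of packed FP values.  */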
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df)
+                                                _mm_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf)
+                                               _mm_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepu64_pd (__m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df)
+                                                _mm_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf)
+                                               _mm_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+                                               (__v4df) __B,
+                                               (__v4df) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
+                                               (__v4df) __B,
+                                               (__v4df)
+                                               _mm256_setzero_pd (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+                                               (__v2df) __B,
+                                               (__v2df) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
+                                               (__v2df) __B,
+                                               (__v2df)
+                                               _mm_setzero_pd (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+                                              (__v8sf) __B,
+                                              (__v8sf) __W,
+                                              (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
+                                              (__v8sf) __B,
+                                              (__v8sf)
+                                              _mm256_setzero_ps (),
+                                              (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+                                              (__v4sf) __B,
+                                              (__v4sf) __W,
+                                              (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
+                                              (__v4sf) __B,
+                                              (__v4sf)
+                                              _mm_setzero_ps (),
+                                              (__mmask8) __U);
+}
+
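+/* VPMOVM2D/VPMOVM2Q broadcast each mask bit across the corresponding
+   vector element (all ones when set, all zeros when clear);
+   VPMOVD2M/VPMOVQ2M perform the inverse, gathering each element's
+   sign bit into a mask.  */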
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movm_epi32 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2d128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movm_epi32 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2d256 (__A);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movm_epi64 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_cvtmask2q128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movm_epi64 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_cvtmask2q256 (__A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi32_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movepi32_mask (__m256i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_movepi64_mask (__m128i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_movepi64_mask (__m256i __A)
+{
+  return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
+}
+
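+/* The remaining intrinsics take an immediate operand.  They can be
+   always-inline functions only when optimizing, where the argument
+   folds to the constant the builtin requires; macro forms for -O0 are
+   provided in the #else branch below.  */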
+#ifdef __OPTIMIZE__
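+/* VEXTRACTF64X2/VEXTRACTI64X2: extract the 128-bit lane of the
+   256-bit source selected by __imm.  */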
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_extractf64x2_pd (__m256d __A, const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A,
+                                                        __imm,
+                                                        (__v2df)
+                                                        _mm_setzero_pd (),
+                                                        (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_extractf64x2_pd (__m128d __W, __mmask8 __U, __m256d __A,
+                            const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A,
+                                                        __imm,
+                                                        (__v2df) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_extractf64x2_pd (__mmask8 __U, __m256d __A,
+                             const int __imm)
+{
+  return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A,
+                                                        __imm,
+                                                        (__v2df)
+                                                        _mm_setzero_pd (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_extracti64x2_epi64 (__m256i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A,
+                                                        __imm,
+                                                        (__v2di)
+                                                        _mm_setzero_di (),
+                                                        (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_extracti64x2_epi64 (__m128i __W, __mmask8 __U, __m256i __A,
+                               const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A,
+                                                        __imm,
+                                                        (__v2di) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_extracti64x2_epi64 (__mmask8 __U, __m256i __A,
+                                const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A,
+                                                        __imm,
+                                                        (__v2di)
+                                                        _mm_setzero_di (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
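+/* VREDUCEPD/VREDUCEPS: round each element to the number of fraction
+   bits encoded in the immediate and return the difference between the
+   element and its rounded value (the "reduced argument").  */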
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_reduce_pd (__m256d __A, int __B)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_reduce_pd (__m256d __W, __mmask8 __U, __m256d __A, int __B)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_reduce_pd (__mmask8 __U, __m256d __A, int __B)
+{
+  return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_pd (__m128d __A, int __B)
+{
+  return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_pd (__m128d __W, __mmask8 __U, __m128d __A, int __B)
+{
+  return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_pd (__mmask8 __U, __m128d __A, int __B)
+{
+  return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_reduce_ps (__m256 __A, int __B)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_reduce_ps (__m256 __W, __mmask8 __U, __m256 __A, int __B)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_reduce_ps (__mmask8 __U, __m256 __A, int __B)
+{
+  return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_reduce_ps (__m128 __A, int __B)
+{
+  return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_reduce_ps (__m128 __W, __mmask8 __U, __m128 __A, int __B)
+{
+  return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_reduce_ps (__mmask8 __U, __m128 __A, int __B)
+{
+  return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
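+/* VRANGEPD/VRANGEPS: the immediate selects the operation (minimum,
+   maximum, absolute minimum or absolute maximum) and how the sign of
+   the result is chosen.  */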
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_range_pd (__m256d __A, __m256d __B, int __C)
+{
+  return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A,
+                                                  (__v4df) __B, __C,
+                                                  (__v4df)
+                                                  _mm256_setzero_pd (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_range_pd (__m256d __W, __mmask8 __U,
+                     __m256d __A, __m256d __B, int __C)
+{
+  return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A,
+                                                  (__v4df) __B, __C,
+                                                  (__v4df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_range_pd (__mmask8 __U, __m256d __A, __m256d __B, int __C)
+{
+  return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A,
+                                                  (__v4df) __B, __C,
+                                                  (__v4df)
+                                                  _mm256_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_pd (__m128d __A, __m128d __B, int __C)
+{
+  return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A,
+                                                  (__v2df) __B, __C,
+                                                  (__v2df)
+                                                  _mm_setzero_pd (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_pd (__m128d __W, __mmask8 __U,
+                  __m128d __A, __m128d __B, int __C)
+{
+  return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A,
+                                                  (__v2df) __B, __C,
+                                                  (__v2df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_pd (__mmask8 __U, __m128d __A, __m128d __B, int __C)
+{
+  return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A,
+                                                  (__v2df) __B, __C,
+                                                  (__v2df)
+                                                  _mm_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_range_ps (__m256 __A, __m256 __B, int __C)
+{
+  return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A,
+                                                 (__v8sf) __B, __C,
+                                                 (__v8sf)
+                                                 _mm256_setzero_ps (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_range_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+                     int __C)
+{
+  return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A,
+                                                 (__v8sf) __B, __C,
+                                                 (__v8sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_range_ps (__mmask8 __U, __m256 __A, __m256 __B, int __C)
+{
+  return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A,
+                                                 (__v8sf) __B, __C,
+                                                 (__v8sf)
+                                                 _mm256_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_range_ps (__m128 __A, __m128 __B, int __C)
+{
+  return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A,
+                                                 (__v4sf) __B, __C,
+                                                 (__v4sf)
+                                                 _mm_setzero_ps (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_range_ps (__m128 __W, __mmask8 __U,
+                  __m128 __A, __m128 __B, int __C)
+{
+  return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A,
+                                                 (__v4sf) __B, __C,
+                                                 (__v4sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_range_ps (__mmask8 __U, __m128 __A, __m128 __B, int __C)
+{
+  return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A,
+                                                 (__v4sf) __B, __C,
+                                                 (__v4sf)
+                                                 _mm_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
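+/* VFPCLASSPD/VFPCLASSPS: test each element for the floating-point
+   categories enabled by the immediate (QNaN, SNaN, +/-0, +/-Inf,
+   denormal, negative) and return one bit per element in a mask.  */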
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fpclass_pd_mask (__mmask8 __U, __m256d __A,
+                            const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) __A,
+                                                     __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fpclass_pd_mask (__m256d __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) __A,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fpclass_ps_mask (__mmask8 __U, __m256 __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) __A,
+                                                     __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fpclass_ps_mask (__m256 __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) __A,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fpclass_pd_mask (__mmask8 __U, __m128d __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) __A,
+                                                     __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_pd_mask (__m128d __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) __A,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fpclass_ps_mask (__mmask8 __U, __m128 __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) __A,
+                                                     __imm, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fpclass_ps_mask (__m128 __A, const int __imm)
+{
+  return (__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) __A,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_inserti64x2 (__m256i __A, __m128i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v4di)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_inserti64x2 (__m256i __W, __mmask8 __U, __m256i __A,
+                        __m128i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v4di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_inserti64x2 (__mmask8 __U, __m256i __A, __m128i __B,
+                         const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di) __A,
+                                                       (__v2di) __B,
+                                                       __imm,
+                                                       (__v4di)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_insertf64x2 (__m256d __A, __m128d __B, const int __imm)
+{
+  return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v4df)
+                                                       _mm256_setzero_pd (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_insertf64x2 (__m256d __W, __mmask8 __U, __m256d __A,
+                        __m128d __B, const int __imm)
+{
+  return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v4df) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_insertf64x2 (__mmask8 __U, __m256d __A, __m128d __B,
+                         const int __imm)
+{
+  return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A,
+                                                       (__v2df) __B,
+                                                       __imm,
+                                                       (__v4df)
+                                                       _mm256_setzero_pd (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+#else
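+/* Macro forms of the intrinsics above, used when not optimizing so
+   the immediate operand still reaches the builtin as a constant
+   expression.  */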
+#define _mm256_insertf64x2(X, Y, C)                                     \
+  ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C),                                  \
+    (__v4df)(__m256d)_mm256_setzero_pd(),                              \
+    (__mmask8)-1))
+
+#define _mm256_mask_insertf64x2(W, U, X, Y, C)                          \
+  ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C),                                  \
+    (__v4df)(__m256d)(W),                                              \
+    (__mmask8)(U)))
+
+#define _mm256_maskz_insertf64x2(U, X, Y, C)                           \
+  ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (__v2df)(__m128d) (Y), (int) (C),                                  \
+    (__v4df)(__m256d)_mm256_setzero_pd(),                              \
+    (__mmask8)(U)))
+
+#define _mm256_inserti64x2(X, Y, C)                                     \
+  ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C),                                  \
+    (__v4di)(__m256i)_mm256_setzero_si256 (),                          \
+    (__mmask8)-1))
+
+#define _mm256_mask_inserti64x2(W, U, X, Y, C)                          \
+  ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C),                                  \
+    (__v4di)(__m256i)(W),                                              \
+    (__mmask8)(U)))
+
+#define _mm256_maskz_inserti64x2(U, X, Y, C)                            \
+  ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (__v2di)(__m128i) (Y), (int) (C),                                  \
+    (__v4di)(__m256i)_mm256_setzero_si256 (),                          \
+    (__mmask8)(U)))
+
+#define _mm256_extractf64x2_pd(X, C)                                    \
+  ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8)-1))
+
+#define _mm256_mask_extractf64x2_pd(W, U, X, C)                         \
+  ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (int) (C), (__v2df)(__m128d) (W), (__mmask8) (U)))
+
+#define _mm256_maskz_extractf64x2_pd(U, X, C)                           \
+  ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\
+    (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8) (U)))
+
+#define _mm256_extracti64x2_epi64(X, C)                                 \
+  ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (int) (C), (__v2di)(__m128i) _mm_setzero_di(), (__mmask8)-1))
+
+#define _mm256_mask_extracti64x2_epi64(W, U, X, C)                     \
+  ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (int) (C), (__v2di)(__m128i) (W), (__mmask8) (U)))
+
+#define _mm256_maskz_extracti64x2_epi64(U, X, C)                        \
+  ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\
+    (int) (C), (__v2di)(__m128i) _mm_setzero_di(), (__mmask8) (U)))
+
+#define _mm256_reduce_pd(A, B)                                         \
+  ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A),    \
+    (int)(B), (__v4df)_mm256_setzero_pd(), (__mmask8)-1))
+
+#define _mm256_mask_reduce_pd(W, U, A, B)                              \
+  ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A),    \
+    (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_reduce_pd(U, A, B)                                        \
+  ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A),    \
+    (int)(B), (__v4df)_mm256_setzero_pd(), (__mmask8)(U)))
+
+#define _mm_reduce_pd(A, B)                                            \
+  ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A),    \
+    (int)(B), (__v2df)_mm_setzero_pd(), (__mmask8)-1))
+
+#define _mm_mask_reduce_pd(W, U, A, B)                                 \
+  ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A),    \
+    (int)(B), (__v2df)(__m128d)(W), (__mmask8)(U)))
+
+#define _mm_maskz_reduce_pd(U, A, B)                                   \
+  ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A),    \
+    (int)(B), (__v2df)_mm_setzero_pd(), (__mmask8)(U)))
+
+#define _mm256_reduce_ps(A, B)                                         \
+  ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A),      \
+    (int)(B), (__v8sf)_mm256_setzero_ps(), (__mmask8)-1))
+
+#define _mm256_mask_reduce_ps(W, U, A, B)                              \
+  ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A),      \
+    (int)(B), (__v8sf)(__m256)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_reduce_ps(U, A, B)                                        \
+  ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A),      \
+    (int)(B), (__v8sf)_mm256_setzero_ps(), (__mmask8)(U)))
+
+#define _mm_reduce_ps(A, B)                                            \
+  ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A),      \
+    (int)(B), (__v4sf)_mm_setzero_ps(), (__mmask8)-1))
+
+#define _mm_mask_reduce_ps(W, U, A, B)                                 \
+  ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A),      \
+    (int)(B), (__v4sf)(__m128)(W), (__mmask8)(U)))
+
+#define _mm_maskz_reduce_ps(U, A, B)                                   \
+  ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A),      \
+    (int)(B), (__v4sf)_mm_setzero_ps(), (__mmask8)(U)))
+
+#define _mm256_range_pd(A, B, C)                                       \
+  ((__m256d) __builtin_ia32_rangepd256_mask ((__v4df)(__m256d)(A),     \
+    (__v4df)(__m256d)(B), (int)(C),                                    \
+    (__v4df)_mm256_setzero_pd(), (__mmask8)-1))
+
+#define _mm256_maskz_range_pd(U, A, B, C)                              \
+  ((__m256d) __builtin_ia32_rangepd256_mask ((__v4df)(__m256d)(A),     \
+    (__v4df)(__m256d)(B), (int)(C),                                    \
+    (__v4df)_mm256_setzero_pd(), (__mmask8)(U)))
+
+#define _mm_range_pd(A, B, C)                                          \
+  ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A),     \
+    (__v2df)(__m128d)(B), (int)(C),                                    \
+    (__v2df)_mm_setzero_pd(), (__mmask8)-1))
+
+#define _mm256_range_ps(A, B, C)                                       \
+  ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A),       \
+    (__v8sf)(__m256)(B), (int)(C),                                     \
+    (__v8sf)_mm256_setzero_ps(), (__mmask8)-1))
+
+#define _mm256_mask_range_ps(W, U, A, B, C)                            \
+  ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A),       \
+    (__v8sf)(__m256)(B), (int)(C),                                     \
+    (__v8sf)(__m256)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_range_ps(U, A, B, C)                              \
+  ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A),       \
+    (__v8sf)(__m256)(B), (int)(C),                                     \
+    (__v8sf)_mm256_setzero_ps(), (__mmask8)(U)))
+
+#define _mm_range_ps(A, B, C)                                          \
+  ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A),       \
+    (__v4sf)(__m128)(B), (int)(C),                                     \
+    (__v4sf)_mm_setzero_ps(), (__mmask8)-1))
+
+#define _mm_mask_range_ps(W, U, A, B, C)                               \
+  ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A),       \
+    (__v4sf)(__m128)(B), (int)(C),                                     \
+    (__v4sf)(__m128)(W), (__mmask8)(U)))
+
+#define _mm_maskz_range_ps(U, A, B, C)                                 \
+  ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A),       \
+    (__v4sf)(__m128)(B), (int)(C),                                     \
+    (__v4sf)_mm_setzero_ps(), (__mmask8)(U)))
+
+#define _mm256_mask_range_pd(W, U, A, B, C)                            \
+  ((__m256d) __builtin_ia32_rangepd256_mask ((__v4df)(__m256d)(A),     \
+    (__v4df)(__m256d)(B), (int)(C),                                    \
+    (__v4df)(__m256d)(W), (__mmask8)(U)))
+
+#define _mm_mask_range_pd(W, U, A, B, C)                               \
+  ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A),     \
+    (__v2df)(__m128d)(B), (int)(C),                                    \
+    (__v2df)(__m128d)(W), (__mmask8)(U)))
+
+#define _mm_maskz_range_pd(U, A, B, C)                                 \
+  ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A),     \
+    (__v2df)(__m128d)(B), (int)(C),                                    \
+    (__v2df)_mm_setzero_pd(), (__mmask8)(U)))
+
+#define _mm256_mask_fpclass_pd_mask(U, X, C)                            \
+  ((__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) (__m256d) (X), \
+                                               (int) (C), (__mmask8) (U)))
+
+#define _mm256_mask_fpclass_ps_mask(U, X, C)                            \
+  ((__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) (__m256) (X),  \
+                                               (int) (C), (__mmask8) (U)))
+
+#define _mm_mask_fpclass_pd_mask(U, X, C)                               \
+  ((__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) (__m128d) (X), \
+                                               (int) (C), (__mmask8) (U)))
+
+#define _mm_mask_fpclass_ps_mask(U, X, C)                               \
+  ((__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) (__m128) (X),  \
+                                               (int) (C), (__mmask8) (U)))
+
+#define _mm256_fpclass_pd_mask(X, C)                                    \
+  ((__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) (__m256d) (X), \
+                                               (int) (C), (__mmask8) -1))
+
+#define _mm256_fpclass_ps_mask(X, C)                                    \
+  ((__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) (__m256) (X),  \
+                                               (int) (C), (__mmask8) -1))
+
+#define _mm_fpclass_pd_mask(X, C)                                       \
+  ((__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) (__m128d) (X), \
+                                               (int) (C), (__mmask8) -1))
+
+#define _mm_fpclass_ps_mask(X, C)                                       \
+  ((__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) (__m128) (X),  \
+                                               (int) (C), (__mmask8) -1))
+
+#endif
+
+#ifdef __DISABLE_AVX512VLDQ__
+#undef __DISABLE_AVX512VLDQ__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512VLDQ__ */
+
+#endif /* _AVX512VLDQINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/avx512vlintrin.h b/gcc/config/i386/avx512vlintrin.h
new file mode 100644 (file)
index 0000000..2f5e048
--- /dev/null
@@ -0,0 +1,13199 @@
+/* Copyright (C) 2014 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX512VLINTRIN_H_INCLUDED
+#define _AVX512VLINTRIN_H_INCLUDED
+
+/* Defined outside the avx512vl target pragma below because it is also
+   used in avx512dqintrin.h.  */
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_di (void)
+{
+  return __extension__ (__m128i) (__v2di) { 0LL, 0LL };
+}
+
+#ifndef __AVX512VL__
+#pragma GCC push_options
+#pragma GCC target("avx512vl")
+#define __DISABLE_AVX512VL__
+#endif /* __AVX512VL__ */
+
+/* Internal data types for implementing the intrinsics.  */
+typedef unsigned int __mmask32;
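+/* The narrower __mmask8 and __mmask16 types are provided by
+   avx512fintrin.h.  */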
+
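+/* Masked vector moves, loads and stores.  The load and store
+   intrinsics use the aligned instructions (VMOVAPD, VMOVAPS,
+   VMOVDQA64, VMOVDQA32), so the memory operand must be suitably
+   aligned.  */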
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A,
+                                                 (__v4df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A,
+                                                 (__v4df)
+                                                 _mm256_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A,
+                                                 (__v2df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A,
+                                                 (__v2df)
+                                                 _mm_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+                                                  (__v4df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+                                                  (__v4df)
+                                                  _mm256_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+                                                  (__v2df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+                                                  (__v2df)
+                                                  _mm_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
+                                  (__v4df) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
+                                  (__v2df) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A,
+                                                (__v8sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A,
+                                                (__v8sf)
+                                                _mm256_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+                                                 (__v8sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+                                                 (__v8sf)
+                                                 _mm256_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+                                                 (__v4sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+                                                 (__v4sf)
+                                                 _mm_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
+                                  (__v8sf) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
+                                  (__v4sf) __A,
+                                  (__mmask8) __U);
+}
+
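+/* A masked load and store can be paired to copy just the selected
+   elements between buffers; a minimal sketch (__m, __src and __dst
+   are illustrative):
+     __m256 __v = _mm256_maskz_load_ps (__m, __src);
+     _mm256_mask_store_ps (__dst, __m, __v);  */
+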
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+                                                       (__v4di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+                                                       (__v4di)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+                                                       (__v2di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+                                                       (__v2di)
+                                                       _mm_setzero_di (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
+                                       (__v4di) __A,
+                                       (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
+                                       (__v2di) __A,
+                                       (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+                                                       (__v8si) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+                                                       (__v8si)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+                                                       (__v4si) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+                                                       (__v4si)
+                                                       _mm_setzero_si128 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
+                                       (__v8si) __A,
+                                       (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
+                                       (__v4si) __A,
+                                       (__mmask8) __U);
+}
+
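+/* Internal helper: an all-zero vector viewed as eight 16-bit lanes,
+   provided as a convenient zero source for word-element intrinsics.  */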
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_hi (void)
+{
+  return __extension__ (__m128i) (__v8hi) { 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
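+/* Masked packed add and subtract.  __A op __B is computed lane-wise,
+   but only lanes whose mask bit is set receive the result; the rest
+   come from __W (_mask_) or become zero (_maskz_).  Sketch with
+   illustrative operands:
+
+     __m128d r = _mm_maskz_add_pd ((__mmask8) 0x1, a, b);
+
+   gives r[0] = a[0] + b[0] and r[1] = 0.0.  */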
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df)
+                                                _mm_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf)
+                                               _mm_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+                                                (__v2df) __B,
+                                                (__v2df)
+                                                _mm_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+                                               (__v4sf) __B,
+                                               (__v4sf)
+                                               _mm_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
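+/* Plain (unmasked) element-typed stores; these are simple vector
+   assignments, with __P assumed to be suitably aligned.  */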
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_store_epi64 (void *__P, __m256i __A)
+{
+  *(__m256i *) __P = __A;
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_epi64 (void *__P, __m128i __A)
+{
+  *(__m128i *) __P = __A;
+}
+
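+/* Masked unaligned loads and stores.  A masked load suppresses faults
+   on masked-off elements, so unlike a full load followed by a blend it
+   can safely read a partial vector at the end of an array:
+
+     __m256d tail = _mm256_maskz_loadu_pd ((__mmask8) 0x7, p);
+
+   reads only three doubles from p; lane 3 is zeroed, not accessed.  */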
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+                                                  (__v4df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+                                                  (__v4df)
+                                                  _mm256_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+                                                  (__v2df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+                                                  (__v2df)
+                                                  _mm_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
+                                  (__v4df) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
+                                  (__v2df) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+                                                 (__v8sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+                                                 (__v8sf)
+                                                 _mm256_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+                                                 (__v4sf) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+                                                 (__v4sf)
+                                                 _mm_setzero_ps (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
+                                  (__v8sf) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
+                                  (__v4sf) __A,
+                                  (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+                                                    (__v2di)
+                                                    _mm_setzero_di (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
+                                    (__v4di) __A,
+                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
+                                    (__v2di) __A,
+                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
+                                    (__v8si) __A,
+                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
+                                    (__v4si) __A,
+                                    (__mmask8) __U);
+}
+
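+/* Masked absolute value.  vpabsq (64-bit element abs) is new with
+   AVX-512, so the unmasked _mm*_abs_epi64 entry points are defined
+   here as well; 32-bit abs reuses the AVX2 operation under a mask.  */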
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_abs_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_abs_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
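+/* double -> unsigned dword conversions (vcvtpd2udq), new with AVX-512.
+   These round according to the current MXCSR rounding mode; the
+   truncating vcvtt* forms further down round toward zero instead.  */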
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtpd_epu32 (__m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtpd_epu32 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
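+/* Truncating float/double -> integer conversions (vcvtt*): these round
+   toward zero regardless of MXCSR, matching C cast semantics.  */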
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttps_epu32 (__m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+                                                     (__v8si)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+                                                     (__v8si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+                                                     (__v8si)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttps_epu32 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+                                                     (__v4si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvttpd_epu32 (__m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+                                                     (__v4si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvttpd_epu32 (__m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+                                                     (__v4si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A)
+{
+  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
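+/* Widening dword -> double conversions.  Every 32-bit integer is
+   exactly representable as a double, so these never round.  */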
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepu32_pd (__m128i __A)
+{
+  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepu32_pd (__m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A)
+{
+  return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
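+/* dword -> float conversions.  The unsigned epu32 forms (vcvtudq2ps)
+   are new with AVX-512; the signed forms are the AVX operation placed
+   under a write mask.  */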
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A)
+{
+  return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepu32_ps (__m256i __A)
+{
+  return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A)
+{
+  return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepu32_ps (__m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A)
+{
+  return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A)
+{
+  return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A)
+{
+  return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
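+/* Narrowing "down-converts" (vpmov*).  Each width pair comes in three
+   flavours: plain truncation (cvtepi32_epi8), signed saturation
+   (cvtsepi32_epi8) and unsigned saturation (cvtusepi32_epi8), plus
+   *_storeu_* forms that write only the narrowed elements to memory.
+   An illustrative sketch:
+
+     __m128i b = _mm_cvtepi32_epi8 (_mm_set1_epi32 (0x1ff));
+
+   truncation keeps the low byte of each lane, 0xff here; the signed
+   saturating form would yield 0x7f instead.  */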
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+                                                 (__v16qi) _mm_undefined_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+                                                 (__v16qi) _mm_undefined_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+                                                  (__v16qi) _mm_undefined_si128 (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+                                                  (__v16qi) _mm_undefined_si128 (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi32_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                                                   (__v16qi) _mm_undefined_si128 (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi32_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                                                   (__v16qi) _mm_undefined_si128 (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
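+/* The same truncate / signed-saturate / unsigned-saturate triple,
+   narrowing dwords to words (vpmovdw, vpmovsdw, vpmovusdw).  */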
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+                                                 (__v8hi) _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+                                                 (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+                                                 (__v8hi) _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+                                                 (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+                                                  (__v8hi) _mm_setzero_si128 (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+                                                  (__v8hi) __O,
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+                                                  (__v8hi) _mm_undefined_si128 (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+                                                  (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi32_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                                                   (__v8hi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                                                   (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi32_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                                                   (__v8hi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                                                   (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
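+
+/* Usage sketch (illustrative, not part of this header): the three
+   dword-to-word down-conversions differ only in how out-of-range values
+   are handled -- cvtepi32 truncates, cvtsepi32 saturates as signed,
+   cvtusepi32 saturates as unsigned.  Assuming AVX512VL is enabled:
+
+     __m256i v = _mm256_set1_epi32 (0x12345678);
+     __m128i t = _mm256_cvtepi32_epi16 (v);     // 0x5678 in each word
+     __m128i s = _mm256_cvtsepi32_epi16 (v);    // 0x7fff (signed max)
+     __m128i u = _mm256_cvtusepi32_epi16 (v);   // 0xffff (unsigned max)
+     short buf[8];
+     _mm256_mask_cvtepi32_storeu_epi16 (buf, 0x0f, v);  // words 0-3 only
+*/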
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+                                                 (__v16qi)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+                                                 (__v16qi)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+                                                 (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
+                                                 (__v16qi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+                                                  (__v16qi)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+                                                  (__v16qi)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+                                                  (__v16qi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
+                                                  (__v16qi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi64_epi8 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                                                   (__v16qi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi64_epi8 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                                                   (__v16qi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                                                   (__v16qi) __O,
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
+                                                   (__v16qi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+                                                 (__v8hi)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+                                                 (__v8hi)__O,
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+                                                 (__v8hi)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+                                                 (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
+                                                 (__v8hi)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+                                                  (__v8hi)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+                                                  (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+                                                  (__v8hi)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+                                                  (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
+                                                  (__v8hi)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi64_epi16 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                                                   (__v8hi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                                                   (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi64_epi16 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                                                   (__v8hi)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                                                   (__v8hi) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
+                                                   (__v8hi)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+                                                 (__v4si)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+                                                 (__v4si) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+                                                 (__v4si)_mm_undefined_si128(),
+                                                 (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+                                                 (__v4si) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtsepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+                                                  (__v4si)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+                                                  (__v4si) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
+                                                  (__v4si)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtsepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+                                                  (__v4si)_mm_undefined_si128(),
+                                                  (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+                                                  (__v4si)__O,
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
+                                                  (__v4si)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtusepi64_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                                                   (__v4si)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
+{
+  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                                                   (__v4si) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtusepi64_epi32 (__m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                                                   (__v4si)_mm_undefined_si128(),
+                                                   (__mmask8) -1);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
+{
+  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                                                   (__v4si) __O, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
+{
+  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   __M);
+}
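+
+/* Usage sketch (illustrative): the qword down-conversions follow the same
+   truncate/saturate pattern, and the storeu forms write only the selected
+   narrowed elements to unaligned memory.  Assuming AVX512VL:
+
+     __m256i q = _mm256_set1_epi64x (0x1ffffffffLL);
+     __m128i d = _mm256_cvtepi64_epi32 (q);    // truncated: 0xffffffff
+     __m128i s = _mm256_cvtsepi64_epi32 (q);   // saturated: 0x7fffffff
+     int out[4];
+     _mm256_mask_cvtepi64_storeu_epi32 (out, 0x3, q);  // dwords 0 and 1
+*/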
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastss256_mask ((__v4sf) __A,
+                                                     (__v8sf) __O,
+                                                     __M);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastss256_mask ((__v4sf) __A,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     __M);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m128) __builtin_ia32_broadcastss128_mask ((__v4sf) __A,
+                                                     (__v4sf) __O,
+                                                     __M);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
+{
+  return (__m128) __builtin_ia32_broadcastss128_mask ((__v4sf) __A,
+                                                     (__v4sf)
+                                                     _mm_setzero_ps (),
+                                                     __M);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastsd256_mask ((__v2df) __A,
+                                                      (__v4df) __O,
+                                                      __M);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
+{
+  return (__m256d) __builtin_ia32_broadcastsd256_mask ((__v2df) __A,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      __M);
+}
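+
+/* Usage sketch (illustrative): the masked broadcasts splat element 0 of
+   the source, then merge (mask_) or zero (maskz_) lanes per the mask:
+
+     __m128 x = _mm_set_ss (3.0f);
+     __m256 r = _mm256_maskz_broadcastss_ps (0x55, x);  // 3.0f in even lanes
+*/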
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastd256_mask ((__v4si) __A,
+                                                      (__v8si) __O,
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastd256_mask ((__v4si) __A,
+                                                      (__v8si)
+                                                      _mm256_setzero_si256 (),
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_set1_epi32 (__m256i __O, __mmask8 __M, int __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastd256_gpr_mask (__A, (__v8si) __O,
+                                                          __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_set1_epi32 (__mmask8 __M, int __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastd256_gpr_mask (__A,
+                                                          (__v8si)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastd128_mask ((__v4si) __A,
+                                                      (__v4si) __O,
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastd128_mask ((__v4si) __A,
+                                                      (__v4si)
+                                                      _mm_setzero_si128 (),
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_set1_epi32 (__m128i __O, __mmask8 __M, int __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastd128_gpr_mask (__A, (__v4si) __O,
+                                                          __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_set1_epi32 (__mmask8 __M, int __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastd128_gpr_mask (__A,
+                                                          (__v4si)
+                                                          _mm_setzero_si128 (),
+                                                          __M);
+}
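+
+/* Usage sketch (illustrative): the mask_/maskz_set1 forms broadcast a
+   scalar directly from a general-purpose register under a mask:
+
+     __m256i old = _mm256_set1_epi32 (-1);
+     __m256i r = _mm256_mask_set1_epi32 (old, 0x0f, 7);  // 7 7 7 7 -1 -1 -1 -1
+*/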
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastq256_mask ((__v2di) __A,
+                                                      (__v4di) __O,
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pbroadcastq256_mask ((__v2di) __A,
+                                                      (__v4di)
+                                                      _mm256_setzero_si256 (),
+                                                      __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+  return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, (__v4di) __O,
+                                                          __M);
+#else
+  return (__m256i) __builtin_ia32_pbroadcastq256_mem_mask (__A, (__v4di) __O,
+                                                          __M);
+#endif
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+  return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A,
+                                                          (__v4di)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+#else
+  return (__m256i) __builtin_ia32_pbroadcastq256_mem_mask (__A,
+                                                          (__v4di)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+#endif
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastq128_mask ((__v2di) __A,
+                                                      (__v2di) __O,
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pbroadcastq128_mask ((__v2di) __A,
+                                                      (__v2di)
+                                                      _mm_setzero_si128 (),
+                                                      __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+  return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, (__v2di) __O,
+                                                          __M);
+#else
+  return (__m128i) __builtin_ia32_pbroadcastq128_mem_mask (__A, (__v2di) __O,
+                                                          __M);
+#endif
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
+{
+#ifdef __x86_64__
+  return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A,
+                                                          (__v2di)
+                                                          _mm_setzero_si128 (),
+                                                          __M);
+#else
+  return (__m128i) __builtin_ia32_pbroadcastq128_mem_mask (__A,
+                                                          (__v2di)
+                                                          _mm_setzero_si128 (),
+                                                          __M);
+#endif
+}
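+
+/* Usage sketch (illustrative): on 64-bit targets the long long argument is
+   broadcast straight from a GPR; 32-bit targets go through memory instead,
+   hence the __x86_64__ split above.  Either way the result is the same:
+
+     __m256i r = _mm256_maskz_set1_epi64 (0x5, 42LL);  // 42 0 42 0
+*/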
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_f32x4 (__m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                                                         (__v8sf)
+                                                         _mm256_undefined_ps (),
+                                                         (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_f32x4 (__m256 __O, __mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                                                         (__v8sf) __O,
+                                                         __M);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
+{
+  return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A,
+                                                         (__v8sf)
+                                                         _mm256_setzero_ps (),
+                                                         __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcast_i32x4 (__m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+                                                          (__v8si)
+                                                          _mm256_undefined_si256 (),
+                                                          (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_broadcast_i32x4 (__m256i __O, __mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+                                                          (__v8si) __O,
+                                                          __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_broadcast_i32x4 (__mmask8 __M, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A,
+                                                          (__v8si)
+                                                          _mm256_setzero_si256 (),
+                                                          __M);
+}
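+
+/* Usage sketch (illustrative): broadcast_f32x4/i32x4 replicate an entire
+   128-bit lane into both halves of a 256-bit vector:
+
+     __m128i lane = _mm_set_epi32 (3, 2, 1, 0);
+     __m256i both = _mm256_broadcast_i32x4 (lane);  // 0 1 2 3 0 1 2 3
+*/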
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
+                                                   (__v8si)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
+                                                   (__v8si)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepi32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepi32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
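+
+/* Usage sketch (illustrative): the masked widening conversions sign-extend
+   each selected element; unselected destination elements are merged from
+   __W or zeroed:
+
+     __m128i b = _mm_set1_epi8 (-1);
+     __m256i w = _mm256_maskz_cvtepi8_epi32 (0xff, b);  // eight lanes of -1
+*/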
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
+                                                   (__v8si)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
+                                                   (__v8si)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtepu32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
+                                                   (__v4di)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtepu32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+{
+  return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
+                                                   (__v2di)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
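+
+/* Usage sketch (illustrative): the cvtepu forms zero-extend instead, so
+   the same input widens to 255 rather than -1:
+
+     __m128i b = _mm_set1_epi8 (-1);
+     __m256i w = _mm256_maskz_cvtepu8_epi32 (0xff, b);  // eight lanes of 255
+*/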
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rcp14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                                             (__v4df)
+                                             _mm256_setzero_pd (),
+                                             (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                                             (__v4df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
+                                             (__v4df)
+                                             _mm256_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                                             (__v2df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rcp14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+                                            (__v8sf)
+                                            _mm256_setzero_ps (),
+                                            (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+                                            (__v8sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
+                                            (__v8sf)
+                                            _mm256_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rcp14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+                                            (__v4sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
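+/* VRSQRT14PD/VRSQRT14PS: approximate reciprocal square root, accurate
+   to a relative error of at most 2^-14, following the same
+   plain/_mask/_maskz pattern as the rcp14 intrinsics above.  */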
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rsqrt14_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt14_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rsqrt14_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rsqrt14_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
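+/* Masked forms of VSQRTPD/VSQRTPS: a correctly rounded square root per
+   element; unselected lanes come from __W (_mask) or are zeroed
+   (_maskz).  */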
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+                                                 (__v4df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+                                                 (__v4df)
+                                                 _mm256_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+                                                 (__v2df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+                                                 (__v2df)
+                                                 _mm_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+                                                (__v8sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+                                                (__v8sf)
+                                                _mm256_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
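+/* Masked packed integer add/subtract (VPADDD/VPADDQ/VPSUBD/VPSUBQ).
+   Illustrative use, with hypothetical operands:
+     __m256i r = _mm256_mask_add_epi32 (w, 0xF0, a, b);
+   computes a[i] + b[i] in the upper four lanes and copies w[0..3]
+   into the lower four.  */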
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
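+/* VGETEXPPS/VGETEXPPD: extract the unbiased exponent of each element
+   as a floating-point value, i.e. roughly floor(log2(|x|)), with
+   plain, merge-masked and zero-masked variants.  */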
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_getexp_ps (__m256 __A)
+{
+  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_getexp_pd (__m256d __A)
+{
+  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_ps (__m128 __A)
+{
+  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getexp_pd (__m128d __A)
+{
+  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
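+/* VPSRLD/VPSRLQ: logical right shift of every element by the unsigned
+   64-bit count held in the low quadword of __B; counts of at least the
+   element width yield zero.  The mask selects which result lanes are
+   written.  */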
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srl_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srl_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srl_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srl_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srl_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srl_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srl_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srl_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
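+/* Masked VPANDD: bitwise AND of packed 32-bit integers.  */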
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
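+/* VSCALEFPD/VSCALEFPS: scale __A by a power of two taken from __B,
+   computing __A[i] * 2^floor(__B[i]) per element.  Illustrative use,
+   with hypothetical operands:
+     __m128d r = _mm_scalef_pd (a, _mm_set1_pd (3.0));
+   multiplies both lanes of a by 8.0.  */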
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_scalef_pd (__m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                      __m256d __B)
+{
+  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_scalef_ps (__m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                      __m256 __B)
+{
+  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_pd (__m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                   __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_scalef_ps (__m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
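+/* Masked fused multiply-add, (__A * __B) + __C.  The three variants
+   differ in where unselected lanes come from: _mask keeps lanes of
+   __A, _mask3 keeps lanes of __C, and _maskz zeroes them.  */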
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmadd_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                     __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmadd_pd (__m256d __A, __m256d __B, __m256d __C,
+                      __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmadd_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                      __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_pd (__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_pd (__m128d __A, __m128d __B, __m128d __C,
+                   __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                   __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmadd_ps (__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf) __C,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmadd_ps (__m256 __A, __m256 __B, __m256 __C,
+                      __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmadd_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                      __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf) __C,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmadd_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmadd_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
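+/* Masked fused multiply-subtract, (__A * __B) - __C.  The _mask and
+   _maskz forms reuse the fmadd builtins with __C negated; _mask3 needs
+   a dedicated builtin because unselected lanes must keep the original
+   (un-negated) __C.  */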
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmsub_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                     __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   -(__v4df) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmsub_pd (__m256d __A, __m256d __B, __m256d __C,
+                      __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmsub_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                      __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    -(__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_pd (__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   -(__v2df) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_pd (__m128d __A, __m128d __B, __m128d __C,
+                   __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                   __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    -(__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmsub_ps (__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  -(__v8sf) __C,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmsub_ps (__m256 __A, __m256 __B, __m256 __C,
+                      __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmsub_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                      __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   -(__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  -(__v4sf) __C,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsub_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsub_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   -(__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
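+/* Masked VFMADDSUB: (__A * __B) - __C in even-indexed elements and
+   (__A * __B) + __C in odd-indexed elements.  */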
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmaddsub_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                        __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      (__v4df) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmaddsub_pd (__m256d __A, __m256d __B, __m256d __C,
+                         __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A,
+                                                       (__v4df) __B,
+                                                       (__v4df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmaddsub_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                         __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+                                                       (__v4df) __B,
+                                                       (__v4df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmaddsub_pd (__m128d __A, __mmask8 __U, __m128d __B,
+                     __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+                                                      (__v2df) __B,
+                                                      (__v2df) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmaddsub_pd (__m128d __A, __m128d __B, __m128d __C,
+                      __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A,
+                                                       (__v2df) __B,
+                                                       (__v2df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmaddsub_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                      __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+                                                       (__v2df) __B,
+                                                       (__v2df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmaddsub_ps (__m256 __A, __mmask8 __U, __m256 __B,
+                        __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     (__v8sf) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmaddsub_ps (__m256 __A, __m256 __B, __m256 __C,
+                         __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      (__v8sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmaddsub_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                         __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      (__v8sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmaddsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+                                                     (__v4sf) __B,
+                                                     (__v4sf) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmaddsub_ps (__m128 __A, __m128 __B, __m128 __C,
+                      __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      (__v4sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmaddsub_ps (__mmask8 __U, __m128 __A, __m128 __B,
+                      __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      (__v4sf) __C,
+                                                      (__mmask8) __U);
+}
+
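+/* Masked VFMSUBADD: (__A * __B) + __C in even-indexed elements and
+   (__A * __B) - __C in odd-indexed elements; the _mask and _maskz
+   forms reuse the fmaddsub builtins with __C negated.  */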
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmsubadd_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                        __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      -(__v4df) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmsubadd_pd (__m256d __A, __m256d __B, __m256d __C,
+                         __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A,
+                                                       (__v4df) __B,
+                                                       (__v4df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmsubadd_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                         __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
+                                                       (__v4df) __B,
+                                                       -(__v4df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsubadd_pd (__m128d __A, __mmask8 __U, __m128d __B,
+                     __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
+                                                      (__v2df) __B,
+                                                      -(__v2df) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsubadd_pd (__m128d __A, __m128d __B, __m128d __C,
+                      __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A,
+                                                       (__v2df) __B,
+                                                       (__v2df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsubadd_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                      __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
+                                                       (__v2df) __B,
+                                                       -(__v2df) __C,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fmsubadd_ps (__m256 __A, __mmask8 __U, __m256 __B,
+                        __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     -(__v8sf) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fmsubadd_ps (__m256 __A, __m256 __B, __m256 __C,
+                         __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      (__v8sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fmsubadd_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                         __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      -(__v8sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fmsubadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
+                                                     (__v4sf) __B,
+                                                     -(__v4sf) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fmsubadd_ps (__m128 __A, __m128 __B, __m128 __C,
+                      __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      (__v4sf) __C,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fmsubadd_ps (__mmask8 __U, __m128 __A, __m128 __B,
+                      __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      -(__v4sf) __C,
+                                                      (__mmask8) __U);
+}
+
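+/* Masked fused negated multiply-add, -(__A * __B) + __C.  _mask uses a
+   dedicated vfnmadd builtin so that unselected lanes keep the original
+   __A; _mask3 and _maskz can reuse the fmadd builtins with __A
+   negated.  */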
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmadd_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                      __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmadd_pd (__m256d __A, __m256d __B, __m256d __C,
+                       __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmadd_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                       __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_pd (__m128d __A, __mmask8 __U, __m128d __B,
+                   __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_pd (__m128d __A, __m128d __B, __m128d __C,
+                    __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                    __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmadd_ps (__m256 __A, __mmask8 __U, __m256 __B,
+                      __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmadd_ps (__m256 __A, __m256 __B, __m256 __C,
+                       __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmadd_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                       __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmadd_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmadd_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
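+/* FNMSUB: compute -(A * B) - C, with the same masking conventions as
+   FNMADD above.  */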
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmsub_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                      __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A,
+                                                    (__v4df) __B,
+                                                    (__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmsub_pd (__m256d __A, __m256d __B, __m256d __C,
+                       __mmask8 __U)
+{
+  return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A,
+                                                     (__v4df) __B,
+                                                     (__v4df) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmsub_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                       __m256d __C)
+{
+  return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
+                                                    (__v4df) __B,
+                                                    -(__v4df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_pd (__m128d __A, __mmask8 __U, __m128d __B,
+                   __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A,
+                                                    (__v2df) __B,
+                                                    (__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_pd (__m128d __A, __m128d __B, __m128d __C,
+                    __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A,
+                                                     (__v2df) __B,
+                                                     (__v2df) __C,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                    __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
+                                                    (__v2df) __B,
+                                                    -(__v2df) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fnmsub_ps (__m256 __A, __mmask8 __U, __m256 __B,
+                      __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   (__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask3_fnmsub_ps (__m256 __A, __m256 __B, __m256 __C,
+                       __mmask8 __U)
+{
+  return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A,
+                                                    (__v8sf) __B,
+                                                    (__v8sf) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fnmsub_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                       __m256 __C)
+{
+  return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
+                                                   (__v8sf) __B,
+                                                   -(__v8sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fnmsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   (__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask3_fnmsub_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A,
+                                                    (__v4sf) __B,
+                                                    (__v4sf) __C,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fnmsub_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
+                                                   (__v4sf) __B,
+                                                   -(__v4sf) __C,
+                                                   (__mmask8) __U);
+}
+
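+/* Masked bitwise logic on 32-bit lanes (AND, ANDNOT, OR, XOR): the
+   _mask forms merge inactive lanes from __W, the _maskz forms zero
+   them.  ANDNOT computes ~__A & __B.  */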
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                         __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                      __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+                                               (__v8si) __B,
+                                               (__v8si) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A,
+                                               (__v8si) __B,
+                                               (__v8si)
+                                               _mm256_setzero_si256 (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+                                               (__v4si) __B,
+                                               (__v4si) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A,
+                                               (__v4si) __B,
+                                               (__v4si)
+                                               _mm_setzero_si128 (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A,
+                                                (__v8si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
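+/* Conversions.  cvtpd_ps narrows packed doubles to packed singles;
+   a 128-bit source converts into the two low lanes of the result.  */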
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+                                               (__v4sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A)
+{
+  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+                                               (__v4sf)
+                                               _mm_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A)
+{
+  return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A)
+{
+  return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
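+/* cvtps_epi32 converts packed singles to signed 32-bit integers using
+   the current rounding mode.  */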
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+                                                   (__v8si)
+                                                   _mm256_setzero_si256 (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+                                                   (__v4si)
+                                                   _mm_setzero_si128 (),
+                                                   (__mmask8) __U);
+}
+
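+/* cvtps_epu32 converts to unsigned 32-bit integers.  The unmasked
+   forms are provided here as well, since unsigned conversion has no
+   AVX/SSE counterpart.  */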
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cvtps_epu32 (__m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A)
+{
+  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cvtps_epu32 (__m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A)
+{
+  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
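+/* movedup_pd duplicates the even-indexed double elements.  */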
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_movddup256_mask ((__v4df) __A,
+                                                  (__v4df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_movddup256_mask ((__v4df) __A,
+                                                  (__v4df)
+                                                  _mm256_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_movddup128_mask ((__v2df) __A,
+                                                  (__v2df) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_movddup128_mask ((__v2df) __A,
+                                                  (__v2df)
+                                                  _mm_setzero_pd (),
+                                                  (__mmask8) __U);
+}
+
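+/* movehdup_ps duplicates the odd-indexed single elements.  */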
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movshdup256_mask ((__v8sf) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movshdup256_mask ((__v8sf) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movshdup128_mask ((__v4sf) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movshdup128_mask ((__v4sf) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
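+/* moveldup_ps duplicates the even-indexed single elements.  */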
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movsldup256_mask ((__v8sf) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_movsldup256_mask ((__v8sf) __A,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movsldup128_mask ((__v4sf) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_movsldup128_mask ((__v4sf) __A,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
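+/* unpackhi/unpacklo interleave elements from the high or low half of
+   each 128-bit lane of __A and __B, with merge (_mask) or zero
+   (_maskz) masking.  */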
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhdq128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhdq128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhdq256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhdq256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhqdq128_mask ((__v2di) __A,
+                                                     (__v2di) __B,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckhqdq128_mask ((__v2di) __A,
+                                                     (__v2di) __B,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhqdq256_mask ((__v4di) __A,
+                                                     (__v4di) __B,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckhqdq256_mask ((__v4di) __A,
+                                                     (__v4di) __B,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckldq128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpckldq128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckldq256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpckldq256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                        __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklqdq128_mask ((__v2di) __A,
+                                                     (__v2di) __B,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_punpcklqdq128_mask ((__v2di) __A,
+                                                     (__v2di) __B,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                           __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklqdq256_mask ((__v4di) __A,
+                                                     (__v4di) __B,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_punpcklqdq256_mask ((__v4di) __A,
+                                                     (__v4di) __B,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
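+/* Integer compares return an __mmask8 with bit i set when lane i
+   satisfies the predicate; the _mask_ forms additionally AND the
+   result with __U.  Illustrative sketch (hypothetical a, b):
+
+     __mmask8 m = _mm_mask_cmpeq_epi32_mask (0x3, a, b);
+
+   tests equality in the two low lanes only.  */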
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqd128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpeq_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqd128_mask ((__v4si) __A,
+                                                   (__v4si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpeq_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqd256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpeq_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqd256_mask ((__v8si) __A,
+                                                   (__v8si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpeq_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqq128_mask ((__v2di) __A,
+                                                   (__v2di) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpeq_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqq128_mask ((__v2di) __A,
+                                                   (__v2di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpeq_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqq256_mask ((__v4di) __A,
+                                                   (__v4di) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpeq_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpeqq256_mask ((__v4di) __A,
+                                                   (__v4di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtd128_mask ((__v4si) __A,
+                                                   (__v4si) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpgt_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtd128_mask ((__v4si) __A,
+                                                   (__v4si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpgt_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtd256_mask ((__v8si) __A,
+                                                   (__v8si) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpgt_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtd256_mask ((__v8si) __A,
+                                                   (__v8si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpgt_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtq128_mask ((__v2di) __A,
+                                                   (__v2di) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmpgt_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtq128_mask ((__v2di) __A,
+                                                   (__v2di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpgt_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtq256_mask ((__v4di) __A,
+                                                   (__v4di) __B,
+                                                   (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmpgt_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_pcmpgtq256_mask ((__v4di) __A,
+                                                   (__v4di) __B, __U);
+}
+
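+/* VPTESTM/VPTESTNM: test sets mask bit i when (__A & __B) is non-zero
+   in lane i; testn sets it when that AND is zero.  */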
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_test_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+                                              (__v4si) __B,
+                                              (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A,
+                                              (__v4si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_test_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+                                              (__v8si) __B,
+                                              (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A,
+                                              (__v8si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_test_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+                                              (__v2di) __B,
+                                              (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A,
+                                              (__v2di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_test_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+                                              (__v4di) __B,
+                                              (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A,
+                                              (__v4di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_testn_epi32_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+                                               (__v4si) __B,
+                                               (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A,
+                                               (__v4si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+                                               (__v8si) __B,
+                                               (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A,
+                                               (__v8si) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_testn_epi64_mask (__m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+                                               (__v2di) __B,
+                                               (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A,
+                                               (__v2di) __B, __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+                                               (__v4di) __B,
+                                               (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A,
+                                               (__v4di) __B, __U);
+}
+
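+/* Compress packs the lanes selected by __U contiguously into the low
+   lanes of the result, filling the rest from __W (_mask) or with
+   zeroes (_maskz); compressstoreu writes only the selected lanes,
+   contiguously and unaligned, to __P.  */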
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+                                                     (__v4df) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+                                                     (__v4df)
+                                                     _mm256_setzero_pd (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A)
+{
+  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
+                                         (__v4df) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+                                                     (__v2df) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_compress_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+                                                     (__v2df)
+                                                     _mm_setzero_pd (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A)
+{
+  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
+                                         (__v2df) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+                                                    (__v8sf) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+                                                    (__v8sf)
+                                                    _mm256_setzero_ps (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A)
+{
+  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
+                                         (__v8sf) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+                                                    (__v4sf) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_compress_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+                                                    (__v4sf)
+                                                    _mm_setzero_ps (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A)
+{
+  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
+                                         (__v4sf) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
+                                         (__v4di) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
+                                         (__v2di) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+                                                     (__v8si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+                                                     (__v8si)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
+                                         (__v8si) __A,
+                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+                                                     (__v4si) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
+                                         (__v4si) __A,
+                                         (__mmask8) __U);
+}
+
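+/* Expand is the inverse of compress: consecutive source elements (from
+   a register, or loaded unaligned from __P by expandloadu) are placed
+   into the lanes selected by __U.  */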
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A)
+{
+  return (__m256d) __builtin_ia32_expanddf256_maskz ((__v4df) __A,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+                                                       (__v4df) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m256d) __builtin_ia32_expandloaddf256_maskz ((__v4df *) __P,
+                                                        (__v4df)
+                                                        _mm256_setzero_pd (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expand_pd (__mmask8 __U, __m128d __A)
+{
+  return (__m128d) __builtin_ia32_expanddf128_maskz ((__v2df) __A,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+                                                       (__v2df) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P)
+{
+  return (__m128d) __builtin_ia32_expandloaddf128_maskz ((__v2df *) __P,
+                                                        (__v2df)
+                                                        _mm_setzero_pd (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A)
+{
+  return (__m256) __builtin_ia32_expandsf256_maskz ((__v8sf) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+                                                      (__v8sf) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m256) __builtin_ia32_expandloadsf256_maskz ((__v8sf *) __P,
+                                                       (__v8sf)
+                                                       _mm256_setzero_ps (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expand_ps (__mmask8 __U, __m128 __A)
+{
+  return (__m128) __builtin_ia32_expandsf128_maskz ((__v4sf) __A,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+                                                      (__v4sf) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P)
+{
+  return (__m128) __builtin_ia32_expandloadsf128_maskz ((__v4sf *) __P,
+                                                       (__v4sf)
+                                                       _mm_setzero_ps (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_expanddi256_maskz ((__v4di) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
+                              void const *__P)
+{
+  return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+                                                       (__v4di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_expandloaddi256_maskz ((__v4di *) __P,
+                                                        (__v4di)
+                                                        _mm256_setzero_si256 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_expanddi128_maskz ((__v2di) __A,
+                                                    (__v2di)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+                                                       (__v2di) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_expandloaddi128_maskz ((__v2di *) __P,
+                                                        (__v2di)
+                                                        _mm_setzero_si128 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_expandsi256_maskz ((__v8si) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
+                              void const *__P)
+{
+  return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+                                                       (__v8si) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_expandloadsi256_maskz ((__v8si *) __P,
+                                                        (__v8si)
+                                                        _mm256_setzero_si256 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_expandsi128_maskz ((__v4si) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+                                                       (__v4si) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_expandloadsi128_maskz ((__v4si *) __P,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
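+/* Two-source permutes (vpermt2 / vpermi2 forms): each index in __I
+   selects one element from the concatenation of __A and __B, with an
+   extra index bit choosing between the two sources.  In the _mask
+   forms, lanes cleared in __U are copied from __A; in the _mask2
+   forms they are copied from the index operand __I.  */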
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+                                                       /* idx */ ,
+                                                       (__v4df) __A,
+                                                       (__v4df) __B,
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
+                            __m256d __B)
+{
+  return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+                                                       /* idx */ ,
+                                                       (__v4df) __A,
+                                                       (__v4df) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
+                             __m256d __B)
+{
+  return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
+                                                       (__v4di) __I
+                                                       /* idx */ ,
+                                                       (__v4df) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
+                             __m256d __B)
+{
+  return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
+                                                        /* idx */ ,
+                                                        (__v4df) __A,
+                                                        (__v4df) __B,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B)
+{
+  return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
+                            __m256 __B)
+{
+  return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8sf) __A,
+                                                      (__v8sf) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
+                             __m256 __B)
+{
+  return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
+                                                      (__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8sf) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
+                             __m256 __B)
+{
+  return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
+                                                       /* idx */ ,
+                                                       (__v8sf) __A,
+                                                       (__v8sf) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+                                                      /* idx */ ,
+                                                      (__v2di) __A,
+                                                      (__v2di) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
+                            __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+                                                      /* idx */ ,
+                                                      (__v2di) __A,
+                                                      (__v2di) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
+                                                      (__v2di) __I
+                                                      /* idx */ ,
+                                                      (__v2di) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
+                                                       /* idx */ ,
+                                                       (__v2di) __A,
+                                                       (__v2di) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4si) __A,
+                                                      (__v4si) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
+                            __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4si) __A,
+                                                      (__v4si) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
+                                                      (__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4si) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
+                             __m128i __B)
+{
+  return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
+                                                       /* idx */ ,
+                                                       (__v4si) __A,
+                                                       (__v4si) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+                                                      /* idx */ ,
+                                                      (__v4di) __A,
+                                                      (__v4di) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
+                               __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+                                                      /* idx */ ,
+                                                      (__v4di) __A,
+                                                      (__v4di) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
+                                __mmask8 __U, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
+                                                      (__v4di) __I
+                                                      /* idx */ ,
+                                                      (__v4di) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
+                                __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
+                                                       /* idx */ ,
+                                                       (__v4di) __A,
+                                                       (__v4di) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8si) __A,
+                                                      (__v8si) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
+                               __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8si) __A,
+                                                      (__v8si) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
+                                __mmask8 __U, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
+                                                      (__v8si) __I
+                                                      /* idx */ ,
+                                                      (__v8si) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
+                                __m256i __I, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
+                                                       /* idx */ ,
+                                                       (__v8si) __A,
+                                                       (__v8si) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+                                                       /* idx */ ,
+                                                       (__v2df) __A,
+                                                       (__v2df) __B,
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
+                         __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+                                                       /* idx */ ,
+                                                       (__v2df) __A,
+                                                       (__v2df) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
+                          __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
+                                                       (__v2di) __I
+                                                       /* idx */ ,
+                                                       (__v2df) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
+                          __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
+                                                        /* idx */ ,
+                                                        (__v2df) __A,
+                                                        (__v2df) __B,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B)
+{
+  return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
+                         __m128 __B)
+{
+  return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4sf) __A,
+                                                      (__v4sf) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
+                          __m128 __B)
+{
+  return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
+                                                      (__v4si) __I
+                                                      /* idx */ ,
+                                                      (__v4sf) __B,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
+                          __m128 __B)
+{
+  return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
+                                                       /* idx */ ,
+                                                       (__v4sf) __A,
+                                                       (__v4sf) __B,
+                                                       (__mmask8)
+                                                       __U);
+}
+
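+/* Variable-count shifts: each element of __X is shifted by the count
+   in the corresponding element of __Y (VPSLLVD, VPSLLVQ, VPSRAVD,
+   VPSRAVQ, VPSRLVD, VPSRLVQ), with merge (__W) or zero masking.  */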
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srav_epi64 (__m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srav_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srav_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sllv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sllv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sllv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sllv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sllv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+                                                (__v4di) __Y,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sllv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
+                                                (__v4di) __Y,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sllv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+                                                (__v2di) __Y,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sllv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
+                                                (__v2di) __Y,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srav_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srav_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srav_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srav_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srlv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srlv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
+                                                (__v8si) __Y,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srlv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srlv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
+                                                (__v4si) __Y,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srlv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+                                                (__v4di) __Y,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srlv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
+                                                (__v4di) __Y,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srlv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
+                    __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+                                                (__v2di) __Y,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srlv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
+                                                (__v2di) __Y,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
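+/* Variable-count rotates (VPROLVD, VPROLVQ, VPRORVD, VPRORVQ): rotate
+   each element of __A left or right by the count in the corresponding
+   element of __B.  */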
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rolv_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rolv_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rorv_epi32 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rorv_epi32 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rolv_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rolv_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rorv_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                       __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rorv_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                    __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
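+/* 256-bit VPSRAVQ forms.  */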
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_srav_epi64 (__m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srav_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
+                       __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
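+/* Masked bitwise logic on quadword elements (VPANDQ, VPANDNQ, VPORQ,
+   VPXORQ).  */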
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di) __W, __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W, __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_si128 (),
+                                                __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                         __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W, __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                  _mm256_setzero_si256 (),
+                                                 __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                      __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W, __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                 __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                     __m256i __B)
+{
+  return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+                                               (__v4di) __B,
+                                               (__v4di) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A,
+                                               (__v4di) __B,
+                                               (__v4di)
+                                               _mm256_setzero_si256 (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+                                               (__v2di) __B,
+                                               (__v2di) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A,
+                                               (__v2di) __B,
+                                               (__v2di)
+                                               _mm_setzero_si128 (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A,
+                                                (__v4di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
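+/* Illustrative use of the masked bitwise family above: clearing
+   selected qwords without a separate blend, e.g.
+
+     __m128i r = _mm_maskz_and_epi64 (0x2, a, b);
+
+   keeps (a & b) in lane 1 and zeroes lane 0.  */
+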
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf) __W,
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+                                            (__v4sf) __B,
+                                            (__v4sf)
+                                            _mm_setzero_ps (),
+                                            (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df) __W,
+                                             (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+                                             (__v2df) __B,
+                                             (__v2df)
+                                             _mm_setzero_pd (),
+                                             (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf) __W,
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+                                               (__v8sf) __B,
+                                               (__v8sf)
+                                               _mm256_setzero_ps (),
+                                               (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                   __m256d __B)
+{
+  return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+                                                (__v4df) __B,
+                                                (__v4df)
+                                                _mm256_setzero_pd (),
+                                                (__mmask8) __U);
+}
+
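+/* The masked floating-point arithmetic above follows the same
+   pattern, and AVX-512 masking also suppresses FP exceptions in
+   masked-off lanes, so e.g.
+
+     __m128d q = _mm_maskz_div_pd (valid, num, den);
+
+   divides only where valid has a bit set, regardless of what the
+   disabled lanes of den contain.  */
+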
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_epi64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_max_epu64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_min_epu64 (__m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+                                                 (__v4di) __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+                      __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_max_epu64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_min_epu64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+                                                 (__v2di) __B,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W, __M);
+}
+
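+/* The 64-bit integer min/max above have no SSE/AVX2 counterpart; e.g.
+
+     __m128i lo = _mm_min_epu64 (a, b);
+
+   takes the unsigned per-qword minimum directly instead of the
+   compare-and-blend sequence pre-AVX-512 code needs.  */
+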
+#if !defined (__AVX512CD__) || !defined (__AVX512VL__)
+#pragma GCC push_options
+#pragma GCC target("avx512vl,avx512cd")
+#define __DISABLE_AVX512VLCD__
+#endif
+
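+/* Everything from here to the matching pop_options requires AVX512CD
+   in addition to AVX512VL; the pragmas above make these definitions
+   compilable even when the translation unit is not built with
+   -mavx512cd, and __DISABLE_AVX512VLCD__ records that the options
+   must be popped at the end of the block.  */
+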
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m128i) __builtin_ia32_broadcastmb128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcastmb_epi64 (__mmask8 __A)
+{
+  return (__m256i) __builtin_ia32_broadcastmb256 (__A);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m128i) __builtin_ia32_broadcastmw128 (__A);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_broadcastmw_epi32 (__mmask16 __A)
+{
+  return (__m256i) __builtin_ia32_broadcastmw256 (__A);
+}
+
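+/* broadcastm materializes a mask register into every vector element,
+   e.g.
+
+     __m256i v = _mm256_broadcastmb_epi64 (k);
+
+   replicates the 8-bit mask k, zero-extended to 64 bits, into each of
+   the four qwords.  */
+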
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_lzcnt_epi32 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                                                    (__v8si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_lzcnt_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                                                    (__v4di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    (__mmask8) __U);
+}
+
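+/* Illustrative: per-element leading-zero count.
+
+     __m256i n = _mm256_lzcnt_epi32 (v);
+
+   yields 32 in lanes of v that are zero; for nonzero lanes, 31 - n is
+   a branch-free floor(log2).  */
+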
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_conflict_epi64 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+                                                        (__v4di)
+                                                        _mm256_setzero_si256 (),
+                                                         (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+                                                        (__v4di) __W,
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
+                                                        (__v4di)
+                                                        _mm256_setzero_si256 (),
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_conflict_epi32 (__m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+                                                        (__v8si)
+                                                        _mm256_setzero_si256 (),
+                                                         (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+                                                        (__v8si) __W,
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
+                                                        (__v8si)
+                                                        _mm256_setzero_si256 (),
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lzcnt_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                                                    (__v4si) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
+                                                    (__v4si)
+                                                    _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_lzcnt_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                                                    (__v2di)
+                                                     _mm_setzero_si128 (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                                                    (__v2di) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
+                                                    (__v2di)
+                                                     _mm_setzero_si128 (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_conflict_epi64 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+                                                        (__v2di)
+                                                         _mm_setzero_si128 (),
+                                                         (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+                                                        (__v2di) __W,
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
+                                                        (__v2di)
+                                                         _mm_setzero_si128 (),
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_conflict_epi32 (__m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                         (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+                                                        (__v4si) __W,
+                                                         (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                         (__mmask8) __U);
+}
+
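+/* Illustrative conflict-detection use: vpconflict reports, for each
+   element, a bitmask of the earlier elements that hold the same
+   value, so
+
+     __m256i c = _mm256_conflict_epi32 (idx);
+
+   is all-zero exactly when the eight indices in idx are pairwise
+   distinct -- the check that makes vectorized scatter updates safe.  */
+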
+#ifdef __DISABLE_AVX512VLCD__
+#undef __DISABLE_AVX512VLCD__
+#pragma GCC pop_options
+#endif
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                        __m256d __B)
+{
+  return (__m256d) __builtin_ia32_unpcklpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_unpcklpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                     __m128d __B)
+{
+  return (__m128d) __builtin_ia32_unpcklpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_unpcklpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpacklo_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                        __m256 __B)
+{
+  return (__m256) __builtin_ia32_unpcklps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                        __m256d __B)
+{
+  return (__m256d) __builtin_ia32_unpckhpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_pd (__mmask8 __U, __m256d __A, __m256d __B)
+{
+  return (__m256d) __builtin_ia32_unpckhpd256_mask ((__v4df) __A,
+                                                   (__v4df) __B,
+                                                   (__v4df)
+                                                   _mm256_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                     __m128d __B)
+{
+  return (__m128d) __builtin_ia32_unpckhpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_pd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_unpckhpd128_mask ((__v2df) __A,
+                                                   (__v2df) __B,
+                                                   (__v2df)
+                                                   _mm_setzero_pd (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_unpackhi_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                        __m256 __B)
+{
+  return (__m256) __builtin_ia32_unpckhps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpackhi_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_unpckhps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpackhi_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_unpckhps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpackhi_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_unpckhps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
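+/* Illustrative masked half-to-single conversion:
+
+     __m128 f = _mm_maskz_cvtph_ps (0x3, h);
+
+   widens the low F16 values of h, keeping lanes 0 and 1 and zeroing
+   lanes 2 and 3.  */
+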
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_unpacklo_ps (__mmask8 __U, __m256 __A, __m256 __B)
+{
+  return (__m256) __builtin_ia32_unpcklps256_mask ((__v8sf) __A,
+                                                  (__v8sf) __B,
+                                                  (__v8sf)
+                                                  _mm256_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
+{
+  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
+{
+  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_unpacklo_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_unpcklps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf) __W,
+                                                  (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_unpacklo_ps (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_unpcklps128_mask ((__v4sf) __A,
+                                                  (__v4sf) __B,
+                                                  (__v4sf)
+                                                  _mm_setzero_ps (),
+                                                  (__mmask8) __U);
+}
+
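+/* Illustrative contrast between the two mask forms above (sketch; `a' and
+   `b' are hypothetical): the maskz_ form needs no fallback operand because
+   cleared mask bits zero the lane outright.
+     __m128 lo = _mm_maskz_unpacklo_ps (0x5, a, b);  // lanes 1 and 3 == 0.0f
+   */
+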
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sra_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sra_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sra_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sra_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_sra_epi64 (__m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sra_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sra_epi64 (__m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sra_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sra_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
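+/* Illustrative usage (sketch; `v' is a hypothetical __m256i): 64-bit
+   arithmetic right shift (VPSRAQ) is new with AVX-512; the count comes
+   from the low quadword of the second operand.
+     __m128i cnt = _mm_cvtsi32_si128 (3);
+     __m256i r = _mm256_sra_epi64 (v, cnt);  // each lane: v >> 3, sign-filling
+   */
+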
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sll_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sll_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
+                                                (__v4si) __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sll_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sll_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
+                                                (__v2di) __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sll_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sll_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
+                                                (__v4si) __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_sll_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_sll_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+{
+  return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
+                                                (__v2di) __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
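+/* Illustrative usage (sketch; `acc' and `v' are hypothetical): with merge
+   masking, lane 0 below keeps its value from `acc' while lanes 1-3 receive
+   the shifted result.
+     __m128i cnt = _mm_cvtsi32_si128 (1);
+     __m128i r = _mm_mask_sll_epi32 (acc, 0xE, v, cnt);
+   */
+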
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X,
+                           __m256 __Y)
+{
+  return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+                                                   (__v8si) __X,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y)
+{
+  return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
+                                                   (__v8si) __X,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                                                    (__v4di) __X,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
+                           __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                                                    (__v4di) __X,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
+{
+  return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
+                                                    (__v4di) __X,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
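+/* Illustrative usage (sketch; `v' is a hypothetical __m256d): permutexvar
+   is a full-width permute, so indices may cross the 128-bit halves, unlike
+   the in-lane permutevar forms below.
+     __m256i idx = _mm256_set_epi64x (0, 1, 2, 3);
+     __m256d rev = _mm256_permutexvar_pd (idx, v);  // reverses the four lanes
+   */
+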
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutevar_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                          __m256i __C)
+{
+  return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+                                                       (__v4di) __C,
+                                                       (__v4df) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutevar_pd (__mmask8 __U, __m256d __A, __m256i __C)
+{
+  return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
+                                                       (__v4di) __C,
+                                                       (__v4df)
+                                                       _mm256_setzero_pd (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutevar_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                          __m256i __C)
+{
+  return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+                                                      (__v8si) __C,
+                                                      (__v8sf) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutevar_ps (__mmask8 __U, __m256 __A, __m256i __C)
+{
+  return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
+                                                      (__v8si) __C,
+                                                      (__v8sf)
+                                                      _mm256_setzero_ps (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutevar_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                       __m128i __C)
+{
+  return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+                                                    (__v2di) __C,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutevar_pd (__mmask8 __U, __m128d __A, __m128i __C)
+{
+  return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
+                                                    (__v2di) __C,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permutevar_ps (__m128 __W, __mmask8 __U, __m128 __A,
+                       __m128i __C)
+{
+  return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+                                                   (__v4si) __C,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permutevar_ps (__mmask8 __U, __m128 __A, __m128i __C)
+{
+  return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
+                                                   (__v4si) __C,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
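+/* Illustrative usage (sketch; `v' is a hypothetical __m128): the control
+   operand of permutevar selects within each 128-bit half (VPERMILPS), and
+   zero mask bits clear the corresponding result lanes.
+     __m128i ctl = _mm_set_epi32 (0, 1, 2, 3);
+     __m128 r = _mm_maskz_permutevar_ps (0xF, v, ctl);  // v's lanes reversed
+   */
+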
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+                                                    (__v4di) __X,
+                                                    (__v4di)
+                                                    _mm256_setzero_si256 (),
+                                                    __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+                        __m256i __B)
+{
+  return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
+                                                 (__v8si) __B,
+                                                 (__v8si) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+                     __m128i __B)
+{
+  return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
+                                                 (__v4si) __B,
+                                                 (__v4si) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+                      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+                                                 (__v8si) __Y,
+                                                 (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
+                                                 (__v8si) __Y,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
+                   __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+                                                 (__v4si) __Y,
+                                                 (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
+                                                 (__v4si) __Y,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
+                              __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
+                                                    (__v4di) __X,
+                                                    (__v4di) __W,
+                                                    __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
+                      __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+                                                  (__v8si) __Y,
+                                                  (__v4di) __W, __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+                                                    (__v8si) __X,
+                                                    (__v8si)
+                                                    _mm256_setzero_si256 (),
+                                                    __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
+                                                  (__v8si) __Y,
+                                                  (__v4di)
+                                                  _mm256_setzero_si256 (),
+                                                  __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
+                   __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+                                                  (__v4si) __Y,
+                                                  (__v2di) __W, __M);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y)
+{
+  return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
+                                                  (__v4si) __Y,
+                                                  (__v2di)
+                                                  _mm_setzero_si128 (),
+                                                  __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
+                              __m256i __Y)
+{
+  return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
+                                                    (__v8si) __X,
+                                                    (__v8si) __W,
+                                                    __M);
+}
+
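+/* Illustrative note on the multiply forms above (sketch; `x' and `y' are
+   hypothetical __m256i values): mul_epi32/mul_epu32 widen the even 32-bit
+   lanes to full 64-bit products, so the mask addresses 64-bit result lanes,
+   while mullo_epi32 keeps only the low 32 bits of each product.
+     __m256i prod = _mm256_maskz_mul_epi32 (0x3, x, y);  // upper two lanes zeroed
+   The intrinsics from here on take compile-time immediates, hence the
+   __OPTIMIZE__ guard that follows.  */
+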
+#ifdef __OPTIMIZE__
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex_epi64 (__m256i __W, __mmask8 __M,
+                           __m256i __X, const int __I)
+{
+  return (__m256i) __builtin_ia32_permdi256_mask ((__v4di) __X,
+                                                 __I,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex_epi64 (__mmask8 __M, __m256i __X, const int __I)
+{
+  return (__m256i) __builtin_ia32_permdi256_mask ((__v4di) __X,
+                                                 __I,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __M);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                       __m256d __B, const int __imm)
+{
+  return (__m256d) __builtin_ia32_shufpd256_mask ((__v4df) __A,
+                                                 (__v4df) __B, __imm,
+                                                 (__v4df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                        const int __imm)
+{
+  return (__m256d) __builtin_ia32_shufpd256_mask ((__v4df) __A,
+                                                 (__v4df) __B, __imm,
+                                                 (__v4df)
+                                                 _mm256_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shuffle_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                    __m128d __B, const int __imm)
+{
+  return (__m128d) __builtin_ia32_shufpd128_mask ((__v2df) __A,
+                                                 (__v2df) __B, __imm,
+                                                 (__v2df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shuffle_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                     const int __imm)
+{
+  return (__m128d) __builtin_ia32_shufpd128_mask ((__v2df) __A,
+                                                 (__v2df) __B, __imm,
+                                                 (__v2df)
+                                                 _mm_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                       __m256 __B, const int __imm)
+{
+  return (__m256) __builtin_ia32_shufps256_mask ((__v8sf) __A,
+                                                (__v8sf) __B, __imm,
+                                                (__v8sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                        const int __imm)
+{
+  return (__m256) __builtin_ia32_shufps256_mask ((__v8sf) __A,
+                                                (__v8sf) __B, __imm,
+                                                (__v8sf)
+                                                _mm256_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shuffle_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B,
+                    const int __imm)
+{
+  return (__m128) __builtin_ia32_shufps128_mask ((__v4sf) __A,
+                                                (__v4sf) __B, __imm,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shuffle_ps (__mmask8 __U, __m128 __A, __m128 __B,
+                     const int __imm)
+{
+  return (__m128) __builtin_ia32_shufps128_mask ((__v4sf) __A,
+                                                (__v4sf) __B, __imm,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
+
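+/* Illustrative usage (sketch; `a' and `b' are hypothetical): the immediate
+   selects elements exactly as in the AVX shuffle intrinsics; the mask is
+   then applied to the shuffled result.
+     __m128d r = _mm_maskz_shuffle_pd (0x1, a, b, 0x1);
+   Lane 0 is a[1]; lane 1 is zeroed because its mask bit is clear.  */
+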
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_inserti32x4 (__m256i __A, __m128i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A,
+                                                       (__v4si) __B,
+                                                       __imm,
+                                                       (__v8si)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_inserti32x4 (__m256i __W, __mmask8 __U, __m256i __A,
+                        __m128i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A,
+                                                       (__v4si) __B,
+                                                       __imm,
+                                                       (__v8si) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_inserti32x4 (__mmask8 __U, __m256i __A, __m128i __B,
+                         const int __imm)
+{
+  return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A,
+                                                       (__v4si) __B,
+                                                       __imm,
+                                                       (__v8si)
+                                                       _mm256_setzero_si256 (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_insertf32x4 (__m256 __A, __m128 __B, const int __imm)
+{
+  return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A,
+                                                      (__v4sf) __B,
+                                                      __imm,
+                                                      (__v8sf)
+                                                      _mm256_setzero_ps (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_insertf32x4 (__m256 __W, __mmask8 __U, __m256 __A,
+                        __m128 __B, const int __imm)
+{
+  return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A,
+                                                      (__v4sf) __B,
+                                                      __imm,
+                                                      (__v8sf) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_insertf32x4 (__mmask8 __U, __m256 __A, __m128 __B,
+                         const int __imm)
+{
+  return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A,
+                                                      (__v4sf) __B,
+                                                      __imm,
+                                                      (__v8sf)
+                                                      _mm256_setzero_ps (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_extracti32x4_epi32 (__m256i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A,
+                                                        __imm,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                        (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_extracti32x4_epi32 (__m128i __W, __mmask8 __U, __m256i __A,
+                               const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A,
+                                                        __imm,
+                                                        (__v4si) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_extracti32x4_epi32 (__mmask8 __U, __m256i __A,
+                                const int __imm)
+{
+  return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A,
+                                                        __imm,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_extractf32x4_ps (__m256 __A, const int __imm)
+{
+  return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A,
+                                                       __imm,
+                                                       (__v4sf)
+                                                       _mm_setzero_ps (),
+                                                       (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_extractf32x4_ps (__m128 __W, __mmask8 __U, __m256 __A,
+                            const int __imm)
+{
+  return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A,
+                                                       __imm,
+                                                       (__v4sf) __W,
+                                                       (__mmask8)
+                                                       __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_extractf32x4_ps (__mmask8 __U, __m256 __A,
+                             const int __imm)
+{
+  return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A,
+                                                       __imm,
+                                                       (__v4sf)
+                                                       _mm_setzero_ps (),
+                                                       (__mmask8)
+                                                       __U);
+}
+
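+/* Illustrative usage (sketch; `v' and `quad' are hypothetical): the
+   immediate picks the 128-bit half to replace or extract, and the unmasked
+   forms simply pass (__mmask8)-1 to the builtin.
+     __m256 r = _mm256_insertf32x4 (v, quad, 1);   // replace the upper half
+     __m128 hi = _mm256_extractf32x4_ps (r, 1);    // hi now equals quad
+   */
+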
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_shuffle_i64x2 (__m256i __A, __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A,
+                                                      (__v4di) __B,
+                                                      __imm,
+                                                      (__v4di)
+                                                      _mm256_setzero_si256 (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_i64x2 (__m256i __W, __mmask8 __U, __m256i __A,
+                          __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A,
+                                                      (__v4di) __B,
+                                                      __imm,
+                                                      (__v4di) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_i64x2 (__mmask8 __U, __m256i __A, __m256i __B,
+                           const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A,
+                                                      (__v4di) __B,
+                                                      __imm,
+                                                      (__v4di)
+                                                      _mm256_setzero_si256 (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_shuffle_i32x4 (__m256i __A, __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A,
+                                                      (__v8si) __B,
+                                                      __imm,
+                                                      (__v8si)
+                                                      _mm256_setzero_si256 (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_i32x4 (__m256i __W, __mmask8 __U, __m256i __A,
+                          __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A,
+                                                      (__v8si) __B,
+                                                      __imm,
+                                                      (__v8si) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_i32x4 (__mmask8 __U, __m256i __A, __m256i __B,
+                           const int __imm)
+{
+  return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A,
+                                                      (__v8si) __B,
+                                                      __imm,
+                                                      (__v8si)
+                                                      _mm256_setzero_si256 (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_shuffle_f64x2 (__m256d __A, __m256d __B, const int __imm)
+{
+  return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      __imm,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_f64x2 (__m256d __W, __mmask8 __U, __m256d __A,
+                          __m256d __B, const int __imm)
+{
+  return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      __imm,
+                                                      (__v4df) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_f64x2 (__mmask8 __U, __m256d __A, __m256d __B,
+                           const int __imm)
+{
+  return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      __imm,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_shuffle_f32x4 (__m256 __A, __m256 __B, const int __imm)
+{
+  return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     __imm,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_f32x4 (__m256 __W, __mmask8 __U, __m256 __A,
+                          __m256 __B, const int __imm)
+{
+  return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     __imm,
+                                                     (__v8sf) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_f32x4 (__mmask8 __U, __m256 __A, __m256 __B,
+                           const int __imm)
+{
+  return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     __imm,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     (__mmask8) __U);
+}
+
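+/* Illustrative usage (sketch; `a' and `b' are hypothetical): each immediate
+   bit selects one 128-bit block, the low block always coming from the first
+   operand and the high block from the second.
+     __m256i r = _mm256_shuffle_i64x2 (a, b, 0x3);  // { high128(a), high128(b) }
+   */
+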
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fixupimm_pd (__m256d __A, __m256d __B, __m256i __C,
+                   const int __imm)
+{
+  return (__m256d) __builtin_ia32_fixupimmpd256_mask ((__v4df) __A,
+                                                     (__v4df) __B,
+                                                     (__v4di) __C,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fixupimm_pd (__m256d __A, __mmask8 __U, __m256d __B,
+                        __m256i __C, const int __imm)
+{
+  return (__m256d) __builtin_ia32_fixupimmpd256_mask ((__v4df) __A,
+                                                     (__v4df) __B,
+                                                     (__v4di) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fixupimm_pd (__mmask8 __U, __m256d __A, __m256d __B,
+                         __m256i __C, const int __imm)
+{
+  return (__m256d) __builtin_ia32_fixupimmpd256_maskz ((__v4df) __A,
+                                                      (__v4df) __B,
+                                                      (__v4di) __C,
+                                                      __imm,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_fixupimm_ps (__m256 __A, __m256 __B, __m256i __C,
+                   const int __imm)
+{
+  return (__m256) __builtin_ia32_fixupimmps256_mask ((__v8sf) __A,
+                                                    (__v8sf) __B,
+                                                    (__v8si) __C,
+                                                    __imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_fixupimm_ps (__m256 __A, __mmask8 __U, __m256 __B,
+                        __m256i __C, const int __imm)
+{
+  return (__m256) __builtin_ia32_fixupimmps256_mask ((__v8sf) __A,
+                                                    (__v8sf) __B,
+                                                    (__v8si) __C,
+                                                    __imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_fixupimm_ps (__mmask8 __U, __m256 __A, __m256 __B,
+                         __m256i __C, const int __imm)
+{
+  return (__m256) __builtin_ia32_fixupimmps256_maskz ((__v8sf) __A,
+                                                     (__v8sf) __B,
+                                                     (__v8si) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_pd (__m128d __A, __m128d __B, __m128i __C,
+                const int __imm)
+{
+  return (__m128d) __builtin_ia32_fixupimmpd128_mask ((__v2df) __A,
+                                                     (__v2df) __B,
+                                                     (__v2di) __C,
+                                                     __imm,
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_pd (__m128d __A, __mmask8 __U, __m128d __B,
+                     __m128i __C, const int __imm)
+{
+  return (__m128d) __builtin_ia32_fixupimmpd128_mask ((__v2df) __A,
+                                                     (__v2df) __B,
+                                                     (__v2di) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_pd (__mmask8 __U, __m128d __A, __m128d __B,
+                      __m128i __C, const int __imm)
+{
+  return (__m128d) __builtin_ia32_fixupimmpd128_maskz ((__v2df) __A,
+                                                      (__v2df) __B,
+                                                      (__v2di) __C,
+                                                      __imm,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_fixupimm_ps (__m128 __A, __m128 __B, __m128i __C, const int __imm)
+{
+  return (__m128) __builtin_ia32_fixupimmps128_mask ((__v4sf) __A,
+                                                    (__v4sf) __B,
+                                                    (__v4si) __C,
+                                                    __imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_fixupimm_ps (__m128 __A, __mmask8 __U, __m128 __B,
+                     __m128i __C, const int __imm)
+{
+  return (__m128) __builtin_ia32_fixupimmps128_mask ((__v4sf) __A,
+                                                    (__v4sf) __B,
+                                                    (__v4si) __C,
+                                                    __imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_fixupimm_ps (__mmask8 __U, __m128 __A, __m128 __B,
+                      __m128i __C, const int __imm)
+{
+  return (__m128) __builtin_ia32_fixupimmps128_maskz ((__v4sf) __A,
+                                                     (__v4sf) __B,
+                                                     (__v4si) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
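+/* Note on the fixupimm family above: the first operand is both an input and
+   the merge destination, so zeroing cannot be expressed through the plain
+   ..._mask builtin; the _maskz variants therefore route through dedicated
+   ..._maskz builtins.  */
+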
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srli_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrldi256_mask ((__v8si) __A, __imm,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srli_epi32 (__mmask8 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrldi256_mask ((__v8si) __A, __imm,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srli_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrldi128_mask ((__v4si) __A, __imm,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srli_epi32 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrldi128_mask ((__v4si) __A, __imm,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srli_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrlqi256_mask ((__v4di) __A, __imm,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srli_epi64 (__mmask8 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psrlqi256_mask ((__v4di) __A, __imm,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srli_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrlqi128_mask ((__v2di) __A, __imm,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srli_epi64 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psrlqi128_mask ((__v2di) __A, __imm,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
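+/* Illustrative usage (sketch; `acc' and `v' are hypothetical): immediate
+   logical right shift with merge masking; even lanes below keep `acc'.
+     __m256i r = _mm256_mask_srli_epi32 (acc, 0xAA, v, 4);
+   */
+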
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_ternarylogic_epi64 (__m256i __A, __m256i __B, __m256i __C,
+                          const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogq256_mask ((__v4di) __A,
+                                                    (__v4di) __B,
+                                                    (__v4di) __C, __imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_ternarylogic_epi64 (__m256i __A, __mmask8 __U,
+                               __m256i __B, __m256i __C,
+                               const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogq256_mask ((__v4di) __A,
+                                                    (__v4di) __B,
+                                                    (__v4di) __C, __imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_ternarylogic_epi64 (__mmask8 __U, __m256i __A,
+                                __m256i __B, __m256i __C,
+                                const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogq256_maskz ((__v4di) __A,
+                                                     (__v4di) __B,
+                                                     (__v4di) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_ternarylogic_epi32 (__m256i __A, __m256i __B, __m256i __C,
+                          const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogd256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si) __C, __imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_ternarylogic_epi32 (__m256i __A, __mmask8 __U,
+                               __m256i __B, __m256i __C,
+                               const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogd256_mask ((__v8si) __A,
+                                                    (__v8si) __B,
+                                                    (__v8si) __C, __imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_ternarylogic_epi32 (__mmask8 __U, __m256i __A,
+                                __m256i __B, __m256i __C,
+                                const int __imm)
+{
+  return (__m256i) __builtin_ia32_pternlogd256_maskz ((__v8si) __A,
+                                                     (__v8si) __B,
+                                                     (__v8si) __C,
+                                                     __imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ternarylogic_epi64 (__m128i __A, __m128i __B, __m128i __C,
+                       const int __imm)
+{
+  return (__m128i) __builtin_ia32_pternlogq128_mask ((__v2di) __A,
+                                                    (__v2di) __B,
+                                                    (__v2di) __C, __imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_ternarylogic_epi64 (__m128i __A, __mmask8 __U,
+                            __m128i __B, __m128i __C, const int __imm)
+{
+  return (__m128i) __builtin_ia32_pternlogq128_mask ((__v2di) __A,
+                                                    (__v2di) __B,
+                                                    (__v2di) __C, __imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_ternarylogic_epi64 (__mmask8 __U, __m128i __A,
+                             __m128i __B, __m128i __C, const int imm)
+{
+  return (__m128i) __builtin_ia32_pternlogq128_maskz ((__v2di) __A,
+                                                     (__v2di) __B,
+                                                     (__v2di) __C,
+                                                     imm,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ternarylogic_epi32 (__m128i __A, __m128i __B, __m128i __C,
+                       const int imm)
+{
+  return (__m128i) __builtin_ia32_pternlogd128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si) __C, imm,
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_ternarylogic_epi32 (__m128i __A, __mmask8 __U,
+                            __m128i __B, __m128i __C, const int imm)
+{
+  return (__m128i) __builtin_ia32_pternlogd128_mask ((__v4si) __A,
+                                                    (__v4si) __B,
+                                                    (__v4si) __C, imm,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_ternarylogic_epi32 (__mmask8 __U, __m128i __A,
+                             __m128i __B, __m128i __C, const int imm)
+{
+  return (__m128i) __builtin_ia32_pternlogd128_maskz ((__v4si) __A,
+                                                     (__v4si) __B,
+                                                     (__v4si) __C,
+                                                     imm,
+                                                     (__mmask8) __U);
+}
+
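+/* Usage sketch (illustrative, not part of this patch): per the Intel
+   SDM, the VPTERNLOG immediate is a truth table.  For each bit
+   position, the bits of __A, __B and __C form a 3-bit index (__A most
+   significant), and that bit of the immediate supplies the result.
+   E.g. 0xE8 encodes the bitwise majority function; assuming __a, __b
+   and __c are __m256i values in scope:
+
+     __m256i __maj = _mm256_ternarylogic_epi64 (__a, __b, __c, 0xE8);
+*/
+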
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_roundscale_ps (__m256 __A, const int __imm)
+{
+  return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A,
+                                                     __imm,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_roundscale_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                          const int __imm)
+{
+  return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A,
+                                                     __imm,
+                                                     (__v8sf) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_roundscale_ps (__mmask8 __U, __m256 __A, const int __imm)
+{
+  return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A,
+                                                     __imm,
+                                                     (__v8sf)
+                                                     _mm256_setzero_ps (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_roundscale_pd (__m256d __A, const int __imm)
+{
+  return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A,
+                                                      __imm,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_roundscale_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                          const int __imm)
+{
+  return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A,
+                                                      __imm,
+                                                      (__v4df) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_roundscale_pd (__mmask8 __U, __m256d __A, const int __imm)
+{
+  return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A,
+                                                      __imm,
+                                                      (__v4df)
+                                                      _mm256_setzero_pd (),
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_ps (__m128 __A, const int __imm)
+{
+  return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A,
+                                                     __imm,
+                                                     (__v4sf)
+                                                     _mm_setzero_ps (),
+                                                     (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_ps (__m128 __W, __mmask8 __U, __m128 __A,
+                       const int __imm)
+{
+  return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A,
+                                                     __imm,
+                                                     (__v4sf) __W,
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_ps (__mmask8 __U, __m128 __A, const int __imm)
+{
+  return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A,
+                                                     __imm,
+                                                     (__v4sf)
+                                                     _mm_setzero_ps (),
+                                                     (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_roundscale_pd (__m128d __A, const int __imm)
+{
+  return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A,
+                                                      __imm,
+                                                      (__v2df)
+                                                      _mm_setzero_pd (),
+                                                      (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_roundscale_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                       const int __imm)
+{
+  return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A,
+                                                      __imm,
+                                                      (__v2df) __W,
+                                                      (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_roundscale_pd (__mmask8 __U, __m128d __A, const int __imm)
+{
+  return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A,
+                                                      __imm,
+                                                      (__v2df)
+                                                      _mm_setzero_pd (),
+                                                      (__mmask8) __U);
+}
+
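+/* Note (per the Intel SDM for VRNDSCALEPS/VRNDSCALEPD; illustrative):
+   the immediate packs M = imm[7:4], the number of fraction bits to
+   keep, with rounding control in imm[3:0]; each element is rounded to
+   the nearest multiple of 2^-M.  A sketch, assuming __x is a __m256:
+
+     __m256 __q = _mm256_roundscale_ps (__x, (4 << 4) | 0x01);
+
+   rounds every element down to a multiple of 1/16.  */
+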
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_getmant_ps (__m256 __A, _MM_MANTISSA_NORM_ENUM __B,
+                  _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_getmant_ps (__m256 __W, __mmask8 __U, __m256 __A,
+                       _MM_MANTISSA_NORM_ENUM __B,
+                       _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_getmant_ps (__mmask8 __U, __m256 __A,
+                        _MM_MANTISSA_NORM_ENUM __B,
+                        _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_ps (__m128 __A, _MM_MANTISSA_NORM_ENUM __B,
+               _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) -1);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_ps (__m128 __W, __mmask8 __U, __m128 __A,
+                    _MM_MANTISSA_NORM_ENUM __B,
+                    _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_ps (__mmask8 __U, __m128 __A,
+                     _MM_MANTISSA_NORM_ENUM __B,
+                     _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A,
+                                                   (__C << 2) | __B,
+                                                   (__v4sf)
+                                                   _mm_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_getmant_pd (__m256d __A, _MM_MANTISSA_NORM_ENUM __B,
+                  _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_getmant_pd (__m256d __W, __mmask8 __U, __m256d __A,
+                       _MM_MANTISSA_NORM_ENUM __B,
+                       _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_getmant_pd (__mmask8 __U, __m256d __A,
+                        _MM_MANTISSA_NORM_ENUM __B,
+                        _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_getmant_pd (__m128d __A, _MM_MANTISSA_NORM_ENUM __B,
+               _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) -1);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_getmant_pd (__m128d __W, __mmask8 __U, __m128d __A,
+                    _MM_MANTISSA_NORM_ENUM __B,
+                    _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_getmant_pd (__mmask8 __U, __m128d __A,
+                     _MM_MANTISSA_NORM_ENUM __B,
+                     _MM_MANTISSA_SIGN_ENUM __C)
+{
+  return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A,
+                                                    (__C << 2) | __B,
+                                                    (__v2df)
+                                                    _mm_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
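+/* Note: the getmant builtins take a single immediate, so the wrappers
+   above pack the sign control into bits 3:2 and the normalization
+   interval into bits 1:0 via (__C << 2) | __B, matching the VGETMANT
+   imm8 layout in the Intel SDM.  Illustrative call, assuming __x is a
+   __m256d in scope:
+
+     __m256d __m = _mm256_getmant_pd (__x, _MM_MANT_NORM_1_2,
+                                      _MM_MANT_SIGN_src);
+*/
+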
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i32gather_ps (__m256 __v1_old, __mmask8 __mask,
+                          __m256i __index, float const *__addr,
+                          int __scale)
+{
+  return (__m256) __builtin_ia32_gather3siv8sf ((__v8sf) __v1_old,
+                                               __addr,
+                                               (__v8si) __index,
+                                               __mask, __scale);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i32gather_ps (__m128 __v1_old, __mmask8 __mask,
+                       __m128i __index, float const *__addr,
+                       int __scale)
+{
+  return (__m128) __builtin_ia32_gather3siv4sf ((__v4sf) __v1_old,
+                                               __addr,
+                                               (__v4si) __index,
+                                               __mask, __scale);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i32gather_pd (__m256d __v1_old, __mmask8 __mask,
+                          __m128i __index, double const *__addr,
+                          int __scale)
+{
+  return (__m256d) __builtin_ia32_gather3siv4df ((__v4df) __v1_old,
+                                                __addr,
+                                                (__v4si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i32gather_pd (__m128d __v1_old, __mmask8 __mask,
+                       __m128i __index, double const *__addr,
+                       int __scale)
+{
+  return (__m128d) __builtin_ia32_gather3siv2df ((__v2df) __v1_old,
+                                                __addr,
+                                                (__v4si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i64gather_ps (__m128 __v1_old, __mmask8 __mask,
+                          __m256i __index, float const *__addr,
+                          int __scale)
+{
+  return (__m128) __builtin_ia32_gather3div8sf ((__v4sf) __v1_old,
+                                               __addr,
+                                               (__v4di) __index,
+                                               __mask, __scale);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i64gather_ps (__m128 __v1_old, __mmask8 __mask,
+                       __m128i __index, float const *__addr,
+                       int __scale)
+{
+  return (__m128) __builtin_ia32_gather3div4sf ((__v4sf) __v1_old,
+                                               __addr,
+                                               (__v2di) __index,
+                                               __mask, __scale);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i64gather_pd (__m256d __v1_old, __mmask8 __mask,
+                          __m256i __index, double const *__addr,
+                          int __scale)
+{
+  return (__m256d) __builtin_ia32_gather3div4df ((__v4df) __v1_old,
+                                                __addr,
+                                                (__v4di) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i64gather_pd (__m128d __v1_old, __mmask8 __mask,
+                       __m128i __index, double const *__addr,
+                       int __scale)
+{
+  return (__m128d) __builtin_ia32_gather3div2df ((__v2df) __v1_old,
+                                                __addr,
+                                                (__v2di) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i32gather_epi32 (__m256i __v1_old, __mmask8 __mask,
+                             __m256i __index, int const *__addr,
+                             int __scale)
+{
+  return (__m256i) __builtin_ia32_gather3siv8si ((__v8si) __v1_old,
+                                                __addr,
+                                                (__v8si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i32gather_epi32 (__m128i __v1_old, __mmask8 __mask,
+                          __m128i __index, int const *__addr,
+                          int __scale)
+{
+  return (__m128i) __builtin_ia32_gather3siv4si ((__v4si) __v1_old,
+                                                __addr,
+                                                (__v4si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i32gather_epi64 (__m256i __v1_old, __mmask8 __mask,
+                             __m128i __index, long long const *__addr,
+                             int __scale)
+{
+  return (__m256i) __builtin_ia32_gather3siv4di ((__v4di) __v1_old,
+                                                __addr,
+                                                (__v4si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i32gather_epi64 (__m128i __v1_old, __mmask8 __mask,
+                          __m128i __index, long long const *__addr,
+                          int __scale)
+{
+  return (__m128i) __builtin_ia32_gather3siv2di ((__v2di) __v1_old,
+                                                __addr,
+                                                (__v4si) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i64gather_epi32 (__m128i __v1_old, __mmask8 __mask,
+                             __m256i __index, int const *__addr,
+                             int __scale)
+{
+  return (__m128i) __builtin_ia32_gather3div8si ((__v4si) __v1_old,
+                                                __addr,
+                                                (__v4di) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i64gather_epi32 (__m128i __v1_old, __mmask8 __mask,
+                          __m128i __index, int const *__addr,
+                          int __scale)
+{
+  return (__m128i) __builtin_ia32_gather3div4si ((__v4si) __v1_old,
+                                                __addr,
+                                                (__v2di) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mmask_i64gather_epi64 (__m256i __v1_old, __mmask8 __mask,
+                             __m256i __index, long long const *__addr,
+                             int __scale)
+{
+  return (__m256i) __builtin_ia32_gather3div4di ((__v4di) __v1_old,
+                                                __addr,
+                                                (__v4di) __index,
+                                                __mask, __scale);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mmask_i64gather_epi64 (__m128i __v1_old, __mmask8 __mask,
+                          __m128i __index, long long const *__addr,
+                          int __scale)
+{
+  return (__m128i) __builtin_ia32_gather3div2di ((__v2di) __v1_old,
+                                                __addr,
+                                                (__v2di) __index,
+                                                __mask, __scale);
+}
+
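+/* Note on the masked gathers above: __scale must be 1, 2, 4 or 8.
+   Element i is loaded from __addr + __index[i] * __scale when bit i of
+   __mask is set, and otherwise kept from __v1_old.  Illustrative
+   sketch, assuming __old (__m256), __idx (__m256i) and __base
+   (float const *) are in scope; the low four lanes are gathered:
+
+     __m256 __g = _mm256_mmask_i32gather_ps (__old, 0x0F, __idx,
+                                             __base, 4);
+*/
+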
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i32scatter_ps (float *__addr, __m256i __index,
+                     __m256 __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF,
+                               (__v8si) __index, (__v8sf) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i32scatter_ps (float *__addr, __mmask8 __mask,
+                          __m256i __index, __m256 __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index,
+                               (__v8sf) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i32scatter_ps (float *__addr, __m128i __index, __m128 __v1,
+                  const int __scale)
+{
+  __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v4sf) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i32scatter_ps (float *__addr, __mmask8 __mask,
+                       __m128i __index, __m128 __v1,
+                       const int __scale)
+{
+  __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index,
+                               (__v4sf) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i32scatter_pd (double *__addr, __m128i __index,
+                     __m256d __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v4df) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i32scatter_pd (double *__addr, __mmask8 __mask,
+                          __m128i __index, __m256d __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index,
+                               (__v4df) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i32scatter_pd (double *__addr, __m128i __index,
+                  __m128d __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v2df) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i32scatter_pd (double *__addr, __mmask8 __mask,
+                       __m128i __index, __m128d __v1,
+                       const int __scale)
+{
+  __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index,
+                               (__v2df) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i64scatter_ps (float *__addr, __m256i __index,
+                     __m128 __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF,
+                               (__v4di) __index, (__v4sf) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i64scatter_ps (float *__addr, __mmask8 __mask,
+                          __m256i __index, __m128 __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index,
+                               (__v4sf) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i64scatter_ps (float *__addr, __m128i __index, __m128 __v1,
+                  const int __scale)
+{
+  __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF,
+                               (__v2di) __index, (__v4sf) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i64scatter_ps (float *__addr, __mmask8 __mask,
+                       __m128i __index, __m128 __v1,
+                       const int __scale)
+{
+  __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index,
+                               (__v4sf) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i64scatter_pd (double *__addr, __m256i __index,
+                     __m256d __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF,
+                               (__v4di) __index, (__v4df) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i64scatter_pd (double *__addr, __mmask8 __mask,
+                          __m256i __index, __m256d __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index,
+                               (__v4df) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i64scatter_pd (double *__addr, __m128i __index,
+                  __m128d __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv2df (__addr, (__mmask8) 0xFF,
+                               (__v2di) __index, (__v2df) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i64scatter_pd (double *__addr, __mmask8 __mask,
+                       __m128i __index, __m128d __v1,
+                       const int __scale)
+{
+  __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index,
+                               (__v2df) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i32scatter_epi32 (int *__addr, __m256i __index,
+                        __m256i __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF,
+                               (__v8si) __index, (__v8si) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i32scatter_epi32 (int *__addr, __mmask8 __mask,
+                             __m256i __index, __m256i __v1,
+                             const int __scale)
+{
+  __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index,
+                               (__v8si) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i32scatter_epi32 (int *__addr, __m128i __index,
+                     __m128i __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v4si) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i32scatter_epi32 (int *__addr, __mmask8 __mask,
+                          __m128i __index, __m128i __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index,
+                               (__v4si) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i32scatter_epi64 (long long *__addr, __m128i __index,
+                        __m256i __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v4di) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i32scatter_epi64 (long long *__addr, __mmask8 __mask,
+                             __m128i __index, __m256i __v1,
+                             const int __scale)
+{
+  __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index,
+                               (__v4di) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i32scatter_epi64 (long long *__addr, __m128i __index,
+                     __m128i __v1, const int __scale)
+{
+  __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF,
+                               (__v4si) __index, (__v2di) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i32scatter_epi64 (long long *__addr, __mmask8 __mask,
+                          __m128i __index, __m128i __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index,
+                               (__v2di) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i64scatter_epi32 (int *__addr, __m256i __index,
+                        __m128i __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF,
+                               (__v4di) __index, (__v4si) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i64scatter_epi32 (int *__addr, __mmask8 __mask,
+                             __m256i __index, __m128i __v1,
+                             const int __scale)
+{
+  __builtin_ia32_scatterdiv8si (__addr, __mask, (__v4di) __index,
+                               (__v4si) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i64scatter_epi32 (int *__addr, __m128i __index,
+                     __m128i __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF,
+                               (__v2di) __index, (__v4si) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i64scatter_epi32 (int *__addr, __mmask8 __mask,
+                          __m128i __index, __m128i __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index,
+                               (__v4si) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_i64scatter_epi64 (long long *__addr, __m256i __index,
+                        __m256i __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF,
+                               (__v4di) __index, (__v4di) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_i64scatter_epi64 (long long *__addr, __mmask8 __mask,
+                             __m256i __index, __m256i __v1,
+                             const int __scale)
+{
+  __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index,
+                               (__v4di) __v1, __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_i64scatter_epi64 (long long *__addr, __m128i __index,
+                     __m128i __v1, const int __scale)
+{
+  __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF,
+                               (__v2di) __index, (__v2di) __v1,
+                               __scale);
+}
+
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_i64scatter_epi64 (long long *__addr, __mmask8 __mask,
+                          __m128i __index, __m128i __v1,
+                          const int __scale)
+{
+  __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index,
+                               (__v2di) __v1, __scale);
+}
+
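+/* Note on the scatters above: the unmasked forms simply pass a full
+   (__mmask8) 0xFF write mask to the masked builtin; with a partial
+   mask only the selected elements are stored to
+   __addr + __index[i] * __scale.  Illustrative sketch, assuming __buf
+   (float *), __idx (__m256i) and __v (__m256) are in scope:
+
+     _mm256_mask_i32scatter_ps (__buf, 0x0F, __idx, __v, 4);
+*/
+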
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_shuffle_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                          _MM_PERM_ENUM __mask)
+{
+  return (__m256i) __builtin_ia32_pshufd256_mask ((__v8si) __A, __mask,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_shuffle_epi32 (__mmask8 __U, __m256i __A,
+                           _MM_PERM_ENUM __mask)
+{
+  return (__m256i) __builtin_ia32_pshufd256_mask ((__v8si) __A, __mask,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_shuffle_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                       _MM_PERM_ENUM __mask)
+{
+  return (__m128i) __builtin_ia32_pshufd128_mask ((__v4si) __A, __mask,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_shuffle_epi32 (__mmask8 __U, __m128i __A,
+                        _MM_PERM_ENUM __mask)
+{
+  return (__m128i) __builtin_ia32_pshufd128_mask ((__v4si) __A, __mask,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
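+/* Note: _MM_PERM_ENUM reuses the _MM_SHUFFLE two-bits-per-element
+   encoding, applied independently within each 128-bit lane; e.g.
+   _MM_PERM_DCBA (0xE4) is the identity permutation.  Illustrative,
+   assuming __w and __a are __m256i values in scope:
+
+     __m256i __r = _mm256_mask_shuffle_epi32 (__w, 0x0F, __a,
+                                              _MM_PERM_DCBA);
+*/
+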
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rol_epi32 (__m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rol_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      const int __B)
+{
+  return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rol_epi32 (__mmask8 __U, __m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rol_epi32 (__m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rol_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   const int __B)
+{
+  return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rol_epi32 (__mmask8 __U, __m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_ror_epi32 (__m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_ror_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                      const int __B)
+{
+  return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B,
+                                                (__v8si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_ror_epi32 (__mmask8 __U, __m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B,
+                                                (__v8si)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ror_epi32 (__m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_ror_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                   const int __B)
+{
+  return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B,
+                                                (__v4si) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_ror_epi32 (__mmask8 __U, __m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B,
+                                                (__v4si)
+                                                _mm_setzero_si128 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_rol_epi64 (__m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_rol_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      const int __B)
+{
+  return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_rol_epi64 (__mmask8 __U, __m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_rol_epi64 (__m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_rol_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   const int __B)
+{
+  return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_rol_epi64 (__mmask8 __U, __m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_ror_epi64 (__m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_ror_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                      const int __B)
+{
+  return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B,
+                                                (__v4di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_ror_epi64 (__mmask8 __U, __m256i __A, const int __B)
+{
+  return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B,
+                                                (__v4di)
+                                                _mm256_setzero_si256 (),
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_ror_epi64 (__m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_ror_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                   const int __B)
+{
+  return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B,
+                                                (__v2di) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_ror_epi64 (__mmask8 __U, __m128i __A, const int __B)
+{
+  return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B,
+                                                (__v2di)
+                                                _mm_setzero_di (),
+                                                (__mmask8) __U);
+}
+
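+/* Note: VPROLD/VPROLQ and VPRORD/VPRORQ rotate each element by the
+   immediate, with the count taken modulo the element width (32 or
+   64), so any __B is well defined.  Illustrative sketch, assuming __a
+   is a __m256i in scope; this rotates each dword left by one byte:
+
+     __m256i __r = _mm256_rol_epi32 (__a, 8);
+*/
+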
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_epi32 (__m128i __A, __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A,
+                                                 (__v4si) __B, __imm,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_alignr_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                      __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A,
+                                                 (__v4si) __B, __imm,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_alignr_epi32 (__mmask8 __U, __m128i __A, __m128i __B,
+                       const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A,
+                                                 (__v4si) __B, __imm,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_alignr_epi64 (__m128i __A, __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A,
+                                                 (__v2di) __B, __imm,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_alignr_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                      __m128i __B, const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A,
+                                                 (__v2di) __B, __imm,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_alignr_epi64 (__mmask8 __U, __m128i __A, __m128i __B,
+                       const int __imm)
+{
+  return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A,
+                                                 (__v2di) __B, __imm,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_alignr_epi32 (__m256i __A, __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A,
+                                                 (__v8si) __B, __imm,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_alignr_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                         __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A,
+                                                 (__v8si) __B, __imm,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_alignr_epi32 (__mmask8 __U, __m256i __A, __m256i __B,
+                          const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A,
+                                                 (__v8si) __B, __imm,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_alignr_epi64 (__m256i __A, __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A,
+                                                 (__v4di) __B, __imm,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_alignr_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                         __m256i __B, const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A,
+                                                 (__v4di) __B, __imm,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_alignr_epi64 (__mmask8 __U, __m256i __A, __m256i __B,
+                          const int __imm)
+{
+  return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A,
+                                                 (__v4di) __B, __imm,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
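+/* Note: unlike the byte-granular palignr, VALIGND/VALIGNQ concatenate
+   the two sources and shift right by whole 32- or 64-bit elements,
+   across the full vector rather than per 128-bit lane.  Illustrative,
+   assuming __a and __b are __m256i values in scope:
+
+     __m256i __r = _mm256_alignr_epi32 (__a, __b, 3);
+*/
+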
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A,
+                  const int __I)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, __I,
+                                                 (__v8hi) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A, const int __I)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, __I,
+                                                 (__v8hi)
+                                                 _mm_setzero_hi (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A,
+                     const int __I)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, __I,
+                                                    (__v8hi) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_cvtps_ph (__mmask8 __U, __m256 __A, const int __I)
+{
+  return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, __I,
+                                                    (__v8hi)
+                                                    _mm_setzero_hi (),
+                                                    (__mmask8) __U);
+}
+
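+/* Note: the __I immediate of VCVTPS2PH selects the rounding mode for
+   the float-to-half conversion, e.g. _MM_FROUND_TO_NEAREST_INT or
+   _MM_FROUND_CUR_DIRECTION (round per MXCSR).  Illustrative sketch,
+   assuming __x is a __m128 in scope:
+
+     __m128i __h = _mm_maskz_cvtps_ph (0x0F, __x,
+                                       _MM_FROUND_TO_NEAREST_INT);
+*/
+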
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srai_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psradi256_mask ((__v8si) __A, __imm,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srai_epi32 (__mmask8 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psradi256_mask ((__v8si) __A, __imm,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srai_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psradi128_mask ((__v4si) __A, __imm,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srai_epi32 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psradi128_mask ((__v4si) __A, __imm,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_srai_epi64 (__m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_srai_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                       const int __imm)
+{
+  return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_srai_epi64 (__mmask8 __U, __m256i __A, const int __imm)
+{
+  return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_srai_epi64 (__m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_srai_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
+                    const int __imm)
+{
+  return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_srai_epi64 (__mmask8 __U, __m128i __A, const int __imm)
+{
+  return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm,
+                                                 (__v2di)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
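+
+/* Usage sketch (illustration only): AVX-512VL supplies the 64-bit
+   arithmetic right shift missing from AVX2, with merge masking
+   keeping inactive lanes from __W:
+
+     __m128i a = _mm_set_epi64x (-16, 64);
+     __m128i r = _mm_mask_srai_epi64 (a, 0x2, a, 2);
+
+   Lane 1 becomes -4 (the sign bit is replicated); lane 0 keeps 64.  */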
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_slli_epi32 (__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_pslldi128_mask ((__v4si) __A, __B,
+                                                 (__v4si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_slli_epi32 (__mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_pslldi128_mask ((__v4si) __A, __B,
+                                                 (__v4si)
+                                                 _mm_setzero_si128 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_slli_epi64 (__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_psllqi128_mask ((__v2di) __A, __B,
+                                                 (__v2di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_slli_epi64 (__mmask8 __U, __m128i __A, int __B)
+{
+  return (__m128i) __builtin_ia32_psllqi128_mask ((__v2di) __A, __B,
+                                                 (__v2di)
+                                                 _mm_setzero_di (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_slli_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
+                       int __B)
+{
+  return (__m256i) __builtin_ia32_pslldi256_mask ((__v8si) __A, __B,
+                                                 (__v8si) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_slli_epi32 (__mmask8 __U, __m256i __A, int __B)
+{
+  return (__m256i) __builtin_ia32_pslldi256_mask ((__v8si) __A, __B,
+                                                 (__v8si)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_slli_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
+                       int __B)
+{
+  return (__m256i) __builtin_ia32_psllqi256_mask ((__v4di) __A, __B,
+                                                 (__v4di) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_slli_epi64 (__mmask8 __U, __m256i __A, int __B)
+{
+  return (__m256i) __builtin_ia32_psllqi256_mask ((__v4di) __A, __B,
+                                                 (__v4di)
+                                                 _mm256_setzero_si256 (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permutex_pd (__m256d __W, __mmask8 __U, __m256d __X,
+                        const int __imm)
+{
+  return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __imm,
+                                                 (__v4df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permutex_pd (__mmask8 __U, __m256d __X, const int __imm)
+{
+  return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __imm,
+                                                 (__v4df)
+                                                 _mm256_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permute_pd (__m256d __W, __mmask8 __U, __m256d __X,
+                       const int __C)
+{
+  return (__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df) __X, __C,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permute_pd (__mmask8 __U, __m256d __X, const int __C)
+{
+  return (__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df) __X, __C,
+                                                    (__v4df)
+                                                    _mm256_setzero_pd (),
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permute_pd (__m128d __W, __mmask8 __U, __m128d __X,
+                    const int __C)
+{
+  return (__m128d) __builtin_ia32_vpermilpd_mask ((__v2df) __X, __C,
+                                                 (__v2df) __W,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permute_pd (__mmask8 __U, __m128d __X, const int __C)
+{
+  return (__m128d) __builtin_ia32_vpermilpd_mask ((__v2df) __X, __C,
+                                                 (__v2df)
+                                                 _mm_setzero_pd (),
+                                                 (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_permute_ps (__m256 __W, __mmask8 __U, __m256 __X,
+                       const int __C)
+{
+  return (__m256) __builtin_ia32_vpermilps256_mask ((__v8sf) __X, __C,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_permute_ps (__mmask8 __U, __m256 __X, const int __C)
+{
+  return (__m256) __builtin_ia32_vpermilps256_mask ((__v8sf) __X, __C,
+                                                   (__v8sf)
+                                                   _mm256_setzero_ps (),
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_permute_ps (__m128 __W, __mmask8 __U, __m128 __X,
+                    const int __C)
+{
+  return (__m128) __builtin_ia32_vpermilps_mask ((__v4sf) __X, __C,
+                                                (__v4sf) __W,
+                                                (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_permute_ps (__mmask8 __U, __m128 __X, const int __C)
+{
+  return (__m128) __builtin_ia32_vpermilps_mask ((__v4sf) __X, __C,
+                                                (__v4sf)
+                                                _mm_setzero_ps (),
+                                                (__mmask8) __U);
+}
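+
+/* Usage sketch (illustration only, w and x being any __m256d): for
+   the pd permutes each immediate bit selects one element inside its
+   own 128-bit half, and masked-off lanes fall back to __W:
+
+     __m256d r = _mm256_mask_permute_pd (w, 0xF, x, 0x5);
+
+   swaps the two elements within each half of x.  */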
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W)
+{
+  return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+                                                    (__v4df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W)
+{
+  return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+                                                   (__v8sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W)
+{
+  return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+                                                   (__v4di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m256i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W)
+{
+  return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+                                                   (__v8si) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W)
+{
+  return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+                                                    (__v2df) __W,
+                                                    (__mmask8) __U);
+}
+
+extern __inline __m128
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W)
+{
+  return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+                                                   (__v4sf) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+  return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+                                                   (__v2di) __W,
+                                                   (__mmask8) __U);
+}
+
+extern __inline __m128i
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W)
+{
+  return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+                                                   (__v4si) __W,
+                                                   (__mmask8) __U);
+}
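+
+/* Usage sketch (illustration only): blends are pure mask selects;
+   each result lane is taken from __W where the mask bit is set and
+   from __A where it is clear:
+
+     __m256d r = _mm256_mask_blend_pd (0x3, a, b);
+
+   takes lanes 0-1 from b and lanes 2-3 from a.  */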
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epi64_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, __P,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epi32_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, __P,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epu64_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_epu32_mask (__m256i __X, __m256i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_pd_mask (__m256d __X, __m256d __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmppd256_mask ((__v4df) __X,
+                                                 (__v4df) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_ps_mask (__m256 __X, __m256 __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf) __X,
+                                                 (__v8sf) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epi64_mask (__mmask8 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, __P,
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epi32_mask (__mmask8 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, __P,
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epu64_mask (__mmask8 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_epu32_mask (__mmask8 __U, __m256i __X, __m256i __Y,
+                           const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_pd_mask (__mmask8 __U, __m256d __X, __m256d __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmppd256_mask ((__v4df) __X,
+                                                 (__v4df) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_ps_mask (__mmask8 __U, __m256 __X, __m256 __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf) __X,
+                                                 (__v8sf) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epi64_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, __P,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epi32_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, __P,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epu64_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_epu32_mask (__m128i __X, __m128i __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_pd_mask (__m128d __X, __m128d __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmppd128_mask ((__v2df) __X,
+                                                 (__v2df) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmp_ps_mask (__m128 __X, __m128 __Y, const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf) __X,
+                                                 (__v4sf) __Y, __P,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epi64_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, __P,
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epi32_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, __P,
+                                                (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epu64_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_epu32_mask (__mmask8 __U, __m128i __X, __m128i __Y,
+                        const int __P)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_pd_mask (__mmask8 __U, __m128d __X, __m128d __Y,
+                     const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmppd128_mask ((__v2df) __X,
+                                                 (__v2df) __Y, __P,
+                                                 (__mmask8) __U);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_cmp_ps_mask (__mmask8 __U, __m128 __X, __m128 __Y,
+                     const int __P)
+{
+  return (__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf) __X,
+                                                 (__v4sf) __Y, __P,
+                                                 (__mmask8) __U);
+}
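+
+/* Usage sketch (illustration only, x and y being any __m256i): the
+   cmp intrinsics return a bitmask rather than a vector and take the
+   predicate as an immediate (the _MM_CMPINT_* encodings):
+
+     __mmask8 m = _mm256_cmp_epi32_mask (x, y, _MM_CMPINT_LT);
+
+   The masked variants additionally AND the result with __U.  */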
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_permutex_pd (__m256d __X, const int __M)
+{
+  return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __M,
+                                                 (__v4df)
+                                                 _mm256_undefined_pd (),
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epu32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, 4,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epu32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, 1,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epu32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, 5,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epu32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X,
+                                                 (__v8si) __Y, 2,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epu64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, 4,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epu64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, 1,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epu64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, 5,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epu64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X,
+                                                 (__v4di) __Y, 2,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epi32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, 4,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epi32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, 1,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epi32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, 5,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epi32_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X,
+                                                (__v8si) __Y, 2,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpneq_epi64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, 4,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmplt_epi64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, 1,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmpge_epi64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, 5,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmple_epi64_mask (__m256i __X, __m256i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X,
+                                                (__v4di) __Y, 2,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epu32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, 4,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epu32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, 1,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epu32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, 5,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epu32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X,
+                                                 (__v4si) __Y, 2,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epu64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, 4,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epu64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, 1,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epu64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, 5,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epu64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X,
+                                                 (__v2di) __Y, 2,
+                                                 (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epi32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, 4,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, 1,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epi32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, 5,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epi32_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X,
+                                                (__v4si) __Y, 2,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpneq_epi64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, 4,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmplt_epi64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, 1,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmpge_epi64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, 5,
+                                                (__mmask8) -1);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_cmple_epi64_mask (__m128i __X, __m128i __Y)
+{
+  return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X,
+                                                (__v2di) __Y, 2,
+                                                (__mmask8) -1);
+}
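+
+/* The cmpneq/cmplt/cmpge/cmple wrappers above hard-code the generic
+   comparison predicates 4 (NE), 1 (LT), 5 (NLT, i.e. GE) and
+   2 (LE).  */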
+
+#else
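+/* Without __OPTIMIZE__ the inline functions above are not folded, so
+   the intrinsics that take immediate operands are provided as macros
+   here to keep those operands literal constants for the builtins.  */
+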
+#define _mm256_permutex_pd(X, M)                                               \
+  ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(X), (int)(M),    \
+                                           (__v4df)(__m256d)_mm256_undefined_pd(),\
+                                           (__mmask8)-1))
+
+#define _mm256_maskz_permutex_epi64(M, X, I)                    \
+  ((__m256i) __builtin_ia32_permdi256_mask ((__v4di)(__m256i)(X),    \
+                                           (int)(I),                \
+                                           (__v4di)(__m256i)        \
+                                           (_mm256_setzero_si256()),\
+                                           (__mmask8)(M)))
+
+#define _mm256_mask_permutex_epi64(W, M, X, I)               \
+  ((__m256i) __builtin_ia32_permdi256_mask ((__v4di)(__m256i)(X), \
+                                           (int)(I),             \
+                                           (__v4di)(__m256i)(W), \
+                                           (__mmask8)(M)))
+
+#define _mm256_insertf32x4(X, Y, C)                                     \
+  ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X),  \
+    (__v4sf)(__m128) (Y), (int) (C),                                   \
+    (__v8sf)(__m256)_mm256_setzero_ps(),                               \
+    (__mmask8)-1))
+
+#define _mm256_mask_insertf32x4(W, U, X, Y, C)                          \
+  ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X),  \
+    (__v4sf)(__m128) (Y), (int) (C),                                   \
+    (__v8sf)(__m256)(W),                                               \
+    (__mmask8)(U)))
+
+#define _mm256_maskz_insertf32x4(U, X, Y, C)                            \
+  ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X), \
+    (__v4sf)(__m128) (Y), (int) (C),                                   \
+    (__v8sf)(__m256)_mm256_setzero_ps(),                               \
+    (__mmask8)(U)))
+
+#define _mm256_inserti32x4(X, Y, C)                                     \
+  ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (__v4si)(__m128i) (Y), (int) (C),                                  \
+    (__v8si)(__m256i)_mm256_setzero_si256(),                           \
+    (__mmask8)-1))
+
+#define _mm256_mask_inserti32x4(W, U, X, Y, C)                          \
+  ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (__v4si)(__m128i) (Y), (int) (C),                                  \
+    (__v8si)(__m256i)(W),                                              \
+    (__mmask8)(U)))
+
+#define _mm256_maskz_inserti32x4(U, X, Y, C)                            \
+  ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (__v4si)(__m128i) (Y), (int) (C),                                  \
+    (__v8si)(__m256i)_mm256_setzero_si256(),                           \
+    (__mmask8)(U)))
+
+#define _mm256_extractf32x4_ps(X, C)                                    \
+  ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \
+    (int) (C),                                                         \
+    (__v4sf)(__m128)_mm_setzero_ps(),                                  \
+    (__mmask8)-1))
+
+#define _mm256_mask_extractf32x4_ps(W, U, X, C)                         \
+  ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \
+    (int) (C),                                                         \
+    (__v4sf)(__m128)(W),                                               \
+    (__mmask8)(U)))
+
+#define _mm256_maskz_extractf32x4_ps(U, X, C)                           \
+  ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \
+    (int) (C),                                                         \
+    (__v4sf)(__m128)_mm_setzero_ps(),                                  \
+    (__mmask8)(U)))
+
+#define _mm256_extracti32x4_epi32(X, C)                                 \
+  ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (int) (C), (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)-1))
+
+#define _mm256_mask_extracti32x4_epi32(W, U, X, C)                      \
+  ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (int) (C), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_extracti32x4_epi32(U, X, C)                        \
+  ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\
+    (int) (C), (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(U)))
+
+#define _mm256_shuffle_i64x2(X, Y, C)                                                   \
+  ((__m256i)  __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X),                 \
+                                                  (__v4di)(__m256i)(Y), (int)(C),       \
+                                                  (__v4di)(__m256i)_mm256_setzero_si256 (), \
+                                                  (__mmask8)-1))
+
+#define _mm256_mask_shuffle_i64x2(W, U, X, Y, C)                                        \
+  ((__m256i)  __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X),                 \
+                                                  (__v4di)(__m256i)(Y), (int)(C),       \
+                                                  (__v4di)(__m256i)(W),\
+                                                  (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_i64x2(U, X, Y, C)                                          \
+  ((__m256i)  __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X),                 \
+                                                  (__v4di)(__m256i)(Y), (int)(C),       \
+                                                  (__v4di)(__m256i)_mm256_setzero_si256 (), \
+                                                  (__mmask8)(U)))
+
+#define _mm256_shuffle_i32x4(X, Y, C)                                                   \
+  ((__m256i)  __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X),                 \
+                                                  (__v8si)(__m256i)(Y), (int)(C),       \
+                                                  (__v8si)(__m256i)_mm256_setzero_si256(), \
+                                                  (__mmask8)-1))
+
+#define _mm256_mask_shuffle_i32x4(W, U, X, Y, C)                                        \
+  ((__m256i)  __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X),                 \
+                                                  (__v8si)(__m256i)(Y), (int)(C),       \
+                                                  (__v8si)(__m256i)(W),                 \
+                                                  (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_i32x4(U, X, Y, C)                                          \
+  ((__m256i)  __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X),                 \
+                                                  (__v8si)(__m256i)(Y), (int)(C),       \
+                                                  (__v8si)(__m256i)_mm256_setzero_si256(), \
+                                                  (__mmask8)(U)))
+
+#define _mm256_shuffle_f64x2(X, Y, C)                                                   \
+  ((__m256d)  __builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X),                 \
+                                                  (__v4df)(__m256d)(Y), (int)(C),       \
+                                                  (__v4df)(__m256d)_mm256_setzero_pd(), \
+                                                  (__mmask8)-1))
+
+#define _mm256_mask_shuffle_f64x2(W, U, X, Y, C)                                        \
+  ((__m256d)  __builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X),                 \
+                                                  (__v4df)(__m256d)(Y), (int)(C),       \
+                                                  (__v4df)(__m256d)(W),                 \
+                                                  (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_f64x2(U, X, Y, C)                                          \
+  ((__m256d)  __builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X),                 \
+                                                  (__v4df)(__m256d)(Y), (int)(C),       \
+                                                  (__v4df)(__m256d)_mm256_setzero_pd(), \
+                                                  (__mmask8)(U)))
+
+#define _mm256_shuffle_f32x4(X, Y, C)                                                   \
+  ((__m256)  __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X),                   \
+                                                 (__v8sf)(__m256)(Y), (int)(C),         \
+                                                 (__v8sf)(__m256)_mm256_setzero_ps(),   \
+                                                 (__mmask8)-1))
+
+#define _mm256_mask_shuffle_f32x4(W, U, X, Y, C)                                        \
+  ((__m256)  __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X),                   \
+                                                 (__v8sf)(__m256)(Y), (int)(C),         \
+                                                 (__v8sf)(__m256)(W),                   \
+                                                 (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_f32x4(U, X, Y, C)                                          \
+  ((__m256)  __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X),                   \
+                                                 (__v8sf)(__m256)(Y), (int)(C),         \
+                                                 (__v8sf)(__m256)_mm256_setzero_ps(),   \
+                                                 (__mmask8)(U)))
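+
+/* Usage sketch (illustration only): the shuffle_*32x4/*64x2 forms
+   move whole 128-bit blocks across lanes.  In the 256-bit variants,
+   immediate bit 0 picks which half of X becomes the low 128 bits of
+   the result and bit 1 picks which half of Y becomes the high 128
+   bits, so
+
+     __m256i r = _mm256_shuffle_i64x2 (x, y, 0x1);
+
+   yields the high half of x in r's low lane and the low half of y in
+   r's high lane.  */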
+
+#define _mm256_mask_shuffle_pd(W, U, A, B, C)                                   \
+  ((__m256d)__builtin_ia32_shufpd256_mask ((__v4df)(__m256d)(A),                \
+                                           (__v4df)(__m256d)(B), (int)(C),      \
+                                           (__v4df)(__m256d)(W),                \
+                                           (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_pd(U, A, B, C)                                     \
+  ((__m256d)__builtin_ia32_shufpd256_mask ((__v4df)(__m256d)(A),                \
+                                           (__v4df)(__m256d)(B), (int)(C),      \
+                                           (__v4df)(__m256d)_mm256_setzero_pd(),\
+                                           (__mmask8)(U)))
+
+#define _mm_mask_shuffle_pd(W, U, A, B, C)                                      \
+  ((__m128d)__builtin_ia32_shufpd128_mask ((__v2df)(__m128d)(A),                \
+                                           (__v2df)(__m128d)(B), (int)(C),      \
+                                           (__v2df)(__m128d)(W),                \
+                                           (__mmask8)(U)))
+
+#define _mm_maskz_shuffle_pd(U, A, B, C)                                        \
+  ((__m128d)__builtin_ia32_shufpd128_mask ((__v2df)(__m128d)(A),                \
+                                           (__v2df)(__m128d)(B), (int)(C),      \
+                                           (__v2df)(__m128d)_mm_setzero_pd(),   \
+                                           (__mmask8)(U)))
+
+#define _mm256_mask_shuffle_ps(W, U, A, B, C)                                   \
+  ((__m256) __builtin_ia32_shufps256_mask ((__v8sf)(__m256)(A),                 \
+                                           (__v8sf)(__m256)(B), (int)(C),       \
+                                           (__v8sf)(__m256)(W),                 \
+                                           (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_ps(U, A, B, C)                                     \
+  ((__m256) __builtin_ia32_shufps256_mask ((__v8sf)(__m256)(A),                 \
+                                           (__v8sf)(__m256)(B), (int)(C),       \
+                                           (__v8sf)(__m256)_mm256_setzero_ps(), \
+                                           (__mmask8)(U)))
+
+#define _mm_mask_shuffle_ps(W, U, A, B, C)                                      \
+  ((__m128) __builtin_ia32_shufps128_mask ((__v4sf)(__m128)(A),                 \
+                                           (__v4sf)(__m128)(B), (int)(C),       \
+                                           (__v4sf)(__m128)(W),                 \
+                                           (__mmask8)(U)))
+
+#define _mm_maskz_shuffle_ps(U, A, B, C)                                        \
+  ((__m128) __builtin_ia32_shufps128_mask ((__v4sf)(__m128)(A),                 \
+                                           (__v4sf)(__m128)(B), (int)(C),       \
+                                           (__v4sf)(__m128)_mm_setzero_ps(),    \
+                                           (__mmask8)(U)))
+
+#define _mm256_fixupimm_pd(X, Y, Z, C)                                         \
+  ((__m256d)__builtin_ia32_fixupimmpd256_mask ((__v4df)(__m256d)(X),           \
+                                              (__v4df)(__m256d)(Y),            \
+                                              (__v4di)(__m256i)(Z), (int)(C),  \
+                                              (__mmask8)(-1)))
+
+#define _mm256_mask_fixupimm_pd(X, U, Y, Z, C)                                 \
+  ((__m256d)__builtin_ia32_fixupimmpd256_mask ((__v4df)(__m256d)(X),           \
+                                              (__v4df)(__m256d)(Y),            \
+                                              (__v4di)(__m256i)(Z), (int)(C),  \
+                                              (__mmask8)(U)))
+
+#define _mm256_maskz_fixupimm_pd(U, X, Y, Z, C)                                \
+  ((__m256d)__builtin_ia32_fixupimmpd256_maskz ((__v4df)(__m256d)(X),          \
+                                               (__v4df)(__m256d)(Y),           \
+                                               (__v4di)(__m256i)(Z), (int)(C), \
+                                               (__mmask8)(U)))
+
+#define _mm256_fixupimm_ps(X, Y, Z, C)                                         \
+  ((__m256)__builtin_ia32_fixupimmps256_mask ((__v8sf)(__m256)(X),             \
+                                             (__v8sf)(__m256)(Y),              \
+                                             (__v8si)(__m256i)(Z), (int)(C),   \
+                                             (__mmask8)(-1)))
+
+#define _mm256_mask_fixupimm_ps(X, U, Y, Z, C)                                 \
+  ((__m256)__builtin_ia32_fixupimmps256_mask ((__v8sf)(__m256)(X),             \
+                                             (__v8sf)(__m256)(Y),              \
+                                             (__v8si)(__m256i)(Z), (int)(C),   \
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_fixupimm_ps(U, X, Y, Z, C)                                \
+  ((__m256)__builtin_ia32_fixupimmps256_maskz ((__v8sf)(__m256)(X),            \
+                                              (__v8sf)(__m256)(Y),             \
+                                              (__v8si)(__m256i)(Z), (int)(C),  \
+                                              (__mmask8)(U)))
+
+#define _mm_fixupimm_pd(X, Y, Z, C)                                            \
+  ((__m128d)__builtin_ia32_fixupimmpd128_mask ((__v2df)(__m128d)(X),           \
+                                              (__v2df)(__m128d)(Y),            \
+                                              (__v2di)(__m128i)(Z), (int)(C),  \
+                                              (__mmask8)(-1)))
+
+#define _mm_mask_fixupimm_pd(X, U, Y, Z, C)                                    \
+  ((__m128d)__builtin_ia32_fixupimmpd128_mask ((__v2df)(__m128d)(X),           \
+                                              (__v2df)(__m128d)(Y),            \
+                                              (__v2di)(__m128i)(Z), (int)(C),  \
+                                              (__mmask8)(U)))
+
+#define _mm_maskz_fixupimm_pd(U, X, Y, Z, C)                                   \
+  ((__m128d)__builtin_ia32_fixupimmpd128_maskz ((__v2df)(__m128d)(X),          \
+                                               (__v2df)(__m128d)(Y),           \
+                                               (__v2di)(__m128i)(Z), (int)(C), \
+                                               (__mmask8)(U)))
+
+#define _mm_fixupimm_ps(X, Y, Z, C)                                            \
+  ((__m128)__builtin_ia32_fixupimmps128_mask ((__v4sf)(__m128)(X),             \
+                                             (__v4sf)(__m128)(Y),              \
+                                             (__v4si)(__m128i)(Z), (int)(C),   \
+                                             (__mmask8)(-1)))
+
+#define _mm_mask_fixupimm_ps(X, U, Y, Z, C)                                    \
+  ((__m128)__builtin_ia32_fixupimmps128_mask ((__v4sf)(__m128)(X),             \
+                                             (__v4sf)(__m128)(Y),              \
+                                             (__v4si)(__m128i)(Z), (int)(C),   \
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_fixupimm_ps(U, X, Y, Z, C)                                   \
+  ((__m128)__builtin_ia32_fixupimmps128_maskz ((__v4sf)(__m128)(X),            \
+                                              (__v4sf)(__m128)(Y),             \
+                                              (__v4si)(__m128i)(Z), (int)(C),  \
+                                              (__mmask8)(U)))
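+
+/* A rough description (not normative): the fixupimm family is
+   table-driven.  Each lane of Y is classified (QNaN, SNaN, +/-0,
+   +/-Inf, one, negative, positive), the matching 4-bit token in the
+   integer vector Z selects the replacement value (the original X or Y
+   lane, or one of a fixed set of constants), and the immediate C
+   chooses which classes signal exceptions.  */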
+
+#define _mm256_mask_srli_epi32(W, U, A, B)                             \
+  ((__m256i) __builtin_ia32_psrldi256_mask ((__v8si)(__m256i)(A),      \
+    (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_srli_epi32(U, A, B)                               \
+  ((__m256i) __builtin_ia32_psrldi256_mask ((__v8si)(__m256i)(A),      \
+    (int)(B), (__v8si)_mm256_setzero_si256(), (__mmask8)(U)))
+
+#define _mm_mask_srli_epi32(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psrldi128_mask ((__v4si)(__m128i)(A),       \
+    (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srli_epi32(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psrldi128_mask ((__v4si)(__m128i)(A),       \
+    (int)(B), (__v4si)_mm_setzero_si128(), (__mmask8)(U)))
+
+#define _mm256_mask_srli_epi64(W, U, A, B)                             \
+  ((__m256i) __builtin_ia32_psrlqi256_mask ((__v4di)(__m256i)(A),      \
+    (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_srli_epi64(U, A, B)                               \
+  ((__m256i) __builtin_ia32_psrlqi256_mask ((__v4di)(__m256i)(A),      \
+    (int)(B), (__v4di)_mm256_setzero_si256 (), (__mmask8)(U)))
+
+#define _mm_mask_srli_epi64(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psrlqi128_mask ((__v2di)(__m128i)(A),       \
+    (int)(B), (__v2di)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srli_epi64(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psrlqi128_mask ((__v2di)(__m128i)(A),       \
+    (int)(B), (__v2di)_mm_setzero_si128(), (__mmask8)(U)))
+
+#define _mm256_mask_slli_epi32(W, U, X, C)                                \
+  ((__m256i)__builtin_ia32_pslldi256_mask ((__v8si)(__m256i)(X), (int)(C),\
+    (__v8si)(__m256i)(W),\
+    (__mmask8)(U)))
+
+#define _mm256_maskz_slli_epi32(U, X, C)                                  \
+  ((__m256i)__builtin_ia32_pslldi256_mask ((__v8si)(__m256i)(X), (int)(C),\
+    (__v8si)(__m256i)_mm256_setzero_si256(),\
+    (__mmask8)(U)))
+
+#define _mm256_mask_slli_epi64(W, U, X, C)                                \
+  ((__m256i)__builtin_ia32_psllqi256_mask ((__v4di)(__m256i)(X), (int)(C),\
+    (__v4di)(__m256i)(W),\
+    (__mmask8)(U)))
+
+#define _mm256_maskz_slli_epi64(U, X, C)                                  \
+  ((__m256i)__builtin_ia32_psllqi256_mask ((__v4di)(__m256i)(X), (int)(C),\
+    (__v4di)(__m256i)_mm256_setzero_si256 (),\
+    (__mmask8)(U)))
+
+#define _mm_mask_slli_epi32(W, U, X, C)                                          \
+  ((__m128i)__builtin_ia32_pslldi128_mask ((__v4si)(__m128i)(X), (int)(C),\
+    (__v4si)(__m128i)(W),\
+    (__mmask8)(U)))
+
+#define _mm_maskz_slli_epi32(U, X, C)                                    \
+  ((__m128i)__builtin_ia32_pslldi128_mask ((__v4si)(__m128i)(X), (int)(C),\
+    (__v4si)(__m128i)_mm_setzero_si128 (),\
+    (__mmask8)(U)))
+
+#define _mm_mask_slli_epi64(W, U, X, C)                                          \
+  ((__m128i)__builtin_ia32_psllqi128_mask ((__v2di)(__m128i)(X), (int)(C),\
+    (__v2di)(__m128i)(W),\
+    (__mmask8)(U)))
+
+#define _mm_maskz_slli_epi64(U, X, C)                                    \
+  ((__m128i)__builtin_ia32_psllqi128_mask ((__v2di)(__m128i)(X), (int)(C),\
+    (__v2di)(__m128i)_mm_setzero_di(),\
+    (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi64(A, B, C, I)                           \
+  ((__m256i) __builtin_ia32_pternlogq256_mask ((__v4di)(__m256i)(A),   \
+    (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(I), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, I)                  \
+  ((__m256i) __builtin_ia32_pternlogq256_mask ((__v4di)(__m256i)(A),   \
+    (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, I)                 \
+  ((__m256i) __builtin_ia32_pternlogq256_maskz ((__v4di)(__m256i)(A),  \
+    (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi32(A, B, C, I)                           \
+  ((__m256i) __builtin_ia32_pternlogd256_mask ((__v8si)(__m256i)(A),   \
+    (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(I), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, I)                   \
+  ((__m256i) __builtin_ia32_pternlogd256_mask ((__v8si)(__m256i)(A),   \
+    (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, I)                 \
+  ((__m256i) __builtin_ia32_pternlogd256_maskz ((__v8si)(__m256i)(A),  \
+    (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm_ternarylogic_epi64(A, B, C, I)                              \
+  ((__m128i) __builtin_ia32_pternlogq128_mask ((__v2di)(__m128i)(A),   \
+    (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(I), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, I)                     \
+  ((__m128i) __builtin_ia32_pternlogq128_mask ((__v2di)(__m128i)(A),   \
+    (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, I)                    \
+  ((__m128i) __builtin_ia32_pternlogq128_maskz ((__v2di)(__m128i)(A),  \
+    (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm_ternarylogic_epi32(A, B, C, I)                              \
+  ((__m128i) __builtin_ia32_pternlogd128_mask ((__v4si)(__m128i)(A),   \
+    (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(I), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, I)                     \
+  ((__m128i) __builtin_ia32_pternlogd128_mask ((__v4si)(__m128i)(A),   \
+    (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(I), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, I)                    \
+  ((__m128i) __builtin_ia32_pternlogd128_maskz ((__v4si)(__m128i)(A),  \
+    (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), (int)(I), (__mmask8)(U)))
+
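+/* VRNDSCALE rounds each element to 2^-M times an integer, i.e. keeps
+   M fraction bits; in the immediate B, bits 7:4 give M and the low
+   bits select the rounding mode.  E.g. _mm256_roundscale_ps (x, 0x10)
+   rounds each element to the nearest multiple of 0.5.  */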
+#define _mm256_roundscale_ps(A, B)                                     \
+  ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A),    \
+    (int)(B), (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)-1))
+
+#define _mm256_mask_roundscale_ps(W, U, A, B)                          \
+  ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A),    \
+    (int)(B), (__v8sf)(__m256)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_roundscale_ps(U, A, B)                            \
+  ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A),    \
+    (int)(B), (__v8sf)(__m256)_mm256_setzero_ps(), (__mmask8)(U)))
+
+#define _mm256_roundscale_pd(A, B)                                     \
+  ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A),  \
+    (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)-1))
+
+#define _mm256_mask_roundscale_pd(W, U, A, B)                          \
+  ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A),  \
+    (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_roundscale_pd(U, A, B)                            \
+  ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A),  \
+    (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)(U)))
+
+#define _mm_roundscale_ps(A, B)                                                \
+  ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A),    \
+    (int)(B), (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)-1))
+
+#define _mm_mask_roundscale_ps(W, U, A, B)                             \
+  ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A),    \
+    (int)(B), (__v4sf)(__m128)(W), (__mmask8)(U)))
+
+#define _mm_maskz_roundscale_ps(U, A, B)                               \
+  ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A),    \
+    (int)(B), (__v4sf)(__m128)_mm_setzero_ps(), (__mmask8)(U)))
+
+#define _mm_roundscale_pd(A, B)                                                \
+  ((__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A),  \
+    (int)(B), (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)-1))
+
+#define _mm_mask_roundscale_pd(W, U, A, B)                             \
+  ((__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A),  \
+    (int)(B), (__v2df)(__m128d)(W), (__mmask8)(U)))
+
+#define _mm_maskz_roundscale_pd(U, A, B)                               \
+  ((__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A),  \
+    (int)(B), (__v2df)(__m128d)_mm_setzero_pd(), (__mmask8)(U)))
+
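+/* VGETMANT extracts the mantissa, normalised to the interval chosen
+   by B (an _MM_MANT_NORM_* value) under the sign control C (an
+   _MM_MANT_SIGN_* value); the macros pack the two fields into the
+   immediate as (C << 2) | B.  */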
+#define _mm256_getmant_ps(X, B, C)                                              \
+  ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v8sf)(__m256)_mm256_setzero_ps(),   \
+                                         (__mmask8)-1))
+
+#define _mm256_mask_getmant_ps(W, U, X, B, C)                                   \
+  ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v8sf)(__m256)(W),                   \
+                                         (__mmask8)(U)))
+
+#define _mm256_maskz_getmant_ps(U, X, B, C)                                     \
+  ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v8sf)(__m256)_mm256_setzero_ps(),   \
+                                         (__mmask8)(U)))
+
+#define _mm_getmant_ps(X, B, C)                                                 \
+  ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v4sf)(__m128)_mm_setzero_ps(),      \
+                                         (__mmask8)-1))
+
+#define _mm_mask_getmant_ps(W, U, X, B, C)                                      \
+  ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v4sf)(__m128)(W),                   \
+                                         (__mmask8)(U)))
+
+#define _mm_maskz_getmant_ps(U, X, B, C)                                        \
+  ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X),             \
+                                         (int)(((C)<<2) | (B)),                 \
+                                         (__v4sf)(__m128)_mm_setzero_ps(),      \
+                                         (__mmask8)(U)))
+
+#define _mm256_getmant_pd(X, B, C)                                              \
+  ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v4df)(__m256d)_mm256_setzero_pd(), \
+                                          (__mmask8)-1))
+
+#define _mm256_mask_getmant_pd(W, U, X, B, C)                                   \
+  ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v4df)(__m256d)(W),                 \
+                                          (__mmask8)(U)))
+
+#define _mm256_maskz_getmant_pd(U, X, B, C)                                     \
+  ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v4df)(__m256d)_mm256_setzero_pd(), \
+                                          (__mmask8)(U)))
+
+#define _mm_getmant_pd(X, B, C)                                                 \
+  ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v2df)(__m128d)_mm_setzero_pd(),    \
+                                          (__mmask8)-1))
+
+#define _mm_mask_getmant_pd(W, U, X, B, C)                                      \
+  ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v2df)(__m128d)(W),                 \
+                                          (__mmask8)(U)))
+
+#define _mm_maskz_getmant_pd(U, X, B, C)                                        \
+  ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X),           \
+                                         (int)(((C)<<2) | (B)),                 \
+                                          (__v2df)(__m128d)_mm_setzero_pd(),    \
+                                          (__mmask8)(U)))
+
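+/* Masked gathers: element i is loaded from ADDR + INDEX[i] * SCALE
+   when bit i of MASK is set and is copied from V1OLD otherwise; SCALE
+   must be 1, 2, 4 or 8.  E.g.
+     __m256 v = _mm256_mmask_i32gather_ps (old, 0x0f, idx, base, 4);  */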
+#define _mm256_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m256) __builtin_ia32_gather3siv8sf ((__v8sf)(__m256)(V1OLD),      \
+                                        (float const *)(ADDR),         \
+                                        (__v8si)(__m256i)(INDEX),      \
+                                        (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE)                \
+  (__m128) __builtin_ia32_gather3siv4sf ((__v4sf)(__m128)(V1OLD),      \
+                                        (float const *)(ADDR),         \
+                                        (__v4si)(__m128i)(INDEX),      \
+                                        (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m256d) __builtin_ia32_gather3siv4df ((__v4df)(__m256d)(V1OLD),    \
+                                         (double const *)(ADDR),       \
+                                         (__v4si)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE)                \
+  (__m128d) __builtin_ia32_gather3siv2df ((__v2df)(__m128d)(V1OLD),    \
+                                         (double const *)(ADDR),       \
+                                         (__v4si)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m128) __builtin_ia32_gather3div8sf ((__v4sf)(__m128)(V1OLD),      \
+                                        (float const *)(ADDR),         \
+                                        (__v4di)(__m256i)(INDEX),      \
+                                        (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE)                \
+  (__m128) __builtin_ia32_gather3div4sf ((__v4sf)(__m128)(V1OLD),      \
+                                        (float const *)(ADDR),         \
+                                        (__v2di)(__m128i)(INDEX),      \
+                                        (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m256d) __builtin_ia32_gather3div4df ((__v4df)(__m256d)(V1OLD),    \
+                                         (double const *)(ADDR),       \
+                                         (__v4di)(__m256i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE)                \
+  (__m128d) __builtin_ia32_gather3div2df ((__v2df)(__m128d)(V1OLD),    \
+                                         (double const *)(ADDR),       \
+                                         (__v2di)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE)  \
+  (__m256i) __builtin_ia32_gather3siv8si ((__v8si)(__m256i)(V1OLD),    \
+                                         (int const *)(ADDR),          \
+                                         (__v8si)(__m256i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m128i) __builtin_ia32_gather3siv4si ((__v4si)(__m128i)(V1OLD),    \
+                                         (int const *)(ADDR),          \
+                                         (__v4si)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE)  \
+  (__m256i) __builtin_ia32_gather3siv4di ((__v4di)(__m256i)(V1OLD),    \
+                                         (long long const *)(ADDR),    \
+                                         (__v4si)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m128i) __builtin_ia32_gather3siv2di ((__v2di)(__m128i)(V1OLD),    \
+                                         (long long const *)(ADDR),    \
+                                         (__v4si)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE)  \
+  (__m128i) __builtin_ia32_gather3div8si ((__v4si)(__m128i)(V1OLD),    \
+                                         (int const *)(ADDR),          \
+                                         (__v4di)(__m256i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m128i) __builtin_ia32_gather3div4si ((__v4si)(__m128i)(V1OLD),    \
+                                         (int const *)(ADDR),          \
+                                         (__v2di)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm256_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE)  \
+  (__m256i) __builtin_ia32_gather3div4di ((__v4di)(__m256i)(V1OLD),    \
+                                         (long long const *)(ADDR),    \
+                                         (__v4di)(__m256i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
+#define _mm_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE)     \
+  (__m128i) __builtin_ia32_gather3div2di ((__v2di)(__m128i)(V1OLD),    \
+                                         (long long const *)(ADDR),    \
+                                         (__v2di)(__m128i)(INDEX),     \
+                                         (__mmask8)(MASK), (int)(SCALE))
+
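+/* Scatters: element i is stored to ADDR + INDEX[i] * SCALE when bit i
+   of the mask is set; the unmasked forms simply pass an all-ones mask
+   (0xFF).  */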
+#define _mm256_i32scatter_ps(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scattersiv8sf ((float *)(ADDR), (__mmask8)0xFF,       \
+                               (__v8si)(__m256i)(INDEX),               \
+                               (__v8sf)(__m256)(V1), (int)(SCALE))
+
+#define _mm256_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scattersiv8sf ((float *)(ADDR), (__mmask8)(MASK),     \
+                               (__v8si)(__m256i)(INDEX),               \
+                               (__v8sf)(__m256)(V1), (int)(SCALE))
+
+#define _mm_i32scatter_ps(ADDR, INDEX, V1, SCALE)                      \
+  __builtin_ia32_scattersiv4sf ((float *)(ADDR), (__mmask8)0xFF,       \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE)           \
+  __builtin_ia32_scattersiv4sf ((float *)(ADDR), (__mmask8)(MASK),     \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm256_i32scatter_pd(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scattersiv4df ((double *)(ADDR), (__mmask8)0xFF,      \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4df)(__m256d)(V1), (int)(SCALE))
+
+#define _mm256_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scattersiv4df ((double *)(ADDR), (__mmask8)(MASK),    \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4df)(__m256d)(V1), (int)(SCALE))
+
+#define _mm_i32scatter_pd(ADDR, INDEX, V1, SCALE)                      \
+  __builtin_ia32_scattersiv2df ((double *)(ADDR), (__mmask8)0xFF,      \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v2df)(__m128d)(V1), (int)(SCALE))
+
+#define _mm_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE)           \
+  __builtin_ia32_scattersiv2df ((double *)(ADDR), (__mmask8)(MASK),    \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v2df)(__m128d)(V1), (int)(SCALE))
+
+#define _mm256_i64scatter_ps(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scatterdiv8sf ((float *)(ADDR), (__mmask8)0xFF,       \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm256_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scatterdiv8sf ((float *)(ADDR), (__mmask8)(MASK),     \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm_i64scatter_ps(ADDR, INDEX, V1, SCALE)                      \
+  __builtin_ia32_scatterdiv4sf ((float *)(ADDR), (__mmask8)0xFF,       \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE)           \
+  __builtin_ia32_scatterdiv4sf ((float *)(ADDR), (__mmask8)(MASK),     \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v4sf)(__m128)(V1), (int)(SCALE))
+
+#define _mm256_i64scatter_pd(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scatterdiv4df ((double *)(ADDR), (__mmask8)0xFF,      \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4df)(__m256d)(V1), (int)(SCALE))
+
+#define _mm256_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scatterdiv4df ((double *)(ADDR), (__mmask8)(MASK),    \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4df)(__m256d)(V1), (int)(SCALE))
+
+#define _mm_i64scatter_pd(ADDR, INDEX, V1, SCALE)                      \
+  __builtin_ia32_scatterdiv2df ((double *)(ADDR), (__mmask8)0xFF,      \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v2df)(__m128d)(V1), (int)(SCALE))
+
+#define _mm_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE)           \
+  __builtin_ia32_scatterdiv2df ((double *)(ADDR), (__mmask8)(MASK),    \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v2df)(__m128d)(V1), (int)(SCALE))
+
+#define _mm256_i32scatter_epi32(ADDR, INDEX, V1, SCALE)                        \
+  __builtin_ia32_scattersiv8si ((int *)(ADDR), (__mmask8)0xFF,         \
+                               (__v8si)(__m256i)(INDEX),               \
+                               (__v8si)(__m256i)(V1), (int)(SCALE))
+
+#define _mm256_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE)     \
+  __builtin_ia32_scattersiv8si ((int *)(ADDR), (__mmask8)(MASK),       \
+                               (__v8si)(__m256i)(INDEX),               \
+                               (__v8si)(__m256i)(V1), (int)(SCALE))
+
+#define _mm_i32scatter_epi32(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scattersiv4si ((int *)(ADDR), (__mmask8)0xFF,         \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scattersiv4si ((int *)(ADDR), (__mmask8)(MASK),       \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm256_i32scatter_epi64(ADDR, INDEX, V1, SCALE)                        \
+  __builtin_ia32_scattersiv4di ((long long *)(ADDR), (__mmask8)0xFF,   \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4di)(__m256i)(V1), (int)(SCALE))
+
+#define _mm256_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE)     \
+  __builtin_ia32_scattersiv4di ((long long *)(ADDR), (__mmask8)(MASK), \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v4di)(__m256i)(V1), (int)(SCALE))
+
+#define _mm_i32scatter_epi64(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scattersiv2di ((long long *)(ADDR), (__mmask8)0xFF,   \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v2di)(__m128i)(V1), (int)(SCALE))
+
+#define _mm_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scattersiv2di ((long long *)(ADDR), (__mmask8)(MASK), \
+                               (__v4si)(__m128i)(INDEX),               \
+                               (__v2di)(__m128i)(V1), (int)(SCALE))
+
+#define _mm256_i64scatter_epi32(ADDR, INDEX, V1, SCALE)                        \
+  __builtin_ia32_scatterdiv8si ((int *)(ADDR), (__mmask8)0xFF,         \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm256_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE)     \
+  __builtin_ia32_scatterdiv8si ((int *)(ADDR), (__mmask8)(MASK),       \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm_i64scatter_epi32(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scatterdiv4si ((int *)(ADDR), (__mmask8)0xFF,         \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scatterdiv4si ((int *)(ADDR), (__mmask8)(MASK),       \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v4si)(__m128i)(V1), (int)(SCALE))
+
+#define _mm256_i64scatter_epi64(ADDR, INDEX, V1, SCALE)                        \
+  __builtin_ia32_scatterdiv4di ((long long *)(ADDR), (__mmask8)0xFF,   \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4di)(__m256i)(V1), (int)(SCALE))
+
+#define _mm256_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE)     \
+  __builtin_ia32_scatterdiv4di ((long long *)(ADDR), (__mmask8)(MASK), \
+                               (__v4di)(__m256i)(INDEX),               \
+                               (__v4di)(__m256i)(V1), (int)(SCALE))
+
+#define _mm_i64scatter_epi64(ADDR, INDEX, V1, SCALE)                   \
+  __builtin_ia32_scatterdiv2di ((long long *)(ADDR), (__mmask8)0xFF,   \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v2di)(__m128i)(V1), (int)(SCALE))
+
+#define _mm_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE)                \
+  __builtin_ia32_scatterdiv2di ((long long *)(ADDR), (__mmask8)(MASK), \
+                               (__v2di)(__m128i)(INDEX),               \
+                               (__v2di)(__m128i)(V1), (int)(SCALE))
+
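+/* Masked PSHUFD: C is the same selector immediate as in
+   _mm256_shuffle_epi32 / _mm_shuffle_epi32 (see _MM_SHUFFLE).  */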
+#define _mm256_mask_shuffle_epi32(W, U, X, C)                                       \
+  ((__m256i)  __builtin_ia32_pshufd256_mask ((__v8si)(__m256i)(X), (int)(C),        \
+                                             (__v8si)(__m256i)(W),                  \
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_shuffle_epi32(U, X, C)                                         \
+  ((__m256i)  __builtin_ia32_pshufd256_mask ((__v8si)(__m256i)(X), (int)(C),        \
+                                             (__v8si)(__m256i)_mm256_setzero_si256(),  \
+                                             (__mmask8)(U)))
+
+#define _mm_mask_shuffle_epi32(W, U, X, C)                                          \
+  ((__m128i)  __builtin_ia32_pshufd128_mask ((__v4si)(__m128i)(X), (int)(C),        \
+                                             (__v4si)(__m128i)(W),                  \
+                                             (__mmask8)(U)))
+
+#define _mm_maskz_shuffle_epi32(U, X, C)                                            \
+  ((__m128i)  __builtin_ia32_pshufd128_mask ((__v4si)(__m128i)(X), (int)(C),        \
+                                             (__v4si)(__m128i)_mm_setzero_si128 (),     \
+                                             (__mmask8)(U)))
+
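+/* VPROL/VPROR rotate each element left/right by the immediate B, the
+   count being taken modulo the element width.  E.g.
+     _mm256_rol_epi32 (a, 8) rotates every dword left by one byte.  */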
+#define _mm256_rol_epi64(A, B)                                                 \
+  ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)_mm256_setzero_si256 (),\
+                                          (__mmask8)-1))
+
+#define _mm256_mask_rol_epi64(W, U, A, B)                                      \
+  ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm256_maskz_rol_epi64(U, A, B)                                        \
+  ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)_mm256_setzero_si256 (),\
+                                          (__mmask8)(U)))
+
+#define _mm_rol_epi64(A, B)                                                    \
+  ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)_mm_setzero_di(),   \
+                                          (__mmask8)-1))
+
+#define _mm_mask_rol_epi64(W, U, A, B)                                         \
+  ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm_maskz_rol_epi64(U, A, B)                                           \
+  ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)_mm_setzero_di(),   \
+                                          (__mmask8)(U)))
+
+#define _mm256_ror_epi64(A, B)                                                 \
+  ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)_mm256_setzero_si256 (),\
+                                          (__mmask8)-1))
+
+#define _mm256_mask_ror_epi64(W, U, A, B)                                      \
+  ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm256_maskz_ror_epi64(U, A, B)                                        \
+  ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B),      \
+                                          (__v4di)(__m256i)_mm256_setzero_si256 (),\
+                                          (__mmask8)(U)))
+
+#define _mm_ror_epi64(A, B)                                                    \
+  ((__m128i)__builtin_ia32_prorq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)_mm_setzero_di(),   \
+                                          (__mmask8)-1))
+
+#define _mm_mask_ror_epi64(W, U, A, B)                                         \
+  ((__m128i)__builtin_ia32_prorq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm_maskz_ror_epi64(U, A, B)                                           \
+  ((__m128i)__builtin_ia32_prorq128_mask ((__v2di)(__m128i)(A), (int)(B),      \
+                                          (__v2di)(__m128i)_mm_setzero_di(),   \
+                                          (__mmask8)(U)))
+
+#define _mm256_rol_epi32(A, B)                                                 \
+  ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)_mm256_setzero_si256(),\
+                                          (__mmask8)-1))
+
+#define _mm256_mask_rol_epi32(W, U, A, B)                                      \
+  ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm256_maskz_rol_epi32(U, A, B)                                        \
+  ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)_mm256_setzero_si256(),\
+                                          (__mmask8)(U)))
+
+#define _mm_rol_epi32(A, B)                                                    \
+  ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)_mm_setzero_si128 (),   \
+                                          (__mmask8)-1))
+
+#define _mm_mask_rol_epi32(W, U, A, B)                                         \
+  ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm_maskz_rol_epi32(U, A, B)                                           \
+  ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)_mm_setzero_si128 (),   \
+                                          (__mmask8)(U)))
+
+#define _mm256_ror_epi32(A, B)                                                 \
+  ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)_mm256_setzero_si256(),\
+                                          (__mmask8)-1))
+
+#define _mm256_mask_ror_epi32(W, U, A, B)                                      \
+  ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm256_maskz_ror_epi32(U, A, B)                                        \
+  ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B),      \
+                                          (__v8si)(__m256i)_mm256_setzero_si256(),\
+                                          (__mmask8)(U)))
+
+#define _mm_ror_epi32(A, B)                                                    \
+  ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)_mm_setzero_si128 (),   \
+                                          (__mmask8)-1))
+
+#define _mm_mask_ror_epi32(W, U, A, B)                                         \
+  ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)(W),                \
+                                          (__mmask8)(U)))
+
+#define _mm_maskz_ror_epi32(U, A, B)                                           \
+  ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B),      \
+                                          (__v4si)(__m128i)_mm_setzero_si128 (),   \
+                                          (__mmask8)(U)))
+
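+/* VALIGND/VALIGNQ concatenate X (more significant) with Y (less
+   significant), shift the pair right by C whole elements and keep the
+   low half; unlike PALIGNR this crosses 128-bit lane boundaries.  */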
+#define _mm256_alignr_epi32(X, Y, C)                                        \
+    ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X),          \
+        (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)(X), (__mmask8)-1))
+
+#define _mm256_mask_alignr_epi32(W, U, X, Y, C)                             \
+    ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X),          \
+        (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_alignr_epi32(U, X, Y, C)                               \
+    ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X),          \
+        (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)_mm256_setzero_si256 (),\
+        (__mmask8)(U)))
+
+#define _mm256_alignr_epi64(X, Y, C)                                        \
+    ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X),          \
+        (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)(X), (__mmask8)-1))
+
+#define _mm256_mask_alignr_epi64(W, U, X, Y, C)                             \
+    ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X),          \
+        (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_alignr_epi64(U, X, Y, C)                               \
+    ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X),          \
+        (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)_mm256_setzero_si256 (),\
+        (__mmask8)(U)))
+
+#define _mm_alignr_epi32(X, Y, C)                                           \
+    ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X),          \
+        (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)(X), (__mmask8)-1))
+
+#define _mm_mask_alignr_epi32(W, U, X, Y, C)                                \
+    ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X),          \
+        (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_alignr_epi32(U, X, Y, C)                                  \
+    ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X),          \
+        (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)_mm_setzero_si128(),\
+        (__mmask8)(U)))
+
+#define _mm_alignr_epi64(X, Y, C)                                           \
+    ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X),          \
+        (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(X), (__mmask8)-1))
+
+#define _mm_mask_alignr_epi64(W, U, X, Y, C)                                \
+    ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X),          \
+        (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_alignr_epi64(U, X, Y, C)                                  \
+    ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X),          \
+        (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)_mm_setzero_si128(),\
+        (__mmask8)(U)))
+
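+/* Masked float-to-half conversions; I is the usual rounding-control
+   immediate of _mm_cvtps_ph, e.g. _MM_FROUND_TO_NEAREST_INT.  */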
+#define _mm_mask_cvtps_ph(W, U, A, I)                                          \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I),   \
+      (__v8hi)(__m128i) (W), (__mmask8) (U)))
+
+#define _mm_maskz_cvtps_ph(U, A, I)                                            \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I),   \
+      (__v8hi)(__m128i) _mm_setzero_hi(), (__mmask8) (U)))
+
+#define _mm256_mask_cvtps_ph(W, U, A, I)                                       \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
+      (__v8hi)(__m128i) (W), (__mmask8) (U)))
+
+#define _mm256_maskz_cvtps_ph(U, A, I)                                         \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \
+      (__v8hi)(__m128i) _mm_setzero_hi(), (__mmask8) (U)))
+
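+/* Arithmetic right shifts by an immediate.  The 64-bit element forms
+   (VPSRAQ) are new with AVX-512; SSE/AVX2 had no 64-bit arithmetic
+   shift.  */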
+#define _mm256_mask_srai_epi32(W, U, A, B)                             \
+  ((__m256i) __builtin_ia32_psradi256_mask ((__v8si)(__m256i)(A),      \
+    (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_srai_epi32(U, A, B)                               \
+  ((__m256i) __builtin_ia32_psradi256_mask ((__v8si)(__m256i)(A),      \
+    (int)(B), (__v8si)_mm256_setzero_si256(), (__mmask8)(U)))
+
+#define _mm_mask_srai_epi32(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psradi128_mask ((__v4si)(__m128i)(A),       \
+    (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srai_epi32(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psradi128_mask ((__v4si)(__m128i)(A),       \
+    (int)(B), (__v4si)_mm_setzero_si128(), (__mmask8)(U)))
+
+#define _mm256_srai_epi64(A, B)                                                \
+  ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A),      \
+    (int)(B), (__v4di)_mm256_setzero_si256 (), (__mmask8)-1))
+
+#define _mm256_mask_srai_epi64(W, U, A, B)                             \
+  ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A),      \
+    (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_srai_epi64(U, A, B)                               \
+  ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A),      \
+    (int)(B), (__v4di)_mm256_setzero_si256 (), (__mmask8)(U)))
+
+#define _mm_srai_epi64(A, B)                                           \
+  ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A),       \
+    (int)(B), (__v2di)_mm_setzero_si128(), (__mmask8)-1))
+
+#define _mm_mask_srai_epi64(W, U, A, B)                                 \
+  ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A),       \
+    (int)(B), (__v2di)(__m128i)(W), (__mmask8)(U)))
+
+#define _mm_maskz_srai_epi64(U, A, B)                                   \
+  ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A),       \
+    (int)(B), (__v2di)_mm_setzero_si128(), (__mmask8)(U)))
+
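+/* Masked permutes; B and C are the same selector immediates as in the
+   corresponding unmasked AVX forms (VPERMPD and VPERMILPD/VPERMILPS).  */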
+#define _mm256_mask_permutex_pd(W, U, A, B)                             \
+  ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(A),       \
+    (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_permutex_pd(U, A, B)                              \
+  ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(A),       \
+    (int)(B), (__v4df)(__m256d)_mm256_setzero_pd(), (__mmask8)(U)))
+
+#define _mm256_mask_permute_pd(W, U, X, C)                                         \
+  ((__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df)(__m256d)(X), (int)(C),     \
+                                             (__v4df)(__m256d)(W),                 \
+                                             (__mmask8)(U)))
+
+#define _mm256_maskz_permute_pd(U, X, C)                                           \
+  ((__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df)(__m256d)(X), (int)(C),     \
+                                             (__v4df)(__m256d)_mm256_setzero_pd(), \
+                                             (__mmask8)(U)))
+
+#define _mm256_mask_permute_ps(W, U, X, C)                                         \
+  ((__m256) __builtin_ia32_vpermilps256_mask ((__v8sf)(__m256)(X), (int)(C),       \
+                                             (__v8sf)(__m256)(W), (__mmask8)(U)))
+
+#define _mm256_maskz_permute_ps(U, X, C)                                           \
+  ((__m256) __builtin_ia32_vpermilps256_mask ((__v8sf)(__m256)(X), (int)(C),       \
+                                             (__v8sf)(__m256)_mm256_setzero_ps(),  \
+                                             (__mmask8)(U)))
+
+#define _mm_mask_permute_pd(W, U, X, C)                                                    \
+  ((__m128d) __builtin_ia32_vpermilpd_mask ((__v2df)(__m128d)(X), (int)(C),        \
+                                           (__v2df)(__m128d)(W), (__mmask8)(U)))
+
+#define _mm_maskz_permute_pd(U, X, C)                                              \
+  ((__m128d) __builtin_ia32_vpermilpd_mask ((__v2df)(__m128d)(X), (int)(C),        \
+                                           (__v2df)(__m128d)_mm_setzero_pd(),      \
+                                           (__mmask8)(U)))
+
+#define _mm_mask_permute_ps(W, U, X, C)                                                    \
+  ((__m128) __builtin_ia32_vpermilps_mask ((__v4sf)(__m128)(X), (int)(C),          \
+                                         (__v4sf)(__m128)(W), (__mmask8)(U)))
+
+#define _mm_maskz_permute_ps(U, X, C)                                              \
+  ((__m128) __builtin_ia32_vpermilps_mask ((__v4sf)(__m128)(X), (int)(C),          \
+                                         (__v4sf)(__m128)_mm_setzero_ps(),         \
+                                         (__mmask8)(U)))
+
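+/* Mask blends: element i of the result is taken from __W when bit i
+   of __U is set and from __A otherwise, i.e. a per-element select
+   driven purely by the mask.  */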
+#define _mm256_mask_blend_pd(__U, __A, __W)                          \
+  ((__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) (__A),       \
+                                                    (__v4df) (__W),  \
+                                                    (__mmask8) (__U)))
+
+#define _mm256_mask_blend_ps(__U, __A, __W)                          \
+  ((__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) (__A),        \
+                                                   (__v8sf) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm256_mask_blend_epi64(__U, __A, __W)                       \
+  ((__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) (__A),        \
+                                                   (__v4di) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm256_mask_blend_epi32(__U, __A, __W)                       \
+  ((__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) (__A),        \
+                                                   (__v8si) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm_mask_blend_pd(__U, __A, __W)                             \
+  ((__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) (__A),       \
+                                                    (__v2df) (__W),  \
+                                                    (__mmask8) (__U)))
+
+#define _mm_mask_blend_ps(__U, __A, __W)                             \
+  ((__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) (__A),        \
+                                                   (__v4sf) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm_mask_blend_epi64(__U, __A, __W)                          \
+  ((__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) (__A),        \
+                                                   (__v2di) (__W),   \
+                                                   (__mmask8) (__U)))
+
+#define _mm_mask_blend_epi32(__U, __A, __W)                          \
+  ((__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) (__A),        \
+                                                   (__v4si) (__W),   \
+                                                   (__mmask8) (__U)))
+
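+/* Mask-producing compares: P selects the predicate (_MM_CMPINT_* for
+   the integer forms, _CMP_* for the float forms) and bit i of the
+   result reflects element i.  E.g.
+     __mmask8 m = _mm256_cmp_epi32_mask (a, b, _MM_CMPINT_LT);  */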
+#define _mm256_cmp_epu32_mask(X, Y, P)                                 \
+  ((__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si)(__m256i)(X),      \
+                                           (__v8si)(__m256i)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm256_cmp_epi64_mask(X, Y, P)                                 \
+  ((__mmask8) __builtin_ia32_cmpq256_mask ((__v4di)(__m256i)(X),       \
+                                          (__v4di)(__m256i)(Y), (int)(P),\
+                                          (__mmask8)-1))
+
+#define _mm256_cmp_epi32_mask(X, Y, P)                                 \
+  ((__mmask8) __builtin_ia32_cmpd256_mask ((__v8si)(__m256i)(X),       \
+                                          (__v8si)(__m256i)(Y), (int)(P),\
+                                          (__mmask8)-1))
+
+#define _mm256_cmp_epu64_mask(X, Y, P)                                 \
+  ((__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di)(__m256i)(X),      \
+                                           (__v4di)(__m256i)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm256_cmp_pd_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_cmppd256_mask ((__v4df)(__m256d)(X),      \
+                                           (__v4df)(__m256d)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm256_cmp_ps_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf)(__m256)(X),       \
+                                            (__v8sf)(__m256)(Y), (int)(P),\
+                                            (__mmask8)-1))
+
+#define _mm256_mask_cmp_epi64_mask(M, X, Y, P)                         \
+  ((__mmask8) __builtin_ia32_cmpq256_mask ((__v4di)(__m256i)(X),       \
+                                          (__v4di)(__m256i)(Y), (int)(P),\
+                                          (__mmask8)(M)))
+
+#define _mm256_mask_cmp_epi32_mask(M, X, Y, P)                         \
+  ((__mmask8) __builtin_ia32_cmpd256_mask ((__v8si)(__m256i)(X),       \
+                                          (__v8si)(__m256i)(Y), (int)(P),\
+                                          (__mmask8)(M)))
+
+#define _mm256_mask_cmp_epu64_mask(M, X, Y, P)                         \
+  ((__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di)(__m256i)(X),      \
+                                           (__v4di)(__m256i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm256_mask_cmp_epu32_mask(M, X, Y, P)                         \
+  ((__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si)(__m256i)(X),      \
+                                           (__v8si)(__m256i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm256_mask_cmp_pd_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmppd256_mask ((__v4df)(__m256d)(X),      \
+                                           (__v4df)(__m256d)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm256_mask_cmp_ps_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf)(__m256)(X),       \
+                                            (__v8sf)(__m256)(Y), (int)(P),\
+                                            (__mmask8)(M)))
+
+#define _mm_cmp_epi64_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_cmpq128_mask ((__v2di)(__m128i)(X),       \
+                                          (__v2di)(__m128i)(Y), (int)(P),\
+                                          (__mmask8)-1))
+
+#define _mm_cmp_epi32_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_cmpd128_mask ((__v4si)(__m128i)(X),       \
+                                          (__v4si)(__m128i)(Y), (int)(P),\
+                                          (__mmask8)-1))
+
+#define _mm_cmp_epu64_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di)(__m128i)(X),      \
+                                           (__v2di)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm_cmp_epu32_mask(X, Y, P)                                    \
+  ((__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si)(__m128i)(X),      \
+                                           (__v4si)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm_cmp_pd_mask(X, Y, P)                                       \
+  ((__mmask8) __builtin_ia32_cmppd128_mask ((__v2df)(__m128d)(X),      \
+                                           (__v2df)(__m128d)(Y), (int)(P),\
+                                           (__mmask8)-1))
+
+#define _mm_cmp_ps_mask(X, Y, P)                                       \
+  ((__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf)(__m128)(X),       \
+                                            (__v4sf)(__m128)(Y), (int)(P),\
+                                            (__mmask8)-1))
+
+#define _mm_mask_cmp_epi64_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmpq128_mask ((__v2di)(__m128i)(X),       \
+                                          (__v2di)(__m128i)(Y), (int)(P),\
+                                          (__mmask8)(M)))
+
+#define _mm_mask_cmp_epi32_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_cmpd128_mask ((__v4si)(__m128i)(X),       \
+                                          (__v4si)(__m128i)(Y), (int)(P),\
+                                          (__mmask8)(M)))
+
+#define _mm_mask_cmp_epu64_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di)(__m128i)(X),      \
+                                           (__v2di)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm_mask_cmp_epu32_mask(M, X, Y, P)                            \
+  ((__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si)(__m128i)(X),      \
+                                           (__v4si)(__m128i)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm_mask_cmp_pd_mask(M, X, Y, P)                               \
+  ((__mmask8) __builtin_ia32_cmppd128_mask ((__v2df)(__m128d)(X),      \
+                                           (__v2df)(__m128d)(Y), (int)(P),\
+                                           (__mmask8)(M)))
+
+#define _mm_mask_cmp_ps_mask(M, X, Y, P)                               \
+  ((__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf)(__m128)(X),       \
+                                            (__v4sf)(__m128)(Y), (int)(P),\
+                                            (__mmask8)(M)))
+
+#endif /* __OPTIMIZE__ */
+
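+/* AVX-512-style name for the AVX2 cross-lane permute; note the
+   swapped operand order (index vector first).  */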
+#define _mm256_permutexvar_ps(A, B)    _mm256_permutevar8x32_ps((B), (A))
+
+#ifdef __DISABLE_AVX512VL__
+#undef __DISABLE_AVX512VL__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX512VL__ */
+
+#endif /* _AVX512VLINTRIN_H_INCLUDED */
index 73b48599277d248a25976ad3534e695abde4c468..5d921822248a180607843435f78a388eddfd1519 100644 (file)
 
 #include <avx512cdintrin.h>
 
+#include <avx512vlintrin.h>
+
+#include <avx512bwintrin.h>
+
+#include <avx512dqintrin.h>
+
+#include <avx512vlbwintrin.h>
+
+#include <avx512vldqintrin.h>
+
 #include <shaintrin.h>
 
 #include <lzcntintrin.h>