summaryrefslogtreecommitdiffstats
path: root/clang/lib
diff options
context:
space:
mode:
authorMichael Zuckerman <Michael.zuckerman@intel.com>2016-03-28 12:23:09 +0000
committerMichael Zuckerman <Michael.zuckerman@intel.com>2016-03-28 12:23:09 +0000
commitdef78750b74f7d6f711840c9a8fe59f09cc3c5f8 (patch)
tree245387ac10a011762ccb523e4760bdf0c590c550 /clang/lib
parent5c83a090bc3f1a018a32608fced52e5c652837e0 (diff)
downloadbcm5719-llvm-def78750b74f7d6f711840c9a8fe59f09cc3c5f8.tar.gz
bcm5719-llvm-def78750b74f7d6f711840c9a8fe59f09cc3c5f8.zip
[CLANG][avx512][BUILTIN] Adding fixupimm{pd|ps|sd|ss}
getexp{sd|ss} getmant{sd|ss} kunpck{di|si} loada{pd|ps} loaddqu{di|hi|qi|si} max{sd|ss} min{sd|ss} kmov16 builtins to clang Differential Revision: http://reviews.llvm.org/D18215 llvm-svn: 264574
Diffstat (limited to 'clang/lib')
-rw-r--r--clang/lib/Headers/avx512bwintrin.h47
-rw-r--r--clang/lib/Headers/avx512fintrin.h271
-rw-r--r--clang/lib/Headers/avx512vlbwintrin.h68
-rw-r--r--clang/lib/Headers/avx512vlintrin.h300
4 files changed, 686 insertions, 0 deletions
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index afa79b7d173..f641c71256c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1934,6 +1934,53 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
__M);
}
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+ (__mmask64) __B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+ (__mmask32) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
+ (__v32hi)
+ _mm512_setzero_hi (),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
+{
+ return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
+ (__v64qi)
+ _mm512_setzero_hi (),
+ (__mmask64) __U);
+}
#undef __DEFAULT_FN_ATTRS
#endif
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 22a27299e24..e1f81a0e2a1 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -46,6 +46,21 @@ typedef unsigned short __mmask16;
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
/* Normalization interval selector for the getmant intrinsics. */
typedef enum {
  _MM_MANT_NORM_1_2,      /* mantissa in [1, 2)      */
  _MM_MANT_NORM_p5_2,     /* mantissa in [0.5, 2)    */
  _MM_MANT_NORM_p5_1,     /* mantissa in [0.5, 1)    */
  _MM_MANT_NORM_p75_1p5   /* mantissa in [0.75, 1.5) */
} _MM_MANTISSA_NORM_ENUM;

/* Sign-control selector for the getmant intrinsics. */
typedef enum {
  _MM_MANT_SIGN_src,      /* sign = sign(SRC)             */
  _MM_MANT_SIGN_zero,     /* sign = 0                     */
  _MM_MANT_SIGN_nan       /* DEST = NaN if sign(SRC) = 1  */
} _MM_MANTISSA_SIGN_ENUM;
+
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
@@ -3695,6 +3710,262 @@ _mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
(__mmask8) __U);
}
/* 512-bit double-precision fixupimm macro family: fix up elements of __A
   using __B and the per-element table in __C; __imm selects the response.
   The *_round_* forms take an explicit rounding/SAE operand __R, the
   others use _MM_FROUND_CUR_DIRECTION. */
#define _mm512_fixupimm_round_pd(__A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_mask ((__v8df)(__A), (__v8df)(__B), \
                                     (__v8di)(__C), (__imm), \
                                     (__mmask8) -1, (__R)); \
})

#define _mm512_mask_fixupimm_round_pd(__A, __U, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_mask ((__v8df)(__A), (__v8df)(__B), \
                                     (__v8di)(__C), (__imm), \
                                     (__mmask8)(__U), (__R)); \
})

#define _mm512_fixupimm_pd(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_mask ((__v8df)(__A), (__v8df)(__B), \
                                     (__v8di)(__C), (__imm), \
                                     (__mmask8) -1, \
                                     _MM_FROUND_CUR_DIRECTION); \
})

#define _mm512_mask_fixupimm_pd(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_mask ((__v8df)(__A), (__v8df)(__B), \
                                     (__v8di)(__C), (__imm), \
                                     (__mmask8)(__U), \
                                     _MM_FROUND_CUR_DIRECTION); \
})

#define _mm512_maskz_fixupimm_round_pd(__U, __A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_maskz ((__v8df)(__A), (__v8df)(__B), \
                                      (__v8di)(__C), (__imm), \
                                      (__mmask8)(__U), (__R)); \
})

#define _mm512_maskz_fixupimm_pd(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd512_maskz ((__v8df)(__A), (__v8df)(__B), \
                                      (__v8di)(__C), (__imm), \
                                      (__mmask8)(__U), \
                                      _MM_FROUND_CUR_DIRECTION); \
})
+
/* 512-bit single-precision fixupimm macro family; same scheme as the
   _pd variants but on 16 float lanes with a 16-bit mask. */
#define _mm512_fixupimm_round_ps(__A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmps512_mask ((__v16sf)(__A), (__v16sf)(__B), \
                                     (__v16si)(__C), (__imm), \
                                     (__mmask16) -1, (__R)); \
})

#define _mm512_mask_fixupimm_round_ps(__A, __U, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmps512_mask ((__v16sf)(__A), (__v16sf)(__B), \
                                     (__v16si)(__C), (__imm), \
                                     (__mmask16)(__U), (__R)); \
})

#define _mm512_fixupimm_ps(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps512_mask ((__v16sf)(__A), (__v16sf)(__B), \
                                     (__v16si)(__C), (__imm), \
                                     (__mmask16) -1, \
                                     _MM_FROUND_CUR_DIRECTION); \
})

#define _mm512_mask_fixupimm_ps(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps512_mask ((__v16sf)(__A), (__v16sf)(__B), \
                                     (__v16si)(__C), (__imm), \
                                     (__mmask16)(__U), \
                                     _MM_FROUND_CUR_DIRECTION); \
})

#define _mm512_maskz_fixupimm_round_ps(__U, __A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmps512_maskz ((__v16sf)(__A), (__v16sf)(__B), \
                                      (__v16si)(__C), (__imm), \
                                      (__mmask16)(__U), (__R)); \
})

#define _mm512_maskz_fixupimm_ps(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps512_maskz ((__v16sf)(__A), (__v16sf)(__B), \
                                      (__v16si)(__C), (__imm), \
                                      (__mmask16)(__U), \
                                      _MM_FROUND_CUR_DIRECTION); \
})
+
/* Scalar double-precision fixupimm macro family.
   Fix: the __imm argument was expanded unparenthesized in the three
   *_round_sd / maskz variants; it is now parenthesized everywhere,
   consistent with the other fixupimm macros in this file (macro-argument
   hygiene: parenthesize every use of a macro parameter). */
#define _mm_fixupimm_round_sd(__A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmsd_mask ((__v2df)(__A), (__v2df)(__B), \
                                  (__v2di)(__C), (__imm), \
                                  (__mmask8) -1, (__R)); \
})

#define _mm_mask_fixupimm_round_sd(__A, __U, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmsd_mask ((__v2df)(__A), (__v2df)(__B), \
                                  (__v2di)(__C), (__imm), \
                                  (__mmask8)(__U), (__R)); \
})

#define _mm_fixupimm_sd(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmsd_mask ((__v2df)(__A), (__v2df)(__B), \
                                  (__v2di)(__C), (__imm), \
                                  (__mmask8) -1, \
                                  _MM_FROUND_CUR_DIRECTION); \
})

#define _mm_mask_fixupimm_sd(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmsd_mask ((__v2df)(__A), (__v2df)(__B), \
                                  (__v2di)(__C), (__imm), \
                                  (__mmask8)(__U), \
                                  _MM_FROUND_CUR_DIRECTION); \
})

#define _mm_maskz_fixupimm_round_sd(__U, __A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmsd_maskz ((__v2df)(__A), (__v2df)(__B), \
                                   (__v2di)(__C), (__imm), \
                                   (__mmask8)(__U), (__R)); \
})

#define _mm_maskz_fixupimm_sd(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmsd_maskz ((__v2df)(__A), (__v2df)(__B), \
                                   (__v2di)(__C), (__imm), \
                                   (__mmask8)(__U), \
                                   _MM_FROUND_CUR_DIRECTION); \
})
+
/* Scalar single-precision fixupimm macro family; same scheme as the
   _sd variants but on __m128/__v4sf operands. */
#define _mm_fixupimm_round_ss(__A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmss_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                  (__v4si)(__C), (__imm), \
                                  (__mmask8) -1, (__R)); \
})

#define _mm_mask_fixupimm_round_ss(__A, __U, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmss_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                  (__v4si)(__C), (__imm), \
                                  (__mmask8)(__U), (__R)); \
})

#define _mm_fixupimm_ss(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmss_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                  (__v4si)(__C), (__imm), \
                                  (__mmask8) -1, \
                                  _MM_FROUND_CUR_DIRECTION); \
})

#define _mm_mask_fixupimm_ss(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmss_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                  (__v4si)(__C), (__imm), \
                                  (__mmask8)(__U), \
                                  _MM_FROUND_CUR_DIRECTION); \
})

#define _mm_maskz_fixupimm_round_ss(__U, __A, __B, __C, __imm, __R) __extension__ ({ \
  __builtin_ia32_fixupimmss_maskz ((__v4sf)(__A), (__v4sf)(__B), \
                                   (__v4si)(__C), (__imm), \
                                   (__mmask8)(__U), (__R)); \
})

#define _mm_maskz_fixupimm_ss(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmss_maskz ((__v4sf)(__A), (__v4sf)(__B), \
                                   (__v4si)(__C), (__imm), \
                                   (__mmask8)(__U), \
                                   _MM_FROUND_CUR_DIRECTION); \
})
+
+#define _mm_getexp_round_sd( __A, __B ,__R) __extension__ ({ \
+__builtin_ia32_getexpsd128_round_mask ((__v2df)(__A),\
+ (__v2df)( __B), (__v2df) _mm_setzero_pd(), (__mmask8) -1,\
+ ( __R));\
+})
+
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
+ (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_getexp_round_ss( __A, __B, __R) __extension__ ({ \
+__builtin_ia32_getexpss128_round_mask ((__v4sf)( __A),\
+ (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), (__mmask8) -1,\
+ ( __R));\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+ (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
+}
+
/* Scalar getmant: __C is a _MM_MANTISSA_NORM_ENUM and __D a
   _MM_MANTISSA_SIGN_ENUM; they are packed into a single immediate as
   ((__D) << 2) | (__C). */
#define _mm_getmant_round_sd(__A, __B, __C, __D, __R) __extension__ ({ \
  __builtin_ia32_getmantsd_round_mask ((__v2df)(__A), (__v2df)(__B), \
                                       (((__D) << 2) | (__C)), \
                                       (__v2df) _mm_setzero_pd (), \
                                       (__mmask8) -1, (__R)); \
})

#define _mm_getmant_sd(__A, __B, __C, __D) __extension__ ({ \
  __builtin_ia32_getmantsd_round_mask ((__v2df)(__A), (__v2df)(__B), \
                                       (((__D) << 2) | (__C)), \
                                       (__v2df) _mm_setzero_pd (), \
                                       (__mmask8) -1, \
                                       _MM_FROUND_CUR_DIRECTION); \
})

#define _mm_getmant_round_ss(__A, __B, __C, __D, __R) __extension__ ({ \
  __builtin_ia32_getmantss_round_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                       (((__D) << 2) | (__C)), \
                                       (__v4sf) _mm_setzero_ps (), \
                                       (__mmask8) -1, (__R)); \
})

#define _mm_getmant_ss(__A, __B, __C, __D) __extension__ ({ \
  __builtin_ia32_getmantss_round_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                       (((__D) << 2) | (__C)), \
                                       (__v4sf) _mm_setzero_ps (), \
                                       (__mmask8) -1, \
                                       _MM_FROUND_CUR_DIRECTION); \
})
+
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_kmov (__mmask16 __A)
+{
+ return __A;
+}
+
/* Scalar double compare with explicit predicate __P and SAE control __R. */
#define _mm_comi_round_sd(__A, __B, __P, __R) __extension__ ({ \
  __builtin_ia32_vcomisd ((__v2df)(__A), (__v2df)(__B), (__P), (__R)); \
})
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 9209de38cca..bb69b1bfdd3 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2952,6 +2952,74 @@ _mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
_mm256_setzero_si256 (),
__M);
}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
+ (__v8hi)
+ _mm_setzero_hi (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
+ (__v16hi)
+ _mm256_setzero_si256 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
+ (__v16qi)
+ _mm_setzero_si128 (),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
+ (__v32qi)
+ _mm256_setzero_si256 (),
+ (__mmask32) __U);
+}
#undef __DEFAULT_FN_ATTRS
#endif /* __AVX512VLBWINTRIN_H */
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 0949cb9238a..6080a6d5324 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -6068,6 +6068,306 @@ _mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
_mm256_setzero_si256 (),
__M);
}
+
/* 128/256-bit fixupimm macro family (no rounding operand at these widths).
   *_mask_* variants merge under __U; *_maskz_* variants zero under __U. */
#define _mm_fixupimm_pd(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd128_mask ((__v2df)(__A), (__v2df)(__B), \
                                     (__v2di)(__C), (__imm), \
                                     (__mmask8) -1); \
})

#define _mm_mask_fixupimm_pd(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd128_mask ((__v2df)(__A), (__v2df)(__B), \
                                     (__v2di)(__C), (__imm), \
                                     (__mmask8)(__U)); \
})

#define _mm_maskz_fixupimm_pd(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd128_maskz ((__v2df)(__A), (__v2df)(__B), \
                                      (__v2di)(__C), (__imm), \
                                      (__mmask8)(__U)); \
})

#define _mm256_fixupimm_pd(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd256_mask ((__v4df)(__A), (__v4df)(__B), \
                                     (__v4di)(__C), (__imm), \
                                     (__mmask8) -1); \
})

#define _mm256_mask_fixupimm_pd(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd256_mask ((__v4df)(__A), (__v4df)(__B), \
                                     (__v4di)(__C), (__imm), \
                                     (__mmask8)(__U)); \
})

#define _mm256_maskz_fixupimm_pd(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmpd256_maskz ((__v4df)(__A), (__v4df)(__B), \
                                      (__v4di)(__C), (__imm), \
                                      (__mmask8)(__U)); \
})

#define _mm_fixupimm_ps(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps128_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                     (__v4si)(__C), (__imm), \
                                     (__mmask8) -1); \
})

#define _mm_mask_fixupimm_ps(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps128_mask ((__v4sf)(__A), (__v4sf)(__B), \
                                     (__v4si)(__C), (__imm), \
                                     (__mmask8)(__U)); \
})

#define _mm_maskz_fixupimm_ps(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps128_maskz ((__v4sf)(__A), (__v4sf)(__B), \
                                      (__v4si)(__C), (__imm), \
                                      (__mmask8)(__U)); \
})

#define _mm256_fixupimm_ps(__A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps256_mask ((__v8sf)(__A), (__v8sf)(__B), \
                                     (__v8si)(__C), (__imm), \
                                     (__mmask8) -1); \
})

#define _mm256_mask_fixupimm_ps(__A, __U, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps256_mask ((__v8sf)(__A), (__v8sf)(__B), \
                                     (__v8si)(__C), (__imm), \
                                     (__mmask8)(__U)); \
})

#define _mm256_maskz_fixupimm_ps(__U, __A, __B, __C, __imm) __extension__ ({ \
  __builtin_ia32_fixupimmps256_maskz ((__v8sf)(__A), (__v8sf)(__B), \
                                      (__v8si)(__C), (__imm), \
                                      (__mmask8)(__U)); \
})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
+{
+ return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+ return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
+{
+ return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+ return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
+{
+ return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_BOTH
OpenPOWER on IntegriCloud