author    Craig Topper <craig.topper@gmail.com>    2016-07-21 07:38:39 +0000
committer Craig Topper <craig.topper@gmail.com>    2016-07-21 07:38:39 +0000
commit    45db56c375a2f085b6eceb476349cd177bdff7a4 (patch)
tree      3bdee2ec84075e13ad6abc56ec25d41270eeadc1 /clang/lib
parent    825e4ab9e39ce2a3f115e19ecb09fd97c8a03432 (diff)
[X86] Add missing __x86_64__ qualifiers on a bunch of intrinsics that assume 64-bit GPRs are available.
Usages of these intrinsics in a 32-bit build result in assertions in the backend.

llvm-svn: 276249
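In practice, each guarded name simply disappears from 32-bit compilations, turning a backend assertion into an ordinary "undeclared identifier" diagnostic. A minimal caller-side sketch of the same guard pattern, assuming an AVX-512F target; only the intrinsic name and the __x86_64__ macro come from this patch, and the 32-bit fallback is illustrative:

#include <immintrin.h>

/* Convert the low double to a 64-bit integer using the current
 * rounding mode. _mm_cvt_roundsd_si64 only exists on x86-64, so
 * 32-bit builds take an assumed, narrower fallback instead. */
static long long cvt_low_sd(__m128d v) {
#ifdef __x86_64__
  return _mm_cvt_roundsd_si64(v, _MM_FROUND_CUR_DIRECTION);
#else
  return (long long)_mm_cvtsd_si32(v);  /* illustrative fallback */
#endif
}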
Diffstat (limited to 'clang/lib')
-rw-r--r--   clang/lib/Headers/avx512bwintrin.h     2
-rw-r--r--   clang/lib/Headers/avx512fintrin.h     36
-rw-r--r--   clang/lib/Headers/avx512vlbwintrin.h   2
-rw-r--r--   clang/lib/Headers/avx512vlintrin.h     2
-rw-r--r--   clang/lib/Headers/fxsrintrin.h        10
-rw-r--r--   clang/lib/Headers/xmmintrin.h          2
6 files changed, 47 insertions, 7 deletions
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index d3c5a6c9644..169d798c03c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -2099,6 +2099,7 @@ _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
(__v64qi) _mm512_setzero_hi ());
}
+#ifdef __x86_64__
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
{
@@ -2115,6 +2116,7 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
_mm512_setzero_qi(),
__M);
}
+#endif
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index badc43650ba..49d047c9207 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -5911,8 +5911,10 @@ _mm512_kmov (__mmask16 __A)
(int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
(int)(P), (int)(R)); })
+#ifdef __x86_64__
#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
@@ -6309,8 +6311,10 @@ _mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)(__m512i)(C), (int)(imm), \
(__mmask8)(U)); })
+#ifdef __x86_64__
#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#endif
#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6328,6 +6332,7 @@ _mm_cvtsd_u32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
(int)(R)); })
@@ -6339,6 +6344,7 @@ _mm_cvtsd_u64 (__m128d __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6346,11 +6352,13 @@ _mm_cvtsd_u64 (__m128d __A)
#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#ifdef __x86_64__
#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#endif
#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6362,6 +6370,7 @@ _mm_cvtss_u32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
(int)(R)); })
@@ -6373,6 +6382,7 @@ _mm_cvtss_u64 (__m128 __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6387,6 +6397,7 @@ _mm_cvttsd_i32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
@@ -6399,6 +6410,7 @@ _mm_cvttsd_i64 (__m128d __A)
return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6410,6 +6422,7 @@ _mm_cvttsd_u32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
(int)(R)); })
@@ -6421,6 +6434,7 @@ _mm_cvttsd_u64 (__m128d __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6435,6 +6449,7 @@ _mm_cvttss_i32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
@@ -6447,6 +6462,7 @@ _mm_cvttss_i64 (__m128 __A)
return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6458,6 +6474,7 @@ _mm_cvttss_u32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
(int)(R)); })
@@ -6469,6 +6486,7 @@ _mm_cvttss_u64 (__m128 __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U,
@@ -9341,14 +9359,17 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
}
#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtss_i64 _mm_cvtss_si64
#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti64_sd _mm_cvtsi64_sd
#define _mm_cvti32_ss _mm_cvtsi32_ss
+#ifdef __x86_64__
+#define _mm_cvtss_i64 _mm_cvtss_si64
+#define _mm_cvtsd_i64 _mm_cvtsd_si64
+#define _mm_cvti64_sd _mm_cvtsi64_sd
#define _mm_cvti64_ss _mm_cvtsi64_ss
+#endif
+#ifdef __x86_64__
#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
(int)(R)); })
@@ -9356,6 +9377,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
(int)(R)); })
+#endif
#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
@@ -9363,6 +9385,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+#ifdef __x86_64__
#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
(int)(R)); })
@@ -9370,6 +9393,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
(int)(R)); })
+#endif
#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
@@ -9413,6 +9437,7 @@ _mm_cvtu32_sd (__m128d __A, unsigned __B)
return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
}
+#ifdef __x86_64__
#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
(unsigned long long)(B), (int)(R)); })
@@ -9423,6 +9448,7 @@ _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
@@ -9435,6 +9461,7 @@ _mm_cvtu32_ss (__m128 __A, unsigned __B)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
(unsigned long long)(B), (int)(R)); })
@@ -9445,6 +9472,7 @@ _mm_cvtu64_ss (__m128 __A, unsigned long long __B)
return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
@@ -9453,12 +9481,14 @@ _mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
__M);
}
+#ifdef __x86_64__
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
{
return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O,
__M);
}
+#endif
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_set_epi32 (int __A, int __B, int __C, int __D,
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 990e992a113..bacfb1713b9 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2887,6 +2887,7 @@ _mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
}
+#ifdef __x86_64__
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
{
@@ -2920,6 +2921,7 @@ _mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
_mm256_setzero_si256 (),
__M);
}
+#endif
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 295ce291f7c..3c90f6485e1 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -5975,6 +5975,7 @@ _mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
(__v8si)_mm256_setzero_si256(), \
(__mmask8)(M)); })
+#ifdef __x86_64__
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
{
@@ -6006,6 +6007,7 @@ _mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
_mm256_setzero_si256 (),
__M);
}
+#endif
#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \
(__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
diff --git a/clang/lib/Headers/fxsrintrin.h b/clang/lib/Headers/fxsrintrin.h
index ac6026aa5ba..f77aa484b58 100644
--- a/clang/lib/Headers/fxsrintrin.h
+++ b/clang/lib/Headers/fxsrintrin.h
@@ -36,19 +36,21 @@ _fxsave(void *__p) {
}
static __inline__ void __DEFAULT_FN_ATTRS
-_fxsave64(void *__p) {
- return __builtin_ia32_fxsave64(__p);
+_fxrstor(void *__p) {
+ return __builtin_ia32_fxrstor(__p);
}
+#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
-_fxrstor(void *__p) {
- return __builtin_ia32_fxrstor(__p);
+_fxsave64(void *__p) {
+ return __builtin_ia32_fxsave64(__p);
}
static __inline__ void __DEFAULT_FN_ATTRS
_fxrstor64(void *__p) {
return __builtin_ia32_fxrstor64(__p);
}
+#endif
#undef __DEFAULT_FN_ATTRS
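The fxsrintrin.h hunk above reorders _fxrstor ahead of the new guard so that only the 64-bit forms (_fxsave64, _fxrstor64) become x86-64-only. A hedged usage sketch of both paths; the 512-byte, 16-byte-aligned save area is mandated by the FXSAVE architecture (not by this diff), the wrapper names are hypothetical, and the fxsr target feature must be enabled:

#include <immintrin.h>
#include <stdalign.h>

/* FXSAVE/FXRSTOR read and write a 512-byte, 16-byte-aligned area. */
static alignas(16) unsigned char fx_area[512];

static void checkpoint_fp_state(void) {
#ifdef __x86_64__
  _fxsave64(fx_area);   /* 64-bit form, now guarded */
#else
  _fxsave(fx_area);     /* 32-bit form stays available everywhere */
#endif
}

static void restore_fp_state(void) {
#ifdef __x86_64__
  _fxrstor64(fx_area);
#else
  _fxrstor(fx_area);
#endif
}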
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 99cddb0fac8..373fc76aa7c 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -1371,6 +1371,7 @@ _mm_cvtt_ss2si(__m128 __a)
return _mm_cvttss_si32(__a);
}
+#ifdef __x86_64__
/// \brief Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 64-bit integer, truncating the result when it is
/// inexact.
@@ -1388,6 +1389,7 @@ _mm_cvttss_si64(__m128 __a)
{
return __builtin_ia32_cvttss2si64((__v4sf)__a);
}
+#endif
/// \brief Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result
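Finally, a minimal usage sketch for the now-guarded _mm_cvttss_si64 from the xmmintrin.h hunk; the 32-bit branch is an assumption (a plain C cast also truncates toward zero for in-range values):

#include <xmmintrin.h>

/* Truncate the low float of a [4 x float] vector to an integer. */
static long long low_float_truncated(__m128 v) {
#ifdef __x86_64__
  return _mm_cvttss_si64(v);           /* cvttss2si into a 64-bit GPR */
#else
  return (long long)_mm_cvtss_f32(v);  /* assumed fallback via C cast */
#endif
}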