Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/Basic/Targets.cpp      |   2
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp  |  24
-rw-r--r--  clang/lib/Headers/emmintrin.h    |  35
-rw-r--r--  clang/lib/Headers/ia32intrin.h   |   6
-rw-r--r--  clang/lib/Headers/intrin.h       | 127
-rw-r--r--  clang/lib/Headers/xmmintrin.h    |  29
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp      |   4
7 files changed, 115 insertions, 112 deletions
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index b549d1485ba..62ecd1e28ce 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -2303,6 +2303,8 @@ const Builtin::Info BuiltinInfo[] = {
   { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
   { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+  { #ID, TYPE, ATTRS, HEADER, LANGS, FEATURE },
 #include "clang/Basic/BuiltinsX86.def"
 };
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 27c0b837766..e105a06b147 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -681,6 +681,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                           "cast");
     return RValue::get(Result);
   }
+  case Builtin::BI__popcnt16:
+  case Builtin::BI__popcnt:
+  case Builtin::BI__popcnt64:
   case Builtin::BI__builtin_popcount:
   case Builtin::BI__builtin_popcountl:
   case Builtin::BI__builtin_popcountll: {
@@ -6954,6 +6957,25 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
     return Builder.CreateCall(F, {Address, RW, Locality, Data});
   }
+  case X86::BI_mm_clflush: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
+                              Ops[0]);
+  }
+  case X86::BI_mm_lfence: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
+  }
+  case X86::BI_mm_mfence: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
+  }
+  case X86::BI_mm_sfence: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
+  }
+  case X86::BI_mm_pause: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
+  }
+  case X86::BI__rdtsc: {
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
+  }
   case X86::BI__builtin_ia32_undef128:
   case X86::BI__builtin_ia32_undef256:
   case X86::BI__builtin_ia32_undef512:
@@ -6966,12 +6988,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_vec_ext_v2si:
     return Builder.CreateExtractElement(Ops[0],
                                   llvm::ConstantInt::get(Ops[1]->getType(), 0));
+  case X86::BI_mm_setcsr:
   case X86::BI__builtin_ia32_ldmxcsr: {
     Address Tmp = CreateMemTemp(E->getArg(0)->getType());
     Builder.CreateStore(Ops[0], Tmp);
     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                           Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
   }
+  case X86::BI_mm_getcsr:
   case X86::BI__builtin_ia32_stmxcsr: {
     Address Tmp = CreateMemTemp(E->getType());
     Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index d73b3a8eda1..3703b4879c2 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -2447,6 +2447,10 @@ _mm_stream_si64(long long *__p, long long __a)
 }
 #endif
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /// \brief The cache line containing __p is flushed and invalidated from all
 ///    caches in the coherency domain.
 ///
 /// \headerfile <x86intrin.h>
 ///
 /// This intrinsic corresponds to the \c CLFLUSH instruction.
 ///
 /// \param __p
 ///    A pointer to the memory location used to identify the cache line to be
 ///    flushed.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_clflush(void const *__p)
-{
-  __builtin_ia32_clflush(__p);
-}
+void _mm_clflush(void const *);
 /// \brief Forces strong memory ordering (serialization) between load
 ///    instructions preceding this instruction and load instructions following
 ///
@@ -2472,11 +2472,7 @@ _mm_clflush(void const *__p)
 ///
 /// This intrinsic corresponds to the \c LFENCE instruction.
 ///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_lfence(void)
-{
-  __builtin_ia32_lfence();
-}
+void _mm_lfence(void);
 /// \brief Forces strong memory ordering (serialization) between load and store
 ///    instructions preceding this instruction and load and store instructions
 ///
@@ -2487,11 +2483,11 @@ _mm_lfence(void)
 ///
 /// This intrinsic corresponds to the \c MFENCE instruction.
 ///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_mfence(void)
-{
-  __builtin_ia32_mfence();
-}
+void _mm_mfence(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
 /// \brief Converts 16-bit signed integers from both 128-bit integer vector
 ///    operands into 8-bit signed integers, and packs the results into the
@@ -3213,11 +3209,10 @@ _mm_castsi128_pd(__m128i __a)
 ///
 /// This intrinsic corresponds to the \c PAUSE instruction.
 ///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_pause(void)
-{
-  __builtin_ia32_pause();
-}
+#if defined(__cplusplus)
+extern "C"
+#endif
+void _mm_pause(void);
 #undef __DEFAULT_FN_ATTRS
diff --git a/clang/lib/Headers/ia32intrin.h b/clang/lib/Headers/ia32intrin.h
index 397f3fd13e0..4928300103a 100644
--- a/clang/lib/Headers/ia32intrin.h
+++ b/clang/lib/Headers/ia32intrin.h
@@ -60,12 +60,6 @@ __rdpmc(int __A) {
   return __builtin_ia32_rdpmc(__A);
 }
-/* __rdtsc */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rdtsc(void) {
-  return __builtin_ia32_rdtsc();
-}
-
 /* __rdtscp */
 static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
 __rdtscp(unsigned int *__A) {
diff --git a/clang/lib/Headers/intrin.h b/clang/lib/Headers/intrin.h
index c19202eb613..ed2032f35e5 100644
--- a/clang/lib/Headers/intrin.h
+++ b/clang/lib/Headers/intrin.h
@@ -463,14 +463,6 @@ _BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
   *_Index = 31 - __builtin_clzl(_Mask);
   return 1;
 }
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-__popcnt16(unsigned short _Value) {
-  return __builtin_popcount((int)_Value);
-}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__popcnt(unsigned int _Value) {
-  return __builtin_popcount(_Value);
-}
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _bittest(long const *_BitBase, long _BitPos) {
   return (*_BitBase >> _BitPos) & 1;
@@ -513,11 +505,6 @@ _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
   *_Index = 63 - __builtin_clzll(_Mask);
   return 1;
 }
-static __inline__
-unsigned __int64 __DEFAULT_FN_ATTRS
-__popcnt64(unsigned __int64 _Value) {
-  return __builtin_popcountll(_Value);
-}
 static __inline__ unsigned char __DEFAULT_FN_ATTRS
 _bittest64(__int64 const *_BitBase, __int64 _BitPos) {
   return (*_BitBase >> _BitPos) & 1;
@@ -546,63 +533,63 @@ _interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
       __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
   return (_PrevVal >> _BitPos) & 1;
 }
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Sub
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
- return _Value;
-}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+  return _Value;
+}
 #endif
 /*----------------------------------------------------------------------------*\
 |* Barriers
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 373fc76aa7c..2d86482f9f8 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -2094,11 +2094,10 @@ _mm_stream_ps(float *__p, __m128 __a)
 ///
 /// This intrinsic corresponds to the \c SFENCE instruction.
 ///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_sfence(void)
-{
-  __builtin_ia32_sfence();
-}
+#if defined(__cplusplus)
+extern "C"
+#endif
+void _mm_sfence(void);
 /// \brief Extracts 16-bit element from a 64-bit vector of [4 x i16] and
 ///    returns it, as specified by the immediate integer operand.
@@ -2376,6 +2375,10 @@ _mm_sad_pu8(__m64 __a, __m64 __b)
   return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
 }
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
 ///    integer value. There are several groups of macros associated with this
 ///    intrinsic, including:
@@ -2408,11 +2411,7 @@ _mm_sad_pu8(__m64 __a, __m64 __b)
 ///
 /// \returns A 32-bit unsigned integer containing the contents of the MXCSR
 ///    register.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_getcsr(void)
-{
-  return __builtin_ia32_stmxcsr();
-}
+unsigned int _mm_getcsr(void);
 /// \brief Sets the MXCSR register with the 32-bit unsigned integer value. There
 ///    are several groups of macros associated with this intrinsic, including:
@@ -2450,11 +2449,11 @@ _mm_getcsr(void)
 ///
 /// \param __i
 ///    A 32-bit unsigned integer value to be written to the MXCSR register.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_setcsr(unsigned int __i)
-{
-  __builtin_ia32_ldmxcsr(__i);
-}
+void _mm_setcsr(unsigned int);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
 /// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
 ///    specified by the immediate value operand.
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 67a3497efb4..f7b67658e60 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -1791,7 +1791,9 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
     return nullptr;
   }
-  if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
+  if (!ForRedeclaration &&
+      (Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
+       Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
     Diag(Loc, diag::ext_implicit_lib_function_decl)
       << Context.BuiltinInfo.getName(ID) << R;
     if (Context.BuiltinInfo.getHeaderName(ID) &&
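Usage note (not part of the commit): the hunks above turn these intrinsics from static inline header wrappers into plain extern "C" prototypes backed by TARGET_HEADER_BUILTIN entries, which CGBuiltin.cpp lowers directly to the corresponding LLVM intrinsics (for example, _mm_getcsr to llvm.x86.sse.stmxcsr and _mm_pause to llvm.x86.sse2.pause). Call sites are unchanged; the following minimal sketch assumes an x86 target with SSE2 available.

/* Minimal sketch: the affected intrinsics are still called the same way,
 * only their declarations and code generation path have changed. */
#include <emmintrin.h>   /* _mm_clflush, _mm_mfence, _mm_pause */
#include <xmmintrin.h>   /* _mm_getcsr, _mm_setcsr, _mm_sfence */

static int buffer[16];

int main(void) {
  unsigned int csr = _mm_getcsr(); /* read MXCSR (llvm.x86.sse.stmxcsr) */
  _mm_setcsr(csr);                 /* write MXCSR back (llvm.x86.sse.ldmxcsr) */

  buffer[0] = 42;
  _mm_clflush(&buffer[0]);         /* flush the cache line holding buffer[0] */
  _mm_mfence();                    /* full load/store ordering */
  _mm_pause();                     /* spin-wait hint */
  return 0;
}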