| author | Gheorghe-Teodor Bercea <gheorghe-teod.bercea@ibm.com> | 2019-05-15 20:18:21 +0000 |
| committer | Gheorghe-Teodor Bercea <gheorghe-teod.bercea@ibm.com> | 2019-05-15 20:18:21 +0000 |
| commit | 7641f310d7b0e763e6df6060a672901e915a2a5a (patch) | |
| tree | 5de8915541471d6ccb20337ac74ac62736d4220f /clang/lib/Headers/__clang_cuda_device_functions.h | |
| parent | f6c645f9fd93190be479fff697cff9a45c208bbf (diff) | |
[OpenMP][bugfix] Fix issues with C++ 17 compilation when handling math functions
Summary: In OpenMP device offloading we must ensure that under C++17 the inclusion of cstdlib works correctly.
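The conflict behind that one-liner is easiest to see in isolation. The sketch below is illustrative only: `HOST_THROW` and `host_abs` are hypothetical stand-ins for the way a hosted `<cstdlib>` may qualify its declarations under C++17, not the real headers, but they show why a device-side wrapper has to redeclare the function with a matching exception specification.

```cpp
// Hypothetical stand-ins: HOST_THROW mimics how a hosted header may
// qualify its C library declarations; host_abs stands in for abs().
#if defined(__cplusplus) && __cplusplus >= 201703L
#define HOST_THROW noexcept
#else
#define HOST_THROW
#endif
extern "C" int host_abs(int) HOST_THROW; // the "header-defined version"

// A wrapper that redeclares the function must carry the same exception
// specification, so it mirrors the same condition; this is the
// __NOEXCEPT pattern the patch introduces:
#if defined(__cplusplus) && __cplusplus >= 201703L
#define __NOEXCEPT noexcept
#else
#define __NOEXCEPT
#endif
extern "C" int host_abs(int __a) __NOEXCEPT { return __a < 0 ? -__a : __a; }
#undef __NOEXCEPT
#undef HOST_THROW

int main() { return host_abs(-3) == 3 ? 0 : 1; }
```

In the patch itself, `__NOEXCEPT` is applied to `abs`, `fabs`, `labs`, and `llabs` and is `#undef`ed at the end of the header; `fabs` is also moved up next to `abs` so the qualified declarations sit together.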
Reviewers: ABataev, tra, jdoerfert, hfinkel, caomhin
Reviewed By: jdoerfert
Subscribers: Hahnfeld, guansong, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D61949
llvm-svn: 360804
Diffstat (limited to 'clang/lib/Headers/__clang_cuda_device_functions.h')
-rw-r--r-- | clang/lib/Headers/__clang_cuda_device_functions.h | 20 |
1 file changed, 15 insertions, 5 deletions
diff --git a/clang/lib/Headers/__clang_cuda_device_functions.h b/clang/lib/Headers/__clang_cuda_device_functions.h
index af3cea739d1..50ad674f948 100644
--- a/clang/lib/Headers/__clang_cuda_device_functions.h
+++ b/clang/lib/Headers/__clang_cuda_device_functions.h
@@ -37,6 +37,15 @@
 #define __FAST_OR_SLOW(fast, slow) slow
 #endif
 
+// For C++ 17 we need to include noexcept attribute to be compatible
+// with the header-defined version. This may be removed once
+// variant is supported.
+#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
+#define __NOEXCEPT noexcept
+#else
+#define __NOEXCEPT
+#endif
+
 __DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
 __DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
 __DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -1474,7 +1483,8 @@ __DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
   return r;
 }
 #endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
 __DEVICE__ double acos(double __a) { return __nv_acos(__a); }
 __DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
 __DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
@@ -1533,7 +1543,6 @@ __DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
 __DEVICE__ float expf(float __a) { return __nv_expf(__a); }
 __DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
 __DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
 __DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
 __DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
 __DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
@@ -1572,15 +1581,15 @@ __DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
 __DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
 __DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
 #if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };
+__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
 #else
-__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
+__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
 #endif
 __DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
 __DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
 __DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
 __DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
 __DEVICE__ long long llmax(long long __a, long long __b) {
   return __nv_llmax(__a, __b);
 }
@@ -1778,6 +1787,7 @@ __DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
 __DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
 __DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
 
+#undef __NOEXCEPT
 #pragma pop_macro("__DEVICE__")
 #pragma pop_macro("__FAST_OR_SLOW")
 #endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
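For completeness, a hypothetical reproducer for the scenario the patch fixes might look like the following. The compile line is an assumption (offload triple and flags vary by installation) and is not taken from the commit.

```cpp
// repro.cpp: hypothetical reproducer. Pull in the hosted C++17 headers
// and call the affected math functions inside an OpenMP target region.
// Assumed compile line (installation-dependent):
//   clang++ -std=c++17 -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda repro.cpp
#include <cmath>
#include <cstdlib>

int main() {
  double result = 0.0;
#pragma omp target map(tofrom : result)
  { result = std::fabs(-1.5) + std::abs(-2); }
  // 1.5 + 2 is exactly representable, so the comparison is reliable.
  return result == 3.5 ? 0 : 1;
}
```

Before this change, including `<cstdlib>` in such a C++17 translation unit did not work correctly because the device declarations of `abs` and friends lacked the `noexcept` needed to stay compatible with the header-defined versions.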