author     Justin Lebar <jlebar@google.com>  2016-06-09 20:04:57 +0000
committer  Justin Lebar <jlebar@google.com>  2016-06-09 20:04:57 +0000
commit     4fb571175168d9b88dd3c31df1713a0e42fc1972 (patch)
tree       fc383b848bfc73eee8c5b32bb9047783668f64ea /clang/lib/Headers/__clang_cuda_intrinsics.h
parent     ed2c282d4b1dfba6494934242fbaa626c1fdba74 (diff)
[CUDA] Implement __shfl* intrinsics in clang headers.
Summary: Clang changes to make use of the LLVM intrinsics added in D21160.

Reviewers: tra

Subscribers: jholewinski, cfe-commits

Differential Revision: http://reviews.llvm.org/D21162

llvm-svn: 272299
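(Editor's note: a minimal usage sketch of what the new overloads enable, not part of the commit. The kernel and helper names below are made up, and the sketch assumes an sm_30+ target compiled with clang's CUDA support; it uses the pre-CUDA-9 non-sync shuffle API that this header provides.)

// Warp-wide sum using the __shfl_down overload added by this patch (the
// double overload round-trips through two 32-bit shuffles internally).
// Assumes blockDim.x is a multiple of warpSize.
__device__ double warp_sum(double val) {
  // Each step adds the value held by the lane `offset` positions higher;
  // after log2(warpSize) steps, lane 0 holds the sum for the whole warp.
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    val += __shfl_down(val, offset);
  return val;
}

__global__ void warp_sums(const double *in, double *out) {
  double s = warp_sum(in[threadIdx.x]);
  if (threadIdx.x % warpSize == 0)
    out[threadIdx.x / warpSize] = s;  // one result per warp
}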
Diffstat (limited to 'clang/lib/Headers/__clang_cuda_intrinsics.h')
-rw-r--r--  clang/lib/Headers/__clang_cuda_intrinsics.h | 70
1 file changed, 70 insertions, 0 deletions
diff --git a/clang/lib/Headers/__clang_cuda_intrinsics.h b/clang/lib/Headers/__clang_cuda_intrinsics.h
index 4ca2940d24a..de5171720ce 100644
--- a/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -26,6 +26,76 @@
#error "This file is for CUDA compilation only."
#endif
+// sm_30 intrinsics: __shfl_{up,down,xor}.
+
+#define __SM_30_INTRINSICS_H__
+#define __SM_30_INTRINSICS_HPP__
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
+#pragma push_macro("__MAKE_SHUFFLES")
+#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask) \
+ inline __device__ int __FnName(int __in, int __offset, \
+ int __width = warpSize) { \
+ return __IntIntrinsic(__in, __offset, \
+ ((warpSize - __width) << 8) | (__Mask)); \
+ } \
+ inline __device__ float __FnName(float __in, int __offset, \
+ int __width = warpSize) { \
+ return __FloatIntrinsic(__in, __offset, \
+ ((warpSize - __width) << 8) | (__Mask)); \
+ } \
+ inline __device__ unsigned int __FnName(unsigned int __in, int __offset, \
+ int __width = warpSize) { \
+ return static_cast<unsigned int>( \
+ ::__FnName(static_cast<int>(__in), __offset, __width)); \
+ } \
+ inline __device__ long long __FnName(long long __in, int __offset, \
+ int __width = warpSize) { \
+ struct __Bits { \
+ int __a, __b; \
+ }; \
+ _Static_assert(sizeof(__in) == sizeof(__Bits)); \
+ _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
+ __Bits __tmp; \
+ memcpy(&__tmp, &__in, sizeof(__in)); \
+ __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \
+ __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \
+ long long __out; \
+ memcpy(&__out, &__tmp, sizeof(__tmp)); \
+ return __out; \
+ } \
+ inline __device__ unsigned long long __FnName( \
+ unsigned long long __in, int __offset, int __width = warpSize) { \
+ return static_cast<unsigned long long>( \
+ ::__FnName(static_cast<long long>(__in), __offset, __width)); \
+ } \
+ inline __device__ double __FnName(double __in, int __offset, \
+ int __width = warpSize) { \
+ long long __tmp; \
+ _Static_assert(sizeof(__tmp) == sizeof(__in)); \
+ memcpy(&__tmp, &__in, sizeof(__in)); \
+ __tmp = ::__FnName(__tmp, __offset, __width); \
+ double __out; \
+ memcpy(&__out, &__tmp, sizeof(__out)); \
+ return __out; \
+ }
+
+__MAKE_SHUFFLES(__shfl, __builtin_ptx_shfl_idx_i32, __builtin_ptx_shfl_idx_f32,
+ 0x1f);
+// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=
+// maxLane.
+__MAKE_SHUFFLES(__shfl_up, __builtin_ptx_shfl_up_i32, __builtin_ptx_shfl_up_f32,
+ 0);
+__MAKE_SHUFFLES(__shfl_down, __builtin_ptx_shfl_down_i32,
+ __builtin_ptx_shfl_down_f32, 0x1f);
+__MAKE_SHUFFLES(__shfl_xor, __builtin_ptx_shfl_bfly_i32,
+ __builtin_ptx_shfl_bfly_f32, 0x1f);
+
+#pragma pop_macro("__MAKE_SHUFFLES")
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
+
// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.
// Prevent the vanilla sm_32 intrinsics header from being included.
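(Editor's note: a short sketch of the control-word math used by __MAKE_SHUFFLES above, based on my reading of the macro and the PTX shfl encoding; the helper name is hypothetical and is not part of the header. Bits 12:8 of the third operand carry warpSize - __width, which confines the shuffle to sub-sections of the warp, and bits 4:0 carry the source-lane clamp: 0x1f for idx/down/bfly, 0 for up, as the comment in the patch explains. The 64-bit overloads simply split the value into two 32-bit halves, shuffle each, and reassemble.)

// Hypothetical helper mirroring the macro's control-word computation.
// For __shfl_down with width 16 on a 32-lane warp this yields
// ((32 - 16) << 8) | 0x1f == 0x101f.
__device__ inline int make_shfl_control(int width, int mask) {
  // bits 12:8: segment field derived from warpSize - width
  // bits  4:0: source-lane clamp (0x1f for idx/down/bfly, 0 for up)
  return ((warpSize - width) << 8) | mask;
}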