author     Justin Bogner <mail@justinbogner.com>    2016-07-06 20:02:45 +0000
committer  Justin Bogner <mail@justinbogner.com>    2016-07-06 20:02:45 +0000
commit     a463537a3644d4013b23be20e5446af609342f01 (patch)
tree       fbd009adc55df065612b62a6727a50cfae640225 /llvm/test/CodeGen/NVPTX
parent     2f8de9fb4fea7bd29ffd2ac85e9a0f20ea1410ca (diff)
NVPTX: Replace uses of cuda.syncthreads with nvvm.barrier0
Wherever cuda.syncthreads or __syncthreads is used, use the properly namespaced nvvm.barrier0 instead.

llvm-svn: 274664
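A minimal sketch (not part of this commit; the kernel name and RUN/CHECK lines are illustrative only) of the pattern the migrated tests share: declare the renamed intrinsic, call it, and check for the bar.sync 0 it lowers to in PTX.

; RUN: llc < %s | FileCheck %s
target triple = "nvptx64-nvidia-cuda"

declare void @llvm.nvvm.barrier0()

define void @example_kernel() {
  ; The barrier intrinsic lowers to PTX's bar.sync 0.
  ; CHECK: bar.sync 0
  call void @llvm.nvvm.barrier0()
  ret void
}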
Diffstat (limited to 'llvm/test/CodeGen/NVPTX')
-rw-r--r--  llvm/test/CodeGen/NVPTX/MachineSink-convergent.ll      |  4 ++--
-rw-r--r--  llvm/test/CodeGen/NVPTX/TailDuplication-convergent.ll  |  4 ++--
-rw-r--r--  llvm/test/CodeGen/NVPTX/access-non-generic.ll          | 12 ++++++------
-rw-r--r--  llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll     |  8 ++++----
4 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/MachineSink-convergent.ll b/llvm/test/CodeGen/NVPTX/MachineSink-convergent.ll
index c06fe224688..91c80182e2f 100644
--- a/llvm/test/CodeGen/NVPTX/MachineSink-convergent.ll
+++ b/llvm/test/CodeGen/NVPTX/MachineSink-convergent.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s | FileCheck %s
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.cuda.syncthreads()
+declare void @llvm.nvvm.barrier0()
; Load a value, then syncthreads. Branch, and use the loaded value only on one
; side of the branch. The load shouldn't be sunk beneath the call, because
@@ -11,7 +11,7 @@ Start:
; CHECK: ld.u32
%ptr_val = load i32, i32* %ptr
; CHECK: bar.sync
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
br i1 %cond, label %L1, label %L2
L1:
%ptr_val2 = add i32 %ptr_val, 100
diff --git a/llvm/test/CodeGen/NVPTX/TailDuplication-convergent.ll b/llvm/test/CodeGen/NVPTX/TailDuplication-convergent.ll
index 02b562d85b9..fc6867eca41 100644
--- a/llvm/test/CodeGen/NVPTX/TailDuplication-convergent.ll
+++ b/llvm/test/CodeGen/NVPTX/TailDuplication-convergent.ll
@@ -2,7 +2,7 @@
target triple = "nvptx64-nvidia-cuda"
declare void @foo()
-declare void @llvm.cuda.syncthreads()
+declare void @llvm.nvvm.barrier0()
; syncthreads shouldn't be duplicated.
; CHECK: .func call_syncthreads
@@ -20,7 +20,7 @@ L2:
store i32 1, i32* %a
br label %L42
L42:
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
br label %Ret
}
diff --git a/llvm/test/CodeGen/NVPTX/access-non-generic.ll b/llvm/test/CodeGen/NVPTX/access-non-generic.ll
index 8645ae612d4..3cd5a922508 100644
--- a/llvm/test/CodeGen/NVPTX/access-non-generic.ll
+++ b/llvm/test/CodeGen/NVPTX/access-non-generic.ll
@@ -34,7 +34,7 @@ define void @ld_st_shared_f32(i32 %i, float %v) {
store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
; use syncthreads to disable optimizations across components
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; cast; load
@@ -45,7 +45,7 @@ define void @ld_st_shared_f32(i32 %i, float %v) {
; cast; store
store float %v, float* %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; load gep cast
@@ -55,7 +55,7 @@ define void @ld_st_shared_f32(i32 %i, float %v) {
; store gep cast
store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; gep cast; load
@@ -66,7 +66,7 @@ define void @ld_st_shared_f32(i32 %i, float %v) {
; gep cast; store
store float %v, float* %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
; cast; gep; load
@@ -78,7 +78,7 @@ define void @ld_st_shared_f32(i32 %i, float %v) {
; cast; gep; store
store float %v, float* %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
; PTX: bar.sync 0;
ret void
@@ -181,7 +181,7 @@ exit:
ret void
}
-declare void @llvm.cuda.syncthreads() #3
+declare void @llvm.nvvm.barrier0() #3
declare void @use(float)
diff --git a/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll b/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
index 2fec31b3791..ca7fb6eddfe 100644
--- a/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
+++ b/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
@@ -3,8 +3,8 @@
; Make sure the call to syncthreads is not duplicated here by the LLVM
; optimizations, because it has the noduplicate attribute set.
-; CHECK: call void @llvm.cuda.syncthreads
-; CHECK-NOT: call void @llvm.cuda.syncthreads
+; CHECK: call void @llvm.nvvm.barrier0
+; CHECK-NOT: call void @llvm.nvvm.barrier0
; Function Attrs: nounwind
define void @foo(float* %output) #1 {
@@ -37,7 +37,7 @@ if.else: ; preds = %entry
br label %if.end
if.end: ; preds = %if.else, %if.then
- call void @llvm.cuda.syncthreads()
+ call void @llvm.nvvm.barrier0()
%6 = load float*, float** %output.addr, align 8
%arrayidx6 = getelementptr inbounds float, float* %6, i64 0
%7 = load float, float* %arrayidx6, align 4
@@ -68,7 +68,7 @@ if.end17: ; preds = %if.else13, %if.then
}
; Function Attrs: noduplicate nounwind
-declare void @llvm.cuda.syncthreads() #2
+declare void @llvm.nvvm.barrier0() #2
!0 = !{void (float*)* @foo, !"kernel", i32 1}
!1 = !{null, !"align", i32 8}
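The behavior these tests pin down comes from the intrinsic's attributes: the noduplicate-syncthreads.ll declaration above carries noduplicate nounwind, and the *-convergent.ll tests exercise its convergent semantics. A standalone sketch of such a declaration (the attribute group number is illustrative):

; Function Attrs: convergent noduplicate nounwind
declare void @llvm.nvvm.barrier0() #0

attributes #0 = { convergent noduplicate nounwind }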