Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/NVPTX/access-non-generic.ll   |  7 -------
-rw-r--r--  llvm/test/CodeGen/NVPTX/generic-to-nvvm-ir.ll   |  6 +++---
-rw-r--r--  llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll      | 11 +++++++----
3 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/access-non-generic.ll b/llvm/test/CodeGen/NVPTX/access-non-generic.ll
index d5776d77b10..62520be2cf5 100644
--- a/llvm/test/CodeGen/NVPTX/access-non-generic.ll
+++ b/llvm/test/CodeGen/NVPTX/access-non-generic.ll
@@ -5,13 +5,6 @@

@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4
-@generic_scalar = internal global float 0.000000e+00, align 4
-
-define float @ld_from_shared() {
- %1 = addrspacecast float* @generic_scalar to float addrspace(3)*
- %2 = load float, float addrspace(3)* %1
- ret float %2
-}

; Verifies nvptx-favor-non-generic correctly optimizes generic address space
; usage to non-generic address space usage for the patterns we claim to handle:
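
The deleted ld_from_shared function cast a generic global into the shared address
space, a pattern this test no longer claims to handle. The remaining checks cover
the opposite direction: accesses that round-trip a non-generic global through the
generic space, roughly of the following shape (a minimal sketch reusing @scalar
from the test above, not a function taken from the file):

    define float @ld_from_scalar() {
    ; nvptx-favor-non-generic is expected to fold the cast away and emit a
    ; direct shared-space load (ld.shared) instead of a generic load.
      %p = addrspacecast float addrspace(3)* @scalar to float*
      %v = load float, float* %p
      ret float %v
    }
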
diff --git a/llvm/test/CodeGen/NVPTX/generic-to-nvvm-ir.ll b/llvm/test/CodeGen/NVPTX/generic-to-nvvm-ir.ll
index 5df5183dc2f..568208d5775 100644
--- a/llvm/test/CodeGen/NVPTX/generic-to-nvvm-ir.ll
+++ b/llvm/test/CodeGen/NVPTX/generic-to-nvvm-ir.ll
@@ -16,11 +16,11 @@ define void @func() !dbg !8 {
;CHECK-LABEL: @func()
;CHECK-SAME: !dbg [[FUNCNODE:![0-9]+]]
entry:
-; References to the variables must be converted back to generic address space via llvm intrinsic call
-; CHECK-DAG: call i8* @llvm.nvvm.ptr.global.to.gen.p0i8.p1i8({{.*}} addrspace(1)* @.str
+; References to the variables must be converted back to generic address space.
+; CHECK-DAG: addrspacecast ([4 x i8] addrspace(1)* @.str to [4 x i8]*)
%0 = load i8, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), align 1
call void @extfunc(i8 signext %0)
-; CHECK-DAG: call i8* @llvm.nvvm.ptr.global.to.gen.p0i8.p1i8(i8 addrspace(1)* @static_var
+; CHECK-DAG: addrspacecast (i8 addrspace(1)* @static_var to i8*)
%1 = load i8, i8* @static_var, align 1
call void @extfunc(i8 signext %1)
ret void
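
The rewritten CHECK lines track a change in how GenericToNVVM converts pointers
back to the generic address space: a constant addrspacecast expression now appears
where a call to the llvm.nvvm.ptr.global.to.gen intrinsic used to. Roughly, the
expected IR for the @static_var access changes as follows (a sketch based on the
CHECK patterns above, not verbatim pass output):

    ; before: conversion through an intrinsic call
    %gen = call i8* @llvm.nvvm.ptr.global.to.gen.p0i8.p1i8(i8 addrspace(1)* @static_var)
    %v1  = load i8, i8* %gen, align 1

    ; after: a plain constant expression, transparent to constant folding
    %v2  = load i8, i8* addrspacecast (i8 addrspace(1)* @static_var to i8*), align 1

Unlike an opaque intrinsic call, a constant expression lets later passes fold or
sink the conversion, which lines up with the generic-to-nvvm.ll changes below.
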
diff --git a/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll b/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
index 66917d5cb18..5b29b219e9f 100644
--- a/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
+++ b/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
@@ -7,14 +7,17 @@ target triple = "nvptx-nvidia-cuda"

; CHECK: .global .align 4 .u32 myglobal = 42;
@myglobal = internal global i32 42, align 4

-; CHECK: .global .align 4 .u32 myconst = 42;
-@myconst = internal constant i32 42, align 4
+; CHECK: .global .align 4 .u32 myconst = 420;
+@myconst = internal constant i32 420, align 4

define void @foo(i32* %a, i32* %b) {
-; CHECK: cvta.global.u32
+; Expect one load -- @myconst isn't loaded from, because we know its value
+; statically.
+; CHECK: ld.global.u32
+; CHECK: st.global.u32
+; CHECK: st.global.u32
%ld1 = load i32, i32* @myglobal
-; CHECK: cvta.global.u32
%ld2 = load i32, i32* @myconst
store i32 %ld1, i32* %a
store i32 %ld2, i32* %b
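
The new value 420 makes @myconst distinguishable from @myglobal in the emitted
PTX, and the CHECK lines change because the load from @myconst never reaches the
backend: its initializer is known, so only @myglobal is actually read. After
folding, the body of @foo is expected to look roughly like this in IR (a sketch
of the post-optimization form, not text from the test):

    define void @foo(i32* %a, i32* %b) {
      ; one real load remains: the ld.global.u32 for @myglobal
      %ld1 = load i32, i32* addrspacecast (i32 addrspace(1)* @myglobal to i32*)
      store i32 %ld1, i32* %a
      ; the load of @myconst folds to its initializer, leaving a plain store
      store i32 420, i32* %b
      ret void
    }
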