Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll | 164
1 file changed, 82 insertions(+), 82 deletions(-)
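
This patch updates the test for the new AMDGPU address-space mapping: flat pointers move from addrspace(4) to the default address space, and private (scratch) pointers move from the default address space to addrspace(5); global (addrspace(1)) and local (addrspace(3)) pointers are unchanged. Below is a minimal sketch of the resulting IR pattern under the new numbering (0 = flat, 1 = global, 3 = local, 5 = private); the kernel names are illustrative and not taken from the test:

; Minimal sketch assuming the new AMDGPU address-space numbering; kernel names are illustrative.
define amdgpu_kernel void @example_flat_acquire(i32* %in, i32* %out) {
entry:
  ; Flat pointers now use the default address space (previously addrspace(4)).
  %val = load atomic i32, i32* %in syncscope("agent") acquire, align 4
  store i32 %val, i32* %out
  ret void
}

define amdgpu_kernel void @example_private_load(i32 addrspace(5)* %in, i32* %out) {
entry:
  ; Private (scratch) pointers now use addrspace(5) (previously the default address space).
  %val = load i32, i32 addrspace(5)* %in, align 4
  store i32 %val, i32* %out
  ret void
}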
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll
index 57e705f2732..938b697251c 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-load.ll
@@ -12,10 +12,10 @@ declare i32 @llvm.amdgcn.workitem.id.x()
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @system_unordered(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in unordered, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in unordered, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -26,10 +26,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @system_monotonic(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in monotonic, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in monotonic, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -40,10 +40,10 @@ entry:
; GCN-NEXT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @system_acquire(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in acquire, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in acquire, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -54,10 +54,10 @@ entry:
; GCN-NEXT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @system_seq_cst(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in seq_cst, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in seq_cst, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -68,10 +68,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @singlethread_unordered(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("singlethread") unordered, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("singlethread") unordered, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -82,10 +82,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @singlethread_monotonic(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("singlethread") monotonic, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("singlethread") monotonic, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -96,10 +96,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @singlethread_acquire(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("singlethread") acquire, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("singlethread") acquire, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -110,10 +110,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @singlethread_seq_cst(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("singlethread") seq_cst, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("singlethread") seq_cst, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -124,10 +124,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @agent_unordered(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("agent") unordered, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("agent") unordered, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -138,10 +138,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @agent_monotonic(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("agent") monotonic, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("agent") monotonic, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -152,10 +152,10 @@ entry:
; GCN-NEXT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @agent_acquire(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("agent") acquire, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("agent") acquire, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -166,10 +166,10 @@ entry:
; GCN-NEXT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @agent_seq_cst(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("agent") seq_cst, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("agent") seq_cst, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -180,10 +180,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @workgroup_unordered(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("workgroup") unordered, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("workgroup") unordered, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -194,10 +194,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @workgroup_monotonic(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("workgroup") monotonic, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("workgroup") monotonic, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -208,10 +208,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @workgroup_acquire(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("workgroup") acquire, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("workgroup") acquire, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -222,10 +222,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @workgroup_seq_cst(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("workgroup") seq_cst, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("workgroup") seq_cst, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -236,10 +236,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @wavefront_unordered(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("wavefront") unordered, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("wavefront") unordered, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -250,10 +250,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @wavefront_monotonic(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("wavefront") monotonic, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("wavefront") monotonic, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -264,10 +264,10 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @wavefront_acquire(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("wavefront") acquire, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("wavefront") acquire, align 4
+ store i32 %val, i32* %out
ret void
}
@@ -278,42 +278,42 @@ entry:
; GCN-NOT: buffer_wbinvl1_vol
; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RET]]
define amdgpu_kernel void @wavefront_seq_cst(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load atomic i32, i32 addrspace(4)* %in syncscope("wavefront") seq_cst, align 4
- store i32 %val, i32 addrspace(4)* %out
+ %val = load atomic i32, i32* %in syncscope("wavefront") seq_cst, align 4
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_private_0
; GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}}
define amdgpu_kernel void @nontemporal_private_0(
- i32* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(5)* %in, i32* %out) {
entry:
- %val = load i32, i32* %in, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ %val = load i32, i32 addrspace(5)* %in, align 4, !nontemporal !0
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_private_1
; GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}}
define amdgpu_kernel void @nontemporal_private_1(
- i32* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(5)* %in, i32* %out) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
- %val.gep = getelementptr inbounds i32, i32* %in, i32 %tid
- %val = load i32, i32* %val.gep, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ %val.gep = getelementptr inbounds i32, i32 addrspace(5)* %in, i32 %tid
+ %val = load i32, i32 addrspace(5)* %val.gep, align 4, !nontemporal !0
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_global_0
; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0x0{{$}}
define amdgpu_kernel void @nontemporal_global_0(
- i32 addrspace(1)* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(1)* %in, i32* %out) {
entry:
%val = load i32, i32 addrspace(1)* %in, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ store i32 %val, i32* %out
ret void
}
@@ -321,56 +321,56 @@ entry:
; GFX8: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}}
; GFX9: global_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], off glc slc{{$}}
define amdgpu_kernel void @nontemporal_global_1(
- i32 addrspace(1)* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(1)* %in, i32* %out) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%val.gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
%val = load i32, i32 addrspace(1)* %val.gep, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_local_0
; GCN: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}}
define amdgpu_kernel void @nontemporal_local_0(
- i32 addrspace(3)* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(3)* %in, i32* %out) {
entry:
%val = load i32, i32 addrspace(3)* %in, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_local_1
; GCN: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}}
define amdgpu_kernel void @nontemporal_local_1(
- i32 addrspace(3)* %in, i32 addrspace(4)* %out) {
+ i32 addrspace(3)* %in, i32* %out) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%val.gep = getelementptr inbounds i32, i32 addrspace(3)* %in, i32 %tid
%val = load i32, i32 addrspace(3)* %val.gep, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_flat_0
; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}}
define amdgpu_kernel void @nontemporal_flat_0(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
- %val = load i32, i32 addrspace(4)* %in, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ %val = load i32, i32* %in, align 4, !nontemporal !0
+ store i32 %val, i32* %out
ret void
}
; GCN-LABEL: {{^}}nontemporal_flat_1
; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}}
define amdgpu_kernel void @nontemporal_flat_1(
- i32 addrspace(4)* %in, i32 addrspace(4)* %out) {
+ i32* %in, i32* %out) {
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
- %val.gep = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tid
- %val = load i32, i32 addrspace(4)* %val.gep, align 4, !nontemporal !0
- store i32 %val, i32 addrspace(4)* %out
+ %val.gep = getelementptr inbounds i32, i32* %in, i32 %tid
+ %val = load i32, i32* %val.gep, align 4, !nontemporal !0
+ store i32 %val, i32* %out
ret void
}