summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/AMDGPU/smrd.ll
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/smrd.ll')
-rw-r--r-- llvm/test/CodeGen/AMDGPU/smrd.ll | 68
1 file changed, 34 insertions, 34 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/smrd.ll b/llvm/test/CodeGen/AMDGPU/smrd.ll
index 22ee62ef427..c83eb378a1e 100644
--- a/llvm/test/CodeGen/AMDGPU/smrd.ll
+++ b/llvm/test/CodeGen/AMDGPU/smrd.ll
@@ -98,7 +98,7 @@ main_body:
%d1 = insertelement <4 x i32> %d0, i32 1, i32 1
%d2 = insertelement <4 x i32> %d1, i32 2, i32 2
%d3 = insertelement <4 x i32> %d2, i32 3, i32 3
- %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %d3, i32 0)
+ %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %d3, i32 0, i32 0)
ret float %r
}
@@ -110,7 +110,7 @@ define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(4)* inreg %arg, <4 x
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -126,7 +126,7 @@ define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(4)* inreg %arg, <4 x
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1020)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1020, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1020, i32 1)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -149,7 +149,7 @@ define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(4)* inreg %arg, <4 x
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1024)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1024, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1024, i32 0)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -170,7 +170,7 @@ define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(4)* inreg %arg, <4 x
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048572)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1048572, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1048572, i32 0)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -190,7 +190,7 @@ define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(4)* inreg %arg, <4 x
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048576)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1048576, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1048576, i32 0)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -278,7 +278,7 @@ main_body:
; GCN: s_buffer_load_dword s{{[0-9]}}, s[0:3], s4
define amdgpu_ps float @smrd_sgpr_offset(<4 x i32> inreg %desc, i32 inreg %offset) #0 {
main_body:
- %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
+ %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
ret float %r
}
@@ -286,7 +286,7 @@ main_body:
; GCN: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
- %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
+ %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
ret float %r
}
@@ -296,7 +296,7 @@ main_body:
define amdgpu_ps float @smrd_vgpr_offset_imm(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
%off = add i32 %offset, 4092
- %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %off)
+ %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %off, i32 0)
ret float %r
}
@@ -308,7 +308,7 @@ main_body:
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
%off = add i32 %offset, 4096
- %r = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %off)
+ %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %off, i32 0)
ret float %r
}
@@ -320,12 +320,12 @@ main_body:
; VIGFX9-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1c
define amdgpu_ps void @smrd_imm_merged(<4 x i32> inreg %desc) #0 {
main_body:
- %r1 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 4)
- %r2 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 8)
- %r3 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 12)
- %r4 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 16)
- %r5 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 28)
- %r6 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 32)
+ %r1 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 4, i32 0)
+ %r2 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 8, i32 0)
+ %r3 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 12, i32 0)
+ %r4 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 16, i32 0)
+ %r5 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 28, i32 0)
+ %r6 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 32, i32 0)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
ret void
@@ -352,7 +352,7 @@ main_body:
;
define amdgpu_ps float @smrd_imm_merge_m0(<4 x i32> inreg %desc, i32 inreg %prim, float %u, float %v) #0 {
main_body:
- %idx1.f = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 0)
+ %idx1.f = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 0, i32 0)
%idx1 = bitcast float %idx1.f to i32
%v0.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 0, i32 %prim)
@@ -377,7 +377,7 @@ main_body:
%v1 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2
%b = extractelement <3 x float> %v1, i32 %idx1
- %c = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 4)
+ %c = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 4, i32 0)
%res.tmp = fadd float %a, %b
%res = fadd float %res.tmp, %c
@@ -396,12 +396,12 @@ main_body:
%a4 = add i32 %a, 16
%a5 = add i32 %a, 28
%a6 = add i32 %a, 32
- %r1 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a1)
- %r2 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a2)
- %r3 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a3)
- %r4 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a4)
- %r5 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a5)
- %r6 = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %a6)
+ %r1 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a1, i32 0)
+ %r2 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a2, i32 0)
+ %r3 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a3, i32 0)
+ %r4 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a4, i32 0)
+ %r5 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a5, i32 0)
+ %r6 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a6, i32 0)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
ret void
@@ -428,14 +428,14 @@ ret_block: ; preds = %.outer, %.label22, %
.inner_loop_body:
%descriptor = load <4 x i32>, <4 x i32> addrspace(4)* %descptr, align 16, !invariant.load !0
- %load1result = call float @llvm.SI.load.const.v4i32(<4 x i32> %descriptor, i32 0)
+ %load1result = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %descriptor, i32 0, i32 0)
store float %load1result, float addrspace(1)* undef
%inner_br2 = icmp uge i32 %1, 10
br i1 %inner_br2, label %.inner_loop_header, label %.outer_loop_body
.outer_loop_body:
%offset = shl i32 %loopctr.2, 6
- %load2result = call float @llvm.SI.load.const.v4i32(<4 x i32> %descriptor, i32 %offset)
+ %load2result = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %descriptor, i32 %offset, i32 0)
%outer_br = fcmp ueq float %load2result, 0x0
br i1 %outer_br, label %.outer_loop_header, label %ret_block
}
@@ -451,7 +451,7 @@ define amdgpu_ps void @smrd_load_nonconst0(<4 x i32> addrspace(4)* inreg %arg, <
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 %ncoff)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -470,7 +470,7 @@ define amdgpu_ps void @smrd_load_nonconst1(<4 x i32> addrspace(4)* inreg %arg, <
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 %ncoff)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
%s.buffer.float = bitcast i32 %s.buffer to float
@@ -489,7 +489,7 @@ define amdgpu_ps void @smrd_load_nonconst2(<4 x i32> addrspace(4)* inreg %arg, <
main_body:
%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
- %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 %ncoff)
+ %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
%s.buffer = call <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
%s.buffer.elt = extractelement <8 x i32> %s.buffer, i32 1
@@ -581,7 +581,7 @@ loop:
%counter = phi i32 [ 0, %main_body ], [ %counter.next, %loop ]
%sum = phi float [ 0.0, %main_body ], [ %sum.next, %loop ]
%offset = shl i32 %counter, 2
- %v = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
+ %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
%sum.next = fadd float %sum, %v
%counter.next = add i32 %counter, 1
%cc = icmp uge i32 %counter.next, %bound
@@ -607,7 +607,7 @@ loop:
%counter = phi i32 [ 0, %main_body ], [ %counter.next, %loop.a ], [ %counter.next, %loop.b ]
%sum = phi float [ 0.0, %main_body ], [ %sum.next, %loop.a ], [ %sum.next.b, %loop.b ]
%offset = shl i32 %counter, 2
- %v = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 %offset)
+ %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
%sum.next = fadd float %sum, %v
%counter.next = add i32 %counter, 1
%cc = icmp uge i32 %counter.next, %bound
@@ -644,7 +644,7 @@ if1: ; preds = %main_body
endif1: ; preds = %if1, %main_body
%tmp13 = extractelement <3 x i32> %arg4, i32 0
- %tmp97 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 %tmp13)
+ %tmp97 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> undef, i32 %tmp13, i32 0)
ret float %tmp97
}
@@ -689,10 +689,10 @@ define amdgpu_ps void @s_buffer_load_v16f32(<4 x i32> inreg %rsrc, i32 inreg %of
}
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #2
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #2
-declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32)
+
+declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32) #1
declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)
declare <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32>, i32, i32)
OpenPOWER on IntegriCloud