| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-04-03 21:45:13 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-04-03 21:45:13 +0000 |
| commit | b600e138cc17e1e4d897747b2e7e1ed21177c8c5 | |
| tree | 3d55ff0b6ab601a456fa6da665adbd0927e04dd9 /llvm/test | |
| parent | 6ea72527e0275158bb1b390330f2bc28624c4625 | |
AMDGPU: Remove llvm.SI.vs.load.input
llvm-svn: 299391
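
Every test update below applies the same mechanical rewrite: the <16 x i8> descriptor that used to be passed to @llvm.SI.vs.load.input is bitcast to <4 x i32>, and the load goes through @llvm.amdgcn.buffer.load.format.v4f32 instead (declared nounwind readonly rather than readnone). A minimal standalone sketch of that pattern follows; the function name @vs_load_input_sketch and the value names %desc/%index/%rsrc/%data are illustrative and do not appear in the tests.

```llvm
; Old form, removed by this commit:
;   %data = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %desc, i32 0, i32 %index)
; New form: bitcast the descriptor to <4 x i32> and call the amdgcn buffer intrinsic
; (operands: resource descriptor, vertex index, byte offset, glc bit, slc bit).
declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #0

define <4 x float> @vs_load_input_sketch(<16 x i8> %desc, i32 %index) {
  %rsrc = bitcast <16 x i8> %desc to <4 x i32>
  %data = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %rsrc, i32 %index, i32 0, i1 false, i1 false)
  ret <4 x float> %data
}

attributes #0 = { nounwind readonly }
```

This is how the updated tests end up passing i32 %tmpN, i32 0, i1 false, i1 false: the vertex index goes in the second operand, and the offset and the glc/slc cache bits are left at zero/false.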
Diffstat (limited to 'llvm/test')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/wait.ll | 23 |
| -rw-r--r-- | llvm/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll | 4 |
3 files changed, 20 insertions, 13 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index ed58ce68040..c9c8583d5e8 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -37,7 +37,8 @@ bb:
   %tmp15 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
   %tmp16 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0
   %tmp17 = add i32 %arg5, %arg7
-  %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp16, i32 0, i32 %tmp17)
+  %tmp16.cast = bitcast <16 x i8> %tmp16 to <4 x i32>
+  %tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp16.cast, i32 %tmp17, i32 0, i1 false, i1 false)
   %tmp19 = extractelement <4 x float> %tmp18, i32 0
   %tmp20 = extractelement <4 x float> %tmp18, i32 1
   %tmp21 = extractelement <4 x float> %tmp18, i32 2
@@ -488,10 +489,11 @@ declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
 declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
 
 !0 = !{!1, !1, i64 0, i32 1}
 !1 = !{!"const", !2}
diff --git a/llvm/test/CodeGen/AMDGPU/wait.ll b/llvm/test/CodeGen/AMDGPU/wait.ll
index 8be5cd92d30..623cbeae8da 100644
--- a/llvm/test/CodeGen/AMDGPU/wait.ll
+++ b/llvm/test/CodeGen/AMDGPU/wait.ll
@@ -15,7 +15,8 @@ define amdgpu_vs void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrsp
 main_body:
   %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 0
   %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
-  %tmp11 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp10, i32 0, i32 %arg6)
+  %tmp10.cast = bitcast <16 x i8> %tmp10 to <4 x i32>
+  %tmp11 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp10.cast, i32 %arg6, i32 0, i1 false, i1 false)
   %tmp12 = extractelement <4 x float> %tmp11, i32 0
   %tmp13 = extractelement <4 x float> %tmp11, i32 1
   call void @llvm.amdgcn.s.barrier() #1
@@ -23,7 +24,8 @@ main_body:
   %tmp15 = load float, float addrspace(2)* %constptr, align 4
   %tmp16 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 1
   %tmp17 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp16, !tbaa !0
-  %tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp17, i32 0, i32 %arg6)
+  %tmp17.cast = bitcast <16 x i8> %tmp17 to <4 x i32>
+  %tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp17.cast, i32 %arg6, i32 0, i1 false, i1 false)
   %tmp19 = extractelement <4 x float> %tmp18, i32 0
   %tmp20 = extractelement <4 x float> %tmp18, i32 1
   %tmp21 = extractelement <4 x float> %tmp18, i32 2
@@ -40,15 +42,17 @@ main_body:
 ; ILPMAX: s_load_dwordx4
 ; ILPMAX: s_waitcnt lgkmcnt(0)
 ; ILPMAX: buffer_load
-; ILPMAX: s_waitcnt vmcnt(1)
 ; ILPMAX: s_waitcnt vmcnt(0)
+; ILPMAX: exp pos0
+; ILPMAX-NEXT: exp param0
 ; ILPMAX: s_endpgm
 define amdgpu_vs void @main2([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
 main_body:
   %tmp = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
   %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0
   %tmp12 = add i32 %arg5, %arg7
-  %tmp13 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp11, i32 0, i32 %tmp12)
+  %tmp11.cast = bitcast <16 x i8> %tmp11 to <4 x i32>
+  %tmp13 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp11.cast, i32 %tmp12, i32 0, i1 false, i1 false)
   %tmp14 = extractelement <4 x float> %tmp13, i32 0
   %tmp15 = extractelement <4 x float> %tmp13, i32 1
   %tmp16 = extractelement <4 x float> %tmp13, i32 2
@@ -56,23 +60,24 @@ main_body:
   %tmp18 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 1
   %tmp19 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp18, align 16, !tbaa !0
   %tmp20 = add i32 %arg5, %arg7
-  %tmp21 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp19, i32 0, i32 %tmp20)
+  %tmp19.cast = bitcast <16 x i8> %tmp19 to <4 x i32>
+  %tmp21 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp19.cast, i32 %tmp20, i32 0, i1 false, i1 false)
   %tmp22 = extractelement <4 x float> %tmp21, i32 0
   %tmp23 = extractelement <4 x float> %tmp21, i32 1
   %tmp24 = extractelement <4 x float> %tmp21, i32 2
   %tmp25 = extractelement <4 x float> %tmp21, i32 3
-  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp14, float %tmp15, float %tmp16, float %tmp17, i1 true, i1 false) #0
-  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp22, float %tmp23, float %tmp24, float %tmp25, i1 false, i1 false) #0
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp14, float %tmp15, float %tmp16, float %tmp17, i1 false, i1 false) #0
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %tmp22, float %tmp23, float %tmp24, float %tmp25, i1 true, i1 false) #0
   ret void
 }
 
 declare void @llvm.amdgcn.s.barrier() #1
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #2
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
 
 attributes #0 = { nounwind }
 attributes #1 = { convergent nounwind }
-attributes #2 = { nounwind readnone }
+attributes #2 = { nounwind readonly }
 
 !0 = !{!1, !1, i64 0, i32 1}
 !1 = !{!"const", !2}
diff --git a/llvm/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll b/llvm/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
index e4845ed73cc..9d3a84396cf 100644
--- a/llvm/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
+++ b/llvm/test/Transforms/StructurizeCFG/rebuild-ssa-infinite-loop.ll
@@ -9,7 +9,7 @@ target triple = "amdgcn--"
 define amdgpu_vs void @wrapper(i32 inreg %arg, i32 %arg1) {
 main_body:
   %tmp = add i32 %arg1, %arg
-  %tmp2 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> undef, i32 0, i32 %tmp)
+  %tmp2 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> undef, i32 %tmp, i32 0, i1 false, i1 false)
   %tmp3 = extractelement <4 x float> %tmp2, i32 1
   %tmp4 = fptosi float %tmp3 to i32
   %tmp5 = insertelement <2 x i32> undef, i32 %tmp4, i32 1
@@ -46,7 +46,7 @@ main.exit: ; preds = %if28.i, %loop11.i
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
 
 ; Function Attrs: nounwind readnone
-declare <4 x float> @llvm.SI.vs.load.input(<16 x i8>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
 
 ; Function Attrs: nounwind readonly
 declare <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2

