diff options
| author | Nicolai Haehnle <nhaehnle@gmail.com> | 2018-02-21 13:31:35 +0000 |
|---|---|---|
| committer | Nicolai Haehnle <nhaehnle@gmail.com> | 2018-02-21 13:31:35 +0000 |
| commit | 770397f4cdcfbf2c0e0a9604a4d6065063197317 (patch) | |
| tree | 8104868ad30833555c235eb51b2c376922d07ef4 /llvm/test | |
| parent | d6e1a9404db84990eb428484c28ed978040561ef (diff) | |
| download | bcm5719-llvm-770397f4cdcfbf2c0e0a9604a4d6065063197317.tar.gz bcm5719-llvm-770397f4cdcfbf2c0e0a9604a4d6065063197317.zip | |
AMDGPU: Do not combine loads/store across physreg defs
Summary:
Since this pass operates on machine SSA form, this should only really
affect M0 in practice.
Fixes various piglit variable-indexing/vs-varying-array-mat4-index-*
Change-Id: Ib2a1dc3a8d7b08225a8da49a86f533faa0986aa8
Fixes: r317751 ("AMDGPU: Merge S_BUFFER_LOAD_DWORD_IMM into x2, x4")
Reviewers: arsenm, mareko, rampitec
Subscribers: kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D40343
llvm-svn: 325677
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/ds_read2.ll | 19 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll | 34 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/smrd.ll | 45 |
3 files changed, 83 insertions, 15 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/ds_read2.ll b/llvm/test/CodeGen/AMDGPU/ds_read2.ll index 131afb0c6ae..e72b6c1b7a6 100644 --- a/llvm/test/CodeGen/AMDGPU/ds_read2.ll +++ b/llvm/test/CodeGen/AMDGPU/ds_read2.ll @@ -613,6 +613,24 @@ bb: ret void } +; GCN-LABEL: ds_read_call_read: +; GCN: ds_read_b32 +; GCN: s_swappc_b64 +; GCN: ds_read_b32 +define amdgpu_kernel void @ds_read_call_read(i32 addrspace(1)* %out, i32 addrspace(3)* %arg) { + %x = call i32 @llvm.amdgcn.workitem.id.x() + %arrayidx0 = getelementptr i32, i32 addrspace(3)* %arg, i32 %x + %arrayidx1 = getelementptr i32, i32 addrspace(3)* %arrayidx0, i32 1 + %v0 = load i32, i32 addrspace(3)* %arrayidx0, align 4 + call void @void_func_void() + %v1 = load i32, i32 addrspace(3)* %arrayidx1, align 4 + %r = add i32 %v0, %v1 + store i32 %r, i32 addrspace(1)* %out, align 4 + ret void +} + +declare void @void_func_void() #3 + declare i32 @llvm.amdgcn.workgroup.id.x() #1 declare i32 @llvm.amdgcn.workgroup.id.y() #1 declare i32 @llvm.amdgcn.workitem.id.x() #1 @@ -623,3 +641,4 @@ declare void @llvm.amdgcn.s.barrier() #2 attributes #0 = { nounwind } attributes #1 = { nounwind readnone speculatable } attributes #2 = { convergent nounwind } +attributes #3 = { nounwind noinline } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll index c4795a23cd5..c48d90ea7ad 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll @@ -160,21 +160,25 @@ bb: ; SI won't merge ds memory operations, because of the signed offset bug, so ; we only have check lines for VI. 
-; VI-LABEL: v_interp_readnone: -; VI: s_mov_b32 m0, 0 -; VI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0 -; VI-DAG: v_interp_mov_f32 v{{[0-9]+}}, p0, attr0.x{{$}} -; VI: s_mov_b32 m0, -1{{$}} -; VI: ds_write2_b32 v{{[0-9]+}}, [[ZERO]], [[ZERO]] offset1:4 -define amdgpu_ps void @v_interp_readnone(float addrspace(3)* %lds) #0 { -bb: - store float 0.000000e+00, float addrspace(3)* %lds - %tmp1 = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 0) - %tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4 - store float 0.000000e+00, float addrspace(3)* %tmp2 - call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp1, float %tmp1, float %tmp1, float %tmp1, i1 true, i1 true) #0 - ret void -} +; +; TODO: VI won't merge them either, because we are conservative about moving +; instructions past changes to physregs. +; +; TODO-VI-LABEL: v_interp_readnone: +; TODO-VI: s_mov_b32 m0, 0 +; TODO-VI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0 +; TODO-VI-DAG: v_interp_mov_f32 v{{[0-9]+}}, p0, attr0.x{{$}} +; TODO-VI: s_mov_b32 m0, -1{{$}} +; TODO-VI: ds_write2_b32 v{{[0-9]+}}, [[ZERO]], [[ZERO]] offset1:4 +;define amdgpu_ps void @v_interp_readnone(float addrspace(3)* %lds) #0 { +;bb: +; store float 0.000000e+00, float addrspace(3)* %lds +; %tmp1 = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 0) +; %tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4 +; store float 0.000000e+00, float addrspace(3)* %tmp2 +; call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp1, float %tmp1, float %tmp1, float %tmp1, i1 true, i1 true) #0 +; ret void +;} ; Test that v_interp_p1 uses different source and destination registers ; on 16 bank LDS chips. 
diff --git a/llvm/test/CodeGen/AMDGPU/smrd.ll b/llvm/test/CodeGen/AMDGPU/smrd.ll index a326942e43d..12190d52039 100644 --- a/llvm/test/CodeGen/AMDGPU/smrd.ll +++ b/llvm/test/CodeGen/AMDGPU/smrd.ll @@ -232,6 +232,48 @@ main_body: ret void } +; GCN-LABEL: {{^}}smrd_imm_nomerge_m0: +; +; In principle we could merge the loads here as well, but it would require +; careful tracking of physical registers since both v_interp* and v_movrel* +; instructions (or gpr idx mode) use M0. +; +; GCN: s_buffer_load_dword +; GCN: s_buffer_load_dword +define amdgpu_ps float @smrd_imm_nomerge_m0(<4 x i32> inreg %desc, i32 inreg %prim, float %u, float %v) #0 { +main_body: + %idx1.f = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 0) + %idx1 = bitcast float %idx1.f to i32 + + %v0.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 0, i32 %prim) + %v0.x = call nsz float @llvm.amdgcn.interp.p2(float %v0.x1, float %v, i32 0, i32 0, i32 %prim) + %v0.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 1, i32 %prim) + %v0.y = call nsz float @llvm.amdgcn.interp.p2(float %v0.y1, float %v, i32 0, i32 1, i32 %prim) + %v0.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 2, i32 %prim) + %v0.z = call nsz float @llvm.amdgcn.interp.p2(float %v0.z1, float %v, i32 0, i32 2, i32 %prim) + %v0.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0 + %v0.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1 + %v0 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2 + %a = extractelement <3 x float> %v0, i32 %idx1 + + %v1.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 0, i32 %prim) + %v1.x = call nsz float @llvm.amdgcn.interp.p2(float %v1.x1, float %v, i32 1, i32 0, i32 %prim) + %v1.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 1, i32 %prim) + %v1.y = call nsz float @llvm.amdgcn.interp.p2(float %v1.y1, float %v, i32 1, i32 1, i32 %prim) + %v1.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 2, 
i32 %prim) + %v1.z = call nsz float @llvm.amdgcn.interp.p2(float %v1.z1, float %v, i32 1, i32 2, i32 %prim) + %v1.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0 + %v1.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1 + %v1 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2 + + %b = extractelement <3 x float> %v1, i32 %idx1 + %c = call float @llvm.SI.load.const.v4i32(<4 x i32> %desc, i32 4) + + %res.tmp = fadd float %a, %b + %res = fadd float %res.tmp, %c + ret float %res +} + ; GCN-LABEL: {{^}}smrd_vgpr_merged: ; GCN-NEXT: %bb. ; GCN-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 @@ -289,8 +331,11 @@ ret_block: ; preds = %.outer, %.label22, % declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0 declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1 +declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #2 +declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #2 attributes #0 = { nounwind } attributes #1 = { nounwind readnone } +attributes #2 = { nounwind readnone speculatable } !0 = !{} |

