diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-01-23 22:48:53 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-01-23 22:48:53 +0000 |
| commit | 4e305c6c1e0d247758eb9db13a9f9476e678e671 (patch) | |
| tree | e78773d216812f95722cae0f9f031e2f1efa719d /llvm/test/CodeGen | |
| parent | ce9d6faed6a76c907e54e2e749c726187491f138 (diff) | |
| download | bcm5719-llvm-4e305c6c1e0d247758eb9db13a9f9476e678e671.tar.gz bcm5719-llvm-4e305c6c1e0d247758eb9db13a9f9476e678e671.zip | |
DAG: Don't fold vector extract into load if target doesn't want to
Fixes turning a 32-bit scalar load into an extending vector load
for AMDGPU when dynamically indexing a vector.
llvm-svn: 292842
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll | 31 |
1 files changed, 31 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
index eea44b8a006..c32c5fccc04 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
@@ -15,6 +15,34 @@ define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0
   ret void
 }
 
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_sgpr:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_lshr_b32 s{{[0-9]+}}, [[IDX]], 16
+; GCN: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_sgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_vgpr:
+; GCN: {{buffer|flat}}_load_dword [[IDX:v[0-9]+]]
+; GCN: buffer_load_dword [[VEC:v[0-9]+]]
+; GCN: v_lshrrev_b32_e32 [[ELT:v[0-9]+]], 16, [[VEC]]
+define void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
+  %idx = load volatile i32, i32 addrspace(1)* %gep
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vec.ptr
+  %elt = extractelement <2 x i16> %vec, i32 %idx
+  store i16 %elt, i16 addrspace(1)* %out.gep, align 2
+  ret void
+}
+
 ; GCN-LABEL: {{^}}extract_vector_elt_v3i16:
 ; GCN: buffer_load_ushort
 ; GCN: buffer_store_short
@@ -80,4 +108,7 @@ define void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16>
   ret void
 }
 
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
 attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

