summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll
diff options
context:
space:
mode:
authorStanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>2017-06-28 02:37:11 +0000
committerStanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>2017-06-28 02:37:11 +0000
commiteb40733bf07e523ff898cbad942c9dbbc5beaca2 (patch)
treeec268dd62711903cf58d48cdc997499709c50aae /llvm/test/CodeGen/AMDGPU/alignbit-pat.ll
parent8ef03802f12ab99c547abb46fe1ef6ae8aaf505f (diff)
downloadbcm5719-llvm-eb40733bf07e523ff898cbad942c9dbbc5beaca2.tar.gz
bcm5719-llvm-eb40733bf07e523ff898cbad942c9dbbc5beaca2.zip
Allow truncating a left shift with a non-constant shift amount
It is pretty common for clang to produce code like (shl %x, (and %amt, 31)). In this situation we can still perform the trunc (shl) into shl (trunc) conversion, given the known value range of the shift amount. Differential Revision: https://reviews.llvm.org/D34723 llvm-svn: 306499
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/alignbit-pat.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/alignbit-pat.ll | 69
1 file changed, 0 insertions, 69 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll
index e60083ebb63..5176f47f977 100644
--- a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll
+++ b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll
@@ -16,23 +16,6 @@ bb:
ret void
}
-; GCN-LABEL: {{^}}alignbit_shl_pat:
-; GCN-DAG: s_load_dword s[[SHL:[0-9]+]]
-; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; GCN-DAG: s_sub_i32 s[[SHR:[0-9]+]], 32, s[[SHL]]
-; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]
-
-define amdgpu_kernel void @alignbit_shl_pat(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
-bb:
- %tmp = load i64, i64 addrspace(1)* %arg, align 8
- %tmp3 = and i32 %arg2, 31
- %tmp4 = zext i32 %tmp3 to i64
- %tmp5 = shl i64 %tmp, %tmp4
- %tmp6 = trunc i64 %tmp5 to i32
- store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
- ret void
-}
-
; GCN-LABEL: {{^}}alignbit_shr_pat_v:
; GCN-DAG: load_dword v[[SHR:[0-9]+]],
; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
@@ -53,27 +36,6 @@ bb:
ret void
}
-; GCN-LABEL: {{^}}alignbit_shl_pat_v:
-; GCN-DAG: load_dword v[[SHL:[0-9]+]],
-; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; GCN-DAG: v_sub_i32_e32 v[[SHR:[0-9]+]], {{[^,]+}}, 32, v[[SHL]]
-; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]
-
-define amdgpu_kernel void @alignbit_shl_pat_v(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
-bb:
- %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
- %gep1 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
- %tmp = load i64, i64 addrspace(1)* %gep1, align 8
- %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tid
- %amt = load i32, i32 addrspace(1)* %gep2, align 4
- %tmp3 = and i32 %amt, 31
- %tmp4 = zext i32 %tmp3 to i64
- %tmp5 = shl i64 %tmp, %tmp4
- %tmp6 = trunc i64 %tmp5 to i32
- store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
- ret void
-}
-
; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30:
; Negative test, wrong constant
; GCN: v_lshr_b64
@@ -90,22 +52,6 @@ bb:
ret void
}
-; GCN-LABEL: {{^}}alignbit_shl_pat_wrong_and30:
-; Negative test, wrong constant
-; GCN: v_lshl_b64
-; GCN-NOT: v_alignbit_b32
-
-define amdgpu_kernel void @alignbit_shl_pat_wrong_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
-bb:
- %tmp = load i64, i64 addrspace(1)* %arg, align 8
- %tmp3 = and i32 %arg2, 30
- %tmp4 = zext i32 %tmp3 to i64
- %tmp5 = shl i64 %tmp, %tmp4
- %tmp6 = trunc i64 %tmp5 to i32
- store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
- ret void
-}
-
; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63:
; Negative test, wrong constant
; GCN: v_lshr_b64
@@ -122,21 +68,6 @@ bb:
ret void
}
-; GCN-LABEL: {{^}}alignbit_shl_pat_wrong_and63:
-; Negative test, wrong constant
-; GCN: v_lshl_b64
-; GCN-NOT: v_alignbit_b32
-
-define amdgpu_kernel void @alignbit_shl_pat_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
-bb:
- %tmp = load i64, i64 addrspace(1)* %arg, align 8
- %tmp3 = and i32 %arg2, 63
- %tmp4 = zext i32 %tmp3 to i64
- %tmp5 = shl i64 %tmp, %tmp4
- %tmp6 = trunc i64 %tmp5 to i32
- store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
- ret void
-}
declare i32 @llvm.amdgcn.workitem.id.x() #0
attributes #0 = { nounwind readnone speculatable }
OpenPOWER on IntegriCloud