| author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2017-05-23 15:59:58 +0000 |
|---|---|---|
| committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2017-05-23 15:59:58 +0000 |
| commit | a96ec3f36016eee02c346e9b8100f31f28a6f079 | |
| tree | 0480a9f477e61611ef9b69acfeeb9769c9d38253 | |
| parent | bf35e6ab2aae9b3d529d0659248ebb1af1a330d6 | |
[AMDGPU] Convert shl (add) into add (shl)
shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
This allows a constant to be folded into an address in some cases, and
eliminates the second shift when the expression is used as an address
and that second shift comes from a GEP.
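
The `add` case of the first test below illustrates the rewrite: `(%id + 200) << 2` becomes `(%id << 2) + 800`. A minimal hand-written sketch of the combine (the `@before`/`@after` functions are illustrative, not part of the patch):

```llvm
; Hand-written sketch; function names are hypothetical.
define i32 @before(i32 %id) {
  %add = add i32 %id, 200       ; constant is trapped under the shift
  %shl = shl i32 %add, 2        ; (%id + 200) << 2
  ret i32 %shl
}

define i32 @after(i32 %id) {
  %shl = shl i32 %id, 2         ; shift moves to the variable operand
  %add = add i32 %shl, 800      ; 200 << 2 = 800 folds into the add
  ret i32 %add
}
```

In the tests the GEP contributes a further x4 element scaling on top of this, so the backend is left with a single shift by 4 and an immediate of 200 x 16 = 3200 (`0xc80`), which is exactly what the CHECK lines verify.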
Differential Revision: https://reviews.llvm.org/D33432
llvm-svn: 303641
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll | 40 |
1 file changed, 40 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll b/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll
new file mode 100644
index 00000000000..1cdfec9fdb5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shl-add-to-add-shl.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck %s
+
+; Check transformation shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
+; Only one shift is expected, GEP shall not produce a separate shift
+
+; CHECK-LABEL: {{^}}add_const_offset:
+; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
+; CHECK: v_add_i32_e32 v[[ADD:[0-9]+]], vcc, 0xc80, v[[SHL]]
+; CHECK-NOT: v_lshl
+; CHECK: v_add_i32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADD]]
+; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
+define amdgpu_kernel void @add_const_offset(i32 addrspace(1)* nocapture %arg) {
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add = add i32 %id, 200
+  %shl = shl i32 %add, 2
+  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
+  store i32 %val, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}or_const_offset:
+; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
+; CHECK: v_or_b32_e32 v[[OR:[0-9]+]], 0x1000, v[[SHL]]
+; CHECK-NOT: v_lshl
+; CHECK: v_add_i32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[OR]]
+; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
+define amdgpu_kernel void @or_const_offset(i32 addrspace(1)* nocapture %arg) {
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %add = or i32 %id, 256
+  %shl = shl i32 %add, 2
+  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
+  %val = load i32, i32 addrspace(1)* %ptr, align 4
+  store i32 %val, i32 addrspace(1)* %arg, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()

