diff options
author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-25 01:16:40 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-25 01:16:40 +0000 |
commit | 8c8fcb2585b45dec7091fc9f3617dd8ef2bc4ac4 (patch) | |
tree | 46170830d064bdb9b38629dfa7b4bd2209e57c30 /llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll | |
parent | 4ae5119eeba206e2b1d88491e8ed76ed0f80721f (diff) | |
download | bcm5719-llvm-8c8fcb2585b45dec7091fc9f3617dd8ef2bc4ac4.tar.gz bcm5719-llvm-8c8fcb2585b45dec7091fc9f3617dd8ef2bc4ac4.zip |
AMDGPU: Cost model for basic integer operations
This resolves bug 21148 by preventing promotion to
i64 induction variables.
llvm-svn: 264376
Diffstat (limited to 'llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll')
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll | 138 |
1 file changed, 138 insertions, 0 deletions
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll
new file mode 100644
index 00000000000..76b21d26faa
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/add-sub.ll
@@ -0,0 +1,138 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck %s
+
+; CHECK: 'add_i32'
+; CHECK: estimated cost of 1 for {{.*}} add i32
+define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %add = add i32 %vec, %b
+  store i32 %add, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i32'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
+define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %add = add <2 x i32> %vec, %b
+  store <2 x i32> %add, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i32'
+; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
+define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+  %add = add <3 x i32> %vec, %b
+  store <3 x i32> %add, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i32'
+; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
+define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+  %add = add <4 x i32> %vec, %b
+  store <4 x i32> %add, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i64'
+; CHECK: estimated cost of 2 for {{.*}} add i64
+define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %add = add i64 %vec, %b
+  store i64 %add, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i64'
+; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
+define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %add = add <2 x i64> %vec, %b
+  store <2 x i64> %add, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i64'
+; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
+define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+  %add = add <3 x i64> %vec, %b
+  store <3 x i64> %add, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i64'
+; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
+define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+  %add = add <4 x i64> %vec, %b
+  store <4 x i64> %add, <4 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v16i64'
+; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
+define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
+  %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
+  %add = add <16 x i64> %vec, %b
+  store <16 x i64> %add, <16 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i16'
+; CHECK: estimated cost of 1 for {{.*}} add i16
+define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %add = add i16 %vec, %b
+  store i16 %add, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
+define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %add = add <2 x i16> %vec, %b
+  store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i32'
+; CHECK: estimated cost of 1 for {{.*}} sub i32
+define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %sub = sub i32 %vec, %b
+  store i32 %sub, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i64'
+; CHECK: estimated cost of 2 for {{.*}} sub i64
+define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %sub = sub i64 %vec, %b
+  store i64 %sub, i64 addrspace(1)* %out
+  ret void
+}
+; CHECK: 'sub_i16'
+; CHECK: estimated cost of 1 for {{.*}} sub i16
+define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %sub = sub i16 %vec, %b
+  store i16 %sub, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
+define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %sub = sub <2 x i16> %vec, %b
+  store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }