diff options
author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2017-09-29 23:40:19 +0000 |
---|---|---|
committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2017-09-29 23:40:19 +0000 |
commit | 1d8cf2be89087a2babc1dc38b16040fad0a555e2 (patch) | |
tree | d0f27cf3d5a0e455a5ac4997aea666da42c0a5ad /llvm/test/CodeGen/AMDGPU/inline-attr.ll | |
parent | b33607e5a112e8af3df3f1bd6e0a21749c40cd09 (diff) | |
download | bcm5719-llvm-1d8cf2be89087a2babc1dc38b16040fad0a555e2.tar.gz bcm5719-llvm-1d8cf2be89087a2babc1dc38b16040fad0a555e2.zip |
[AMDGPU] Set fast-math flags on functions given the options
We have a single library build without relaxation options.
When inlined, library functions remove fast-math attributes
from the functions they are integrated into.
This patch sets relaxation attributes on the functions after
linking provided corresponding relaxation options are given.
Math instructions inside the inlined functions still carry
no fast-math flags, but inlining no longer prevents fast-math
transformations of the surrounding caller code.
Differential Revision: https://reviews.llvm.org/D38325
llvm-svn: 314568
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/inline-attr.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/inline-attr.ll | 33 |
1 file changed, 33 insertions, 0 deletions
; llvm/test/CodeGen/AMDGPU/inline-attr.ll (new file, reconstructed from the diff)
;
; Verifies that after -O3 (which runs the inliner), the relaxation command-line
; options (-enable-unsafe-fp-math / -enable-no-nans-fp-math /
; -enable-no-infs-fp-math) set the corresponding fast-math function attributes,
; so that inlining @foo (which has no fast-math attributes) does not strip the
; relaxed-math attributes from @caller.

; RUN: opt -mtriple=amdgcn--amdhsa -S -O3 -enable-unsafe-fp-math %s | FileCheck -check-prefix=GCN -check-prefix=UNSAFE %s
; RUN: opt -mtriple=amdgcn--amdhsa -S -O3 -enable-no-nans-fp-math %s | FileCheck -check-prefix=GCN -check-prefix=NONANS %s
; RUN: opt -mtriple=amdgcn--amdhsa -S -O3 -enable-no-infs-fp-math %s | FileCheck -check-prefix=GCN -check-prefix=NOINFS %s

; GCN: define float @foo(float %x) local_unnamed_addr #0 {
; GCN: define amdgpu_kernel void @caller(float addrspace(1)* nocapture %p) local_unnamed_addr #1 {
; GCN: %mul.i = fmul float %load, 1.500000e+01

; UNSAFE: attributes #0 = { norecurse nounwind readnone "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }
; UNSAFE: attributes #1 = { norecurse nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }

; NOINFS: attributes #0 = { norecurse nounwind readnone "no-infs-fp-math"="true" }
; NOINFS: attributes #1 = { norecurse nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "unsafe-fp-math"="false" }

; NONANS: attributes #0 = { norecurse nounwind readnone "no-nans-fp-math"="true" }
; NONANS: attributes #1 = { norecurse nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "unsafe-fp-math"="false" }

; Callee: plain multiply with no fast-math attributes of its own.
define float @foo(float %x) #0 {
entry:
  %mul = fmul float %x, 1.500000e+01
  ret float %mul
}

; Caller: carries the relaxed-math attributes; the fast call to @foo gets inlined.
define amdgpu_kernel void @caller(float addrspace(1)* %p) #1 {
entry:
  %load = load float, float addrspace(1)* %p, align 4
  %call = call fast float @foo(float %load) #0
  store float %call, float addrspace(1)* %p, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }