| author | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-09 18:50:06 +0000 |
|---|---|---|
| committer | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-09 18:50:06 +0000 |
| commit | 83a5fe146e0728a7e61c6f17b06a3a963c5b7e00 | |
| tree | 59631e51758ef0a1c04d1a9768a856358b5e8004 | |
| parent | fa762cc19bea654a7114f2ae83f9dd8324eac64f | |
[Power9] Add __float128 builtins for Round To Odd
GCC has builtins for these round-to-odd instructions:
__float128 __builtin_sqrtf128_round_to_odd (__float128)
__float128 __builtin_{add,sub,mul,div}f128_round_to_odd (__float128, __float128)
__float128 __builtin_fmaf128_round_to_odd (__float128, __float128, __float128)
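
For reference, a minimal C sketch (not part of this patch) of how these builtins might be called; it assumes a Power9 toolchain with `__float128` support, e.g. `gcc -mcpu=power9 -mfloat128`. Round-to-odd is typically used to bound double-rounding error when a quad-precision result is later narrowed to double.

```c
/* Hypothetical usage sketch, not from the patch.
 * Assumes a Power9 toolchain with __float128 enabled. */
#include <stdio.h>

int main(void) {
    __float128 a = 1.0, b = 3.0, c = 0.5; /* double literals widen to __float128 */

    /* Each builtin should lower to the matching ISA 3.0 round-to-odd
       instruction, e.g. xsaddqpo for the addition. */
    __float128 sum  = __builtin_addf128_round_to_odd(a, b);
    __float128 fma  = __builtin_fmaf128_round_to_odd(a, b, c);
    __float128 root = __builtin_sqrtf128_round_to_odd(b);

    /* printf has no portable __float128 conversion specifier;
       narrow to double for printing. */
    printf("%g %g %g\n", (double)sum, (double)fma, (double)root);
    return 0;
}
```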
Differential Revision: https://reviews.llvm.org/D47550
llvm-svn: 336578
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll | 82 |
|---|---|---|

1 file changed, 82 insertions(+), 0 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
new file mode 100644
index 00000000000..fa40fa2db31
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-p9-f128.ll
@@ -0,0 +1,82 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -enable-ppc-quad-precision \
+; RUN:   -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
+
+@A = common global fp128 0xL00000000000000000000000000000000, align 16
+@B = common global fp128 0xL00000000000000000000000000000000, align 16
+@C = common global fp128 0xL00000000000000000000000000000000, align 16
+
+define fp128 @testSqrtOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %0)
+  ret fp128 %1
+; CHECK-LABEL: testSqrtOdd
+; CHECK: xssqrtqpo
+}
+
+declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)
+
+define fp128 @testFMAOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = load fp128, fp128* @B, align 16
+  %2 = load fp128, fp128* @C, align 16
+  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %0, fp128 %1, fp128 %2)
+  ret fp128 %3
+; CHECK-LABEL: testFMAOdd
+; CHECK: xsmaddqpo
+}
+
+declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)
+
+define fp128 @testAddOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = load fp128, fp128* @B, align 16
+  %2 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %0, fp128 %1)
+  ret fp128 %2
+; CHECK-LABEL: testAddOdd
+; CHECK: xsaddqpo
+}
+
+declare fp128 @llvm.ppc.addf128.round.to.odd(fp128, fp128)
+
+define fp128 @testSubOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = load fp128, fp128* @B, align 16
+  %2 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %0, fp128 %1)
+  ret fp128 %2
+; CHECK-LABEL: testSubOdd
+; CHECK: xssubqpo
+}
+
+; Function Attrs: nounwind readnone
+declare fp128 @llvm.ppc.subf128.round.to.odd(fp128, fp128)
+
+; Function Attrs: noinline nounwind optnone
+define fp128 @testMulOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = load fp128, fp128* @B, align 16
+  %2 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %0, fp128 %1)
+  ret fp128 %2
+; CHECK-LABEL: testMulOdd
+; CHECK: xsmulqpo
+}
+
+; Function Attrs: nounwind readnone
+declare fp128 @llvm.ppc.mulf128.round.to.odd(fp128, fp128)
+
+define fp128 @testDivOdd() {
+entry:
+  %0 = load fp128, fp128* @A, align 16
+  %1 = load fp128, fp128* @B, align 16
+  %2 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %0, fp128 %1)
+  ret fp128 %2
+; CHECK-LABEL: testDivOdd
+; CHECK: xsdivqpo
+}
+
+declare fp128 @llvm.ppc.divf128.round.to.odd(fp128, fp128)
```
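Each test loads the fp128 globals, calls the corresponding `llvm.ppc.*f128.round.to.odd` intrinsic, and checks that codegen selects the "o"-suffixed (round-to-odd) form of the quad-precision instruction, e.g. `xsaddqpo` rather than `xsaddqp`.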

