| author | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-09 20:38:40 +0000 |
|---|---|---|
| committer | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-09 20:38:40 +0000 |
| commit | 133acb22bb0974b0c78e152ea3ff9257fa4547c9 (patch) | |
| tree | d187126ff28d87031e2955f2877a566bfb535741 /llvm/test | |
| parent | cd4d873d159378f032da2033f33fc786f7e196bb (diff) | |
| download | bcm5719-llvm-133acb22bb0974b0c78e152ea3ff9257fa4547c9.tar.gz bcm5719-llvm-133acb22bb0974b0c78e152ea3ff9257fa4547c9.zip | |
[Power9] Add __float128 builtins for Rounding Operations
Added __float128 support for a number of rounding operations:
- trunc
- rint
- nearbyint
- round
- floor
- ceil
Differential Revision: https://reviews.llvm.org/D48415
llvm-svn: 336601
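For context, here is a minimal IR sketch (not part of this commit) of what front-end output using two of these intrinsics might look like when the fp128 value is passed by value rather than through pointers as in the test below. With `-mcpu=pwr9` and `-enable-ppc-quad-precision`, each call is expected to select an `xsrqpi` instruction instead of a libcall; the function name `qp_ceil_floor_gap` is hypothetical.

```llvm
; Hypothetical caller of the newly supported fp128 rounding intrinsics.
define fp128 @qp_ceil_floor_gap(fp128 %x) {
entry:
  %up   = call fp128 @llvm.ceil.f128(fp128 %x)   ; ceil  -> xsrqpi 1, ..., 2
  %down = call fp128 @llvm.floor.f128(fp128 %x)  ; floor -> xsrqpi 1, ..., 3
  %gap  = fsub fp128 %up, %down                  ; 0.0 or 1.0 for finite inputs
  ret fp128 %gap
}
declare fp128 @llvm.ceil.f128(fp128)
declare fp128 @llvm.floor.f128(fp128)
```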
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/PowerPC/f128-rounding.ll | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/PowerPC/f128-rounding.ll b/llvm/test/CodeGen/PowerPC/f128-rounding.ll
new file mode 100644
index 00000000000..ac0b3be9cd0
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/f128-rounding.ll
@@ -0,0 +1,76 @@
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
+; RUN:   -enable-ppc-quad-precision -verify-machineinstrs < %s | FileCheck %s
+
+
+define void @qp_trunc(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.trunc.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_trunc
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: blr
+}
+declare fp128 @llvm.trunc.f128(fp128 %Val)
+
+define void @qp_rint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.rint.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_rint
+; CHECK: xsrqpix 0, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.rint.f128(fp128 %Val)
+
+define void @qp_nearbyint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.nearbyint.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_nearbyint
+; CHECK: xsrqpi 0, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.nearbyint.f128(fp128 %Val)
+
+define void @qp_round(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.round.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_round
+; CHECK: xsrqpi 0, {{[0-9]+}}, {{[0-9]+}}, 0
+; CHECK: blr
+}
+declare fp128 @llvm.round.f128(fp128 %Val)
+
+define void @qp_floor(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.floor.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_floor
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.floor.f128(fp128 %Val)
+
+define void @qp_ceil(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %0 = load fp128, fp128* %a, align 16
+  %1 = tail call fp128 @llvm.ceil.f128(fp128 %0)
+  store fp128 %1, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_ceil
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 2
+; CHECK: blr
+}
+declare fp128 @llvm.ceil.f128(fp128 %Val)
+
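For reference, a sketch summarizing the lowering the CHECK lines above expect. The (R, RMC) operand values are taken directly from the test; the rounding-mode description attached to each encoding follows from the documented semantics of the LLVM intrinsics and my reading of the ISA 3.0 xsrqpi/xsrqpix definition, and is not stated in the commit itself.

```llvm
; Expected instruction selection, per the CHECK lines above
; (operands are R, VRT, VRB, RMC; VRT/VRB register numbers vary):
declare fp128 @llvm.trunc.f128(fp128)     ; xsrqpi  1, _, _, 1  - round toward zero
declare fp128 @llvm.rint.f128(fp128)      ; xsrqpix 0, _, _, 3  - current rounding mode, signals inexact
declare fp128 @llvm.nearbyint.f128(fp128) ; xsrqpi  0, _, _, 3  - current rounding mode, no inexact
declare fp128 @llvm.round.f128(fp128)     ; xsrqpi  0, _, _, 0  - to nearest, ties away from zero
declare fp128 @llvm.floor.f128(fp128)     ; xsrqpi  1, _, _, 3  - round toward -infinity
declare fp128 @llvm.ceil.f128(fp128)      ; xsrqpi  1, _, _, 2  - round toward +infinity
```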

