diff options
| author | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-11 01:42:22 +0000 |
|---|---|---|
| committer | Stefan Pintilie <stefanp@ca.ibm.com> | 2018-07-11 01:42:22 +0000 |
| commit | b9d01aa29e5d0aa433c2fc62ace709fe69c45ceb (patch) | |
| tree | 13e0b71596172ed47d456f62b0487c964565cefa /llvm/lib | |
| parent | 95720c16b352b7dfaa8e6f1bf9de32fe13932655 (diff) | |
| download | bcm5719-llvm-b9d01aa29e5d0aa433c2fc62ace709fe69c45ceb.tar.gz bcm5719-llvm-b9d01aa29e5d0aa433c2fc62ace709fe69c45ceb.zip | |
[Power9] Add remaining __float128 builtin support for FMA round to odd
Implement this as it is done on GCC:
__float128 a, b, c, d;
a = __builtin_fmaf128_round_to_odd (b, c, d); // generates xsmaddqpo
a = __builtin_fmaf128_round_to_odd (b, c, -d); // generates xsmsubqpo
a = - __builtin_fmaf128_round_to_odd (b, c, d); // generates xsnmaddqpo
a = - __builtin_fmaf128_round_to_odd (b, c, -d); // generates xsnmsubqpo
Differential Revision: https://reviews.llvm.org/D48218
llvm-svn: 336754
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCInstrVSX.td | 15 |
1 files changed, 12 insertions, 3 deletions
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td index 79d4af34058..ffba0e5aadb 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td +++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td @@ -2498,17 +2498,26 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in { [(set f128:$vT, (fma f128:$vA, f128:$vB, (fneg f128:$vTi)))]>; - def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" , []>; + def XSMSUBQPO : X_VT5_VA5_VB5_FMA_Ro<63, 420, "xsmsubqpo" , + [(set f128:$vT, + (int_ppc_fmaf128_round_to_odd + f128:$vA, f128:$vB, (fneg f128:$vTi)))]>; def XSNMADDQP : X_VT5_VA5_VB5_FMA <63, 452, "xsnmaddqp", [(set f128:$vT, (fneg (fma f128:$vA, f128:$vB, f128:$vTi)))]>; - def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo", []>; + def XSNMADDQPO: X_VT5_VA5_VB5_FMA_Ro<63, 452, "xsnmaddqpo", + [(set f128:$vT, + (fneg (int_ppc_fmaf128_round_to_odd + f128:$vA, f128:$vB, f128:$vTi)))]>; def XSNMSUBQP : X_VT5_VA5_VB5_FMA <63, 484, "xsnmsubqp", [(set f128:$vT, (fneg (fma f128:$vA, f128:$vB, (fneg f128:$vTi))))]>; - def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo", []>; + def XSNMSUBQPO: X_VT5_VA5_VB5_FMA_Ro<63, 484, "xsnmsubqpo", + [(set f128:$vT, + (fneg (int_ppc_fmaf128_round_to_odd + f128:$vA, f128:$vB, (fneg f128:$vTi))))]>; // Additional fnmsub patterns: -a*c + b == -(a*c - b) def : Pat<(fma (fneg f128:$A), f128:$C, f128:$B), (XSNMSUBQP $B, $C, $A)>; |

