| author | Sanjay Patel <spatel@rotateright.com> | 2018-08-17 13:23:44 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2018-08-17 13:23:44 +0000 |
| commit | 411b86081e131e13ab4981e21e7752405fb9d49a (patch) | |
| tree | 344e8c12ce4f2a71a5600e0ef6d84b68d381fb8a /llvm/lib/Analysis | |
| parent | c9818ebc02b9ae0ef5cee50a9fb60a9626c6274e (diff) | |
[ConstantFolding] add simplifications for funnel shift intrinsics
This is another step towards being able to canonicalize to the funnel shift
intrinsics in IR (see D49242 for the initial patch).
There should be no loss of simplification power between these intrinsics and
the equivalent IR constructs.
Differential Revision: https://reviews.llvm.org/D50848
llvm-svn: 340022
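
For context, the semantics being folded can be modeled in ordinary C++. The sketch below is illustrative only, not LLVM code; the `fshl8`/`fshr8` helpers and the test values are invented. A funnel shift concatenates the two operands into a double-width value, shifts, and keeps one half, with the shift amount taken modulo the bitwidth:

```cpp
#include <cassert>
#include <cstdint>

// Model of llvm.fshl.i8: concatenate X:Y into a 16-bit value, shift left by
// Z modulo 8, and keep the high byte. The early return avoids the oversized
// inverse shift (Y >> 8), which would be undefined behavior in C++.
static uint8_t fshl8(uint8_t X, uint8_t Y, unsigned Z) {
  unsigned ShAmt = Z % 8;
  if (ShAmt == 0)
    return X;
  return (uint8_t)((X << ShAmt) | (Y >> (8 - ShAmt)));
}

// Model of llvm.fshr.i8: same concatenation, shifted right, keeping the low
// byte.
static uint8_t fshr8(uint8_t X, uint8_t Y, unsigned Z) {
  unsigned ShAmt = Z % 8;
  if (ShAmt == 0)
    return Y;
  return (uint8_t)((X << (8 - ShAmt)) | (Y >> ShAmt));
}

int main() {
  assert(fshl8(0x12, 0x34, 4) == 0x23);   // high byte of 0x1234 << 4
  assert(fshr8(0x12, 0x34, 2) == 0x8D);   // low byte of 0x1234 >> 2
  assert(fshl8(0x12, 0x34, 12) == 0x23);  // shift amount taken mod 8
  assert(fshl8(0x12, 0x34, 8) == 0x12);   // effective shift 0 returns X
  return 0;
}
```

The early returns for an effective shift of 0 mirror the patch: the inverse shift would otherwise equal the bitwidth, the exact hazard the patch's comment calls out.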
Diffstat (limited to 'llvm/lib/Analysis')
| -rw-r--r-- | llvm/lib/Analysis/ConstantFolding.cpp | 25 |
1 file changed, 25 insertions, 0 deletions
```diff
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index f741f66d462..bbd812bf517 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1389,6 +1389,8 @@ bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
   case Intrinsic::ctpop:
   case Intrinsic::ctlz:
   case Intrinsic::cttz:
+  case Intrinsic::fshl:
+  case Intrinsic::fshr:
   case Intrinsic::fma:
   case Intrinsic::fmuladd:
   case Intrinsic::copysign:
@@ -2081,6 +2083,29 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
       }
     }
 
+    if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
+      auto *C0 = dyn_cast<ConstantInt>(Operands[0]);
+      auto *C1 = dyn_cast<ConstantInt>(Operands[1]);
+      auto *C2 = dyn_cast<ConstantInt>(Operands[2]);
+      if (!(C0 && C1 && C2))
+        return nullptr;
+
+      // The shift amount is interpreted as modulo the bitwidth. If the shift
+      // amount is effectively 0, avoid UB due to oversized inverse shift below.
+      unsigned BitWidth = C0->getBitWidth();
+      unsigned ShAmt = C2->getValue().urem(BitWidth);
+      bool IsRight = IntrinsicID == Intrinsic::fshr;
+      if (!ShAmt)
+        return IsRight ? C1 : C0;
+
+      // (X << ShlAmt) | (Y >> LshrAmt)
+      const APInt &X = C0->getValue();
+      const APInt &Y = C1->getValue();
+      unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
+      unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
+      return ConstantInt::get(Ty->getContext(), X.shl(ShlAmt) | Y.lshr(LshrAmt));
+    }
+
     return nullptr;
   }
```
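
The same arithmetic can be replayed outside the pass with LLVM's `APInt`, which the fold uses internally. This is a hypothetical stand-alone driver, not part of the patch; the `foldFunnelShift` name and the sample values are invented for illustration:

```cpp
#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

// Mirrors the committed logic: reduce the shift amount modulo the bitwidth,
// special-case an effective shift of 0 (where the inverse shift would equal
// the bitwidth), then OR the left- and right-shifted halves together.
static APInt foldFunnelShift(const APInt &X, const APInt &Y, const APInt &Z,
                             bool IsRight) {
  unsigned BitWidth = X.getBitWidth();
  unsigned ShAmt = Z.urem(BitWidth);
  if (!ShAmt)
    return IsRight ? Y : X;
  unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
  unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
  return X.shl(ShlAmt) | Y.lshr(LshrAmt);
}

int main() {
  APInt X(8, 0x12), Y(8, 0x34);
  // fshl(0x12, 0x34, 4) == 0x23: high byte of 0x1234 shifted left by 4.
  assert(foldFunnelShift(X, Y, APInt(8, 4), /*IsRight=*/false) == 0x23);
  // fshr(0x12, 0x34, 2) == 0x8D: low byte of 0x1234 shifted right by 2.
  assert(foldFunnelShift(X, Y, APInt(8, 2), /*IsRight=*/true) == 0x8D);
  return 0;
}
```

Splitting `ShlAmt`/`LshrAmt` on `IsRight` lets one OR expression serve both intrinsics, since fshr by N is equivalent to fshl by BitWidth - N.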

