| author | Sanjay Patel <spatel@rotateright.com> | 2019-08-05 16:59:58 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2019-08-05 16:59:58 +0000 |
| commit | 5dbb90bfe14ace30224239cac7c61a1422fa5144 (patch) | |
| tree | b8763bb81970c1ccaa10d5b26cf2c02f9d696b47 /llvm/lib/Transforms | |
| parent | 2f238bd5baf92db6aba4fe7cc6b9094eff64ed1e (diff) | |
| download | bcm5719-llvm-5dbb90bfe14ace30224239cac7c61a1422fa5144.tar.gz bcm5719-llvm-5dbb90bfe14ace30224239cac7c61a1422fa5144.zip | |
[InstCombine] combine mul+shl separated by zext
This appears to slightly help patterns similar to what's
shown in PR42874:
https://bugs.llvm.org/show_bug.cgi?id=42874
...but not in the way requested.
That fix will require some later IR and/or backend pass to
decompose multiplies/shifts into something more optimal per
target. Those transforms already exist in some basic forms,
but they probably need to be enhanced to catch more cases.
https://rise4fun.com/Alive/Qzv2
llvm-svn: 367891
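As a concrete illustration of the new fold (a constructed example consistent with the pattern proven at the Alive link above, not a test taken from this patch), assume an i16 value zero-extended to i32, so SrcWidth = 16, BitWidth = 32, and ShAmt = 16:

```llvm
; before: shl (zext (mul X, C2)), ShAmt
%narrow = mul i16 %x, 3
%wide   = zext i16 %narrow to i32
%res    = shl i32 %wide, 16

; after: mul (zext X), (C2 << ShAmt)
%widex  = zext i16 %x to i32
%res2   = mul i32 %widex, 196608    ; 3 << 16
```

The shift discards the top 16 bits of the 32-bit result, which are exactly the bits where wraparound of the narrow i16 multiply and the widened i32 multiply could differ, so both forms compute the same value.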
Diffstat (limited to 'llvm/lib/Transforms')
| -rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 15 |
1 file changed, 13 insertions, 2 deletions
```diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index a30bcbd64ca..c0a1df6b9a7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -715,14 +715,25 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
     unsigned ShAmt = ShAmtAPInt->getZExtValue();
     unsigned BitWidth = Ty->getScalarSizeInBits();
 
-    // shl (zext X), ShAmt --> zext (shl X, ShAmt)
-    // This is only valid if X would have zeros shifted out.
     Value *X;
     if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
       unsigned SrcWidth = X->getType()->getScalarSizeInBits();
+      // shl (zext X), ShAmt --> zext (shl X, ShAmt)
+      // This is only valid if X would have zeros shifted out.
       if (ShAmt < SrcWidth &&
           MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
         return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
+
+      // shl (zext (mul MulOp, C2)), ShAmt --> mul (zext MulOp), (C2 << ShAmt)
+      // This is valid if the high bits of the wider multiply are shifted out.
+      Value *MulOp;
+      const APInt *C2;
+      if (ShAmt >= (BitWidth - SrcWidth) &&
+          match(X, m_Mul(m_Value(MulOp), m_APInt(C2)))) {
+        Value *Zext = Builder.CreateZExt(MulOp, Ty);
+        Constant *NewMulC = ConstantInt::get(Ty, C2->zext(BitWidth).shl(ShAmt));
+        return BinaryOperator::CreateMul(Zext, NewMulC);
+      }
     }
 
     // (X >> C) << C --> X & (-1 << C)
```
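One note on the new guard, `ShAmt >= (BitWidth - SrcWidth)`: it requires the shift to discard all of the bits added by the zero-extension, which is exactly where a widened multiply can disagree with the original narrow (wrapping) multiply. A constructed counterexample (hypothetical values, not from this patch's tests) shows why a smaller shift is rejected:

```llvm
; With SrcWidth = 16, BitWidth = 32, and ShAmt = 8 < 16, the fold would be wrong,
; so InstCombine leaves this alone. For %x = 0xFFFF:
;   shl (zext (mul i16 0xFFFF, 3)), 8 = shl (zext 0xFFFD), 8    = 0x00FFFD00
;   mul (zext i16 0xFFFF), (3 << 8)   = 0xFFFF * 768 (mod 2^32) = 0x02FFFD00
%narrow = mul i16 %x, 3
%wide   = zext i16 %narrow to i32
%res    = shl i32 %wide, 8
```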

