summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Transforms
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp41
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineInternal.h13
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp73
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp45
4 files changed, 60 insertions, 112 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 838ef3a1c6a..acb62b6ad9e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1032,45 +1032,6 @@ static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
return BinaryOperator::CreateNot(NotMask, I.getName());
}
-/// Try to narrow the width of an 'add' if at least 1 operand is an extend of
-/// of a value. This requires a potentially expensive known bits check to make
-/// sure the narrow op does not overflow.
-Instruction *InstCombiner::narrowAddIfNoOverflow(BinaryOperator &I) {
- // We need at least one extended operand.
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- Value *X;
- bool IsSext = match(LHS, m_SExt(m_Value(X)));
- if (!IsSext && !match(LHS, m_ZExt(m_Value(X))))
- return nullptr;
-
- // If both operands are the same extension from the same source type and we
- // can eliminate at least one (hasOneUse), this might work.
- CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
- Value *Y;
- if (!(match(RHS, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
- cast<Operator>(RHS)->getOpcode() == CastOpc &&
- (LHS->hasOneUse() || RHS->hasOneUse()))) {
- // If that did not match, see if the RHS is a constant. Truncating and
- // extending must produce the same constant.
- Constant *WideC;
- if (!LHS->hasOneUse() || !match(RHS, m_Constant(WideC)))
- return nullptr;
- Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType());
- if (ConstantExpr::getCast(CastOpc, NarrowC, I.getType()) != WideC)
- return nullptr;
- Y = NarrowC;
- }
- // Both operands have narrow versions. Last step: the math must not overflow
- // in the narrow width.
- if (!willNotOverflowAdd(X, Y, I, IsSext))
- return nullptr;
-
- // add (ext X), (ext Y) --> ext (add X, Y)
- // add (ext X), C --> ext (add X, C')
- Value *NarrowAdd = Builder.CreateAdd(X, Y, "narrow", !IsSext, IsSext);
- return CastInst::Create(CastOpc, NarrowAdd, I.getType());
-}
-
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
@@ -1230,7 +1191,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
- if (Instruction *Ext = narrowAddIfNoOverflow(I))
+ if (Instruction *Ext = narrowMathIfNoOverflow(I))
return Ext;
// (add (xor A, B) (and A, B)) --> (or A, B)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 1462660a54d..e5d0ac95eef 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -538,13 +538,24 @@ private:
: willNotOverflowUnsignedMul(LHS, RHS, CxtI);
}
+ bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
+ const Value *RHS, const Instruction &CxtI,
+ bool IsSigned) const {
+ switch (Opcode) {
+ case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
+ case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
+ case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
+ default: llvm_unreachable("Unexpected opcode for overflow query");
+ }
+ }
+
Value *EmitGEPOffset(User *GEP);
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
Instruction *narrowBinOp(TruncInst &Trunc);
Instruction *narrowMaskedBinOp(BinaryOperator &And);
- Instruction *narrowAddIfNoOverflow(BinaryOperator &I);
+ Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
Instruction *narrowRotate(TruncInst &Trunc);
Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index ee2136357ae..d76351fc6fb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -322,77 +322,8 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
- // Check for (mul (sext x), y), see if we can merge this into an
- // integer mul followed by a sext.
- if (SExtInst *Op0Conv = dyn_cast<SExtInst>(Op0)) {
- // (mul (sext x), cst) --> (sext (mul x, cst'))
- if (auto *Op1C = dyn_cast<Constant>(Op1)) {
- if (Op0Conv->hasOneUse()) {
- Constant *CI =
- ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
- if (ConstantExpr::getSExt(CI, I.getType()) == Op1C &&
- willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
- // Insert the new, smaller mul.
- Value *NewMul =
- Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
- return new SExtInst(NewMul, I.getType());
- }
- }
- }
-
- // (mul (sext x), (sext y)) --> (sext (mul int x, y))
- if (SExtInst *Op1Conv = dyn_cast<SExtInst>(Op1)) {
- // Only do this if x/y have the same type, if at last one of them has a
- // single use (so we don't increase the number of sexts), and if the
- // integer mul will not overflow.
- if (Op0Conv->getOperand(0)->getType() ==
- Op1Conv->getOperand(0)->getType() &&
- (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
- willNotOverflowSignedMul(Op0Conv->getOperand(0),
- Op1Conv->getOperand(0), I)) {
- // Insert the new integer mul.
- Value *NewMul = Builder.CreateNSWMul(
- Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
- return new SExtInst(NewMul, I.getType());
- }
- }
- }
-
- // Check for (mul (zext x), y), see if we can merge this into an
- // integer mul followed by a zext.
- if (auto *Op0Conv = dyn_cast<ZExtInst>(Op0)) {
- // (mul (zext x), cst) --> (zext (mul x, cst'))
- if (auto *Op1C = dyn_cast<Constant>(Op1)) {
- if (Op0Conv->hasOneUse()) {
- Constant *CI =
- ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
- if (ConstantExpr::getZExt(CI, I.getType()) == Op1C &&
- willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
- // Insert the new, smaller mul.
- Value *NewMul =
- Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
- return new ZExtInst(NewMul, I.getType());
- }
- }
- }
-
- // (mul (zext x), (zext y)) --> (zext (mul int x, y))
- if (auto *Op1Conv = dyn_cast<ZExtInst>(Op1)) {
- // Only do this if x/y have the same type, if at last one of them has a
- // single use (so we don't increase the number of zexts), and if the
- // integer mul will not overflow.
- if (Op0Conv->getOperand(0)->getType() ==
- Op1Conv->getOperand(0)->getType() &&
- (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
- willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
- Op1Conv->getOperand(0), I)) {
- // Insert the new integer mul.
- Value *NewMul = Builder.CreateNUWMul(
- Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
- return new ZExtInst(NewMul, I.getType());
- }
- }
- }
+ if (Instruction *Ext = narrowMathIfNoOverflow(I))
+ return Ext;
bool Changed = false;
if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 99874b31912..d29cf935751 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1446,6 +1446,51 @@ Instruction *InstCombiner::foldShuffledBinop(BinaryOperator &Inst) {
return nullptr;
}
+/// Try to narrow the width of a binop if at least 1 operand is an extend
+/// of a value. This requires a potentially expensive known bits check to make
+/// sure the narrow op does not overflow.
+Instruction *InstCombiner::narrowMathIfNoOverflow(BinaryOperator &BO) {
+ // We need at least one extended operand.
+ Value *LHS = BO.getOperand(0), *RHS = BO.getOperand(1);
+ Value *X;
+ bool IsSext = match(LHS, m_SExt(m_Value(X)));
+ if (!IsSext && !match(LHS, m_ZExt(m_Value(X))))
+ return nullptr;
+
+ // If both operands are the same extension from the same source type and we
+ // can eliminate at least one (hasOneUse), this might work.
+ CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
+ Value *Y;
+ if (!(match(RHS, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
+ cast<Operator>(RHS)->getOpcode() == CastOpc &&
+ (LHS->hasOneUse() || RHS->hasOneUse()))) {
+ // If that did not match, see if we have a suitable constant operand.
+ // Truncating and extending must produce the same constant.
+ Constant *WideC;
+ if (!LHS->hasOneUse() || !match(RHS, m_Constant(WideC)))
+ return nullptr;
+ Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType());
+ if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC)
+ return nullptr;
+ Y = NarrowC;
+ }
+ // Both operands have narrow versions. Last step: the math must not overflow
+ // in the narrow width.
+ if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
+ return nullptr;
+
+ // bo (ext X), (ext Y) --> ext (bo X, Y)
+ // bo (ext X), C --> ext (bo X, C')
+ Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
+ if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
+ if (IsSext)
+ NewBinOp->setHasNoSignedWrap();
+ else
+ NewBinOp->setHasNoUnsignedWrap();
+ }
+ return CastInst::Create(CastOpc, NarrowBO, BO.getType());
+}
+
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
Type *GEPType = GEP.getType();
OpenPOWER on IntegriCloud