path: root/llvm/lib/Analysis
author     Jingyue Wu <jingyue@google.com>  2015-07-28 18:22:40 +0000
committer  Jingyue Wu <jingyue@google.com>  2015-07-28 18:22:40 +0000
commit     42f1d67a45f3762ee7a5ffd9f1511b9314602fcc (patch)
tree       a1ae5444da994a2e0ba39be1abef9af0a476a726 /llvm/lib/Analysis
parent     916cea5682a468db21b2872c31b5bef87bb67f17 (diff)
download   bcm5719-llvm-42f1d67a45f3762ee7a5ffd9f1511b9314602fcc.tar.gz
           bcm5719-llvm-42f1d67a45f3762ee7a5ffd9f1511b9314602fcc.zip
[SCEV] Apply NSW and NUW flags via poison value analysis
Summary:
Make Scalar Evolution able to propagate NSW and NUW flags from instructions
to SCEVs in some cases. This is based on reasoning about when poison from
instructions with these flags would trigger undefined behavior. This gives a
13% speed-up on some Eigen3-based Google-internal microbenchmarks for NVPTX.

There does not seem to be clear agreement about when poison should be
considered to propagate through instructions. In this analysis, poison
propagates only in cases where that should be uncontroversial.

This change makes LSR able to create induction variables for expressions
like &ptr[i + offset] for loops like this:

  for (int i = 0; i < limit; ++i) {
    sum += ptr[i + offset];
  }

Here ptr is a 64 bit pointer and offset is a 32 bit integer. For NVPTX, LSR
currently creates an induction variable for i + offset instead, which is not
as fast. Improving this situation is what brings the 13% speed-up on some
Eigen3-based Google-internal microbenchmarks for NVPTX.

There are more details in this discussion on llvmdev:

  June: http://lists.cs.uiuc.edu/pipermail/llvmdev/2015-June/thread.html#87234
  July: http://lists.cs.uiuc.edu/pipermail/llvmdev/2015-July/thread.html#87392

Patch by Bjarke Roune

Reviewers: eliben, atrick, sanjoy

Subscribers: majnemer, hfinkel, jingyue, meheff, llvm-commits

Differential Revision: http://reviews.llvm.org/D11212

llvm-svn: 243460
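As a rough illustration of the effect described above (this sketch is not part
of the patch; the function name, the element type, and the exact code LSR
emits are hypothetical), once the i + offset addition is known not to wrap,
the loop can effectively be driven by a single 64-bit pointer induction
variable instead of a 32-bit i + offset value that must be recomputed and
re-extended on every iteration:

  // Sketch only: roughly the shape the loop can be strength-reduced to once
  // "i + offset" is known not to wrap. Names and types are illustrative.
  #include <cstdint>

  float sum_after(const float *ptr, int32_t offset, int32_t limit) {
    const float *p = ptr + offset;   // base address hoisted out of the loop
    const float *end = p + limit;    // valid only because i + offset cannot wrap
    float sum = 0.0f;
    for (; p < end; ++p)             // one 64-bit pointer induction variable
      sum += *p;
    return sum;
  }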
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 132
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp   | 161
2 files changed, 268 insertions, 25 deletions
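The patch below rests on the undefined-behavior argument summarized in the
commit message. As a hedged, source-level sketch of that argument (the names
are illustrative, and the reasoning really applies to the IR the compiler
sees, not to C++ semantics): in a loop like the one above, the flagged
addition feeds an inbounds address computation whose result is loaded on
every iteration. If the addition wrapped, it would yield poison; the poison
would propagate through the address computation, and using it as a load
address would be undefined behavior. A well-defined execution therefore
guarantees the addition never wraps on any executed iteration, which is
exactly what allows SCEV to keep the nsw flag on the corresponding expression.

  // Sketch of the chain the new ValueTracking helpers reason about
  // (illustrative C++, not code from the patch):
  #include <cstdint>

  int64_t sum(const int32_t *ptr, int32_t offset, int32_t limit) {
    int64_t total = 0;
    for (int32_t i = 0; i < limit; ++i) {
      int32_t idx = i + offset;  // would be poison on wrap if flagged nsw
      total += ptr[idx];         // inbounds address + load: poison here is UB,
                                 // so no executed iteration can actually wrap
    }
    return total;
  }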
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 8e65c1a97f0..efeddf07db2 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2936,7 +2936,8 @@ ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
// FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
// instruction to its SCEV, because the Instruction may be guarded by control
// flow and the no-overflow bits may not be valid for the expression in any
- // context.
+ // context. This can be fixed similarly to how these flags are handled for
+ // adds.
SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
@@ -3315,22 +3316,25 @@ bool ScalarEvolution::checkValidity(const SCEV *S) const {
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
+ const SCEV *S = getExistingSCEV(V);
+ if (S == nullptr) {
+ S = createSCEV(V);
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+ }
+ return S;
+}
+
+const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
+ assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
+
ValueExprMapType::iterator I = ValueExprMap.find_as(V);
if (I != ValueExprMap.end()) {
const SCEV *S = I->second;
if (checkValidity(S))
return S;
- else
- ValueExprMap.erase(I);
+ ValueExprMap.erase(I);
}
- const SCEV *S = createSCEV(V);
-
- // The process of creating a SCEV for V may have caused other SCEVs
- // to have been created, so it's necessary to insert the new entry
- // from scratch, rather than trying to remember the insert position
- // above.
- ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
- return S;
+ return nullptr;
}
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
@@ -4089,8 +4093,63 @@ ScalarEvolution::getRange(const SCEV *S,
return setRange(S, SignHint, ConservativeResult);
}
-/// createSCEV - We know that there is no SCEV for the specified value.
-/// Analyze the expression.
+SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
+ const BinaryOperator *BinOp = cast<BinaryOperator>(V);
+
+ // Return early if there are no flags to propagate to the SCEV.
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+ if (BinOp->hasNoUnsignedWrap())
+ Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
+ if (BinOp->hasNoSignedWrap())
+ Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
+ if (Flags == SCEV::FlagAnyWrap) {
+ return SCEV::FlagAnyWrap;
+ }
+
+ // Here we check that BinOp is in the header of the innermost loop
+ // containing BinOp, since we only deal with instructions in the loop
+ // header. The actual loop we need to check later will come from an add
+ // recurrence, but getting that requires computing the SCEV of the operands,
+ // which can be expensive. This check we can do cheaply to rule out some
+ // cases early.
+ Loop *innermostContainingLoop = LI->getLoopFor(BinOp->getParent());
+ if (innermostContainingLoop == nullptr ||
+ innermostContainingLoop->getHeader() != BinOp->getParent())
+ return SCEV::FlagAnyWrap;
+
+ // Only proceed if we can prove that BinOp does not yield poison.
+ if (!isKnownNotFullPoison(BinOp)) return SCEV::FlagAnyWrap;
+
+ // At this point we know that if V is executed, then it does not wrap
+ // according to at least one of NSW or NUW. If V is not executed, then we do
+ // not know if the calculation that V represents would wrap. Multiple
+ // instructions can map to the same SCEV. If we apply NSW or NUW from V to
+ // the SCEV, we must guarantee no wrapping for that SCEV also when it is
+ // derived from other instructions that map to the same SCEV. We cannot make
+ // that guarantee for cases where V is not executed. So we need to find the
+ // loop that V is considered in relation to and prove that V is executed for
+ // every iteration of that loop. That implies that the value that V
+ // calculates does not wrap anywhere in the loop, so then we can apply the
+ // flags to the SCEV.
+ //
+ // We check isLoopInvariant to disambiguate in case we are adding two
+ // recurrences from different loops, so that we know which loop to prove
+ // that V is executed in.
+ for (int OpIndex = 0; OpIndex < 2; ++OpIndex) {
+ const SCEV *Op = getSCEV(BinOp->getOperand(OpIndex));
+ if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
+ const int OtherOpIndex = 1 - OpIndex;
+ const SCEV *OtherOp = getSCEV(BinOp->getOperand(OtherOpIndex));
+ if (isLoopInvariant(OtherOp, AddRec->getLoop()) &&
+ isGuaranteedToExecuteForEveryIteration(BinOp, AddRec->getLoop()))
+ return Flags;
+ }
+ }
+ return SCEV::FlagAnyWrap;
+}
+
+/// createSCEV - We know that there is no SCEV for the specified value. Analyze
+/// the expression.
///
const SCEV *ScalarEvolution::createSCEV(Value *V) {
if (!isSCEVable(V->getType()))
@@ -4127,29 +4186,52 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
//
- // Don't apply this instruction's NSW or NUW flags to the new
- // expression. The instruction may be guarded by control flow that the
- // no-wrap behavior depends on. Non-control-equivalent instructions can be
- // mapped to the same SCEV expression, and it would be incorrect to transfer
- // NSW/NUW semantics to those operations.
+ // FIXME: Expand this handling of NSW and NUW to other instructions, like
+ // sub and mul.
SmallVector<const SCEV *, 4> AddOps;
- AddOps.push_back(getSCEV(U->getOperand(1)));
- for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
- unsigned Opcode = Op->getValueID() - Value::InstructionVal;
- if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+ for (Value *Op = U;; Op = U->getOperand(0)) {
+ U = dyn_cast<Operator>(Op);
+ unsigned Opcode = U ? U->getOpcode() : 0;
+ if (!U || (Opcode != Instruction::Add && Opcode != Instruction::Sub)) {
+ assert(Op != V && "V should be an add");
+ AddOps.push_back(getSCEV(Op));
break;
- U = cast<Operator>(Op);
+ }
+
+ if (auto *OpSCEV = getExistingSCEV(Op)) {
+ AddOps.push_back(OpSCEV);
+ break;
+ }
+
+ // If a NUW or NSW flag can be applied to the SCEV for this
+ // addition, then compute the SCEV for this addition by itself
+ // with a separate call to getAddExpr. We need to do that
+ // instead of pushing the operands of the addition onto AddOps,
+ // since the flags are only known to apply to this particular
+ // addition - they may not apply to other additions that can be
+ // formed with operands from AddOps.
+ //
+ // FIXME: Expand this to sub instructions.
+ if (Opcode == Instruction::Add && isa<BinaryOperator>(U)) {
+ SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(U);
+ if (Flags != SCEV::FlagAnyWrap) {
+ AddOps.push_back(getAddExpr(getSCEV(U->getOperand(0)),
+ getSCEV(U->getOperand(1)), Flags));
+ break;
+ }
+ }
+
const SCEV *Op1 = getSCEV(U->getOperand(1));
if (Opcode == Instruction::Sub)
AddOps.push_back(getNegativeSCEV(Op1));
else
AddOps.push_back(Op1);
}
- AddOps.push_back(getSCEV(U->getOperand(0)));
return getAddExpr(AddOps);
}
+
case Instruction::Mul: {
- // Don't transfer NSW/NUW for the same reason as AddExpr.
+ // FIXME: Transfer NSW/NUW as in AddExpr.
SmallVector<const SCEV *, 4> MulOps;
MulOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index fa0d7798cae..434a69e0820 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3316,6 +3316,167 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
return OverflowResult::MayOverflow;
}
+bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
+ // FIXME: This conservative implementation can be relaxed. E.g. most
+ // atomic operations are guaranteed to terminate on most platforms
+ // and most functions terminate.
+
+ return !I->isAtomic() && // atomics may never succeed on some platforms
+ !isa<CallInst>(I) && // could throw and might not terminate
+ !isa<InvokeInst>(I) && // might not terminate and could throw to
+ // non-successor (see bug 24185 for details).
+ !isa<ResumeInst>(I) && // has no successors
+ !isa<ReturnInst>(I); // has no successors
+}
+
+bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+ const Loop *L) {
+ // The loop header is guaranteed to be executed for every iteration.
+ //
+ // FIXME: Relax this constraint to cover all basic blocks that are
+ // guaranteed to be executed at every iteration.
+ if (I->getParent() != L->getHeader()) return false;
+
+ for (const Instruction &LI : *L->getHeader()) {
+ if (&LI == I) return true;
+ if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
+ }
+ llvm_unreachable("Instruction not contained in its own parent basic block.");
+}
+
+bool llvm::propagatesFullPoison(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Xor:
+ case Instruction::Trunc:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ // These operations all propagate poison unconditionally. Note that poison
+ // is not any particular value, so xor or subtraction of poison with
+ // itself still yields poison, not zero.
+ return true;
+
+ case Instruction::AShr:
+ case Instruction::SExt:
+ // For these operations, one bit of the input is replicated across
+ // multiple output bits. A replicated poison bit is still poison.
+ return true;
+
+ case Instruction::Shl: {
+ // Left shift *by* a poison value is poison. The number of
+ // positions to shift is unsigned, so no negative values are
+ // possible there. Left shift by zero places preserves poison. So
+ // it only remains to consider left shift of poison by a positive
+ // number of places.
+ //
+ // A left shift by a positive number of places leaves the lowest order bit
+ // non-poisoned. However, if such a shift has a no-wrap flag, then we can
+ // make the poison operand violate that flag, yielding a fresh full-poison
+ // value.
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
+ }
+
+ case Instruction::Mul: {
+ // A multiplication by zero yields a non-poison zero result, so we need to
+ // rule out zero as an operand. Conservatively, multiplication by a
+ // non-zero constant is not multiplication by zero.
+ //
+ // Multiplication by a non-zero constant can leave some bits
+ // non-poisoned. For example, a multiplication by 2 leaves the lowest
+ // order bit unpoisoned. So we need to consider that.
+ //
+ // Multiplication by 1 preserves poison. If the multiplication has a
+ // no-wrap flag, then we can make the poison operand violate that flag
+ // when multiplied by any integer other than 0 and 1.
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
+ for (Value *V : OBO->operands()) {
+ if (auto *CI = dyn_cast<ConstantInt>(V)) {
+ // A ConstantInt cannot yield poison, so we can assume that it is
+ // the other operand that is poison.
+ return !CI->isZero();
+ }
+ }
+ }
+ return false;
+ }
+
+ case Instruction::GetElementPtr:
+ // A GEP implicitly represents a sequence of additions, subtractions,
+ // truncations, sign extensions and multiplications. The multiplications
+ // are by the non-zero sizes of some set of types, so we do not have to be
+ // concerned with multiplication by zero. If the GEP is in-bounds, then
+ // these operations are implicitly no-signed-wrap so poison is propagated
+ // by the arguments above for Add, Sub, Trunc, SExt and Mul.
+ return cast<GEPOperator>(I)->isInBounds();
+
+ default:
+ return false;
+ }
+}
+
+const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Store:
+ return cast<StoreInst>(I)->getPointerOperand();
+
+ case Instruction::Load:
+ return cast<LoadInst>(I)->getPointerOperand();
+
+ case Instruction::AtomicCmpXchg:
+ return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
+
+ case Instruction::AtomicRMW:
+ return cast<AtomicRMWInst>(I)->getPointerOperand();
+
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ return I->getOperand(1);
+
+ default:
+ return nullptr;
+ }
+}
+
+bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
+ // We currently only look for uses of poison values within the same basic
+ // block, as that makes it easier to guarantee that the uses will be
+ // executed given that PoisonI is executed.
+ //
+ // FIXME: Expand this to consider uses beyond the same basic block. To do
+ // this, look out for the distinction between post-dominance and strong
+ // post-dominance.
+ const BasicBlock *BB = PoisonI->getParent();
+
+ // Set of instructions that we have proved will yield poison if PoisonI
+ // does.
+ SmallSet<const Value *, 16> YieldsPoison;
+ YieldsPoison.insert(PoisonI);
+
+ for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
+ I = I->getNextNode()) {
+ if (I != PoisonI) {
+ const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
+ if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
+ if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
+ }
+
+ // Mark poison that propagates from I through uses of I.
+ if (YieldsPoison.count(I)) {
+ for (const User *User : I->users()) {
+ const Instruction *UserI = cast<Instruction>(User);
+ if (UserI->getParent() == BB && propagatesFullPoison(UserI))
+ YieldsPoison.insert(User);
+ }
+ }
+ }
+ return false;
+}
+
static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred,
Value *CmpLHS, Value *CmpRHS,
Value *TrueVal, Value *FalseVal,
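Taken together, the ValueTracking additions give ScalarEvolution (or any
other client) a small poison-analysis toolkit. A minimal usage sketch,
assuming the declarations live in llvm/Analysis/ValueTracking.h as the rest
of this patch suggests, and omitting the add-recurrence and loop-invariance
checks that getNoWrapFlagsFromUB performs on top of this:

  // Sketch only: how a client might combine the new helpers. The real logic
  // lives in ScalarEvolution::getNoWrapFlagsFromUB above.
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  static bool canTransferNoWrapFlags(const BinaryOperator *BinOp, const Loop *L) {
    // If BinOp produced poison, some later instruction in its block would
    // trigger undefined behavior, so a well-defined run never wraps here...
    if (!isKnownNotFullPoison(BinOp))
      return false;
    // ...and BinOp must run on every iteration of L, so the no-wrap fact
    // covers the whole add recurrence, not just the iterations reaching BinOp.
    return isGuaranteedToExecuteForEveryIteration(BinOp, L);
  }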