Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 54
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp   | 61
2 files changed, 110 insertions, 5 deletions
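
A brief orientation before the patch body: the ScalarEvolution hunk below teaches MatchBinaryOp to look through an extractvalue of the value component of the *.with.overflow intrinsics, and, when the new ValueTracking helper proves the arithmetic result is only used under the overflow guard, to report the operation as an add with nsw/nuw set. A minimal source-level sketch of the kind of code this enables, assuming Clang lowers __builtin_sadd_overflow to llvm.sadd.with.overflow (the function and variable names here are illustrative, not from the commit):

#include <stdlib.h>

/* Illustrative only: the induction update goes through a checked add, and the
 * sum ("next") is read solely on the path where the overflow bit is false, so
 * the guarded add can be modeled as "add nsw" when SCEV builds the add
 * recurrence for i. */
void step_loop(int *a, int n, int step) {
  for (int i = 0; i < n;) {
    a[i] = 1;
    int next;
    if (__builtin_sadd_overflow(i, step, &next))
      abort();      /* overflow edge: "next" is never used here */
    i = next;       /* reached only via the no-overflow edge    */
  }
}
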
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 7b637db9ff1..cd82a317355 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3860,7 +3860,7 @@ struct BinaryOp {
/// Try to map \p V into a BinaryOp, and return \c None on failure.
-static Optional<BinaryOp> MatchBinaryOp(Value *V) {
+static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
auto *Op = dyn_cast<Operator>(V);
if (!Op)
return None;
@@ -3906,6 +3906,50 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V) {
}
return BinaryOp(Op);
+ case Instruction::ExtractValue: {
+ auto *EVI = cast<ExtractValueInst>(Op);
+ if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
+ break;
+
+ auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
+ if (!CI)
+ break;
+
+ if (auto *F = CI->getCalledFunction())
+ switch (F->getIntrinsicID()) {
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow: {
+ if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
+ return BinaryOp(Instruction::Add, CI->getArgOperand(0),
+ CI->getArgOperand(1));
+
+ // Now that we know that all uses of the arithmetic-result component of
+ // CI are guarded by the overflow check, we can go ahead and pretend
+ // that the arithmetic is non-overflowing.
+ if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
+ return BinaryOp(Instruction::Add, CI->getArgOperand(0),
+ CI->getArgOperand(1), /* IsNSW = */ true,
+ /* IsNUW = */ false);
+ else
+ return BinaryOp(Instruction::Add, CI->getArgOperand(0),
+ CI->getArgOperand(1), /* IsNSW = */ false,
+                            /* IsNUW = */ true);
+ }
+
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
+ CI->getArgOperand(1));
+
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
+ CI->getArgOperand(1));
+ default:
+ break;
+ }
+ }
+
default:
break;
}
@@ -3980,7 +4024,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
- if (auto BO = MatchBinaryOp(BEValueV)) {
+ if (auto BO = MatchBinaryOp(BEValueV, DT)) {
if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
if (BO->IsNUW)
Flags = setFlags(Flags, SCEV::FlagNUW);
@@ -4956,7 +5000,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
return getUnknown(V);
Operator *U = cast<Operator>(V);
- if (auto BO = MatchBinaryOp(U)) {
+ if (auto BO = MatchBinaryOp(U, DT)) {
switch (BO->Opcode) {
case Instruction::Add: {
// The simple thing to do would be to just call getSCEV on both operands
@@ -4997,7 +5041,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
else
AddOps.push_back(getSCEV(BO->RHS));
- auto NewBO = MatchBinaryOp(BO->LHS);
+ auto NewBO = MatchBinaryOp(BO->LHS, DT);
if (!NewBO || (NewBO->Opcode != Instruction::Add &&
NewBO->Opcode != Instruction::Sub)) {
AddOps.push_back(getSCEV(BO->LHS));
@@ -5027,7 +5071,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
}
MulOps.push_back(getSCEV(BO->RHS));
- auto NewBO = MatchBinaryOp(BO->LHS);
+ auto NewBO = MatchBinaryOp(BO->LHS, DT);
if (!NewBO || NewBO->Opcode != Instruction::Mul) {
MulOps.push_back(getSCEV(BO->LHS));
break;
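
The soundness of that MatchBinaryOp case rests on the isOverflowIntrinsicNoWrap helper added to ValueTracking below: it reports no-wrap only when every use of the arithmetic result is dominated by the non-overflow edge of a conditional branch on the overflow bit. A counterexample sketch where that condition fails (again with illustrative names; sink is a hypothetical external function):

void sink(int);

/* Illustrative only: the sum escapes before the overflow bit is branched on,
 * so the use of "sum" is not dominated by the "no overflow" edge and the
 * helper must answer false; MatchBinaryOp then falls back to a plain add
 * with no nsw/nuw flags. */
void use_before_check(int x, int y) {
  int sum;
  _Bool ov = __builtin_sadd_overflow(x, y, &sum);
  sink(sum);            /* reached even when ov is true */
  if (ov)
    __builtin_trap();
}
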
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 58f6cd187dd..761b8332ec4 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3373,6 +3373,67 @@ static OverflowResult computeOverflowForSignedAdd(
return OverflowResult::MayOverflow;
}
+bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
+#ifndef NDEBUG
+ auto IID = II->getIntrinsicID();
+ assert((IID == Intrinsic::sadd_with_overflow ||
+ IID == Intrinsic::uadd_with_overflow ||
+ IID == Intrinsic::ssub_with_overflow ||
+ IID == Intrinsic::usub_with_overflow ||
+ IID == Intrinsic::smul_with_overflow ||
+ IID == Intrinsic::umul_with_overflow) &&
+ "Not an overflow intrinsic!");
+#endif
+
+ SmallVector<BranchInst *, 2> GuardingBranches;
+ SmallVector<ExtractValueInst *, 2> Results;
+
+ for (User *U : II->users()) {
+ if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
+ assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
+
+ if (EVI->getIndices()[0] == 0)
+ Results.push_back(EVI);
+ else {
+ assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
+
+ for (auto *U : EVI->users())
+ if (auto *B = dyn_cast<BranchInst>(U)) {
+ assert(B->isConditional() && "How else is it using an i1?");
+ GuardingBranches.push_back(B);
+ }
+ }
+ } else {
+ // We are using the aggregate directly in a way we don't want to analyze
+ // here (storing it to a global, say).
+ return false;
+ }
+ }
+
+ auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
+ BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
+ if (!NoWrapEdge.isSingleEdge())
+ return false;
+
+ // Check if all users of the add are provably no-wrap.
+ for (auto *Result : Results) {
+      // If the extractvalue itself is not executed on overflow, then we don't
+ // need to check each use separately, since domination is transitive.
+ if (DT.dominates(NoWrapEdge, Result->getParent()))
+ continue;
+
+ for (auto &RU : Result->uses())
+ if (!DT.dominates(NoWrapEdge, RU))
+ return false;
+ }
+
+ return true;
+ };
+
+ return any_of(GuardingBranches, AllUsesGuardedByBranch);
+}
+
+
OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC,