path: root/llvm/lib/Analysis
author     Jingyue Wu <jingyue@google.com>    2015-08-20 18:27:04 +0000
committer  Jingyue Wu <jingyue@google.com>    2015-08-20 18:27:04 +0000
commit     10fcea5d4b9b6f9df529f58a30eb9dd6c722b07e (patch)
tree       6f7a0921ed4b51d0282ab5be302df721587f902d /llvm/lib/Analysis
parent     ed6b9bfeabdc482d2b133eb3054a3c1530449f27 (diff)
[ValueTracking] computeOverflowForSignedAdd and isKnownNonNegative
Summary:
Refactor, NFC.

Extracts computeOverflowForSignedAdd and isKnownNonNegative from
NaryReassociate to ValueTracking in case others need them.

Reviewers: reames

Subscribers: majnemer, llvm-commits

Differential Revision: http://reviews.llvm.org/D11313

llvm-svn: 245591
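For orientation, here is a hypothetical call site for the two entry points this patch moves into ValueTracking. The wrapper names and the surrounding analysis state (DL, AC, CxtI, DT) are illustrative assumptions, not taken from the patch; only the two ValueTracking calls and their signatures come from the diff below.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Hypothetical wrapper: is V provably >= 0 at CxtI?
bool valueIsNonNegative(Value *V, const DataLayout &DL, AssumptionCache *AC,
                        const Instruction *CxtI, const DominatorTree *DT) {
  return isKnownNonNegative(V, DL, /*Depth=*/0, AC, CxtI, DT);
}

// Hypothetical wrapper: can LHS + RHS provably never wrap in signed
// arithmetic? Uses the two-operand overload, so no existing AddOperator
// instruction is required.
bool addCannotSignedWrap(Value *LHS, Value *RHS, const DataLayout &DL,
                         AssumptionCache *AC, const Instruction *CxtI,
                         const DominatorTree *DT) {
  return computeOverflowForSignedAdd(LHS, RHS, DL, AC, CxtI, DT) ==
         OverflowResult::NeverOverflows;
}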
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--   llvm/lib/Analysis/ValueTracking.cpp   69
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 7537b4d8095..9b930d7d2eb 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -185,6 +185,14 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
   return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
 }
+bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
+                              AssumptionCache *AC, const Instruction *CxtI,
+                              const DominatorTree *DT) {
+  bool NonNegative, Negative;
+  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
+  return NonNegative;
+}
+
 static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                               unsigned Depth, const Query &Q);
@@ -3370,6 +3378,67 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
   return OverflowResult::MayOverflow;
 }
+static OverflowResult computeOverflowForSignedAdd(
+    Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
+    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
+  if (Add && Add->hasNoSignedWrap()) {
+    return OverflowResult::NeverOverflows;
+  }
+
+  bool LHSKnownNonNegative, LHSKnownNegative;
+  bool RHSKnownNonNegative, RHSKnownNegative;
+  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
+                 AC, CxtI, DT);
+  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
+                 AC, CxtI, DT);
+
+  if ((LHSKnownNonNegative && RHSKnownNegative) ||
+      (LHSKnownNegative && RHSKnownNonNegative)) {
+    // The sign bits are opposite: this CANNOT overflow.
+    return OverflowResult::NeverOverflows;
+  }
+
+  // The remaining code needs Add to be available. Return early if it is not.
+  if (!Add)
+    return OverflowResult::MayOverflow;
+
+  // If the sign of Add is the same as at least one of the operands, this add
+  // CANNOT overflow. This is particularly useful when the sum is
+  // @llvm.assume'ed non-negative rather than proved so from analyzing its
+  // operands.
+  bool LHSOrRHSKnownNonNegative =
+      (LHSKnownNonNegative || RHSKnownNonNegative);
+  bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
+  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
+    bool AddKnownNonNegative, AddKnownNegative;
+    ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
+                   /*Depth=*/0, AC, CxtI, DT);
+    if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
+        (AddKnownNegative && LHSOrRHSKnownNegative)) {
+      return OverflowResult::NeverOverflows;
+    }
+  }
+
+  return OverflowResult::MayOverflow;
+}
+
+OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
+                                                 const DataLayout &DL,
+                                                 AssumptionCache *AC,
+                                                 const Instruction *CxtI,
+                                                 const DominatorTree *DT) {
+  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
+                                       Add, DL, AC, CxtI, DT);
+}
+
+OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+                                                 const DataLayout &DL,
+                                                 AssumptionCache *AC,
+                                                 const Instruction *CxtI,
+                                                 const DominatorTree *DT) {
+  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
+}
+
 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
   // FIXME: This conservative implementation can be relaxed. E.g. most
   // atomic operations are guaranteed to terminate on most platforms
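The early exits in the new static helper rest on two standard facts about two's-complement signed addition. A minimal standalone sketch (plain C++, independent of the LLVM APIs, written here for illustration) spells them out:

// Illustration of the two facts computeOverflowForSignedAdd relies on,
// using 8-bit signed integers so the boundary cases are easy to see.
#include <cassert>
#include <cstdint>

int main() {
  // Fact 1: operands with opposite sign bits can never overflow; the exact
  // sum always fits in the type, e.g. 127 + (-1) and -128 + 1 stay in range.
  assert(int8_t(127) + int8_t(-1) == 126);
  assert(int8_t(-128) + int8_t(1) == -127);

  // Fact 2: if one operand's sign is known and the add wraps, the wrapped
  // result's sign differs from that operand's sign (127 + 1 wraps to -128 in
  // 8-bit two's complement). Contrapositive, as used in the patch: if the sum
  // is known to share the sign of a known-sign operand (for instance via
  // @llvm.assume on the sum), the add cannot have wrapped.
  int exact = int8_t(127) + int8_t(1); // exact sum, computed as int
  assert(exact > INT8_MAX);            // does not fit, so an i8 add would wrap
  return 0;
}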