Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp       |  49
-rw-r--r--  llvm/lib/Analysis/CMakeLists.txt               |   1
-rw-r--r--  llvm/lib/Analysis/CodeMetrics.cpp              |  43
-rw-r--r--  llvm/lib/Analysis/DemandedBits.cpp             |  13
-rw-r--r--  llvm/lib/Analysis/IVUsers.cpp                  |  17
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp               |  41
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp      | 282
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp            |  48
-rw-r--r--  llvm/lib/Analysis/Lint.cpp                     |  20
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp |  11
-rw-r--r--  llvm/lib/Analysis/PHITransAddr.cpp             |   6
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp          | 121
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp  |   2
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp            | 231
14 files changed, 465 insertions, 420 deletions
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 55498432e11..50a43fe22e8 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -23,6 +23,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -181,7 +182,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
/*static*/ const Value *BasicAAResult::GetLinearExpression(
const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
- DominatorTree *DT, bool &NSW, bool &NUW) {
+ AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -220,7 +221,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0,
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
BOp, DT)) {
Scale = 1;
Offset = 0;
@@ -229,23 +230,23 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
LLVM_FALLTHROUGH;
case Instruction::Add:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
Offset += RHS;
break;
case Instruction::Sub:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
Offset -= RHS;
break;
case Instruction::Mul:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
Offset *= RHS;
Scale *= RHS;
break;
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
- SExtBits, DL, Depth + 1, DT, NSW, NUW);
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
Offset <<= RHS.getLimitedValue();
Scale <<= RHS.getLimitedValue();
// the semantics of nsw and nuw for left shifts don't match those of
@@ -272,7 +273,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
const Value *Result =
GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
- Depth + 1, DT, NSW, NUW);
+ Depth + 1, AC, DT, NSW, NUW);
// zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
// by just incrementing the number of bits we've extended by.
@@ -343,7 +344,7 @@ static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
- DecomposedGEP &Decomposed, const DataLayout &DL,
+ DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
DominatorTree *DT) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = MaxLookupSearchDepth;
@@ -384,9 +385,10 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
if (const Instruction *I = dyn_cast<Instruction>(V))
- // TODO: Get a DominatorTree and use it here (it is now available in
- // this function, but this should be updated when GetUnderlyingObject
- // is updated). TLI should be provided also.
+ // TODO: Get a DominatorTree and AssumptionCache and use them here
+ // (these are both now available in this function, but this should be
+ // updated when GetUnderlyingObject is updated). TLI should be
+ // provided also.
if (const Value *Simplified =
SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
V = Simplified;
@@ -448,7 +450,7 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
bool NSW = true, NUW = true;
Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
- SExtBits, DL, 0, DT, NSW, NUW);
+ SExtBits, DL, 0, AC, DT, NSW, NUW);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -1057,9 +1059,9 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *UnderlyingV2) {
DecomposedGEP DecompGEP1, DecompGEP2;
bool GEP1MaxLookupReached =
- DecomposeGEPExpression(GEP1, DecompGEP1, DL, DT);
+ DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
bool GEP2MaxLookupReached =
- DecomposeGEPExpression(V2, DecompGEP2, DL, DT);
+ DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
@@ -1221,7 +1223,7 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
bool SignKnownZero, SignKnownOne;
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
- 0, nullptr, DT);
+ 0, &AC, nullptr, DT);
// Zero-extension widens the variable, and so forces the sign
// bit to zero.
@@ -1256,7 +1258,7 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
return NoAlias;
if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
- GEP1BaseOffset, DT))
+ GEP1BaseOffset, &AC, DT))
return NoAlias;
}
@@ -1658,7 +1660,7 @@ void BasicAAResult::GetIndexDifference(
bool BasicAAResult::constantOffsetHeuristic(
const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
- uint64_t V2Size, int64_t BaseOffset,
+ uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
DominatorTree *DT) {
if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
V2Size == MemoryLocation::UnknownSize)
@@ -1681,11 +1683,11 @@ bool BasicAAResult::constantOffsetHeuristic(
bool NSW = true, NUW = true;
unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
- V0SExtBits, DL, 0, DT, NSW, NUW);
+ V0SExtBits, DL, 0, AC, DT, NSW, NUW);
NSW = true;
NUW = true;
const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
- V1SExtBits, DL, 0, DT, NSW, NUW);
+ V1SExtBits, DL, 0, AC, DT, NSW, NUW);
if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
@@ -1719,6 +1721,7 @@ AnalysisKey BasicAA::Key;
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
return BasicAAResult(F.getParent()->getDataLayout(),
AM.getResult<TargetLibraryAnalysis>(F),
+ AM.getResult<AssumptionAnalysis>(F),
&AM.getResult<DominatorTreeAnalysis>(F),
AM.getCachedResult<LoopAnalysis>(F));
}
@@ -1732,6 +1735,7 @@ void BasicAAWrapperPass::anchor() {}
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
"Basic Alias Analysis (stateless AA impl)", true, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
@@ -1742,12 +1746,13 @@ FunctionPass *llvm::createBasicAAWrapperPass() {
}
bool BasicAAWrapperPass::runOnFunction(Function &F) {
+ auto &ACT = getAnalysis<AssumptionCacheTracker>();
auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
- &DTWP.getDomTree(),
+ ACT.getAssumptionCache(F), &DTWP.getDomTree(),
LIWP ? &LIWP->getLoopInfo() : nullptr));
return false;
@@ -1755,6 +1760,7 @@ bool BasicAAWrapperPass::runOnFunction(Function &F) {
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -1762,5 +1768,6 @@ void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
return BasicAAResult(
F.getParent()->getDataLayout(),
- P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());
+ P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
+ P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
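
The consumer-side pattern this file now establishes, a legacy pass that requires AssumptionCacheTracker and pulls the per-function cache in runOnFunction, is small enough to sketch. The sketch below is illustrative only, not part of the patch; the pass name is invented.

  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/Pass.h"

  using namespace llvm;

  namespace {
  // Minimal legacy-PM consumer, mirroring BasicAAWrapperPass above.
  struct AssumeUserSketch : public FunctionPass {
    static char ID;
    AssumeUserSketch() : FunctionPass(ID) {}

    bool runOnFunction(Function &F) override {
      // Per-function cache of @llvm.assume calls maintained by the tracker.
      AssumptionCache &AC =
          getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
      (void)AC; // thread &AC into ValueTracking / InstSimplify queries
      return false;
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
      AU.addRequired<AssumptionCacheTracker>();
    }
  };
  } // end anonymous namespace

  char AssumeUserSketch::ID = 0;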
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 7175eb74904..08d50c29dfc 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -4,6 +4,7 @@ add_llvm_library(LLVMAnalysis
AliasAnalysisSummary.cpp
AliasSetTracker.cpp
Analysis.cpp
+ AssumptionCache.cpp
BasicAliasAnalysis.cpp
BlockFrequencyInfo.cpp
BlockFrequencyInfoImpl.cpp
diff --git a/llvm/lib/Analysis/CodeMetrics.cpp b/llvm/lib/Analysis/CodeMetrics.cpp
index 1f7c6aa3568..bdffdd8eb27 100644
--- a/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/llvm/lib/Analysis/CodeMetrics.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -70,31 +71,45 @@ static void completeEphemeralValues(SmallPtrSetImpl<const Value *> &Visited,
// Find all ephemeral values.
void CodeMetrics::collectEphemeralValues(
- const Loop *L, SmallPtrSetImpl<const Value *> &EphValues) {
+ const Loop *L, AssumptionCache *AC,
+ SmallPtrSetImpl<const Value *> &EphValues) {
SmallPtrSet<const Value *, 32> Visited;
SmallVector<const Value *, 16> Worklist;
- for (auto &B : L->blocks())
- for (auto &I : *B)
- if (auto *II = dyn_cast<IntrinsicInst>(&I))
- if (II->getIntrinsicID() == Intrinsic::assume &&
- EphValues.insert(II).second)
- appendSpeculatableOperands(II, Visited, Worklist);
+ for (auto &AssumeVH : AC->assumptions()) {
+ if (!AssumeVH)
+ continue;
+ Instruction *I = cast<Instruction>(AssumeVH);
+
+ // Filter out call sites outside of the loop so we don't do a function's
+ // worth of work for each of its loops (and, in the common case, ephemeral
+ // values in the loop are likely due to @llvm.assume calls in the loop).
+ if (!L->contains(I->getParent()))
+ continue;
+
+ if (EphValues.insert(I).second)
+ appendSpeculatableOperands(I, Visited, Worklist);
+ }
completeEphemeralValues(Visited, Worklist, EphValues);
}
void CodeMetrics::collectEphemeralValues(
- const Function *F, SmallPtrSetImpl<const Value *> &EphValues) {
+ const Function *F, AssumptionCache *AC,
+ SmallPtrSetImpl<const Value *> &EphValues) {
SmallPtrSet<const Value *, 32> Visited;
SmallVector<const Value *, 16> Worklist;
- for (auto &B : *F)
- for (auto &I : B)
- if (auto *II = dyn_cast<IntrinsicInst>(&I))
- if (II->getIntrinsicID() == Intrinsic::assume &&
- EphValues.insert(II).second)
- appendSpeculatableOperands(II, Visited, Worklist);
+ for (auto &AssumeVH : AC->assumptions()) {
+ if (!AssumeVH)
+ continue;
+ Instruction *I = cast<Instruction>(AssumeVH);
+ assert(I->getParent()->getParent() == F &&
+ "Found assumption for the wrong function!");
+
+ if (EphValues.insert(I).second)
+ appendSpeculatableOperands(I, Visited, Worklist);
+ }
completeEphemeralValues(Visited, Worklist, EphValues);
}
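
Both collectEphemeralValues overloads now share one iteration idiom: walk AC->assumptions() and skip nulled handles, since a deleted @llvm.assume call leaves a null WeakVH behind in the cache. A minimal sketch of that idiom, with an invented helper name:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Hypothetical helper: visit every still-live assume call in the cache.
  static void forEachAssume(AssumptionCache &AC,
                            function_ref<void(CallInst *)> Visit) {
    for (auto &AssumeVH : AC.assumptions()) {
      if (!AssumeVH)
        continue; // an erased assume self-nulls its WeakVH entry
      Visit(cast<CallInst>(AssumeVH));
    }
  }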
diff --git a/llvm/lib/Analysis/DemandedBits.cpp b/llvm/lib/Analysis/DemandedBits.cpp
index a52af4921c4..688c1db534c 100644
--- a/llvm/lib/Analysis/DemandedBits.cpp
+++ b/llvm/lib/Analysis/DemandedBits.cpp
@@ -24,6 +24,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -44,6 +45,7 @@ using namespace llvm;
char DemandedBitsWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(DemandedBitsWrapperPass, "demanded-bits",
"Demanded bits analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(DemandedBitsWrapperPass, "demanded-bits",
"Demanded bits analysis", false, false)
@@ -54,6 +56,7 @@ DemandedBitsWrapperPass::DemandedBitsWrapperPass() : FunctionPass(ID) {
void DemandedBitsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesAll();
}
@@ -85,13 +88,13 @@ void DemandedBits::determineLiveOperandBits(
KnownZero = APInt(BitWidth, 0);
KnownOne = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0,
- UserI, &DT);
+ &AC, UserI, &DT);
if (V2) {
KnownZero2 = APInt(BitWidth, 0);
KnownOne2 = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL,
- 0, UserI, &DT);
+ 0, &AC, UserI, &DT);
}
};
@@ -245,8 +248,9 @@ void DemandedBits::determineLiveOperandBits(
}
bool DemandedBitsWrapperPass::runOnFunction(Function &F) {
+ auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DB.emplace(F, DT);
+ DB.emplace(F, AC, DT);
return false;
}
@@ -386,8 +390,9 @@ AnalysisKey DemandedBitsAnalysis::Key;
DemandedBits DemandedBitsAnalysis::run(Function &F,
FunctionAnalysisManager &AM) {
+ auto &AC = AM.getResult<AssumptionAnalysis>(F);
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
- return DemandedBits(F, DT);
+ return DemandedBits(F, AC, DT);
}
PreservedAnalyses DemandedBitsPrinterPass::run(Function &F,
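
Under the new pass manager the lookup goes through the analysis manager instead of a tracker pass, exactly as DemandedBitsAnalysis::run does above. A hedged sketch of a minimal consumer; the pass name is invented:

  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/IR/PassManager.h"

  using namespace llvm;

  struct AssumeQuerySketch : PassInfoMixin<AssumeQuerySketch> {
    PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
      // AssumptionAnalysis is the new-PM counterpart of the tracker.
      AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
      (void)AC; // hand &AC to computeKnownBits and friends
      return PreservedAnalyses::all();
    }
  };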
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index b365ed5af15..76e2561b9da 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -14,6 +14,7 @@
#include "llvm/Analysis/IVUsers.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
@@ -40,7 +41,8 @@ IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM) {
AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
Function *F = L.getHeader()->getParent();
- return IVUsers(&L, FAM.getCachedResult<LoopAnalysis>(*F),
+ return IVUsers(&L, FAM.getCachedResult<AssumptionAnalysis>(*F),
+ FAM.getCachedResult<LoopAnalysis>(*F),
FAM.getCachedResult<DominatorTreeAnalysis>(*F),
FAM.getCachedResult<ScalarEvolutionAnalysis>(*F));
}
@@ -53,6 +55,7 @@ PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM) {
char IVUsersWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(IVUsersWrapperPass, "iv-users",
"Induction Variable Users", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
@@ -260,11 +263,12 @@ IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
return IVUses.back();
}
-IVUsers::IVUsers(Loop *L, LoopInfo *LI, DominatorTree *DT, ScalarEvolution *SE)
- : L(L), LI(LI), DT(DT), SE(SE), IVUses() {
+IVUsers::IVUsers(Loop *L, AssumptionCache *AC, LoopInfo *LI, DominatorTree *DT,
+ ScalarEvolution *SE)
+ : L(L), AC(AC), LI(LI), DT(DT), SE(SE), IVUses() {
// Collect ephemeral values so that AddUsersIfInteresting skips them.
EphValues.clear();
- CodeMetrics::collectEphemeralValues(L, EphValues);
+ CodeMetrics::collectEphemeralValues(L, AC, EphValues);
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -313,6 +317,7 @@ IVUsersWrapperPass::IVUsersWrapperPass() : LoopPass(ID) {
}
void IVUsersWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolutionWrapperPass>();
@@ -320,11 +325,13 @@ void IVUsersWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool IVUsersWrapperPass::runOnLoop(Loop *L, LPPassManager &LPM) {
+ auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
+ *L->getHeader()->getParent());
auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
- IU.reset(new IVUsers(L, LI, DT, SE));
+ IU.reset(new IVUsers(L, AC, LI, DT, SE));
return false;
}
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 7d4ad48ce3a..0228a1ba38f 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
@@ -68,6 +69,9 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
+ /// Getter for the cache of @llvm.assume intrinsics.
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache;
+
/// Profile summary information.
ProfileSummaryInfo *PSI;
@@ -197,19 +201,20 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
public:
CallAnalyzer(const TargetTransformInfo &TTI,
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
ProfileSummaryInfo *PSI, Function &Callee, CallSite CSArg,
const InlineParams &Params)
- : TTI(TTI), PSI(PSI), F(Callee), CandidateCS(CSArg), Params(Params),
- Threshold(Params.DefaultThreshold), Cost(0), IsCallerRecursive(false),
- IsRecursiveCall(false), ExposesReturnsTwice(false),
- HasDynamicAlloca(false), ContainsNoDuplicateCall(false),
- HasReturn(false), HasIndirectBr(false), HasFrameEscape(false),
- AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
- FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
- NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
- NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
- NumInstructionsSimplified(0), SROACostSavings(0),
- SROACostSavingsLost(0) {}
+ : TTI(TTI), GetAssumptionCache(GetAssumptionCache), PSI(PSI), F(Callee),
+ CandidateCS(CSArg), Params(Params), Threshold(Params.DefaultThreshold),
+ Cost(0), IsCallerRecursive(false), IsRecursiveCall(false),
+ ExposesReturnsTwice(false), HasDynamicAlloca(false),
+ ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
+ HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
+ NumVectorInstructions(0), FiftyPercentVectorBonus(0),
+ TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
+ NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
+ NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
+ SROACostSavings(0), SROACostSavingsLost(0) {}
bool analyzeCall(CallSite CS);
@@ -957,7 +962,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
// out. Pretend to inline the function, with a custom threshold.
auto IndirectCallParams = Params;
IndirectCallParams.DefaultThreshold = InlineConstants::IndirectCallThreshold;
- CallAnalyzer CA(TTI, PSI, *F, CS, IndirectCallParams);
+ CallAnalyzer CA(TTI, GetAssumptionCache, PSI, *F, CS, IndirectCallParams);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// threshold to get the bonus we want to apply, but don't go below zero.
@@ -1313,7 +1318,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// the ephemeral values multiple times (and they're completely determined by
// the callee, so this is purely duplicate work).
SmallPtrSet<const Value *, 32> EphValues;
- CodeMetrics::collectEphemeralValues(&F, EphValues);
+ CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
// The worklist of live basic blocks in the callee *after* inlining. We avoid
// adding basic blocks of the callee which can be proven to be dead for this
@@ -1446,13 +1451,17 @@ static bool functionsHaveCompatibleAttributes(Function *Caller,
InlineCost llvm::getInlineCost(
CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
ProfileSummaryInfo *PSI) {
- return getInlineCost(CS, CS.getCalledFunction(), Params, CalleeTTI, PSI);
+ return getInlineCost(CS, CS.getCalledFunction(), Params, CalleeTTI,
+ GetAssumptionCache, PSI);
}
InlineCost llvm::getInlineCost(
CallSite CS, Function *Callee, const InlineParams &Params,
- TargetTransformInfo &CalleeTTI, ProfileSummaryInfo *PSI) {
+ TargetTransformInfo &CalleeTTI,
+ std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+ ProfileSummaryInfo *PSI) {
// Cannot inline indirect calls.
if (!Callee)
@@ -1486,7 +1495,7 @@ InlineCost llvm::getInlineCost(
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(CalleeTTI, PSI, *Callee, CS, Params);
+ CallAnalyzer CA(CalleeTTI, GetAssumptionCache, PSI, *Callee, CS, Params);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());
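
getInlineCost takes a getter rather than a single cache because CallAnalyzer may need the cache for more than one function: the callee here, and other callees when it recurses into indirect call sites. A caller-side sketch, assuming the caller is a legacy pass holding an AssumptionCacheTracker; the function name is invented:

  #include "llvm/Analysis/AssumptionCache.h"
  #include <functional>

  using namespace llvm;

  // The tracker must outlive the returned callback.
  std::function<AssumptionCache &(Function &)>
  makeGetAssumptionCache(AssumptionCacheTracker &ACT) {
    return [&ACT](Function &F) -> AssumptionCache & {
      return ACT.getAssumptionCache(F);
    };
  }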
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 5fd96d117a1..b4686a1ff17 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -50,11 +50,13 @@ struct Query {
const DataLayout &DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
+ AssumptionCache *AC;
const Instruction *CxtI;
Query(const DataLayout &DL, const TargetLibraryInfo *tli,
- const DominatorTree *dt, const Instruction *cxti = nullptr)
- : DL(DL), TLI(tli), DT(dt), CxtI(cxti) {}
+ const DominatorTree *dt, AssumptionCache *ac = nullptr,
+ const Instruction *cxti = nullptr)
+ : DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
};
} // end anonymous namespace
@@ -582,8 +584,9 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, const Instruction *CxtI) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, CxtI),
+ const DominatorTree *DT, AssumptionCache *AC,
+ const Instruction *CxtI) {
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -688,7 +691,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.CxtI, Q.DT);
+ computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (KnownZero == ~APInt::getSignBit(BitWidth)) {
// Op1 is either 0 or the minimum signed value. If the sub is NSW, then
// Op1 must be 0 because negating the minimum signed value is undefined.
@@ -794,8 +797,9 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, const Instruction *CxtI) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, CxtI),
+ const DominatorTree *DT, AssumptionCache *AC,
+ const Instruction *CxtI) {
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -962,35 +966,35 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1089,9 +1093,9 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1107,9 +1111,9 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1154,9 +1158,9 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1230,9 +1234,9 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1248,9 +1252,9 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1276,9 +1280,9 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1346,7 +1350,7 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.CxtI, Q.DT);
+ computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (KnownOne.getLimitedValue() >= BitWidth)
return UndefValue::get(Op0->getType());
@@ -1382,8 +1386,8 @@ static Value *SimplifyRightShift(unsigned Opcode, Value *Op0, Value *Op1,
unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
APInt Op0KnownZero(BitWidth, 0);
APInt Op0KnownOne(BitWidth, 0);
- computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.CxtI,
- Q.DT);
+ computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.AC,
+ Q.CxtI, Q.DT);
if (Op0KnownOne[0])
return Op0;
}
@@ -1412,9 +1416,9 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1437,9 +1441,9 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1461,7 +1465,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
return X;
// Arithmetic shifting an all-sign-bit value is a no-op.
- unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.CxtI, Q.DT);
+ unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (NumSignBits == Op0->getType()->getScalarSizeInBits())
return Op0;
@@ -1471,9 +1475,9 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1655,9 +1659,11 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
// A & (-A) = A if A is a power of two or zero.
if (match(Op0, m_Neg(m_Specific(Op1))) ||
match(Op1, m_Neg(m_Specific(Op0)))) {
- if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.CxtI, Q.DT))
+ if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
+ Q.DT))
return Op0;
- if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.CxtI, Q.DT))
+ if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
+ Q.DT))
return Op1;
}
@@ -1722,9 +1728,9 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1904,10 +1910,10 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
match(A, m_Add(m_Value(V1), m_Value(V2)))) {
// Add commutes, try both ways.
if (V1 == B &&
- MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.CxtI, Q.DT))
+ MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return A;
if (V2 == B &&
- MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.CxtI, Q.DT))
+ MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return A;
}
// Or commutes, try both ways.
@@ -1915,10 +1921,10 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
match(B, m_Add(m_Value(V1), m_Value(V2)))) {
// Add commutes, try both ways.
if (V1 == A &&
- MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.CxtI, Q.DT))
+ MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return B;
if (V2 == A &&
- MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.CxtI, Q.DT))
+ MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return B;
}
}
@@ -1935,9 +1941,9 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -1989,9 +1995,9 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -2306,44 +2312,44 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.CxtI, Q.DT))
+ if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.CxtI, Q.DT))
+ if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getTrue(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.CxtI, Q.DT))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getFalse(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.CxtI, Q.DT))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getTrue(ITy);
break;
}
@@ -2574,9 +2580,9 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
bool RHSKnownNonNegative, RHSKnownNegative;
bool YKnownNonNegative, YKnownNegative;
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, Q.DL, 0,
+ Q.AC, Q.CxtI, Q.DT);
+ ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
- ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
if (RHSKnownNonNegative && YKnownNegative)
return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
if (RHSKnownNegative || YKnownNonNegative)
@@ -2594,9 +2600,9 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
bool LHSKnownNonNegative, LHSKnownNegative;
bool YKnownNonNegative, YKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0,
+ Q.AC, Q.CxtI, Q.DT);
+ ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
- ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
if (LHSKnownNonNegative && YKnownNegative)
return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
if (LHSKnownNegative || YKnownNonNegative)
@@ -2652,8 +2658,8 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH;
@@ -2663,8 +2669,8 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH;
@@ -2683,8 +2689,8 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH;
@@ -2694,8 +2700,8 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.CxtI,
- Q.DT);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
+ Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH;
@@ -3220,7 +3226,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// icmp eq|ne X, Y -> false|true if X != Y
if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
- isKnownNonEqual(LHS, RHS, Q.DL, Q.CxtI, Q.DT)) {
+ isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
LLVMContext &Ctx = LHS->getType()->getContext();
return Pred == ICmpInst::ICMP_NE ?
ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
@@ -3279,7 +3285,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
unsigned BitWidth = RHSVal->getBitWidth();
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0,
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0, Q.AC,
Q.CxtI, Q.DT);
if (((LHSKnownZero & *RHSVal) != 0) || ((LHSKnownOne & ~(*RHSVal)) != 0))
return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
@@ -3305,9 +3311,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -3438,10 +3444,10 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF,
- Query(DL, TLI, DT, CxtI), RecursionLimit);
+ Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// See if V simplifies when its operand Op is replaced with RepOp.
@@ -3708,10 +3714,10 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal,
- Query(DL, TLI, DT, CxtI), RecursionLimit);
+ Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// Given operands for a GetElementPtrInst, see if we can fold the result.
@@ -3827,10 +3833,10 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyGEPInst(SrcTy, Ops,
- Query(DL, TLI, DT, CxtI), RecursionLimit);
+ Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// Given operands for an InsertValueInst, see if we can fold the result.
@@ -3864,9 +3870,9 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
Value *llvm::SimplifyInsertValueInst(
Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
- const TargetLibraryInfo *TLI, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -3899,8 +3905,9 @@ Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
+ AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -3931,8 +3938,8 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &,
Value *llvm::SimplifyExtractElementInst(
Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, const Instruction *CxtI) {
- return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, CxtI),
+ const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
+ return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -4006,9 +4013,9 @@ static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyCastInst(CastOpc, Op, Ty, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyCastInst(CastOpc, Op, Ty, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -4101,18 +4108,18 @@ static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -4126,9 +4133,9 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, CxtI),
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
@@ -4328,24 +4335,24 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT,
- const Instruction *CxtI) {
- return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, CxtI),
+ AssumptionCache *AC, const Instruction *CxtI) {
+ return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
+ const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCall(V, Args.begin(), Args.end(),
- Query(DL, TLI, DT, CxtI), RecursionLimit);
+ Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT) {
+ const DominatorTree *DT, AssumptionCache *AC) {
Value *Result;
switch (I->getOpcode()) {
@@ -4354,137 +4361,137 @@ Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
break;
case Instruction::FAdd:
Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
- TLI, DT, I);
+ TLI, DT, AC, I);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
- TLI, DT, I);
+ TLI, DT, AC, I);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Mul:
Result =
- SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, I);
+ SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::SDiv:
Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::UDiv:
Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::FDiv:
Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::SRem:
Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::URem:
Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::FRem:
Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
- TLI, DT, I);
+ TLI, DT, AC, I);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
- I);
+ AC, I);
break;
case Instruction::And:
Result =
- SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, I);
+ SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::Or:
Result =
- SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, I);
+ SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::Xor:
Result =
- SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, I);
+ SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::ICmp:
Result =
SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), I->getOperand(0),
- I->getOperand(1), DL, TLI, DT, I);
+ I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), DL, TLI, DT, I);
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), DL, TLI, DT, I);
+ I->getOperand(2), DL, TLI, DT, AC, I);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
- Ops, DL, TLI, DT, I);
+ Ops, DL, TLI, DT, AC, I);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
- IV->getIndices(), DL, TLI, DT, I);
+ IV->getIndices(), DL, TLI, DT, AC, I);
break;
}
case Instruction::ExtractValue: {
auto *EVI = cast<ExtractValueInst>(I);
Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
- EVI->getIndices(), DL, TLI, DT, I);
+ EVI->getIndices(), DL, TLI, DT, AC, I);
break;
}
case Instruction::ExtractElement: {
auto *EEI = cast<ExtractElementInst>(I);
Result = SimplifyExtractElementInst(
- EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, I);
+ EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I);
break;
}
case Instruction::PHI:
- Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, I));
+ Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I));
break;
case Instruction::Call: {
CallSite CS(cast<CallInst>(I));
Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), DL,
- TLI, DT, I);
+ TLI, DT, AC, I);
break;
}
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
Result = SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(),
- DL, TLI, DT, I);
+ DL, TLI, DT, AC, I);
break;
}
@@ -4494,7 +4501,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
unsigned BitWidth = I->getType()->getScalarSizeInBits();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- computeKnownBits(I, KnownZero, KnownOne, DL, /*Depth*/0, I, DT);
+ computeKnownBits(I, KnownZero, KnownOne, DL, /*Depth*/0, AC, I, DT);
if ((KnownZero | KnownOne).isAllOnesValue())
Result = ConstantInt::get(I->getType(), KnownOne);
}
@@ -4518,7 +4525,8 @@ Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT) {
+ const DominatorTree *DT,
+ AssumptionCache *AC) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
const DataLayout &DL = I->getModule()->getDataLayout();
@@ -4547,7 +4555,7 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
I = Worklist[Idx];
// See if this instruction simplifies.
- SimpleV = SimplifyInstruction(I, DL, TLI, DT);
+ SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
if (!SimpleV)
continue;
@@ -4573,14 +4581,16 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT) {
- return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT);
+ const DominatorTree *DT,
+ AssumptionCache *AC) {
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI,
- const DominatorTree *DT) {
+ const DominatorTree *DT,
+ AssumptionCache *AC) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}
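
For callers, the only change is the extra AssumptionCache argument; with it, the known-bits queries inside the simplifier can use facts established by dominating @llvm.assume calls. A hedged usage sketch follows: the helper is invented, and the replaceAllUsesWith step stands in for whatever a real caller does with the result.

  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/IR/Dominators.h"
  #include "llvm/IR/Module.h"

  using namespace llvm;

  static bool trySimplify(Instruction *I, const TargetLibraryInfo *TLI,
                          const DominatorTree *DT, AssumptionCache *AC) {
    const DataLayout &DL = I->getModule()->getDataLayout();
    if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
      I->replaceAllUsesWith(V); // fold I away; callers then erase it
      return true;
    }
    return false;
  }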
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index c1561adbcbe..e51e8217360 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -15,6 +15,7 @@
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -41,6 +42,7 @@ using namespace PatternMatch;
char LazyValueInfoWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
@@ -577,6 +579,7 @@ namespace {
return true;
}
+ AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
const DataLayout &DL; ///< A mandatory DataLayout
DominatorTree *DT; ///< An optional DT pointer.
@@ -635,8 +638,9 @@ namespace {
/// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
- LazyValueInfoImpl(const DataLayout &DL, DominatorTree *DT = nullptr)
- : DL(DL), DT(DT) {}
+ LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
+ DominatorTree *DT = nullptr)
+ : AC(AC), DL(DL), DT(DT) {}
};
} // end anonymous namespace
@@ -920,16 +924,14 @@ void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
if (!BBI)
return;
- for (auto *U : Val->users()) {
- auto *II = dyn_cast<IntrinsicInst>(U);
- if (!II)
+ for (auto &AssumeVH : AC->assumptions()) {
+ if (!AssumeVH)
continue;
- if (II->getIntrinsicID() != Intrinsic::assume)
- continue;
- if (!isValidAssumeForContext(II, BBI, DT))
+ auto *I = cast<CallInst>(AssumeVH);
+ if (!isValidAssumeForContext(I, BBI, DT))
continue;
- BBLV = intersect(BBLV, getValueFromCondition(Val, II->getArgOperand(0)));
+ BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
}
// If guards are not used in the module, don't spend time looking for them
@@ -1456,16 +1458,18 @@ void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
//===----------------------------------------------------------------------===//
/// This lazily constructs the LazyValueInfoImpl.
-static LazyValueInfoImpl &getImpl(void *&PImpl, const DataLayout *DL,
+static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
+ const DataLayout *DL,
DominatorTree *DT = nullptr) {
if (!PImpl) {
assert(DL && "getCache() called with a null DataLayout");
- PImpl = new LazyValueInfoImpl(*DL, DT);
+ PImpl = new LazyValueInfoImpl(AC, *DL, DT);
}
return *static_cast<LazyValueInfoImpl*>(PImpl);
}
bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
+ Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const DataLayout &DL = F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
@@ -1474,7 +1478,7 @@ bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
if (Info.PImpl)
- getImpl(Info.PImpl, &DL, Info.DT).clear();
+ getImpl(Info.PImpl, Info.AC, &DL, Info.DT).clear();
// Fully lazy.
return false;
@@ -1482,6 +1486,7 @@ bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -1492,7 +1497,7 @@ LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
- delete &getImpl(PImpl, nullptr);
+ delete &getImpl(PImpl, AC, nullptr);
PImpl = nullptr;
}
}
@@ -1500,10 +1505,11 @@ void LazyValueInfo::releaseMemory() {
void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
LazyValueInfo LazyValueAnalysis::run(Function &F, FunctionAnalysisManager &FAM) {
+ auto &AC = FAM.getResult<AssumptionAnalysis>(F);
auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
- return LazyValueInfo(&TLI, DT);
+ return LazyValueInfo(&AC, &TLI, DT);
}
/// Returns true if we can statically tell that this value will never be a
@@ -1528,7 +1534,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
const DataLayout &DL = BB->getModule()->getDataLayout();
LVILatticeVal Result =
- getImpl(PImpl, &DL, DT).getValueInBlock(V, BB, CxtI);
+ getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
if (Result.isConstant())
return Result.getConstant();
@@ -1546,7 +1552,7 @@ ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
unsigned Width = V->getType()->getIntegerBitWidth();
const DataLayout &DL = BB->getModule()->getDataLayout();
LVILatticeVal Result =
- getImpl(PImpl, &DL, DT).getValueInBlock(V, BB, CxtI);
+ getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
if (Result.isUndefined())
return ConstantRange(Width, /*isFullSet=*/false);
if (Result.isConstantRange())
@@ -1565,7 +1571,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
- getImpl(PImpl, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
+ getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
if (Result.isConstant())
return Result.getConstant();
@@ -1653,7 +1659,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
- getImpl(PImpl, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
+ getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
return getPredicateResult(Pred, C, Result, DL, TLI);
}
@@ -1673,7 +1679,7 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
return LazyValueInfo::True;
}
const DataLayout &DL = CxtI->getModule()->getDataLayout();
- LVILatticeVal Result = getImpl(PImpl, &DL, DT).getValueAt(V, CxtI);
+ LVILatticeVal Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
if (Ret != Unknown)
return Ret;
@@ -1763,13 +1769,13 @@ void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
BasicBlock *NewSucc) {
if (PImpl) {
const DataLayout &DL = PredBB->getModule()->getDataLayout();
- getImpl(PImpl, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
+ getImpl(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
}
}
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
if (PImpl) {
const DataLayout &DL = BB->getModule()->getDataLayout();
- getImpl(PImpl, &DL, DT).eraseBlock(BB);
+ getImpl(PImpl, AC, &DL, DT).eraseBlock(BB);
}
}
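The intersectAssumeOrGuardBlockValueConstantRange hunk above replaces scanning Val->users() for assume intrinsics with a walk over the cache. Extracted as a standalone sketch (function name illustrative): the cache hands back weak handles, so entries may have been erased and must be null-checked, and each surviving assume must still be validated against the query context.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void scanAssumptions(AssumptionCache &AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;                           // stale handle: assume was erased
    auto *CI = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(CI, CxtI, DT))
      continue;                           // assume need not hold at CxtI
    Value *Cond = CI->getArgOperand(0);   // the assumed i1 condition
    (void)Cond; // ...refine the lattice value / known bits from Cond...
  }
}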
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 75d03d18b89..2ca46b1fe87 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -40,6 +40,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
@@ -127,6 +128,7 @@ namespace {
Module *Mod;
const DataLayout *DL;
AliasAnalysis *AA;
+ AssumptionCache *AC;
DominatorTree *DT;
TargetLibraryInfo *TLI;
@@ -143,6 +145,7 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
}
@@ -182,6 +185,7 @@ namespace {
char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
@@ -199,6 +203,7 @@ bool Lint::runOnFunction(Function &F) {
Mod = F.getParent();
DL = &F.getParent()->getDataLayout();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+ AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
visit(F);
@@ -520,7 +525,8 @@ void Lint::visitShl(BinaryOperator &I) {
"Undefined result: Shift count out of range", &I);
}
-static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT) {
+static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
+ AssumptionCache *AC) {
// Assume undef could be zero.
if (isa<UndefValue>(V))
return true;
@@ -529,7 +535,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT) {
if (!VecTy) {
unsigned BitWidth = V->getType()->getIntegerBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(V, KnownZero, KnownOne, DL, 0,
+ computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC,
dyn_cast<Instruction>(V), DT);
return KnownZero.isAllOnesValue();
}
@@ -560,22 +566,22 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT) {
}
void Lint::visitSDiv(BinaryOperator &I) {
- Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT),
+ Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitUDiv(BinaryOperator &I) {
- Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT),
+ Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitSRem(BinaryOperator &I) {
- Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT),
+ Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitURem(BinaryOperator &I) {
- Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT),
+ Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
@@ -693,7 +699,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, *DL, TLI, DT))
+ if (Value *W = SimplifyInstruction(Inst, *DL, TLI, DT, AC))
return findValueImpl(W, OffsetOk, Visited);
} else if (auto *C = dyn_cast<Constant>(V)) {
if (Value *W = ConstantFoldConstant(C, *DL, TLI))
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index dcca7bb7205..82a15a654f4 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
@@ -890,7 +891,7 @@ void MemoryDependenceResults::getNonLocalPointerDependency(
return;
}
const DataLayout &DL = FromBB->getModule()->getDataLayout();
- PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);
+ PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
@@ -1647,15 +1648,17 @@ AnalysisKey MemoryDependenceAnalysis::Key;
MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
auto &AA = AM.getResult<AAManager>(F);
+ auto &AC = AM.getResult<AssumptionAnalysis>(F);
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
- return MemoryDependenceResults(AA, TLI, DT);
+ return MemoryDependenceResults(AA, AC, TLI, DT);
}
char MemoryDependenceWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
"Memory Dependence Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
@@ -1674,6 +1677,7 @@ void MemoryDependenceWrapperPass::releaseMemory() {
void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequiredTransitive<AAResultsWrapperPass>();
AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
@@ -1685,8 +1689,9 @@ unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+ auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- MemDep.emplace(AA, TLI, DT);
+ MemDep.emplace(AA, AC, TLI, DT);
return false;
}
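The same three-step wiring recurs in LazyValueInfo, Lint, and MemoryDependence above (and in ScalarEvolution below): register the initialization dependency, declare it in getAnalysisUsage, and fetch the per-function cache in runOnFunction. A hypothetical skeleton pass under the legacy pass manager, sketching only that wiring:

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
struct ExampleWrapperPass : public FunctionPass { // illustrative name
  static char ID;
  AssumptionCache *AC = nullptr;
  ExampleWrapperPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AssumptionCacheTracker>();     // declare the dependency
  }

  bool runOnFunction(Function &F) override {
    // Fetch the per-function cache from the tracker.
    AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    return false;
  }
};
} // end anonymous namespace
char ExampleWrapperPass::ID = 0;
// Plus, inside the pass's INITIALIZE_PASS_BEGIN/END block:
//   INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
// Under the new pass manager the equivalent is a single call:
//   auto &AC = AM.getResult<AssumptionAnalysis>(F);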
diff --git a/llvm/lib/Analysis/PHITransAddr.cpp b/llvm/lib/Analysis/PHITransAddr.cpp
index 6e9b9e10da4..84ecd4ab980 100644
--- a/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/llvm/lib/Analysis/PHITransAddr.cpp
@@ -227,7 +227,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// Simplify the GEP to handle 'gep x, 0' -> x etc.
if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(),
- GEPOps, DL, TLI, DT)) {
+ GEPOps, DL, TLI, DT, AC)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
@@ -276,7 +276,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT, AC)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -367,7 +367,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
SmallVectorImpl<Instruction*> &NewInsts) {
// See if we have a version of this value already available and dominating
// PredBB. If so, there is no need to insert a new instance of it.
- PHITransAddr Tmp(InVal, DL);
+ PHITransAddr Tmp(InVal, DL, AC);
if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT, /*MustDominate=*/true))
return Tmp.getAddr();
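PHITransAddr now carries the cache so the SimplifyGEPInst and SimplifyAddInst calls it makes during translation can see assumptions. A hedged usage sketch mirroring the call sites above (helper name illustrative):

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/Dominators.h"

using namespace llvm;

static Value *translatePtr(Value *Ptr, BasicBlock *CurBB, BasicBlock *PredBB,
                           const DataLayout &DL, AssumptionCache *AC,
                           const DominatorTree &DT) {
  PHITransAddr Address(Ptr, DL, AC);  // AC is the new third parameter
  // PHITranslateValue returns false on success (note the '!' at the call
  // sites above); getAddr() then gives the pointer as seen in PredBB.
  if (!Address.PHITranslateValue(CurBB, PredBB, &DT, /*MustDominate=*/false))
    return Address.getAddr();
  return nullptr;
}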
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 3a99f2a2157..7962222a210 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -64,8 +64,8 @@
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -1212,7 +1212,6 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -1599,7 +1598,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// these to prove lack of overflow. Use this fact to avoid
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AffectedMap.empty()) {
+ !AC.assumptions().empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1665,7 +1664,6 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -1835,7 +1833,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AffectedMap.empty()) {
+ !AC.assumptions().empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1893,7 +1891,6 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -2447,7 +2444,6 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2740,7 +2736,6 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2861,7 +2856,6 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3042,7 +3036,6 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -3198,7 +3191,6 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3300,7 +3292,6 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3501,40 +3492,9 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
ExprValueMap[Stripped].insert({V, Offset});
}
}
-
- // If this value is an instruction or an argument, and might be affected by
- // an assumption, and its SCEV to the AffectedMap.
- if (isa<Instruction>(V) || isa<Argument>(V)) {
- for (auto *U : V->users()) {
- auto *II = dyn_cast<IntrinsicInst>(U);
- if (!II)
- continue;
- if (II->getIntrinsicID() != Intrinsic::assume)
- continue;
-
- AffectedMap[S].insert(II);
- }
- }
-
return S;
}
-// If one of this SCEV's operands is in the AffectedMap (meaning that it might
-// be affected by an assumption), then this SCEV might be affected by the same
-// assumption.
-void ScalarEvolution::addAffectedFromOperands(const SCEV *S) {
- if (auto *NS = dyn_cast<SCEVNAryExpr>(S))
- for (auto *Op : NS->operands()) {
- auto AMI = AffectedMap.find(Op);
- if (AMI == AffectedMap.end())
- continue;
-
- auto &ISet = AffectedMap[S];
- AMI = AffectedMap.find(Op);
- ISet.insert(AMI->second.begin(), AMI->second.end());
- }
-}
-
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
@@ -4324,7 +4284,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT))
+ if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC))
if (LI.replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -4512,7 +4472,7 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0,
+ computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC,
nullptr, &DT);
return Zeros.countTrailingOnes();
}
@@ -4683,14 +4643,14 @@ ScalarEvolution::getRange(const SCEV *S,
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, nullptr, &DT);
+ computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT);
if (Ones != ~Zeros + 1)
ConservativeResult =
ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
} else {
assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
"generalize as needed!");
- unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, nullptr, &DT);
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
if (NS > 1)
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
@@ -5179,7 +5139,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(),
- 0, nullptr, &DT);
+ 0, &AC, nullptr, &DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -6374,7 +6334,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
// bitwidth(K) iterations.
Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
bool KnownZero, KnownOne;
- ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0,
+ ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0, nullptr,
Predecessor->getTerminator(), &DT);
auto *Ty = cast<IntegerType>(RHS->getType());
if (KnownZero)
@@ -7966,23 +7926,16 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- auto CheckAssumptions = [&](const SCEV *S) {
- auto AMI = AffectedMap.find(S);
- if (AMI != AffectedMap.end())
- for (auto *Assume : AMI->second) {
- auto *CI = cast<CallInst>(Assume);
- if (!DT.dominates(CI, Latch->getTerminator()))
- continue;
-
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
-
- return false;
- };
+ for (auto &AssumeVH : AC.assumptions()) {
+ if (!AssumeVH)
+ continue;
+ auto *CI = cast<CallInst>(AssumeVH);
+ if (!DT.dominates(CI, Latch->getTerminator()))
+ continue;
- if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
- return true;
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
// If the loop is not reachable from the entry block, we risk running into an
// infinite loop as we walk up into the dom tree. These loops do not matter
@@ -8067,23 +8020,16 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- auto CheckAssumptions = [&](const SCEV *S) {
- auto AMI = AffectedMap.find(S);
- if (AMI != AffectedMap.end())
- for (auto *Assume : AMI->second) {
- auto *CI = cast<CallInst>(Assume);
- if (!DT.dominates(CI, L->getHeader()))
- continue;
-
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
-
- return false;
- };
+ for (auto &AssumeVH : AC.assumptions()) {
+ if (!AssumeVH)
+ continue;
+ auto *CI = cast<CallInst>(AssumeVH);
+ if (!DT.dominates(CI, L->getHeader()))
+ continue;
- if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
- return true;
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
return false;
}
@@ -9536,8 +9482,9 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
- DominatorTree &DT, LoopInfo &LI)
- : F(F), TLI(TLI), DT(DT), LI(LI),
+ AssumptionCache &AC, DominatorTree &DT,
+ LoopInfo &LI)
+ : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
CouldNotCompute(new SCEVCouldNotCompute()),
WalkingBEDominatingConds(false), ProvingSplitPredicate(false),
ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64),
@@ -9559,7 +9506,7 @@ ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
}
ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
- : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), DT(Arg.DT),
+ : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
ValueExprMap(std::move(Arg.ValueExprMap)),
PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
@@ -10030,7 +9977,7 @@ void ScalarEvolution::verify() const {
// Gather stringified backedge taken counts for all loops using a fresh
// ScalarEvolution object.
- ScalarEvolution SE2(F, TLI, DT, LI);
+ ScalarEvolution SE2(F, TLI, AC, DT, LI);
for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);
@@ -10071,6 +10018,7 @@ AnalysisKey ScalarEvolutionAnalysis::Key;
ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
FunctionAnalysisManager &AM) {
return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
+ AM.getResult<AssumptionAnalysis>(F),
AM.getResult<DominatorTreeAnalysis>(F),
AM.getResult<LoopAnalysis>(F));
}
@@ -10083,6 +10031,7 @@ ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
@@ -10097,6 +10046,7 @@ ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
SE.reset(new ScalarEvolution(
F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
+ getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
return false;
@@ -10117,6 +10067,7 @@ void ScalarEvolutionWrapperPass::verifyAnalysis() const {
void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addRequiredTransitive<AssumptionCacheTracker>();
AU.addRequiredTransitive<LoopInfoWrapperPass>();
AU.addRequiredTransitive<DominatorTreeWrapperPass>();
AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
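With the AffectedMap bookkeeping deleted, ScalarEvolution reaches assumptions only through the AssumptionCache reference it now stores, so the cache joins the constructor signature between TLI and DT. A standalone construction sketch under the post-patch signature (all names local and illustrative; SE holds references, so the analyses must outlive it):

#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"

using namespace llvm;

void analyzeFunction(Function &F) {
  TargetLibraryInfoImpl TLII(Triple(F.getParent()->getTargetTriple()));
  TargetLibraryInfo TLI(TLII);
  AssumptionCache AC(F);      // lazily collects @llvm.assume calls in F
  DominatorTree DT(F);
  LoopInfo LI(DT);
  ScalarEvolution SE(F, TLI, AC, DT, LI); // AC is the new third argument
  // Trip-count and loop-guard queries can now use any dominating
  // @llvm.assume, via the AC.assumptions() loops shown in the hunks above.
}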
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index c558bba5740..d15a7dbd20e 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1800,7 +1800,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// so narrow phis can reuse them.
for (PHINode *Phi : Phis) {
auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
- if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT))
+ if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
return V;
if (!SE.isSCEVable(PN->getType()))
return nullptr;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 601fef85239..950a2fbcff9 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -15,7 +15,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Loads.h"
@@ -73,6 +73,7 @@ namespace {
// figuring out if we can use it.
struct Query {
const DataLayout &DL;
+ AssumptionCache *AC;
const Instruction *CxtI;
const DominatorTree *DT;
@@ -88,11 +89,12 @@ struct Query {
std::array<const Value *, MaxDepth> Excluded;
unsigned NumExcluded;
- Query(const DataLayout &DL, const Instruction *CxtI, const DominatorTree *DT)
- : DL(DL), CxtI(CxtI), DT(DT), NumExcluded(0) {}
+ Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT)
+ : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
Query(const Query &Q, const Value *NewExcl)
- : DL(Q.DL), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
+ : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
Excluded = Q.Excluded;
Excluded[NumExcluded++] = NewExcl;
assert(NumExcluded <= Excluded.size());
@@ -128,15 +130,15 @@ static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
::computeKnownBits(V, KnownZero, KnownOne, Depth,
- Query(DL, safeCxtI(V, CxtI), DT));
+ Query(DL, AC, safeCxtI(V, CxtI), DT));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
const DataLayout &DL,
- const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
assert(LHS->getType() == RHS->getType() &&
"LHS and RHS should have the same type");
@@ -145,8 +147,8 @@ bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, CxtI, DT);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, CxtI, DT);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
@@ -155,10 +157,10 @@ static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
::ComputeSignBit(V, KnownZero, KnownOne, Depth,
- Query(DL, safeCxtI(V, CxtI), DT));
+ Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
@@ -166,51 +168,58 @@ static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
bool OrZero,
- unsigned Depth, const Instruction *CxtI,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
const DominatorTree *DT) {
return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
- Query(DL, safeCxtI(V, CxtI), DT));
+ Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI, const DominatorTree *DT) {
- return ::isKnownNonZero(V, Depth, Query(DL, safeCxtI(V, CxtI), DT));
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
- unsigned Depth, const Instruction *CxtI,
+ unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
bool NonNegative, Negative;
- ComputeSignBit(V, NonNegative, Negative, DL, Depth, CxtI, DT);
+ ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
return NonNegative;
}
bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI, const DominatorTree *DT) {
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
if (auto *CI = dyn_cast<ConstantInt>(V))
return CI->getValue().isStrictlyPositive();
// TODO: We're doing two recursive queries here. We should factor this such
// that only a single query is needed.
- return isKnownNonNegative(V, DL, Depth, CxtI, DT) &&
- isKnownNonZero(V, DL, Depth, CxtI, DT);
+ return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
+ isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI, const DominatorTree *DT) {
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
bool NonNegative, Negative;
- ComputeSignBit(V, NonNegative, Negative, DL, Depth, CxtI, DT);
+ ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
return Negative;
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
- const DataLayout &DL, const Instruction *CxtI,
+ const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
- return ::isKnownNonEqual(V1, V2, Query(DL, safeCxtI(V1, safeCxtI(V2, CxtI)),
+ return ::isKnownNonEqual(V1, V2, Query(DL, AC,
+ safeCxtI(V1, safeCxtI(V2, CxtI)),
DT));
}
@@ -219,19 +228,20 @@ static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
const DataLayout &DL,
- unsigned Depth, const Instruction *CxtI,
- const DominatorTree *DT) {
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT) {
return ::MaskedValueIsZero(V, Mask, Depth,
- Query(DL, safeCxtI(V, CxtI), DT));
+ Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
const Query &Q);
unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
- unsigned Depth, const Instruction *CxtI,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
const DominatorTree *DT) {
- return ::ComputeNumSignBits(V, Depth, Query(DL, safeCxtI(V, CxtI), DT));
+ return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
@@ -511,33 +521,36 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
const Query &Q) {
// Use of assumptions is context-sensitive. If we don't have a context, we
// cannot use them!
- if (!Q.CxtI)
+ if (!Q.AC || !Q.CxtI)
return;
unsigned BitWidth = KnownZero.getBitWidth();
- for (auto *U : V->users()) {
- auto *II = dyn_cast<IntrinsicInst>(U);
- if (!II)
- continue;
- if (II->getIntrinsicID() != Intrinsic::assume)
+ for (auto &AssumeVH : Q.AC->assumptions()) {
+ if (!AssumeVH)
continue;
- if (Q.isExcluded(II))
+ CallInst *I = cast<CallInst>(AssumeVH);
+ assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
+ "Got assumption for the wrong function!");
+ if (Q.isExcluded(I))
continue;
- Value *Arg = II->getArgOperand(0);
+ // Warning: This loop can end up being somewhat performance sensitive.
+ // We're running this loop once for each value queried, resulting in a
+ // runtime of ~O(#assumes * #values).
- if (Arg == V && isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
+ "must be an assume intrinsic");
+
+ Value *Arg = I->getArgOperand(0);
+
+ if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
KnownZero.clearAllBits();
KnownOne.setAllBits();
return;
}
- // Note that the patterns below need to be kept in sync with the code
- // in InstCombiner::visitCallInst that adds relevant values to each
- // assume's operand bundles.
-
// The remaining tests are all recursive, so bail out if we hit the limit.
if (Depth == MaxDepth)
continue;
@@ -551,20 +564,20 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
ConstantInt *C;
// assume(v = a)
if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
KnownZero |= RHSKnownZero;
KnownOne |= RHSKnownOne;
// assume(v & b = a)
} else if (match(Arg,
m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
- computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
// For those bits in the mask that are known to be one, we can propagate
// known bits from the RHS to V.
@@ -574,11 +587,11 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
- computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
// For those bits in the mask that are known to be one, we can propagate
// inverted known bits from the RHS to V.
@@ -588,11 +601,11 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg,
m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
- computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
// For those bits in B that are known to be zero, we can propagate known
// bits from the RHS to V.
@@ -602,11 +615,11 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
- computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
// For those bits in B that are known to be zero, we can propagate
// inverted known bits from the RHS to V.
@@ -616,11 +629,11 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg,
m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
- computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
// For those bits in B that are known to be zero, we can propagate known
// bits from the RHS to V. For those bits in B that are known to be one,
@@ -633,11 +646,11 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
- computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
// For those bits in B that are known to be zero, we can propagate
// inverted known bits from the RHS to V. For those bits in B that are
@@ -650,9 +663,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them to known
// bits in V shifted to the right by C.
KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
@@ -661,9 +674,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
} else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them inverted
// to known bits in V shifted to the right by C.
KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
@@ -674,9 +687,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
m_AShr(m_V, m_ConstantInt(C))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them to known
// bits in V shifted to the right by C.
KnownZero |= RHSKnownZero << C->getZExtValue();
@@ -687,9 +700,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
m_AShr(m_V, m_ConstantInt(C)))),
m_Value(A))) &&
Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// For those bits in RHS that are known, we can propagate them inverted
// to known bits in V shifted to the right by C.
KnownZero |= RHSKnownOne << C->getZExtValue();
@@ -697,9 +710,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v >=_s c) where c is non-negative
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_SGE &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
if (RHSKnownZero.isNegative()) {
// We know that the sign bit is zero.
@@ -708,9 +721,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v >_s c) where c is at least -1.
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_SGT &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
// We know that the sign bit is zero.
@@ -719,9 +732,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v <=_s c) where c is negative
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_SLE &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
if (RHSKnownOne.isNegative()) {
// We know that the sign bit is one.
@@ -730,9 +743,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v <_s c) where c is non-positive
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_SLT &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
// We know that the sign bit is one.
@@ -741,9 +754,9 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v <=_u c)
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_ULE &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// Whatever high bits in c are zero are known to be zero.
KnownZero |=
@@ -751,13 +764,13 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
// assume(v <_u c)
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
Pred == ICmpInst::ICMP_ULT &&
- isValidAssumeForContext(II, Q.CxtI, Q.DT)) {
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
- computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, II));
+ computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
// Whatever high bits in c are zero are known to be zero (if c is a power
// of 2, then one more).
- if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, II)))
+ if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
KnownZero |=
APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
else
@@ -3118,7 +3131,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
- // TODO: Acquire a DominatorTree and use it.
+ // TODO: Acquire a DominatorTree and AssumptionCache and use them.
if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
V = Simplified;
continue;
@@ -3403,6 +3416,7 @@ bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
// Multiplying n * m significant bits yields a result of n + m significant
@@ -3416,8 +3430,10 @@ OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
APInt LHSKnownOne(BitWidth, 0);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, CxtI, DT);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, CxtI, DT);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
+ DT);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
+ DT);
// Note that underestimating the number of zero bits gives a more
// conservative answer.
unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
@@ -3451,15 +3467,16 @@ OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
bool LHSKnownNonNegative, LHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
- CxtI, DT);
+ AC, CxtI, DT);
if (LHSKnownNonNegative || LHSKnownNegative) {
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
- CxtI, DT);
+ AC, CxtI, DT);
if (LHSKnownNegative && RHSKnownNegative) {
// The sign bit is set in both cases: this MUST overflow.
@@ -3481,6 +3498,7 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
const Value *RHS,
const AddOperator *Add,
const DataLayout &DL,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
if (Add && Add->hasNoSignedWrap()) {
@@ -3490,9 +3508,9 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
bool LHSKnownNonNegative, LHSKnownNegative;
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
- CxtI, DT);
+ AC, CxtI, DT);
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
- CxtI, DT);
+ AC, CxtI, DT);
if ((LHSKnownNonNegative && RHSKnownNegative) ||
(LHSKnownNegative && RHSKnownNonNegative)) {
@@ -3514,7 +3532,7 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
bool AddKnownNonNegative, AddKnownNegative;
ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
- /*Depth=*/0, CxtI, DT);
+ /*Depth=*/0, AC, CxtI, DT);
if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
(AddKnownNegative && LHSOrRHSKnownNegative)) {
return OverflowResult::NeverOverflows;
@@ -3588,18 +3606,20 @@ bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
- Add, DL, CxtI, DT);
+ Add, DL, AC, CxtI, DT);
}
OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
- return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, CxtI, DT);
+ return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
@@ -4130,7 +4150,8 @@ SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
static bool isTruePredicate(CmpInst::Predicate Pred,
const Value *LHS, const Value *RHS,
const DataLayout &DL, unsigned Depth,
- const Instruction *CxtI, const DominatorTree *DT) {
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
return true;
@@ -4168,7 +4189,7 @@ static bool isTruePredicate(CmpInst::Predicate Pred,
match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
unsigned BitWidth = CA->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, CxtI, DT);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
return true;
@@ -4193,24 +4214,25 @@ static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
const Value *ARHS, const Value *BLHS,
const Value *BRHS, const DataLayout &DL,
- unsigned Depth, const Instruction *CxtI,
- const DominatorTree *DT) {
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT) {
switch (Pred) {
default:
return None;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
- if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, CxtI,
+ if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
DT) &&
- isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, CxtI, DT))
+ isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
return true;
return None;
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
- if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, CxtI, DT) &&
- isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, CxtI, DT))
+ if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ DT) &&
+ isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
return true;
return None;
}
@@ -4274,7 +4296,8 @@ isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL, bool InvertAPred,
- unsigned Depth, const Instruction *CxtI,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
const DominatorTree *DT) {
// A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
if (LHS->getType() != RHS->getType())
@@ -4327,8 +4350,8 @@ Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
}
if (APred == BPred)
- return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, CxtI,
- DT);
+ return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
+ CxtI, DT);
return None;
}
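As an end-to-end illustration of what the AC parameter buys in ValueTracking: if a dominating call to @llvm.assume asserts the condition (x & 7) == 0, the assume(v & b = a) pattern handled above lets computeKnownBits report the low three bits of x as zero. A hypothetical driver, assuming the post-patch signature:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void queryLowBits(const Value *X, const Instruction *CxtI,
                         const DataLayout &DL, AssumptionCache *AC,
                         const DominatorTree *DT) {
  unsigned BW = X->getType()->getIntegerBitWidth();
  APInt KnownZero(BW, 0), KnownOne(BW, 0);
  computeKnownBits(X, KnownZero, KnownOne, DL, /*Depth=*/0, AC, CxtI, DT);
  // With the assume described above, KnownZero now has its low three bits
  // set; with AC == nullptr the assume is invisible and nothing is learned.
}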