Diffstat (limited to 'llvm/lib/Analysis/MemorySSA.cpp')
-rw-r--r--  llvm/lib/Analysis/MemorySSA.cpp | 153
1 file changed, 83 insertions(+), 70 deletions(-)
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index a1c573a1a79..ea68faa7fc7 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -251,10 +251,10 @@ struct ClobberAlias {
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
-static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
- const MemoryLocation &UseLoc,
- const Instruction *UseInst,
- AliasAnalysis &AA) {
+template <typename AliasAnalysisType>
+static ClobberAlias
+instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
+ const Instruction *UseInst, AliasAnalysisType &AA) {
Instruction *DefInst = MD->getMemoryInst();
assert(DefInst && "Defining instruction not actually an instruction");
const auto *UseCall = dyn_cast<CallBase>(UseInst);
@@ -299,10 +299,11 @@ static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
return {isModSet(I), AR};
}
+template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
- AliasAnalysis &AA) {
+ AliasAnalysisType &AA) {
// FIXME: This is a temporary hack to allow a single instructionClobbersQuery
// to exist while MemoryLocOrCall is pushed through places.
if (UseMLOC.IsCall)
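Both overloads of instructionClobbersQuery now take the alias-analysis type as a template parameter, so the same query code can run against AAResults or BatchAAResults. A minimal sketch of the pattern, using stand-in types rather than LLVM's:

    // Both alias-analysis flavors expose the same member functions, so a
    // single function template binds to either one at compile time: no
    // virtual dispatch, no duplicated query code.
    #include <iostream>

    enum AliasResult { NoAlias, MayAlias, MustAlias };

    struct PlainAA {                      // stands in for AAResults
      AliasResult alias(int A, int B) { return A == B ? MustAlias : MayAlias; }
    };

    struct BatchAA {                      // stands in for BatchAAResults
      PlainAA &Inner;
      AliasResult alias(int A, int B) { return Inner.alias(A, B); } // would cache
    };

    template <typename AliasAnalysisType>
    bool mayClobber(AliasAnalysisType &AA, int DefLoc, int UseLoc) {
      return AA.alias(DefLoc, UseLoc) != NoAlias;
    }

    int main() {
      PlainAA AA;
      BatchAA BAA{AA};
      std::cout << mayClobber(AA, 1, 1) << mayClobber(BAA, 1, 2) << '\n'; // 11
    }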
@@ -345,12 +346,12 @@ struct UpwardsMemoryQuery {
} // end anonymous namespace
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
- AliasAnalysis &AA) {
+ BatchAAResults &AA) {
Instruction *Inst = MD->getMemoryInst();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_end:
- return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
+ return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
default:
return false;
}
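The isMustAlias() convenience call is rewritten as a raw alias() comparison, presumably because the batch wrapper did not expose the convenience method at the time of this patch; the two spellings are equivalent. A sketch with a stand-in type:

    // isMustAlias is just sugar for alias(...) == MustAlias, so spelling
    // the comparison out preserves behavior exactly.
    enum AliasResult { NoAlias, MayAlias, MustAlias };

    struct AASketch {
      AliasResult alias(int A, int B) { return A == B ? MustAlias : MayAlias; }
      bool isMustAlias(int A, int B) { return alias(A, B) == MustAlias; }
    };

    int main() { AASketch AA; return AA.isMustAlias(1, 1) ? 0 : 1; }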
@@ -358,13 +359,14 @@ static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
return false;
}
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
+template <typename AliasAnalysisType>
+static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
- AA.pointsToConstantMemory(cast<LoadInst>(I)->
- getPointerOperand()));
+ AA.pointsToConstantMemory(MemoryLocation(
+ cast<LoadInst>(I)->getPointerOperand())));
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
@@ -380,10 +382,12 @@ static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
+
+template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
const MemoryLocation &StartLoc, const MemorySSA &MSSA,
- const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
+ const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
bool AllowImpreciseClobber = false) {
assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
@@ -473,7 +477,7 @@ namespace {
/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
-class ClobberWalker {
+template <class AliasAnalysisType> class ClobberWalker {
/// Save a few bytes by using unsigned instead of size_t.
using ListIndex = unsigned;
@@ -497,7 +501,7 @@ class ClobberWalker {
};
const MemorySSA &MSSA;
- AliasAnalysis &AA;
+ AliasAnalysisType &AA;
DominatorTree &DT;
UpwardsMemoryQuery *Query;
@@ -886,9 +890,10 @@ class ClobberWalker {
}
public:
- ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
+ ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
: MSSA(MSSA), AA(AA), DT(DT) {}
+ AliasAnalysisType *getAA() { return &AA; }
/// Finds the nearest clobber for the given query, optimizing phis if
/// possible.
MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
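The walker fixes its alias-analysis flavor at construction and hands the same instance back through the new getAA() accessor, which getClobberingMemoryAccessBase uses further down. A sketch of that shape, again with stand-in types:

    struct SomeAA {};                     // stands in for an AA class

    template <class AliasAnalysisType> class WalkerSketch {
      AliasAnalysisType &AA;              // flavor is fixed at construction
    public:
      explicit WalkerSketch(AliasAnalysisType &AA) : AA(AA) {}
      AliasAnalysisType *getAA() { return &AA; }
    };

    int main() {
      SomeAA AA;
      WalkerSketch<SomeAA> W(AA);
      return W.getAA() == &AA ? 0 : 1;    // callers reuse the same instance
    }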
@@ -944,12 +949,12 @@ struct RenamePassData {
namespace llvm {
-class MemorySSA::ClobberWalkerBase {
- ClobberWalker Walker;
+template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
+ ClobberWalker<AliasAnalysisType> Walker;
MemorySSA *MSSA;
public:
- ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+ ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
: Walker(*M, *A, *D), MSSA(M) {}
MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
@@ -966,19 +971,24 @@ public:
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
+template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
- ClobberWalkerBase *Walker;
+ ClobberWalkerBase<AliasAnalysisType> *Walker;
public:
- CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+ CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
: MemorySSAWalker(M), Walker(W) {}
~CachingWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+ return Walker->getClobberingMemoryAccessBase(MA, false);
+ }
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) override;
+ const MemoryLocation &Loc) override {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+ }
void invalidateInfo(MemoryAccess *MA) override {
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
@@ -986,19 +996,24 @@ public:
}
};
+template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
- ClobberWalkerBase *Walker;
+ ClobberWalkerBase<AliasAnalysisType> *Walker;
public:
- SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
+ SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
: MemorySSAWalker(M), Walker(W) {}
~SkipSelfWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
+ return Walker->getClobberingMemoryAccessBase(MA, true);
+ }
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) override;
+ const MemoryLocation &Loc) override {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+ }
void invalidateInfo(MemoryAccess *MA) override {
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
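Note that the getClobberingMemoryAccess overrides move from out-of-line definitions (deleted at the bottom of this diff) into the class body: once CachingWalker and SkipSelfWalker are class templates, in-class definitions are the simplest way to make each instantiation's code available. A sketch of why this works:

    // Member functions defined inside a class template are implicitly
    // instantiated with the class, so every specialization links without
    // needing out-of-line definitions per type argument.
    struct Base {
      virtual int get() const = 0;
      virtual ~Base() = default;
    };

    template <class T> struct Derived final : Base {
      T Val;
      explicit Derived(T V) : Val(V) {}
      int get() const override { return static_cast<int>(Val); } // in-class
    };

    int main() {
      Derived<int> A(2);
      Derived<double> B(3.0);
      return A.get() + B.get() - 5;       // 0: both instantiations link fine
    }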
@@ -1140,9 +1155,20 @@ void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
}
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
- : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
+ : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
SkipWalker(nullptr), NextID(0) {
- buildMemorySSA();
+ // Build MemorySSA using a batch alias analysis. This reuses the internal
+ // state that AA collects during an alias()/getModRefInfo() call. This is
+ // safe because there are no CFG changes while building MemorySSA, and it can
+ // significantly reduce the time spent by the compiler in AA, because we will
+ // make queries about all the instructions in the Function.
+ BatchAAResults BatchAA(*AA);
+ buildMemorySSA(BatchAA);
+ // Intentionally leave AA as nullptr while building so we don't accidentally
+ // use the non-batch AliasAnalysis.
+ this->AA = AA;
+ // Also create the walker here.
+ getWalker();
}
MemorySSA::~MemorySSA() {
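The new comment carries the key idea: BatchAAResults memoizes alias-analysis state across queries, which is sound only while the IR is frozen, as it is during construction. A minimal sketch of that caching idea, with stand-in types and a hypothetical Queries counter:

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <utility>

    enum AliasResult { NoAlias, MayAlias, MustAlias };

    struct UnderlyingAA {                 // the expensive analysis
      mutable unsigned Queries = 0;
      AliasResult alias(int A, int B) const {
        ++Queries;                        // pretend this walk is costly
        return A == B ? MustAlias : MayAlias;
      }
    };

    // Memoize alias queries; cached results stay valid only while the IR
    // is unchanged, exactly the situation while MemorySSA is being built.
    class BatchAASketch {
      const UnderlyingAA &AA;
      std::map<std::pair<int, int>, AliasResult> Cache;
    public:
      explicit BatchAASketch(const UnderlyingAA &AA) : AA(AA) {}
      AliasResult alias(int A, int B) {
        std::pair<int, int> Key = std::minmax(A, B); // alias() is symmetric
        auto It = Cache.find(Key);
        if (It != Cache.end())
          return It->second;
        return Cache[Key] = AA.alias(A, B);
      }
    };

    int main() {
      UnderlyingAA AA;
      BatchAASketch BAA(AA);
      for (int i = 0; i < 1000; ++i)
        BAA.alias(1, 2);                  // repeated queries hit the cache
      std::cout << AA.Queries << '\n';    // prints 1
    }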
@@ -1179,9 +1205,9 @@ namespace llvm {
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
- OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
+ OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, BatchAAResults *BAA,
DominatorTree *DT)
- : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}
+ : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
void optimizeUses();
@@ -1210,7 +1236,7 @@ private:
MemorySSA *MSSA;
MemorySSAWalker *Walker;
- AliasAnalysis *AA;
+ BatchAAResults *AA;
DominatorTree *DT;
};
@@ -1407,7 +1433,7 @@ void MemorySSA::placePHINodes(
createMemoryPhi(BB);
}
-void MemorySSA::buildMemorySSA() {
+void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
// We create an access to represent "live on entry", for things like
// arguments or users of globals, where the memory they use is defined before
// the beginning of the function. We do not actually insert it into the IR.
@@ -1429,7 +1455,7 @@ void MemorySSA::buildMemorySSA() {
AccessList *Accesses = nullptr;
DefsList *Defs = nullptr;
for (Instruction &I : B) {
- MemoryUseOrDef *MUD = createNewAccess(&I);
+ MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
if (!MUD)
continue;
@@ -1453,9 +1479,9 @@ void MemorySSA::buildMemorySSA() {
SmallPtrSet<BasicBlock *, 16> Visited;
renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
- CachingWalker *Walker = getWalkerImpl();
-
- OptimizeUses(this, Walker, AA, DT).optimizeUses();
+ ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
+ CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
+ OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
// Mark the uses in unreachable blocks as live on entry, so that they go
// somewhere.
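The WalkerBase/WalkerLocal pair above is stack-allocated: a throwaway walker over BatchAAResults drives use optimization during the build and is discarded along with the batch cache, while the persistent member walker (created via getWalker() in the constructor) keeps using plain AliasAnalysis, whose answers stay valid after later IR updates. A sketch of that lifetime split, with stand-in names:

    struct AA {};
    struct BatchAA { AA &Inner; };        // cache valid only while IR is frozen

    template <class T> struct Walker { T &Analysis; };

    void build(AA &Analysis) {
      BatchAA Batch{Analysis};
      Walker<BatchAA> Local{Batch};       // stack-local, used only during build
      (void)Local;
    }                                     // batch cache dies here, safely

    int main() { AA Analysis; build(Analysis); }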
@@ -1466,14 +1492,16 @@ void MemorySSA::buildMemorySSA() {
MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
-MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
+MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
if (Walker)
return Walker.get();
if (!WalkerBase)
- WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+ WalkerBase =
+ llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
- Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
+ Walker =
+ llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
return Walker.get();
}
@@ -1482,9 +1510,11 @@ MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
return SkipWalker.get();
if (!WalkerBase)
- WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+ WalkerBase =
+ llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
- SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
+ SkipWalker =
+ llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
return SkipWalker.get();
}
@@ -1603,7 +1633,7 @@ MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
MemoryAccess *Definition,
const MemoryUseOrDef *Template) {
assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
- MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
+ MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
assert(
NewAccess != nullptr &&
"Tried to create a memory access for a non-memory touching instruction");
@@ -1626,7 +1656,9 @@ static inline bool isOrdered(const Instruction *I) {
}
/// Helper function to create new memory accesses
+template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
+ AliasAnalysisType *AAP,
const MemoryUseOrDef *Template) {
// The assume intrinsic has a control dependency which we model by claiming
// that it writes arbitrarily. Ignore that fake memory dependency here.
@@ -1641,7 +1673,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
- ModRefInfo ModRef = AA->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, None);
bool DefCheck, UseCheck;
DefCheck = isModSet(ModRef) || isOrdered(I);
UseCheck = isRefSet(ModRef);
@@ -1649,7 +1681,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
#endif
} else {
// Find out what effect this instruction has on memory.
- ModRefInfo ModRef = AA->getModRefInfo(I, None);
+ ModRefInfo ModRef = AAP->getModRefInfo(I, None);
// The isOrdered check is used to ensure that volatiles end up as defs
// (atomics end up as ModRef right now anyway). Until we separate the
// ordering chain from the memory chain, this enables people to see at least
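createNewAccess stays a template defined in this .cpp file; that is fine because all of its callers, with AliasAnalysis from createDefinedAccess and with BatchAAResults from buildMemorySSA, live in the same translation unit, so each needed specialization is implicitly instantiated at its call site. A sketch with stand-in types:

    #include <iostream>

    struct PlainAA { int modRef() { return 1; } };
    struct BatchAA { int modRef() { return 2; } };

    template <typename AliasAnalysisType>
    int queryModRef(AliasAnalysisType &AA) { // defined only in this file
      return AA.modRef();
    }

    int main() {
      PlainAA A;
      BatchAA B;
      std::cout << queryModRef(A) + queryModRef(B) << '\n'; // 3
    }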
@@ -1702,7 +1734,7 @@ void MemorySSA::removeFromLookups(MemoryAccess *MA) {
MUD->setDefiningAccess(nullptr);
// Invalidate our walker's cache if necessary
if (!isa<MemoryUse>(MA))
- Walker->invalidateInfo(MA);
+ getWalker()->invalidateInfo(MA);
Value *MemoryInst;
if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
@@ -2175,7 +2207,9 @@ MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
-MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
+template <typename AliasAnalysisType>
+MemoryAccess *
+MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
if (isa<MemoryPhi>(StartingAccess))
return StartingAccess;
@@ -2212,9 +2246,10 @@ MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
return Clobber;
}
+template <typename AliasAnalysisType>
MemoryAccess *
-MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
- bool SkipSelf) {
+MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
+ MemoryAccess *MA, bool SkipSelf) {
auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
// If this is a MemoryPhi, we can't do anything.
if (!StartingAccess)
@@ -2240,7 +2275,7 @@ MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
UpwardsMemoryQuery Q(I, StartingAccess);
- if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
+ if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
StartingAccess->setOptimized(LiveOnEntry);
StartingAccess->setOptimizedAccessType(None);
@@ -2290,28 +2325,6 @@ MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
}
MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- return Walker->getClobberingMemoryAccessBase(MA, false);
-}
-
-MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) {
- return Walker->getClobberingMemoryAccessBase(MA, Loc);
-}
-
-MemoryAccess *
-MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
- return Walker->getClobberingMemoryAccessBase(MA, true);
-}
-
-MemoryAccess *
-MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
- const MemoryLocation &Loc) {
- return Walker->getClobberingMemoryAccessBase(MA, Loc);
-}
-
-MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
return Use->getDefiningAccess();