author    Chandler Carruth <chandlerc@gmail.com>    2015-06-17 07:18:54 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2015-06-17 07:18:54 +0000
commit    ac80dc753228af5f1b415bc9fa9834aeba19122b (patch)
tree      5844990db3db3823a6a80089477dfa3b9f52dea2 /llvm/lib/Transforms/Scalar
parent    c41404a09044d241774bf70afe9013dbde751f64 (diff)
[PM/AA] Remove the Location typedef from the AliasAnalysis class now
that it is its own entity in the form of MemoryLocation, and update all
the callers.

This is an entirely mechanical change. References to "Location" within
AA subclasses become "MemoryLocation", and elsewhere
"AliasAnalysis::Location" becomes "MemoryLocation". Hope that helps
out-of-tree folks update.

llvm-svn: 239885
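For out-of-tree passes picking up this change, the update is purely textual.
The sketch below is illustrative only and is not part of the patch; the helper
names are hypothetical, and it assumes the in-tree headers that provide
MemoryLocation at this revision (llvm/Analysis/MemoryLocation.h, also pulled
in through llvm/Analysis/AliasAnalysis.h).

    // Illustrative sketch only -- not from the patch. Shows how a caller that
    // previously used the AliasAnalysis::Location typedef spells the same
    // thing after this commit.
    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Before this commit:
    //   AliasAnalysis::Location Loc = MemoryLocation::get(SI);
    // After it, the standalone class is named directly:
    static MemoryLocation getStoreLocation(StoreInst *SI) {
      return MemoryLocation::get(SI);
    }

    // Constructing a location from a raw pointer and size is unchanged apart
    // from the type name:
    //   AliasAnalysis::Location Loc(Ptr, Size);   // before
    static MemoryLocation getRawLocation(const Value *Ptr, uint64_t Size) {
      return MemoryLocation(Ptr, Size);            // after
    }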
Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r--  llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp  | 46
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp    |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp       | 11
-rw-r--r--  llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp | 22
-rw-r--r--  llvm/lib/Transforms/Scalar/Sink.cpp                  |  2
5 files changed, 40 insertions, 43 deletions
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index eb48a766a2c..9f4a9c1137a 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -78,7 +78,7 @@ namespace {
bool runOnBasicBlock(BasicBlock &BB);
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
- void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+ void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL);
@@ -194,37 +194,37 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
-static AliasAnalysis::Location
-getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return MemoryLocation::get(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
- AliasAnalysis::Location Loc = MemoryLocation::getForDest(MI);
+ MemoryLocation Loc = MemoryLocation::getForDest(MI);
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (!II) return AliasAnalysis::Location();
+ if (!II)
+ return MemoryLocation();
switch (II->getIntrinsicID()) {
- default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+ default:
+ return MemoryLocation(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
- return AliasAnalysis::Location(II->getArgOperand(0));
+ return MemoryLocation(II->getArgOperand(0));
case Intrinsic::lifetime_end: {
uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- return AliasAnalysis::Location(II->getArgOperand(1), Len);
+ return MemoryLocation(II->getArgOperand(1), Len);
}
}
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
-static AliasAnalysis::Location
-getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
"Unknown instruction case");
@@ -232,7 +232,7 @@ getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
// instructions (memcpy/memmove).
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
return MemoryLocation::getForSource(MTI);
- return AliasAnalysis::Location();
+ return MemoryLocation();
}
@@ -333,8 +333,8 @@ namespace {
/// completely overwrites a store to the 'Earlier' location.
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
-static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
- const AliasAnalysis::Location &Earlier,
+static OverwriteResult isOverwrite(const MemoryLocation &Later,
+ const MemoryLocation &Earlier,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
int64_t &EarlierOff, int64_t &LaterOff) {
@@ -441,11 +441,11 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
- const AliasAnalysis::Location &InstStoreLoc,
+ const MemoryLocation &InstStoreLoc,
Instruction *DepWrite, AliasAnalysis &AA) {
// Self reads can only happen for instructions that read memory. Get the
// location read.
- AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
+ MemoryLocation InstReadLoc = getLocForRead(Inst, AA);
if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
@@ -459,7 +459,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
// Here we don't know if A/B may alias, but we do know that B/B are must
// aliases, so removing the first memcpy is safe (assuming it writes <= #
// bytes as the second one.
- AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+ MemoryLocation DepReadLoc = getLocForRead(DepWrite, AA);
if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
return false;
@@ -525,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
}
// Figure out what location is being stored to.
- AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+ MemoryLocation Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
if (!Loc.Ptr)
@@ -540,7 +540,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
//
// Find out what memory location the dependent instruction stores.
Instruction *DepWrite = InstDep.getInst();
- AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+ MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it isn't a size, bail out.
if (!DepLoc.Ptr)
break;
@@ -645,7 +645,7 @@ static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
bool DSE::HandleFree(CallInst *F) {
bool MadeChange = false;
- AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+ MemoryLocation Loc = MemoryLocation(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
const DataLayout &DL = F->getModule()->getDataLayout();
@@ -809,7 +809,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
- AliasAnalysis::Location LoadedLoc;
+ MemoryLocation LoadedLoc;
// If we encounter a use of the pointer, it is no longer considered dead
if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
@@ -845,7 +845,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
-void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
@@ -864,8 +864,8 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// Remove objects that could alias LoadedLoc.
DeadStackObjects.remove_if([&](Value *I) {
// See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(
- I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
+ MemoryLocation StackLoc(I,
+ getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return !AA->isNoAlias(StackLoc, LoadedLoc);
});
}
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index f92ecd4efda..cf4d2c90b73 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -844,7 +844,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
// operand in the store. Store to &A[i] of 100 will always return may alias
// with store of &A[100], we need to StoreLoc to be "A" with size of 100,
// which will then no-alias a store to &A[100].
- AliasAnalysis::Location StoreLoc(Ptr, AccessSize);
+ MemoryLocation StoreLoc(Ptr, AccessSize);
for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
++BI)
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index f54c8ae680b..85012afc80a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -510,7 +510,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// Check that nothing touches the dest of the "copy" between
// the call and the store.
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- AliasAnalysis::Location StoreLoc = MemoryLocation::get(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
E = C; I != E; --I) {
if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
@@ -997,7 +997,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
}
}
- AliasAnalysis::Location SrcLoc = MemoryLocation::getForSource(M);
+ MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
M, M->getParent());
@@ -1075,10 +1075,9 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
- MemDepResult DepInfo =
- MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
- true, CS.getInstruction(),
- CS.getInstruction()->getParent());
+ MemDepResult DepInfo = MD->getPointerDependencyFrom(
+ MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
+ CS.getInstruction()->getParent());
if (!DepInfo.isClobber())
return false;
diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 776dfb4d487..243db8d70ca 100644
--- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -144,9 +144,8 @@ private:
// Routines for sinking stores
StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
- bool isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location Loc);
+ bool isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End, MemoryLocation Loc);
bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
bool mergeStores(BasicBlock *BB);
// The mergeLoad/Store algorithms could have Size0 * Size1 complexity,
@@ -241,7 +240,7 @@ bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
bool MergedLoadStoreMotion::isLoadHoistBarrierInRange(const Instruction& Start,
const Instruction& End,
LoadInst* LI) {
- AliasAnalysis::Location Loc = MemoryLocation::get(LI);
+ MemoryLocation Loc = MemoryLocation::get(LI);
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::Mod);
}
@@ -266,8 +265,8 @@ LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB1,
LoadInst *Load1 = dyn_cast<LoadInst>(Inst);
BasicBlock *BB0 = Load0->getParent();
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Load0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Load1);
+ MemoryLocation Loc0 = MemoryLocation::get(Load0);
+ MemoryLocation Loc1 = MemoryLocation::get(Load1);
if (AA->isMustAlias(Loc0, Loc1) && Load0->isSameOperationAs(Load1) &&
!isLoadHoistBarrierInRange(BB1->front(), *Load1, Load1) &&
!isLoadHoistBarrierInRange(BB0->front(), *Load0, Load0)) {
@@ -400,10 +399,9 @@ bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
/// happening it is considered a sink barrier.
///
-bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location
- Loc) {
+bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End,
+ MemoryLocation Loc) {
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::ModRef);
}
@@ -425,8 +423,8 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
StoreInst *Store1 = cast<StoreInst>(Inst);
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Store0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Store1);
+ MemoryLocation Loc0 = MemoryLocation::get(Store0);
+ MemoryLocation Loc1 = MemoryLocation::get(Store1);
if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) &&
!isStoreSinkBarrierInRange(*(std::next(BasicBlock::iterator(Store1))),
BB1->back(), Loc1) &&
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index 078c6a921a0..f49f4eaaedc 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -163,7 +163,7 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
}
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
- AliasAnalysis::Location Loc = MemoryLocation::get(L);
+ MemoryLocation Loc = MemoryLocation::get(L);
for (Instruction *S : Stores)
if (AA->getModRefInfo(S, Loc) & AliasAnalysis::Mod)
return false;