summary | refs | log | tree | commit | diff | stats
path: root/llvm/lib/Transforms/Scalar
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r-- llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 8
-rw-r--r-- llvm/lib/Transforms/Scalar/GVN.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp | 9
-rw-r--r-- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 17
-rw-r--r-- llvm/lib/Transforms/Scalar/Sink.cpp | 6
5 files changed, 21 insertions, 21 deletions
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 69112f3cee2..469930ca6a1 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -834,7 +834,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
continue;
}
- if (auto CS = CallSite(&*BBI)) {
+ if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
if (isAllocLikeFn(&*BBI, TLI))
@@ -842,15 +842,15 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
// If this call does not access memory, it can't be loading any of our
// pointers.
- if (AA->doesNotAccessMemory(CS))
+ if (AA->doesNotAccessMemory(Call))
continue;
// If the call might load from any of our allocas, then any store above
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
- return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
- BB.getParent())));
+ return isRefSet(AA->getModRefInfo(
+ Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
});
// If all of the allocas were clobbered by the call then we're not going
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 440ea4a5bc7..04ed914b86c 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -437,7 +437,7 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
// Non-local case.
const MemoryDependenceResults::NonLocalDepInfo &deps =
- MD->getNonLocalCallDependency(CallSite(C));
+ MD->getNonLocalCallDependency(C);
// FIXME: Move the checking logic to MemDep!
CallInst* cdep = nullptr;
diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index f48d3cc098a..83861b98fbd 100644
--- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -360,10 +360,11 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() {
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
assert(I != nullptr && "Null instruction found!");
// Check function call safety
- if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
- LLVM_DEBUG(dbgs() << " Unsafe call site found.\n");
- return false;
- }
+ if (auto *Call = dyn_cast<CallBase>(I))
+ if (!AA->doesNotAccessMemory(Call)) {
+ LLVM_DEBUG(dbgs() << " Unsafe call site found.\n");
+ return false;
+ }
// Avoid loops with possiblity of throw
if (I->mayThrow()) {
LLVM_DEBUG(dbgs() << " May throw instruction found in loop body\n");
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2bdecb48446..ced923d6973 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -546,8 +546,8 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
// Memory locations of lifted instructions.
SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
- // Lifted callsites.
- SmallVector<ImmutableCallSite, 8> CallSites;
+ // Lifted calls.
+ SmallVector<const CallBase *, 8> Calls;
const MemoryLocation LoadLoc = MemoryLocation::get(LI);
@@ -565,10 +565,9 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
});
if (!NeedLift)
- NeedLift =
- llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
- return isModOrRefSet(AA.getModRefInfo(C, CS));
- });
+ NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
+ return isModOrRefSet(AA.getModRefInfo(C, Call));
+ });
}
if (!NeedLift)
@@ -579,12 +578,12 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
// none of them may modify its source.
if (isModSet(AA.getModRefInfo(C, LoadLoc)))
return false;
- else if (auto CS = ImmutableCallSite(C)) {
+ else if (const auto *Call = dyn_cast<CallBase>(C)) {
// If we can't lift this before P, it's game over.
- if (isModOrRefSet(AA.getModRefInfo(P, CS)))
+ if (isModOrRefSet(AA.getModRefInfo(P, Call)))
return false;
- CallSites.push_back(CS);
+ Calls.push_back(Call);
} else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
// If we can't lift this before P, it's game over.
auto ML = MemoryLocation::get(C);
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index d1cdfabb0cc..c99da8f0737 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -76,14 +76,14 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
Inst->mayThrow())
return false;
- if (auto CS = CallSite(Inst)) {
+ if (auto *Call = dyn_cast<CallBase>(Inst)) {
// Convergent operations cannot be made control-dependent on additional
// values.
- if (CS.hasFnAttr(Attribute::Convergent))
+ if (Call->hasFnAttr(Attribute::Convergent))
return false;
for (Instruction *S : Stores)
- if (isModSet(AA.getModRefInfo(S, CS)))
+ if (isModSet(AA.getModRefInfo(S, Call)))
return false;
}
OpenPOWER on IntegriCloud