Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--   llvm/lib/Transforms/IPO/FunctionAttrs.cpp            | 11
-rw-r--r--   llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp   |  9
-rw-r--r--   llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp  |  8
-rw-r--r--   llvm/lib/Transforms/Scalar/GVN.cpp                   |  2
-rw-r--r--   llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp    |  9
-rw-r--r--   llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp       | 17
-rw-r--r--   llvm/lib/Transforms/Scalar/Sink.cpp                  |  6
-rw-r--r--   llvm/lib/Transforms/Utils/InlineFunction.cpp         |  8
8 files changed, 33 insertions, 37 deletions
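The change is mechanical across all eight files: each use of the CallSite/ImmutableCallSite wrapper is replaced by a direct dyn_cast/cast to CallBase, and the alias-analysis queries are handed the CallBase pointer itself. A minimal sketch of the pattern — the helper below is hypothetical and not part of the commit, and it assumes the post-change AAResults API in which doesNotAccessMemory takes a const CallBase * — looks like this:

// Illustrative only -- helper name is hypothetical, not added by this commit.
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Old style:  CallSite CS(I); if (CS && !AA.doesNotAccessMemory(CS)) ...
// New style:  dyn_cast directly to CallBase and hand the call to AA.
static bool callMayAccessMemory(Instruction *I, AAResults &AA) {
  auto *Call = dyn_cast<CallBase>(I);
  if (!Call)
    return false;                       // not a call or invoke instruction
  return !AA.doesNotAccessMemory(Call); // AA query now takes the CallBase
}

The dyn_cast covers call and invoke instructions alike, which is what the CallSite constructor's implicit validity test used to express.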
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 3a04f7a00d4..4e2a82b56ee 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -130,16 +130,15 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
     // Some instructions can be ignored even if they read or write memory.
     // Detect these now, skipping to the next instruction if one is found.
-    CallSite CS(cast<Value>(I));
-    if (CS) {
+    if (auto *Call = dyn_cast<CallBase>(I)) {
       // Ignore calls to functions in the same SCC, as long as the call sites
       // don't have operand bundles. Calls with operand bundles are allowed to
       // have memory effects not described by the memory effects of the call
       // target.
-      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
-          SCCNodes.count(CS.getCalledFunction()))
+      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
+          SCCNodes.count(Call->getCalledFunction()))
         continue;
-      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
+      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
       ModRefInfo MRI = createModRefInfo(MRB);
       // If the call doesn't access memory, we're done.
@@ -158,7 +157,7 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
       // Check whether all pointer arguments point to local memory, and
       // ignore calls that only access local memory.
-      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+      for (CallSite::arg_iterator CI = Call->arg_begin(), CE = Call->arg_end();
            CI != CE; ++CI) {
         Value *Arg = *CI;
         if (!Arg->getType()->isPtrOrPtrVectorTy())
diff --git a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index 52a5e8c96ab..4bd5fd1acd4 100644
--- a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -45,18 +45,15 @@ bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
   default:
     break;
   }
-  ImmutableCallSite CS(Inst);
-  assert(CS && "Only calls can alter reference counts!");
+  const auto *Call = cast<CallBase>(Inst);
   // See if AliasAnalysis can help us with the call.
-  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS);
+  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(Call);
   if (AliasAnalysis::onlyReadsMemory(MRB))
     return false;
   if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
     const DataLayout &DL = Inst->getModule()->getDataLayout();
-    for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
-         I != E; ++I) {
-      const Value *Op = *I;
+    for (const Value *Op : Call->args()) {
       if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
           PA.related(Ptr, Op, DL))
         return true;
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 69112f3cee2..469930ca6a1 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -834,7 +834,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
       continue;
     }
-    if (auto CS = CallSite(&*BBI)) {
+    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
       // Remove allocation function calls from the list of dead stack objects;
       // there can't be any references before the definition.
       if (isAllocLikeFn(&*BBI, TLI))
@@ -842,15 +842,15 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
       // If this call does not access memory, it can't be loading any of our
       // pointers.
-      if (AA->doesNotAccessMemory(CS))
+      if (AA->doesNotAccessMemory(Call))
         continue;
       // If the call might load from any of our allocas, then any store above
       // the call is live.
       DeadStackObjects.remove_if([&](Value *I) {
         // See if the call site touches the value.
-        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
-                                                                BB.getParent())));
+        return isRefSet(AA->getModRefInfo(
+            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
       });
       // If all of the allocas were clobbered by the call then we're not going
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 440ea4a5bc7..04ed914b86c 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -437,7 +437,7 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
   // Non-local case.
   const MemoryDependenceResults::NonLocalDepInfo &deps =
-      MD->getNonLocalCallDependency(CallSite(C));
+      MD->getNonLocalCallDependency(C);
   // FIXME: Move the checking logic to MemDep!
   CallInst* cdep = nullptr;
diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index f48d3cc098a..83861b98fbd 100644
--- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -360,10 +360,11 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() {
 bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
   assert(I != nullptr && "Null instruction found!");
   // Check function call safety
-  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
-    LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n");
-    return false;
-  }
+  if (auto *Call = dyn_cast<CallBase>(I))
+    if (!AA->doesNotAccessMemory(Call)) {
+      LLVM_DEBUG(dbgs() << "    Unsafe call site found.\n");
+      return false;
+    }
   // Avoid loops with possiblity of throw
   if (I->mayThrow()) {
     LLVM_DEBUG(dbgs() << "    May throw instruction found in loop body\n");
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2bdecb48446..ced923d6973 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -546,8 +546,8 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
   // Memory locations of lifted instructions.
   SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
-  // Lifted callsites.
-  SmallVector<ImmutableCallSite, 8> CallSites;
+  // Lifted calls.
+  SmallVector<const CallBase *, 8> Calls;
   const MemoryLocation LoadLoc = MemoryLocation::get(LI);
@@ -565,10 +565,9 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
       });
       if (!NeedLift)
-        NeedLift =
-            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
-              return isModOrRefSet(AA.getModRefInfo(C, CS));
-            });
+        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
+          return isModOrRefSet(AA.getModRefInfo(C, Call));
+        });
     }
     if (!NeedLift)
@@ -579,12 +578,12 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
       // none of them may modify its source.
       if (isModSet(AA.getModRefInfo(C, LoadLoc)))
         return false;
-      else if (auto CS = ImmutableCallSite(C)) {
+      else if (const auto *Call = dyn_cast<CallBase>(C)) {
         // If we can't lift this before P, it's game over.
-        if (isModOrRefSet(AA.getModRefInfo(P, CS)))
+        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
           return false;
-        CallSites.push_back(CS);
+        Calls.push_back(Call);
       } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
         // If we can't lift this before P, it's game over.
         auto ML = MemoryLocation::get(C);
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index d1cdfabb0cc..c99da8f0737 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -76,14 +76,14 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
       Inst->mayThrow())
     return false;
-  if (auto CS = CallSite(Inst)) {
+  if (auto *Call = dyn_cast<CallBase>(Inst)) {
     // Convergent operations cannot be made control-dependent on additional
     // values.
-    if (CS.hasFnAttr(Attribute::Convergent))
+    if (Call->hasFnAttr(Attribute::Convergent))
       return false;
     for (Instruction *S : Stores)
-      if (isModSet(AA.getModRefInfo(S, CS)))
+      if (isModSet(AA.getModRefInfo(S, Call)))
         return false;
   }
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 027351287c5..623fe91a5a6 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -999,22 +999,22 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
         PtrArgs.push_back(CXI->getPointerOperand());
       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
         PtrArgs.push_back(RMWI->getPointerOperand());
-      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
+      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
-        if (ICS.doesNotAccessMemory())
+        if (Call->doesNotAccessMemory())
           continue;
         IsFuncCall = true;
         if (CalleeAAR) {
-          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
+          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
           if (MRB == FMRB_OnlyAccessesArgumentPointees ||
               MRB == FMRB_OnlyReadsArgumentPointees)
             IsArgMemOnlyCall = true;
         }
-        for (Value *Arg : ICS.args()) {
+        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
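Argument traversal changes in the same way: explicit ImmutableCallSite::arg_iterator loops, as in DependencyAnalysis.cpp and InlineFunction.cpp above, become range-based loops over CallBase::args(). A small sketch of that idiom, again with a hypothetical helper name rather than anything added by this commit:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Count the pointer-typed arguments of a call or invoke.
// (Hypothetical helper; the commit only rewrites existing loops this way.)
static unsigned countPointerArgs(const CallBase &Call) {
  unsigned NumPtrArgs = 0;
  for (const Value *Arg : Call.args()) // was: CS.arg_begin() / CS.arg_end()
    if (Arg->getType()->isPtrOrPtrVectorTy())
      ++NumPtrArgs;
  return NumPtrArgs;
}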