diff options
Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r-- | llvm/lib/Transforms/Scalar/GVNHoist.cpp | 8
-rw-r--r-- | llvm/lib/Transforms/Scalar/GVNSink.cpp | 4
-rw-r--r-- | llvm/lib/Transforms/Scalar/GuardWidening.cpp | 5
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopSink.cpp | 8
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 6
-rw-r--r-- | llvm/lib/Transforms/Scalar/MergeICmps.cpp | 7
-rw-r--r-- | llvm/lib/Transforms/Scalar/NewGVN.cpp | 7
-rw-r--r-- | llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/SROA.cpp | 6
-rw-r--r-- | llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp | 9
11 files changed, 28 insertions, 36 deletions
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp index d031d675642..3043df9cca7 100644 --- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp +++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp @@ -748,11 +748,9 @@ private: // TODO: Remove fully-redundant expressions. // Get instruction from the Map, assume that all the Instructions // with same VNs have same rank (this is an approximation). - llvm::sort(Ranks.begin(), Ranks.end(), - [this, &Map](const VNType &r1, const VNType &r2) { - return (rank(*Map.lookup(r1).begin()) < - rank(*Map.lookup(r2).begin())); - }); + llvm::sort(Ranks, [this, &Map](const VNType &r1, const VNType &r2) { + return (rank(*Map.lookup(r1).begin()) < rank(*Map.lookup(r2).begin())); + }); // - Sort VNs according to their rank, and start with lowest ranked VN // - Take a VN and for each instruction with same VN diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp index 3737831e59b..187c668c338 100644 --- a/llvm/lib/Transforms/Scalar/GVNSink.cpp +++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp @@ -239,7 +239,7 @@ public: SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops; for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)}); - llvm::sort(Ops.begin(), Ops.end()); + llvm::sort(Ops); for (auto &P : Ops) { Blocks.push_back(P.first); Values.push_back(P.second); @@ -762,7 +762,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) { } if (Preds.size() < 2) return 0; - llvm::sort(Preds.begin(), Preds.end()); + llvm::sort(Preds); unsigned NumOrigPreds = Preds.size(); // We can only sink instructions through unconditional branches. 
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp index 9106459cfa9..cbbd7b861d8 100644 --- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp +++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp @@ -698,9 +698,8 @@ bool GuardWideningImpl::combineRangeChecks( // CurrentChecks.size() will typically be 3 here, but so far there has been // no need to hard-code that fact. - llvm::sort(CurrentChecks.begin(), CurrentChecks.end(), - [&](const GuardWideningImpl::RangeCheck &LHS, - const GuardWideningImpl::RangeCheck &RHS) { + llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS, + const GuardWideningImpl::RangeCheck &RHS) { return LHS.getOffsetValue().slt(RHS.getOffsetValue()); }); diff --git a/llvm/lib/Transforms/Scalar/LoopSink.cpp b/llvm/lib/Transforms/Scalar/LoopSink.cpp index 67bcbdca185..db502e1c5db 100644 --- a/llvm/lib/Transforms/Scalar/LoopSink.cpp +++ b/llvm/lib/Transforms/Scalar/LoopSink.cpp @@ -208,11 +208,9 @@ static bool sinkInstruction(Loop &L, Instruction &I, SmallVector<BasicBlock *, 2> SortedBBsToSinkInto; SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(), BBsToSinkInto.end()); - llvm::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(), - [&](BasicBlock *A, BasicBlock *B) { - return LoopBlockNumber.find(A)->second < - LoopBlockNumber.find(B)->second; - }); + llvm::sort(SortedBBsToSinkInto, [&](BasicBlock *A, BasicBlock *B) { + return LoopBlockNumber.find(A)->second < LoopBlockNumber.find(B)->second; + }); BasicBlock *MoveBB = *SortedBBsToSinkInto.begin(); // FIXME: Optimize the efficiency for cloned value replacement. 
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index fa83b48210b..857b83da96d 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1487,7 +1487,7 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { SmallVector<const SCEV *, 4> Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. - llvm::sort(Key.begin(), Key.end()); + llvm::sort(Key); return Uniquifier.count(Key); } @@ -1511,7 +1511,7 @@ bool LSRUse::InsertFormula(const Formula &F, const Loop &L) { SmallVector<const SCEV *, 4> Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. - llvm::sort(Key.begin(), Key.end()); + llvm::sort(Key); if (!Uniquifier.insert(Key).second) return false; @@ -4238,7 +4238,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() { Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for // uniquifying. - llvm::sort(Key.begin(), Key.end()); + llvm::sort(Key); std::pair<BestFormulaeTy::const_iterator, bool> P = BestFormulae.insert(std::make_pair(Key, FIdx)); diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp index 1e4bbd4c472..f68662b488c 100644 --- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp +++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp @@ -465,10 +465,9 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi, #endif // MERGEICMPS_DOT_ON // Reorder blocks by LHS. We can do that without changing the // semantics because we are only accessing dereferencable memory. 
- llvm::sort(Comparisons_.begin(), Comparisons_.end(), - [](const BCECmpBlock &a, const BCECmpBlock &b) { - return a.Lhs() < b.Lhs(); - }); + llvm::sort(Comparisons_, [](const BCECmpBlock &a, const BCECmpBlock &b) { + return a.Lhs() < b.Lhs(); + }); #ifdef MERGEICMPS_DOT_ON errs() << "AFTER REORDERING:\n\n"; dump(); diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp index f3cbd87cae6..ed9f868af61 100644 --- a/llvm/lib/Transforms/Scalar/NewGVN.cpp +++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp @@ -959,8 +959,7 @@ static bool isCopyOfAPHI(const Value *V) { // order. The BlockInstRange numbers are generated in an RPO walk of the basic // blocks. void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const { - llvm::sort(Ops.begin(), Ops.end(), - [&](const ValPair &P1, const ValPair &P2) { + llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) { return BlockInstRange.lookup(P1.second).first < BlockInstRange.lookup(P2.second).first; }); @@ -3955,7 +3954,7 @@ bool NewGVN::eliminateInstructions(Function &F) { convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead); // Sort the whole thing. - llvm::sort(DFSOrderedSet.begin(), DFSOrderedSet.end()); + llvm::sort(DFSOrderedSet); for (auto &VD : DFSOrderedSet) { int MemberDFSIn = VD.DFSIn; int MemberDFSOut = VD.DFSOut; @@ -4118,7 +4117,7 @@ bool NewGVN::eliminateInstructions(Function &F) { // If we have possible dead stores to look at, try to eliminate them. 
if (CC->getStoreCount() > 0) { convertClassToLoadsAndStores(*CC, PossibleDeadStores); - llvm::sort(PossibleDeadStores.begin(), PossibleDeadStores.end()); + llvm::sort(PossibleDeadStores); ValueDFSStack EliminationStack; for (auto &VD : PossibleDeadStores) { int MemberDFSIn = VD.DFSIn; diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp index 8f30bccf48f..7f9aad24883 100644 --- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -524,7 +524,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) { }; // We need the order of list to be stable so that naming ends up stable // when we split edges. This makes test cases much easier to write. - llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName); + llvm::sort(PollLocations, OrderByBBName); // We can sometimes end up with duplicate poll locations. This happens if // a single loop is visited more than once. The fact this happens seems diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp index 5d3e928d509..5e23a8a3dcd 100644 --- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -1825,7 +1825,7 @@ static void relocationViaAlloca( } } - llvm::sort(Uses.begin(), Uses.end()); + llvm::sort(Uses); auto Last = std::unique(Uses.begin(), Uses.end()); Uses.erase(Last, Uses.end()); diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 8b591de1e99..6e991409bf0 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -1060,7 +1060,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI) // Sort the uses. This arranges for the offsets to be in ascending order, // and the sizes to be in descending order. 
- llvm::sort(Slices.begin(), Slices.end()); + llvm::sort(Slices); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -1906,7 +1906,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { "All non-integer types eliminated!"); return RHSTy->getNumElements() < LHSTy->getNumElements(); }; - llvm::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); + llvm::sort(CandidateTys, RankVectorTypes); CandidateTys.erase( std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), CandidateTys.end()); @@ -4221,7 +4221,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { } if (!IsSorted) - llvm::sort(AS.begin(), AS.end()); + llvm::sort(AS); /// Describes the allocas introduced by rewritePartition in order to migrate /// the debug info. diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index 57799c8cd8c..17035f469da 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -1265,11 +1265,10 @@ static void buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks, // matter as we're just trying to build up the map from inside-out; we use // the map in a more stably ordered way below. auto OrderedClonedExitsInLoops = ClonedExitsInLoops; - llvm::sort(OrderedClonedExitsInLoops.begin(), OrderedClonedExitsInLoops.end(), - [&](BasicBlock *LHS, BasicBlock *RHS) { - return ExitLoopMap.lookup(LHS)->getLoopDepth() < - ExitLoopMap.lookup(RHS)->getLoopDepth(); - }); + llvm::sort(OrderedClonedExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) { + return ExitLoopMap.lookup(LHS)->getLoopDepth() < + ExitLoopMap.lookup(RHS)->getLoopDepth(); + }); // Populate the existing ExitLoopMap with everything reachable from each // exit, starting from the inner most exit. |