author     Fangrui Song <maskray@google.com>   2018-09-27 02:13:45 +0000
committer  Fangrui Song <maskray@google.com>   2018-09-27 02:13:45 +0000
commit     0cac726a00a2be06a959a89efe6587dba28e5822
tree       ca196a4c9ae371e54d735a6e5ee085de8c885d4f
parent     f1c96490d425cb2253871936a91083e177e866b0
download   bcm5719-llvm-0cac726a00a2be06a959a89efe6587dba28e5822.tar.gz
           bcm5719-llvm-0cac726a00a2be06a959a89efe6587dba28e5822.zip
llvm::sort(C.begin(), C.end(), ...) -> llvm::sort(C, ...)
Summary:
The convenience wrapper in STLExtras is available since rL342102.

Reviewers: dblaikie, javed.absar, JDevlieghere, andreadb

Subscribers: MatzeB, sanjoy, arsenm, dschuff, mehdi_amini, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, javed.absar, gbedwell, jrtc27, mgrang, atanasyan, steven_wu, george.burgess.iv, dexonsmith, kristina, jsji, llvm-commits

Differential Revision: https://reviews.llvm.org/D52573

llvm-svn: 343163
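The change is purely mechanical: every call that spelled out an iterator pair now passes the container directly. As a reference for the call shape, the snippet below is a minimal, self-contained sketch (it is not the STLExtras implementation, and the demo namespace and names are illustrative only) of a range overload forwarding to an iterator-pair overload, with the before/after form shown at the call site.

// Illustrative sketch only; not the llvm::sort defined in STLExtras.h.
// It mirrors the shape of the change: a range overload forwards to the
// iterator-pair overload so callers no longer write C.begin()/C.end().
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <vector>

namespace demo {
// Iterator-pair form, analogous to llvm::sort(Start, End, Comp).
template <typename IteratorTy, typename Compare>
void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
  std::sort(Start, End, Comp);
}
// Range form, analogous to llvm::sort(C, Comp).
template <typename Container, typename Compare>
void sort(Container &&C, Compare Comp) {
  demo::sort(std::begin(C), std::end(C), Comp);
}
} // namespace demo

int main() {
  std::vector<int> V = {3, 1, 2};
  // Before this commit: llvm::sort(V.begin(), V.end(), std::greater<int>());
  // After this commit:  llvm::sort(V, std::greater<int>());
  demo::sort(V, std::greater<int>());
  assert((V == std::vector<int>{3, 2, 1}));
  return 0;
}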
-rw-r--r--  llvm/include/llvm/Analysis/LoopInfoImpl.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/SlotIndexes.h | 2
-rw-r--r--  llvm/include/llvm/ProfileData/InstrProf.h | 6
-rw-r--r--  llvm/include/llvm/Support/CFGUpdate.h | 2
-rw-r--r--  llvm/include/llvm/Support/GenericDomTreeConstruction.h | 7
-rw-r--r--  llvm/include/llvm/Support/ScopedPrinter.h | 2
-rw-r--r--  llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp | 12
-rw-r--r--  llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp | 4
-rw-r--r--  llvm/lib/Analysis/CallGraph.cpp | 3
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 6
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 2
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 2
-rw-r--r--  llvm/lib/Analysis/TargetLibraryInfo.cpp | 4
-rw-r--r--  llvm/lib/Bitcode/Reader/ValueList.cpp | 2
-rw-r--r--  llvm/lib/Bitcode/Writer/ValueEnumerator.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h | 2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 40
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/LocalStackSlotAllocation.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MIRCanonicalizerPass.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/MachineBasicBlock.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/MachinePipeliner.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/ReachingDefAnalysis.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/ScheduleDAGInstrs.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 10
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 7
-rw-r--r--  llvm/lib/CodeGen/SlotIndexes.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/StackColoring.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/StackMaps.cpp | 9
-rw-r--r--  llvm/lib/CodeGen/StackSlotColoring.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFContext.cpp | 5
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp | 2
-rw-r--r--  llvm/lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp | 9
-rw-r--r--  llvm/lib/IR/AsmWriter.cpp | 2
-rw-r--r--  llvm/lib/IR/Attributes.cpp | 2
-rw-r--r--  llvm/lib/IR/Metadata.cpp | 4
-rw-r--r--  llvm/lib/IR/Verifier.cpp | 4
-rw-r--r--  llvm/lib/LTO/ThinLTOCodeGenerator.cpp | 11
-rw-r--r--  llvm/lib/MC/MachObjectWriter.cpp | 4
-rw-r--r--  llvm/lib/MC/WinCOFFObjectWriter.cpp | 7
-rw-r--r--  llvm/lib/ProfileData/Coverage/CoverageMapping.cpp | 7
-rw-r--r--  llvm/lib/ProfileData/GCOV.cpp | 2
-rw-r--r--  llvm/lib/ProfileData/ProfileSummaryBuilder.cpp | 2
-rw-r--r--  llvm/lib/Support/JSON.cpp | 2
-rw-r--r--  llvm/lib/Support/SourceMgr.cpp | 2
-rw-r--r--  llvm/lib/Support/Timer.cpp | 2
-rw-r--r--  llvm/lib/TableGen/Record.cpp | 7
-rw-r--r--  llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp | 29
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp | 19
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/ARMFrameLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 15
-rw-r--r--  llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp | 7
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonGenInsert.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/RDFDeadCode.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/RDFGraph.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/RDFLiveness.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp | 19
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp | 2
-rw-r--r--  llvm/lib/Target/XCore/XCoreFrameLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 2
-rw-r--r--  llvm/lib/Transforms/IPO/LowerTypeTests.cpp | 6
-rw-r--r--  llvm/lib/Transforms/IPO/SampleProfile.cpp | 26
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNHoist.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNSink.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/GuardWidening.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopSink.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/MergeICmps.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Scalar/NewGVN.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/LowerSwitch.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/PredicateInfo.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 11
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/SplitModule.cpp | 14
-rw-r--r--  llvm/tools/dsymutil/DwarfLinker.cpp | 2
-rw-r--r--  llvm/tools/dsymutil/DwarfStreamer.cpp | 2
-rw-r--r--  llvm/tools/llvm-config/llvm-config.cpp | 2
-rw-r--r--  llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp | 2
-rw-r--r--  llvm/tools/llvm-exegesis/lib/Analysis.cpp | 16
-rw-r--r--  llvm/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp | 7
-rw-r--r--  llvm/tools/llvm-mca/lib/InstrBuilder.cpp | 2
-rw-r--r--  llvm/tools/llvm-nm/llvm-nm.cpp | 2
-rw-r--r--  llvm/tools/llvm-objdump/COFFDump.cpp | 2
-rw-r--r--  llvm/tools/llvm-objdump/MachODump.cpp | 2
-rw-r--r--  llvm/tools/llvm-objdump/llvm-objdump.cpp | 6
-rw-r--r--  llvm/tools/llvm-pdbutil/DumpOutputStyle.cpp | 2
-rw-r--r--  llvm/tools/llvm-pdbutil/PrettyTypeDumper.cpp | 2
-rw-r--r--  llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp | 6
-rw-r--r--  llvm/tools/llvm-readobj/COFFDumper.cpp | 3
-rw-r--r--  llvm/tools/llvm-xray/xray-account.cpp | 131
-rw-r--r--  llvm/tools/yaml2obj/yaml2macho.cpp | 7
-rw-r--r--  llvm/unittests/ADT/StringMapTest.cpp | 4
-rw-r--r--  llvm/unittests/Analysis/LazyCallGraphTest.cpp | 22
-rw-r--r--  llvm/unittests/Analysis/MemorySSATest.cpp | 7
-rw-r--r--  llvm/unittests/Support/Path.cpp | 12
-rw-r--r--  llvm/utils/TableGen/CTagsEmitter.cpp | 2
-rw-r--r--  llvm/utils/TableGen/CodeGenDAGPatterns.cpp | 4
-rw-r--r--  llvm/utils/TableGen/CodeGenRegisters.cpp | 14
-rw-r--r--  llvm/utils/TableGen/CodeGenSchedule.cpp | 14
-rw-r--r--  llvm/utils/TableGen/CodeGenTarget.cpp | 10
-rw-r--r--  llvm/utils/TableGen/GlobalISelEmitter.cpp | 10
-rw-r--r--  llvm/utils/TableGen/InfoByHwMode.cpp | 4
-rw-r--r--  llvm/utils/TableGen/SubtargetEmitter.cpp | 17
124 files changed, 395 insertions, 443 deletions
diff --git a/llvm/include/llvm/Analysis/LoopInfoImpl.h b/llvm/include/llvm/Analysis/LoopInfoImpl.h
index 94138985886..d3054b7cf85 100644
--- a/llvm/include/llvm/Analysis/LoopInfoImpl.h
+++ b/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -640,8 +640,8 @@ void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
- llvm::sort(BB1.begin(), BB1.end());
- llvm::sort(BB2.begin(), BB2.end());
+ llvm::sort(BB1);
+ llvm::sort(BB2);
return BB1 == BB2;
}
diff --git a/llvm/include/llvm/CodeGen/SlotIndexes.h b/llvm/include/llvm/CodeGen/SlotIndexes.h
index bb9c5cb4cfb..55082222b7a 100644
--- a/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -676,7 +676,7 @@ class raw_ostream;
idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
renumberIndexes(newItr);
- llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ llvm::sort(idx2MBBMap, Idx2MBBCompare());
}
/// Free the resources that were required to maintain a SlotIndex.
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 206142b3565..dc45021fc47 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -544,9 +544,9 @@ Error InstrProfSymtab::create(const NameIterRange &IterRange) {
void InstrProfSymtab::finalizeSymtab() {
if (Sorted)
return;
- llvm::sort(MD5NameMap.begin(), MD5NameMap.end(), less_first());
- llvm::sort(MD5FuncMap.begin(), MD5FuncMap.end(), less_first());
- llvm::sort(AddrToMD5Map.begin(), AddrToMD5Map.end(), less_first());
+ llvm::sort(MD5NameMap, less_first());
+ llvm::sort(MD5FuncMap, less_first());
+ llvm::sort(AddrToMD5Map, less_first());
AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
AddrToMD5Map.end());
Sorted = true;
diff --git a/llvm/include/llvm/Support/CFGUpdate.h b/llvm/include/llvm/Support/CFGUpdate.h
index c1b1e3df803..63c24a3d2a2 100644
--- a/llvm/include/llvm/Support/CFGUpdate.h
+++ b/llvm/include/llvm/Support/CFGUpdate.h
@@ -105,7 +105,7 @@ void LegalizeUpdates(ArrayRef<Update<NodePtr>> AllUpdates,
Operations[{U.getTo(), U.getFrom()}] = int(i);
}
- llvm::sort(Result.begin(), Result.end(),
+ llvm::sort(Result,
[&Operations](const Update<NodePtr> &A, const Update<NodePtr> &B) {
return Operations[{A.getFrom(), A.getTo()}] >
Operations[{B.getFrom(), B.getTo()}];
diff --git a/llvm/include/llvm/Support/GenericDomTreeConstruction.h b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
index 5eb09430de0..344484b285c 100644
--- a/llvm/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/llvm/include/llvm/Support/GenericDomTreeConstruction.h
@@ -1386,10 +1386,9 @@ struct SemiNCAInfo {
// Make a copy and sort it such that it is possible to check if there are
// no gaps between DFS numbers of adjacent children.
SmallVector<TreeNodePtr, 8> Children(Node->begin(), Node->end());
- llvm::sort(Children.begin(), Children.end(),
- [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
- return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
- });
+ llvm::sort(Children, [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
+ return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
+ });
auto PrintChildrenError = [Node, &Children, PrintNodeAndDFSNums](
const TreeNodePtr FirstCh, const TreeNodePtr SecondCh) {
diff --git a/llvm/include/llvm/Support/ScopedPrinter.h b/llvm/include/llvm/Support/ScopedPrinter.h
index 062439b4f7d..34c1a287ee1 100644
--- a/llvm/include/llvm/Support/ScopedPrinter.h
+++ b/llvm/include/llvm/Support/ScopedPrinter.h
@@ -138,7 +138,7 @@ public:
}
}
- llvm::sort(SetFlags.begin(), SetFlags.end(), &flagName<TFlag>);
+ llvm::sort(SetFlags, &flagName<TFlag>);
startLine() << Label << " [ (" << hex(Value) << ")\n";
for (const auto &Flag : SetFlags) {
diff --git a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 98209888f8a..08ebcc47a80 100644
--- a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -156,9 +156,9 @@ static void combineWeight(Weight &W, const Weight &OtherW) {
static void combineWeightsBySorting(WeightList &Weights) {
// Sort so edges to the same node are adjacent.
- llvm::sort(Weights.begin(), Weights.end(),
- [](const Weight &L,
- const Weight &R) { return L.TargetNode < R.TargetNode; });
+ llvm::sort(Weights, [](const Weight &L, const Weight &R) {
+ return L.TargetNode < R.TargetNode;
+ });
// Combine adjacent edges.
WeightList::iterator O = Weights.begin();
@@ -707,7 +707,7 @@ static void findIrreducibleHeaders(
"Expected irreducible CFG; -loop-info is likely invalid");
if (Headers.size() == InSCC.size()) {
// Every block is a header.
- llvm::sort(Headers.begin(), Headers.end());
+ llvm::sort(Headers);
return;
}
@@ -742,8 +742,8 @@ static void findIrreducibleHeaders(
Others.push_back(Irr.Node);
LLVM_DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
}
- llvm::sort(Headers.begin(), Headers.end());
- llvm::sort(Others.begin(), Others.end());
+ llvm::sort(Headers);
+ llvm::sort(Others);
}
static void createIrreducibleLoop(
diff --git a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
index 194983418b0..18ea37fd7a8 100644
--- a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -395,7 +395,7 @@ populateAliasMap(DenseMap<const Value *, std::vector<OffsetValue>> &AliasMap,
}
// Sort AliasList for faster lookup
- llvm::sort(AliasList.begin(), AliasList.end());
+ llvm::sort(AliasList);
}
}
@@ -479,7 +479,7 @@ static void populateExternalRelations(
}
// Remove duplicates in ExtRelations
- llvm::sort(ExtRelations.begin(), ExtRelations.end());
+ llvm::sort(ExtRelations);
ExtRelations.erase(std::unique(ExtRelations.begin(), ExtRelations.end()),
ExtRelations.end());
}
diff --git a/llvm/lib/Analysis/CallGraph.cpp b/llvm/lib/Analysis/CallGraph.cpp
index cbdf5f63c55..0da678e1611 100644
--- a/llvm/lib/Analysis/CallGraph.cpp
+++ b/llvm/lib/Analysis/CallGraph.cpp
@@ -97,8 +97,7 @@ void CallGraph::print(raw_ostream &OS) const {
for (const auto &I : *this)
Nodes.push_back(I.second.get());
- llvm::sort(Nodes.begin(), Nodes.end(),
- [](CallGraphNode *LHS, CallGraphNode *RHS) {
+ llvm::sort(Nodes, [](CallGraphNode *LHS, CallGraphNode *RHS) {
if (Function *LF = LHS->getFunction())
if (Function *RF = RHS->getFunction())
return LF->getName() < RF->getName();
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index a0e22ad980c..03483262b13 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -807,7 +807,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
DirtyBlocks.push_back(Entry.getBB());
// Sort the cache so that we can do fast binary search lookups below.
- llvm::sort(Cache.begin(), Cache.end());
+ llvm::sort(Cache);
++NumCacheDirtyNonLocal;
// cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
@@ -1070,7 +1070,7 @@ SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
break;
default:
// Added many values, do a full scale sort.
- llvm::sort(Cache.begin(), Cache.end());
+ llvm::sort(Cache);
break;
}
}
@@ -1662,7 +1662,7 @@ void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
// Re-sort the NonLocalDepInfo. Changing the dirty entry to its
// subsequent value may invalidate the sortedness.
- llvm::sort(NLPDI.begin(), NLPDI.end());
+ llvm::sort(NLPDI);
}
ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 5626e69398e..d99d4767366 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -11064,7 +11064,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
// Put larger terms first.
- llvm::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
+ llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
return numberOfTerms(LHS) > numberOfTerms(RHS);
});
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 8f89389c4b5..ca5cf1663b8 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1867,7 +1867,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
Phis.push_back(&PN);
if (TTI)
- llvm::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
+ llvm::sort(Phis, [](Value *LHS, Value *RHS) {
// Put pointers at the back and make sure pointer < pointer = false.
if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 102135fbf31..fb678febe23 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -1399,10 +1399,10 @@ static bool compareWithVectorFnName(const VecDesc &LHS, StringRef S) {
void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
VectorDescs.insert(VectorDescs.end(), Fns.begin(), Fns.end());
- llvm::sort(VectorDescs.begin(), VectorDescs.end(), compareByScalarFnName);
+ llvm::sort(VectorDescs, compareByScalarFnName);
ScalarDescs.insert(ScalarDescs.end(), Fns.begin(), Fns.end());
- llvm::sort(ScalarDescs.begin(), ScalarDescs.end(), compareByVectorFnName);
+ llvm::sort(ScalarDescs, compareByVectorFnName);
}
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
diff --git a/llvm/lib/Bitcode/Reader/ValueList.cpp b/llvm/lib/Bitcode/Reader/ValueList.cpp
index 1ab22b5cc3d..b3945a37408 100644
--- a/llvm/lib/Bitcode/Reader/ValueList.cpp
+++ b/llvm/lib/Bitcode/Reader/ValueList.cpp
@@ -144,7 +144,7 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
void BitcodeReaderValueList::resolveConstantForwardRefs() {
// Sort the values by-pointer so that they are efficient to look up with a
// binary search.
- llvm::sort(ResolveConstants.begin(), ResolveConstants.end());
+ llvm::sort(ResolveConstants);
SmallVector<Constant *, 64> NewOps;
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index d473741e8ce..deb04f1bb36 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -184,7 +184,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
return;
bool IsGlobalValue = OM.isGlobalValue(ID);
- llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ llvm::sort(List, [&](const Entry &L, const Entry &R) {
const Use *LU = L.first;
const Use *RU = R.first;
if (LU == RU)
@@ -745,7 +745,7 @@ void ValueEnumerator::organizeMetadata() {
// and then sort by the original/current ID. Since the IDs are guaranteed to
// be unique, the result of std::sort will be deterministic. There's no need
// for std::stable_sort.
- llvm::sort(Order.begin(), Order.end(), [this](MDIndex LHS, MDIndex RHS) {
+ llvm::sort(Order, [this](MDIndex LHS, MDIndex RHS) {
return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
});
diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 61b683616d3..f99b5c20e1c 100644
--- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -2353,10 +2353,9 @@ void CodeViewDebug::emitLocalVariableList(ArrayRef<LocalVariable> Locals) {
for (const LocalVariable &L : Locals)
if (L.DIVar->isParameter())
Params.push_back(&L);
- llvm::sort(Params.begin(), Params.end(),
- [](const LocalVariable *L, const LocalVariable *R) {
- return L->DIVar->getArg() < R->DIVar->getArg();
- });
+ llvm::sort(Params, [](const LocalVariable *L, const LocalVariable *R) {
+ return L->DIVar->getArg() < R->DIVar->getArg();
+ });
for (const LocalVariable *L : Params)
emitLocalVariable(*L);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
index ac49657b68f..befa4b941c8 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -139,7 +139,7 @@ public:
// Sort the pieces by offset.
// Remove any duplicate entries by dropping all but the first.
void sortUniqueValues() {
- llvm::sort(Values.begin(), Values.end());
+ llvm::sort(Values);
Values.erase(
std::unique(
Values.begin(), Values.end(), [](const Value &A, const Value &B) {
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index f6a875f405a..6c6a861306d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -241,7 +241,7 @@ ArrayRef<DbgVariable::FrameIndexExpr> DbgVariable::getFrameIndexExprs() const {
return A.Expr->isFragment();
}) &&
"multiple FI expressions without DW_OP_LLVM_fragment");
- llvm::sort(FrameIndexExprs.begin(), FrameIndexExprs.end(),
+ llvm::sort(FrameIndexExprs,
[](const FrameIndexExpr &A, const FrameIndexExpr &B) -> bool {
return A.Expr->getFragmentInfo()->OffsetInBits <
B.Expr->getFragmentInfo()->OffsetInBits;
@@ -612,22 +612,21 @@ void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
/// Sort and unique GVEs by comparing their fragment offset.
static SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &
sortGlobalExprs(SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &GVEs) {
- llvm::sort(GVEs.begin(), GVEs.end(),
- [](DwarfCompileUnit::GlobalExpr A,
- DwarfCompileUnit::GlobalExpr B) {
- // Sort order: first null exprs, then exprs without fragment
- // info, then sort by fragment offset in bits.
- // FIXME: Come up with a more comprehensive comparator so
- // the sorting isn't non-deterministic, and so the following
- // std::unique call works correctly.
- if (!A.Expr || !B.Expr)
- return !!B.Expr;
- auto FragmentA = A.Expr->getFragmentInfo();
- auto FragmentB = B.Expr->getFragmentInfo();
- if (!FragmentA || !FragmentB)
- return !!FragmentB;
- return FragmentA->OffsetInBits < FragmentB->OffsetInBits;
- });
+ llvm::sort(
+ GVEs, [](DwarfCompileUnit::GlobalExpr A, DwarfCompileUnit::GlobalExpr B) {
+ // Sort order: first null exprs, then exprs without fragment
+ // info, then sort by fragment offset in bits.
+ // FIXME: Come up with a more comprehensive comparator so
+ // the sorting isn't non-deterministic, and so the following
+ // std::unique call works correctly.
+ if (!A.Expr || !B.Expr)
+ return !!B.Expr;
+ auto FragmentA = A.Expr->getFragmentInfo();
+ auto FragmentB = B.Expr->getFragmentInfo();
+ if (!FragmentA || !FragmentB)
+ return !!FragmentB;
+ return FragmentA->OffsetInBits < FragmentB->OffsetInBits;
+ });
GVEs.erase(std::unique(GVEs.begin(), GVEs.end(),
[](DwarfCompileUnit::GlobalExpr A,
DwarfCompileUnit::GlobalExpr B) {
@@ -2000,10 +1999,9 @@ void DwarfDebug::emitDebugARanges() {
}
// Sort the CU list (again, to ensure consistent output order).
- llvm::sort(CUs.begin(), CUs.end(),
- [](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
- return A->getUniqueID() < B->getUniqueID();
- });
+ llvm::sort(CUs, [](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
+ return A->getUniqueID() < B->getUniqueID();
+ });
// Emit an arange table for each CU we used.
for (DwarfCompileUnit *CU : CUs) {
diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 65de9d7e65a..89fe3857228 100644
--- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -359,9 +359,9 @@ void EHStreamer::emitExceptionTable() {
LandingPads.push_back(&PadInfos[i]);
// Order landing pads lexicographically by type id.
- llvm::sort(LandingPads.begin(), LandingPads.end(),
- [](const LandingPadInfo *L,
- const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
+ llvm::sort(LandingPads, [](const LandingPadInfo *L, const LandingPadInfo *R) {
+ return L->TypeIds < R->TypeIds;
+ });
// Compute the actions table and gather the first action index for each
// landing pad site.
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index d69e9589976..dfbfae85a86 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -4989,8 +4989,7 @@ bool CodeGenPrepare::splitLargeGEPOffsets() {
return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
};
// Sorting all the GEPs of the same data structures based on the offsets.
- llvm::sort(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end(),
- compareGEPOffset);
+ llvm::sort(LargeOffsetGEPs, compareGEPOffset);
LargeOffsetGEPs.erase(
std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
LargeOffsetGEPs.end());
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index ae061b64a38..1bfede097bd 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -219,7 +219,7 @@ void LegalizerInfo::computeTables() {
Opcode, TypeIdx, ElementSize,
moreToWiderTypesAndLessToWidest(NumElementsActions));
}
- llvm::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
+ llvm::sort(ElementSizesSeen);
SizeChangeStrategy VectorElementSizeChangeStrategy =
&unsupportedForDifferentSizes;
if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&
diff --git a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index f90ce0c8cd2..795028e9792 100644
--- a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -328,7 +328,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
// Sort the frame references by local offset.
// Use frame index as a tie-breaker in case MI's have the same offset.
- llvm::sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
+ llvm::sort(FrameReferenceInsns);
MachineBasicBlock *Entry = &Fn.front();
diff --git a/llvm/lib/CodeGen/MIRCanonicalizerPass.cpp b/llvm/lib/CodeGen/MIRCanonicalizerPass.cpp
index fa43d13b1b8..bbd1dcdb744 100644
--- a/llvm/lib/CodeGen/MIRCanonicalizerPass.cpp
+++ b/llvm/lib/CodeGen/MIRCanonicalizerPass.cpp
@@ -134,10 +134,10 @@ rescheduleLexographically(std::vector<MachineInstr *> instructions,
StringInstrMap.push_back({(i == std::string::npos) ? S : S.substr(i), II});
}
- llvm::sort(StringInstrMap.begin(), StringInstrMap.end(),
- [](const StringInstrPair &a, const StringInstrPair &b) -> bool {
- return (a.first < b.first);
- });
+ llvm::sort(StringInstrMap,
+ [](const StringInstrPair &a, const StringInstrPair &b) -> bool {
+ return (a.first < b.first);
+ });
for (auto &II : StringInstrMap) {
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 6a242d8d070..c60270559fa 100644
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -461,7 +461,7 @@ bool MachineBasicBlock::isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) const {
}
void MachineBasicBlock::sortUniqueLiveIns() {
- llvm::sort(LiveIns.begin(), LiveIns.end(),
+ llvm::sort(LiveIns,
[](const RegisterMaskPair &LI0, const RegisterMaskPair &LI1) {
return LI0.PhysReg < LI1.PhysReg;
});
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 988861e3c90..5f6f0cf96a5 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -1861,8 +1861,7 @@ void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) {
RecRPTracker.closeBottom();
std::vector<SUnit *> SUnits(NS.begin(), NS.end());
- llvm::sort(SUnits.begin(), SUnits.end(),
- [](const SUnit *A, const SUnit *B) {
+ llvm::sort(SUnits, [](const SUnit *A, const SUnit *B) {
return A->NodeNum > B->NodeNum;
});
@@ -3981,7 +3980,7 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
};
// sort, so that we can perform a binary search
- llvm::sort(Indices.begin(), Indices.end(), CompareKey);
+ llvm::sort(Indices, CompareKey);
bool Valid = true;
(void)Valid;
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index d60f17ad07f..9ab405f38d4 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1554,7 +1554,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
if (MemOpRecords.size() < 2)
return;
- llvm::sort(MemOpRecords.begin(), MemOpRecords.end());
+ llvm::sort(MemOpRecords);
unsigned ClusterLength = 1;
for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
SUnit *SUa = MemOpRecords[Idx].SU;
diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
index 050fef5d25e..a9f0a938729 100644
--- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
+++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp
@@ -157,7 +157,7 @@ bool ReachingDefAnalysis::runOnMachineFunction(MachineFunction &mf) {
// Sorting all reaching defs found for a certain reg unit in a given BB.
for (MBBDefsInfo &MBBDefs : MBBReachingDefs) {
for (MBBRegUnitDefs &RegUnitDefs : MBBDefs)
- llvm::sort(RegUnitDefs.begin(), RegUnitDefs.end());
+ llvm::sort(RegUnitDefs);
}
return false;
diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 5eb842eb04a..346f82ff95f 100644
--- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -996,7 +996,7 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
for (auto &I : loads)
for (auto *SU : I.second)
NodeNums.push_back(SU->NodeNum);
- llvm::sort(NodeNums.begin(), NodeNums.end());
+ llvm::sort(NodeNums);
// The N last elements in NodeNums will be removed, and the SU with
// the lowest NodeNum of them will become the new BarrierChain to
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ad8121babe4..3f71f154779 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13250,8 +13250,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
// Sort the slices so that elements that are likely to be next to each
// other in memory are next to each other in the list.
- llvm::sort(LoadedSlices.begin(), LoadedSlices.end(),
- [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
+ llvm::sort(LoadedSlices, [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
});
@@ -14247,10 +14246,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
// Sort the memory operands according to their distance from the
// base pointer.
- llvm::sort(StoreNodes.begin(), StoreNodes.end(),
- [](MemOpLink LHS, MemOpLink RHS) {
- return LHS.OffsetFromBase < RHS.OffsetFromBase;
- });
+ llvm::sort(StoreNodes, [](MemOpLink LHS, MemOpLink RHS) {
+ return LHS.OffsetFromBase < RHS.OffsetFromBase;
+ });
// Store Merge attempts to merge the lowest stores. This generally
// works out as if successful, as the remaining stores are checked
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 90bc241ee5a..957b2e35494 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -242,7 +242,7 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
return;
// Sort them in increasing order.
- llvm::sort(Offsets.begin(), Offsets.end());
+ llvm::sort(Offsets);
// Check if the loads are close enough.
SmallVector<SDNode*, 4> Loads;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index c367d73ed4e..a8843314ef3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8016,7 +8016,7 @@ void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
}
// Sort the uses, so that all the uses from a given User are together.
- llvm::sort(Uses.begin(), Uses.end());
+ llvm::sort(Uses);
for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
UseIndex != UseIndexEnd; ) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b9af58d230c..1bbc3ff2d97 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2580,8 +2580,7 @@ void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif
- llvm::sort(Clusters.begin(), Clusters.end(),
- [](const CaseCluster &a, const CaseCluster &b) {
+ llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
return a.Low->getValue().slt(b.Low->getValue());
});
@@ -6252,7 +6251,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
GA->getGlobal(), getCurSDLoc(),
Val.getValueType(), GA->getOffset())});
}
- llvm::sort(Targets.begin(), Targets.end(),
+ llvm::sort(Targets,
[](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
return T1.Offset < T2.Offset;
});
@@ -9670,7 +9669,7 @@ bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
}
BitTestInfo BTI;
- llvm::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
+ llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
// Sort by probability first, number of bits second, bit mask third.
if (a.ExtraProb != b.ExtraProb)
return a.ExtraProb > b.ExtraProb;
diff --git a/llvm/lib/CodeGen/SlotIndexes.cpp b/llvm/lib/CodeGen/SlotIndexes.cpp
index ed74b3e4fa1..fccbb8ec91c 100644
--- a/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -95,7 +95,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
}
// Sort the Idx2MBBMap
- llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ llvm::sort(idx2MBBMap, Idx2MBBCompare());
LLVM_DEBUG(mf->print(dbgs(), this));
diff --git a/llvm/lib/CodeGen/StackColoring.cpp b/llvm/lib/CodeGen/StackColoring.cpp
index 33733661ea6..eb8552915e2 100644
--- a/llvm/lib/CodeGen/StackColoring.cpp
+++ b/llvm/lib/CodeGen/StackColoring.cpp
@@ -1231,7 +1231,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
});
for (auto &s : LiveStarts)
- llvm::sort(s.begin(), s.end());
+ llvm::sort(s);
bool Changed = true;
while (Changed) {
diff --git a/llvm/lib/CodeGen/StackMaps.cpp b/llvm/lib/CodeGen/StackMaps.cpp
index 19a191c01db..0676fa2421e 100644
--- a/llvm/lib/CodeGen/StackMaps.cpp
+++ b/llvm/lib/CodeGen/StackMaps.cpp
@@ -268,11 +268,10 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
// in the list. Merge entries that refer to the same dwarf register and use
// the maximum size that needs to be spilled.
- llvm::sort(LiveOuts.begin(), LiveOuts.end(),
- [](const LiveOutReg &LHS, const LiveOutReg &RHS) {
- // Only sort by the dwarf register number.
- return LHS.DwarfRegNum < RHS.DwarfRegNum;
- });
+ llvm::sort(LiveOuts, [](const LiveOutReg &LHS, const LiveOutReg &RHS) {
+ // Only sort by the dwarf register number.
+ return LHS.DwarfRegNum < RHS.DwarfRegNum;
+ });
for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
for (auto II = std::next(I); II != E; ++II) {
diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index eb15b15a24a..d8c6a249e4d 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -214,7 +214,7 @@ void StackSlotColoring::InitializeSlots() {
Intervals.reserve(LS->getNumIntervals());
for (auto &I : *LS)
Intervals.push_back(&I);
- llvm::sort(Intervals.begin(), Intervals.end(),
+ llvm::sort(Intervals,
[](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
// Gather all spill slots into a list.
diff --git a/llvm/lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp b/llvm/lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp
index bf9dd7c8686..4001741f560 100644
--- a/llvm/lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp
+++ b/llvm/lib/DebugInfo/CodeView/DebugCrossImpSubsection.cpp
@@ -79,7 +79,7 @@ Error DebugCrossModuleImportsSubsection::commit(
for (const auto &M : Mappings)
Ids.push_back(&M);
- llvm::sort(Ids.begin(), Ids.end(), [this](const T &L1, const T &L2) {
+ llvm::sort(Ids, [this](const T &L1, const T &L2) {
return Strings.getIdForString(L1->getKey()) <
Strings.getIdForString(L2->getKey());
});
diff --git a/llvm/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp b/llvm/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
index d2acc9a2100..9b251f5931b 100644
--- a/llvm/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
+++ b/llvm/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
@@ -91,7 +91,7 @@ std::vector<uint32_t> DebugStringTableSubsection::sortedIds() const {
Result.reserve(IdToString.size());
for (const auto &Entry : IdToString)
Result.push_back(Entry.first);
- llvm::sort(Result.begin(), Result.end());
+ llvm::sort(Result);
return Result;
}
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
index a5c31a56fad..5d9f5a328ae 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -106,10 +106,11 @@ collectContributionData(DWARFContext::unit_iterator_range Units) {
// Sort the contributions so that any invalid ones are placed at
// the start of the contributions vector. This way they are reported
// first.
- llvm::sort(Contributions.begin(), Contributions.end(),
+ llvm::sort(Contributions,
[](const Optional<StrOffsetsContributionDescriptor> &L,
const Optional<StrOffsetsContributionDescriptor> &R) {
- if (L && R) return L->Base < R->Base;
+ if (L && R)
+ return L->Base < R->Base;
return R.hasValue();
});
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
index 19bfcaed202..c26db9ab96e 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugAranges.cpp
@@ -80,7 +80,7 @@ void DWARFDebugAranges::appendRange(uint32_t CUOffset, uint64_t LowPC,
void DWARFDebugAranges::construct() {
std::multiset<uint32_t> ValidCUs; // Maintain the set of CUs describing
// a current address range.
- llvm::sort(Endpoints.begin(), Endpoints.end());
+ llvm::sort(Endpoints);
uint64_t PrevAddress = -1ULL;
for (const auto &E : Endpoints) {
if (PrevAddress < E.Address && !ValidCUs.empty()) {
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
index 7b41490090a..1d621ff244f 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
@@ -839,7 +839,7 @@ Error DWARFDebugLine::LineTable::parse(
// Sort all sequences so that address lookup will work faster.
if (!Sequences.empty()) {
- llvm::sort(Sequences.begin(), Sequences.end(), Sequence::orderByLowPC);
+ llvm::sort(Sequences, Sequence::orderByLowPC);
// Note: actually, instruction address ranges of sequences should not
// overlap (in shared objects and executables). If they do, the address
// lookup would still work, though, but result would be ambiguous.
diff --git a/llvm/lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp b/llvm/lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp
index 19efae6fc55..b40ae8e9007 100644
--- a/llvm/lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp
+++ b/llvm/lib/DebugInfo/PDB/Native/GSIStreamBuilder.cpp
@@ -144,11 +144,10 @@ void GSIHashStreamBuilder::finalizeBuckets(uint32_t RecordZeroOffset) {
// can properly early-out when it detects the record won't be found. The
// algorithm used here corresponds to the function
// caseInsensitiveComparePchPchCchCch in the reference implementation.
- llvm::sort(Bucket.begin(), Bucket.end(),
- [](const std::pair<StringRef, PSHashRecord> &Left,
- const std::pair<StringRef, PSHashRecord> &Right) {
- return gsiRecordLess(Left.first, Right.first);
- });
+ llvm::sort(Bucket, [](const std::pair<StringRef, PSHashRecord> &Left,
+ const std::pair<StringRef, PSHashRecord> &Right) {
+ return gsiRecordLess(Left.first, Right.first);
+ });
for (const auto &Entry : Bucket)
HashRecords.push_back(Entry.second);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 387154ecfe5..7f3a6bee6b9 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -199,7 +199,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
!isa<GlobalVariable>(V) && !isa<Function>(V) && !isa<BasicBlock>(V);
if (auto *BA = dyn_cast<BlockAddress>(V))
ID = OM.lookup(BA->getBasicBlock()).first;
- llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ llvm::sort(List, [&](const Entry &L, const Entry &R) {
const Use *LU = L.first;
const Use *RU = R.first;
if (LU == RU)
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index d1330e93e21..63a0f6f22af 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -658,7 +658,7 @@ AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
FoldingSetNodeID ID;
SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
- llvm::sort(SortedAttrs.begin(), SortedAttrs.end());
+ llvm::sort(SortedAttrs);
for (const auto Attr : SortedAttrs)
Attr.Profile(ID);
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
index 83a22d95bd8..204fecde32c 100644
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -237,7 +237,7 @@ void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
// Copy out uses since UseMap will get touched below.
using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
- llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
});
for (const auto &Pair : Uses) {
@@ -290,7 +290,7 @@ void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
// Copy out uses since UseMap could get touched below.
using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
- llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
});
UseMap.clear();
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index d4f5173d92f..40d1edc33e7 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2300,7 +2300,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
if (isa<PHINode>(BB.front())) {
SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
- llvm::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds);
for (const PHINode &PN : BB.phis()) {
// Ensure that PHI nodes have at least one entry!
Assert(PN.getNumIncomingValues() != 0,
@@ -2318,7 +2318,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
Values.push_back(
std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
- llvm::sort(Values.begin(), Values.end());
+ llvm::sort(Values);
for (unsigned i = 0, e = Values.size(); i != e; ++i) {
// Check to make sure that if there is more than one entry for a
diff --git a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index 4e47239f49d..9500b2ded70 100644
--- a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -968,12 +968,11 @@ void ThinLTOCodeGenerator::run() {
std::vector<int> ModulesOrdering;
ModulesOrdering.resize(Modules.size());
std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
- llvm::sort(ModulesOrdering.begin(), ModulesOrdering.end(),
- [&](int LeftIndex, int RightIndex) {
- auto LSize = Modules[LeftIndex].getBuffer().size();
- auto RSize = Modules[RightIndex].getBuffer().size();
- return LSize > RSize;
- });
+ llvm::sort(ModulesOrdering, [&](int LeftIndex, int RightIndex) {
+ auto LSize = Modules[LeftIndex].getBuffer().size();
+ auto RSize = Modules[RightIndex].getBuffer().size();
+ return LSize > RSize;
+ });
// Parallel optimizer + codegen
{
diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp
index 2664528909a..c2e8dc1ef65 100644
--- a/llvm/lib/MC/MachObjectWriter.cpp
+++ b/llvm/lib/MC/MachObjectWriter.cpp
@@ -597,8 +597,8 @@ void MachObjectWriter::computeSymbolTable(
}
// External and undefined symbols are required to be in lexicographic order.
- llvm::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
- llvm::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+ llvm::sort(ExternalSymbolData);
+ llvm::sort(UndefinedSymbolData);
// Set the symbol indices.
Index = 0;
diff --git a/llvm/lib/MC/WinCOFFObjectWriter.cpp b/llvm/lib/MC/WinCOFFObjectWriter.cpp
index 3ec1c092600..b774852eabe 100644
--- a/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -558,10 +558,9 @@ void WinCOFFObjectWriter::writeSectionHeaders() {
std::vector<COFFSection *> Arr;
for (auto &Section : Sections)
Arr.push_back(Section.get());
- llvm::sort(Arr.begin(), Arr.end(),
- [](const COFFSection *A, const COFFSection *B) {
- return A->Number < B->Number;
- });
+ llvm::sort(Arr, [](const COFFSection *A, const COFFSection *B) {
+ return A->Number < B->Number;
+ });
for (auto &Section : Arr) {
if (Section->Number == -1)
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
index 5d9ea52bd74..b2dde3406a6 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
@@ -83,7 +83,7 @@ Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) {
return Counter::getZero();
// Group the terms by counter ID.
- llvm::sort(Terms.begin(), Terms.end(), [](const Term &LHS, const Term &RHS) {
+ llvm::sort(Terms, [](const Term &LHS, const Term &RHS) {
return LHS.CounterID < RHS.CounterID;
});
@@ -463,8 +463,7 @@ class SegmentBuilder {
/// Sort a nested sequence of regions from a single file.
static void sortNestedRegions(MutableArrayRef<CountedRegion> Regions) {
- llvm::sort(Regions.begin(), Regions.end(), [](const CountedRegion &LHS,
- const CountedRegion &RHS) {
+ llvm::sort(Regions, [](const CountedRegion &LHS, const CountedRegion &RHS) {
if (LHS.startLoc() != RHS.startLoc())
return LHS.startLoc() < RHS.startLoc();
if (LHS.endLoc() != RHS.endLoc())
@@ -561,7 +560,7 @@ std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const {
for (const auto &Function : getCoveredFunctions())
Filenames.insert(Filenames.end(), Function.Filenames.begin(),
Function.Filenames.end());
- llvm::sort(Filenames.begin(), Filenames.end());
+ llvm::sort(Filenames);
auto Last = std::unique(Filenames.begin(), Filenames.end());
Filenames.erase(Last, Filenames.end());
return Filenames;
diff --git a/llvm/lib/ProfileData/GCOV.cpp b/llvm/lib/ProfileData/GCOV.cpp
index 5d9a5ca9895..b687346a2c0 100644
--- a/llvm/lib/ProfileData/GCOV.cpp
+++ b/llvm/lib/ProfileData/GCOV.cpp
@@ -712,7 +712,7 @@ void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
SmallVector<StringRef, 4> Filenames;
for (const auto &LI : LineInfo)
Filenames.push_back(LI.first());
- llvm::sort(Filenames.begin(), Filenames.end());
+ llvm::sort(Filenames);
for (StringRef Filename : Filenames) {
auto AllLines = LineConsumer(Filename);
diff --git a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
index 62f00d693c6..3a8462fd9b0 100644
--- a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
+++ b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
@@ -58,7 +58,7 @@ void SampleProfileSummaryBuilder::addRecord(
void ProfileSummaryBuilder::computeDetailedSummary() {
if (DetailedSummaryCutoffs.empty())
return;
- llvm::sort(DetailedSummaryCutoffs.begin(), DetailedSummaryCutoffs.end());
+ llvm::sort(DetailedSummaryCutoffs);
auto Iter = CountFrequencies.begin();
const auto End = CountFrequencies.end();
diff --git a/llvm/lib/Support/JSON.cpp b/llvm/lib/Support/JSON.cpp
index a5dae7a7c2e..d468013fb94 100644
--- a/llvm/lib/Support/JSON.cpp
+++ b/llvm/lib/Support/JSON.cpp
@@ -517,7 +517,7 @@ static std::vector<const Object::value_type *> sortedElements(const Object &O) {
std::vector<const Object::value_type *> Elements;
for (const auto &E : O)
Elements.push_back(&E);
- llvm::sort(Elements.begin(), Elements.end(),
+ llvm::sort(Elements,
[](const Object::value_type *L, const Object::value_type *R) {
return L->first < R->first;
});
diff --git a/llvm/lib/Support/SourceMgr.cpp b/llvm/lib/Support/SourceMgr.cpp
index e17e5ab0141..582e2cf6c11 100644
--- a/llvm/lib/Support/SourceMgr.cpp
+++ b/llvm/lib/Support/SourceMgr.cpp
@@ -269,7 +269,7 @@ SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
: SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Kind(Kind),
Message(Msg), LineContents(LineStr), Ranges(Ranges.vec()),
FixIts(Hints.begin(), Hints.end()) {
- llvm::sort(FixIts.begin(), FixIts.end());
+ llvm::sort(FixIts);
}
static void buildFixItLine(std::string &CaretLine, std::string &FixItLine,
diff --git a/llvm/lib/Support/Timer.cpp b/llvm/lib/Support/Timer.cpp
index 3821f487a8f..82f5810dd10 100644
--- a/llvm/lib/Support/Timer.cpp
+++ b/llvm/lib/Support/Timer.cpp
@@ -295,7 +295,7 @@ void TimerGroup::addTimer(Timer &T) {
void TimerGroup::PrintQueuedTimers(raw_ostream &OS) {
// Sort the timers in descending order by amount of time taken.
- llvm::sort(TimersToPrint.begin(), TimersToPrint.end());
+ llvm::sort(TimersToPrint);
TimeRecord Total;
for (const PrintRecord &Record : TimersToPrint)
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index 24251bc57f6..0aa0944aeef 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -158,10 +158,9 @@ RecordRecTy *RecordRecTy::get(ArrayRef<Record *> UnsortedClasses) {
SmallVector<Record *, 4> Classes(UnsortedClasses.begin(),
UnsortedClasses.end());
- llvm::sort(Classes.begin(), Classes.end(),
- [](Record *LHS, Record *RHS) {
- return LHS->getNameInitAsString() < RHS->getNameInitAsString();
- });
+ llvm::sort(Classes, [](Record *LHS, Record *RHS) {
+ return LHS->getNameInitAsString() < RHS->getNameInitAsString();
+ });
FoldingSetNodeID ID;
ProfileRecordRecTy(ID, Classes);
diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index a95476b9118..452fbd3488b 100644
--- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -377,11 +377,10 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
// Now we have a set of sets, order them by start address so
// we can iterate over them sequentially.
- llvm::sort(V.begin(), V.end(),
- [](const std::vector<Chain*> &A,
- const std::vector<Chain*> &B) {
- return A.front()->startsBefore(B.front());
- });
+ llvm::sort(V,
+ [](const std::vector<Chain *> &A, const std::vector<Chain *> &B) {
+ return A.front()->startsBefore(B.front());
+ });
// As we only have two colors, we can track the global (BB-level) balance of
// odds versus evens. We aim to keep this near zero to keep both execution
@@ -453,16 +452,16 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
// change them to!
// Final tie-break with instruction order so pass output is stable (i.e. not
// dependent on malloc'd pointer values).
- llvm::sort(GV.begin(), GV.end(), [](const Chain *G1, const Chain *G2) {
- if (G1->size() != G2->size())
- return G1->size() > G2->size();
- if (G1->requiresFixup() != G2->requiresFixup())
- return G1->requiresFixup() > G2->requiresFixup();
- // Make sure startsBefore() produces a stable final order.
- assert((G1 == G2 || (G1->startsBefore(G2) ^ G2->startsBefore(G1))) &&
- "Starts before not total order!");
- return G1->startsBefore(G2);
- });
+ llvm::sort(GV, [](const Chain *G1, const Chain *G2) {
+ if (G1->size() != G2->size())
+ return G1->size() > G2->size();
+ if (G1->requiresFixup() != G2->requiresFixup())
+ return G1->requiresFixup() > G2->requiresFixup();
+ // Make sure startsBefore() produces a stable final order.
+ assert((G1 == G2 || (G1->startsBefore(G2) ^ G2->startsBefore(G1))) &&
+ "Starts before not total order!");
+ return G1->startsBefore(G2);
+ });
Color PreferredColor = Parity < 0 ? Color::Even : Color::Odd;
while (Chain *G = getAndEraseNext(PreferredColor, GV)) {
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index 15366d66bd8..8e4cc391dc2 100644
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -434,8 +434,7 @@ void GCNIterativeScheduler::scheduleRegion(Region &R, Range &&Schedule,
// Sort recorded regions by pressure - highest at the front
void GCNIterativeScheduler::sortRegionsByPressure(unsigned TargetOcc) {
const auto &ST = MF.getSubtarget<GCNSubtarget>();
- llvm::sort(Regions.begin(), Regions.end(),
- [&ST, TargetOcc](const Region *R1, const Region *R2) {
+ llvm::sort(Regions, [&ST, TargetOcc](const Region *R1, const Region *R2) {
return R2->MaxPressure.less(ST, R1->MaxPressure, TargetOcc);
});
}
diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
index cd14239de82..aa976d5141f 100644
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -168,16 +168,15 @@ void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
CoveringSubregs.push_back(Idx);
}
- llvm::sort(CoveringSubregs.begin(), CoveringSubregs.end(),
- [this](unsigned A, unsigned B) {
- LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(A);
- LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(B);
- unsigned NA = MaskA.getNumLanes();
- unsigned NB = MaskB.getNumLanes();
- if (NA != NB)
- return NA > NB;
- return MaskA.getHighestLane() > MaskB.getHighestLane();
- });
+ llvm::sort(CoveringSubregs, [this](unsigned A, unsigned B) {
+ LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(A);
+ LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(B);
+ unsigned NA = MaskA.getNumLanes();
+ unsigned NB = MaskB.getNumLanes();
+ if (NA != NB)
+ return NA > NB;
+ return MaskA.getHighestLane() > MaskB.getHighestLane();
+ });
for (unsigned Idx : CoveringSubregs) {
LaneBitmask SubRegMask = TRI->getSubRegIndexLaneMask(Idx);
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 83de2c2c150..2ee8f9604e0 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1444,9 +1444,8 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
SmallVector<unsigned, 6> ScratchRegs;
for(unsigned I = 5; I < MI->getNumOperands(); ++I)
ScratchRegs.push_back(MI->getOperand(I).getReg());
- llvm::sort(ScratchRegs.begin(), ScratchRegs.end(),
- [&TRI](const unsigned &Reg1,
- const unsigned &Reg2) -> bool {
+ llvm::sort(ScratchRegs,
+ [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
return TRI.getEncodingValue(Reg1) <
TRI.getEncodingValue(Reg2);
});
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 742d5dd3d8c..3e057e953d8 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1008,8 +1008,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
if (Regs.empty())
continue;
- llvm::sort(Regs.begin(), Regs.end(), [&](const RegAndKill &LHS,
- const RegAndKill &RHS) {
+ llvm::sort(Regs, [&](const RegAndKill &LHS, const RegAndKill &RHS) {
return TRI.getEncodingValue(LHS.first) < TRI.getEncodingValue(RHS.first);
});
@@ -1105,7 +1104,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
if (Regs.empty())
continue;
- llvm::sort(Regs.begin(), Regs.end(), [&](unsigned LHS, unsigned RHS) {
+ llvm::sort(Regs, [&](unsigned LHS, unsigned RHS) {
return TRI.getEncodingValue(LHS) < TRI.getEncodingValue(RHS);
});
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index e6b8f2fcf45..6da7430a8e5 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1848,7 +1848,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
auto LessThan = [](const MergeCandidate* M0, const MergeCandidate *M1) {
return M0->InsertPos < M1->InsertPos;
};
- llvm::sort(Candidates.begin(), Candidates.end(), LessThan);
+ llvm::sort(Candidates, LessThan);
// Go through list of candidates and merge.
bool Changed = false;
@@ -2186,13 +2186,12 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
bool RetVal = false;
// Sort by offset (in reverse order).
- llvm::sort(Ops.begin(), Ops.end(),
- [](const MachineInstr *LHS, const MachineInstr *RHS) {
- int LOffset = getMemoryOpOffset(*LHS);
- int ROffset = getMemoryOpOffset(*RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- });
+ llvm::sort(Ops, [](const MachineInstr *LHS, const MachineInstr *RHS) {
+ int LOffset = getMemoryOpOffset(*LHS);
+ int ROffset = getMemoryOpOffset(*RHS);
+ assert(LHS == RHS || LOffset != ROffset);
+ return LOffset > ROffset;
+ });
// The loads / stores of the same base are in order. Scan them from first to
// last and check for the following:
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 3dc2ac39f9f..b481f8b9659 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -1072,7 +1072,7 @@ void ARMTargetELFStreamer::finishAttributeSection() {
if (Contents.empty())
return;
- llvm::sort(Contents.begin(), Contents.end(), AttributeItem::LessTag);
+ llvm::sort(Contents, AttributeItem::LessTag);
ARMELFStreamer &Streamer = getStreamer();
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index b877ed7d5a9..6b48384c737 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1947,10 +1947,9 @@ bool HCE::runOnMachineFunction(MachineFunction &MF) {
AssignmentMap IMap;
collect(MF);
- llvm::sort(Extenders.begin(), Extenders.end(),
- [](const ExtDesc &A, const ExtDesc &B) {
- return ExtValue(A) < ExtValue(B);
- });
+ llvm::sort(Extenders, [](const ExtDesc &A, const ExtDesc &B) {
+ return ExtValue(A) < ExtValue(B);
+ });
bool Changed = false;
LLVM_DEBUG(dbgs() << "Collected " << Extenders.size() << " extenders\n");
diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index 2582a021e95..e3492e7374e 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -632,7 +632,7 @@ void HexagonGenInsert::buildOrderingBT(RegisterOrdering &RB,
SortableVectorType VRs;
for (RegisterOrdering::iterator I = RB.begin(), E = RB.end(); I != E; ++I)
VRs.push_back(I->first);
- llvm::sort(VRs.begin(), VRs.end(), LexCmp);
+ llvm::sort(VRs, LexCmp);
// Transfer the results to the outgoing register ordering.
for (unsigned i = 0, n = VRs.size(); i < n; ++i)
RO.insert(std::make_pair(VRs[i], i));
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
index 991af047387..61c2121163b 100644
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -578,7 +578,7 @@ bool HexagonStoreWidening::processBasicBlock(MachineBasicBlock &MBB) {
};
for (auto &G : SGs) {
assert(G.size() > 1 && "Store group with fewer than 2 elements");
- llvm::sort(G.begin(), G.end(), Less);
+ llvm::sort(G, Less);
Changed |= processStoreGroup(G);
}
diff --git a/llvm/lib/Target/Hexagon/RDFDeadCode.cpp b/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
index da339bfd3ff..8dcd485d65e 100644
--- a/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
+++ b/llvm/lib/Target/Hexagon/RDFDeadCode.cpp
@@ -214,7 +214,7 @@ bool DeadCodeElimination::erase(const SetVector<NodeId> &Nodes) {
return false;
return A.Id < B.Id;
};
- llvm::sort(DRNs.begin(), DRNs.end(), UsesFirst);
+ llvm::sort(DRNs, UsesFirst);
if (trace())
dbgs() << "Removing dead ref nodes:\n";
diff --git a/llvm/lib/Target/Hexagon/RDFGraph.cpp b/llvm/lib/Target/Hexagon/RDFGraph.cpp
index 3d1ec31dada..d8ca08e7050 100644
--- a/llvm/lib/Target/Hexagon/RDFGraph.cpp
+++ b/llvm/lib/Target/Hexagon/RDFGraph.cpp
@@ -1471,7 +1471,7 @@ void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
// and add a def for each S in the closure.
// Sort the refs so that the phis will be created in a deterministic order.
- llvm::sort(MaxRefs.begin(), MaxRefs.end());
+ llvm::sort(MaxRefs);
// Remove duplicates.
auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
MaxRefs.erase(NewEnd, MaxRefs.end());
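
Several call sites in this patch pair the sort with a std::unique/erase step to deduplicate the container; only the sort call changes, while the dedup idiom around it stays as-is. A small standalone sketch of that idiom (names are illustrative, and std::sort is used here only to keep the sketch independent of LLVM headers):

#include <algorithm>
#include <vector>

void dedup(std::vector<unsigned> &Refs) {
  // Sorting first puts equal elements next to each other...
  std::sort(Refs.begin(), Refs.end());
  // ...so std::unique can shift the distinct ones to the front,
  // and erase() drops the leftover tail.
  Refs.erase(std::unique(Refs.begin(), Refs.end()), Refs.end());
}
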
diff --git a/llvm/lib/Target/Hexagon/RDFLiveness.cpp b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
index c257d754ddf..9ff48d25a02 100644
--- a/llvm/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/llvm/lib/Target/Hexagon/RDFLiveness.cpp
@@ -207,7 +207,7 @@ NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
};
std::vector<NodeId> Tmp(Owners.begin(), Owners.end());
- llvm::sort(Tmp.begin(), Tmp.end(), Less);
+ llvm::sort(Tmp, Less);
// The vector is a list of instructions, so that defs coming from
// the same instruction don't need to be artificially ordered.
@@ -813,7 +813,7 @@ void Liveness::computeLiveIns() {
std::vector<RegisterRef> LV;
for (auto I = B.livein_begin(), E = B.livein_end(); I != E; ++I)
LV.push_back(RegisterRef(I->PhysReg, I->LaneMask));
- llvm::sort(LV.begin(), LV.end());
+ llvm::sort(LV);
dbgs() << printMBBReference(B) << "\t rec = {";
for (auto I : LV)
dbgs() << ' ' << Print<RegisterRef>(I, DFG);
@@ -824,7 +824,7 @@ void Liveness::computeLiveIns() {
const RegisterAggr &LG = LiveMap[&B];
for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
LV.push_back(*I);
- llvm::sort(LV.begin(), LV.end());
+ llvm::sort(LV);
dbgs() << "\tcomp = {";
for (auto I : LV)
dbgs() << ' ' << Print<RegisterRef>(I, DFG);
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 3dc753772e5..c600382446a 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -453,7 +453,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
return;
// Sort relocations by the address they are applied to.
- llvm::sort(Relocs.begin(), Relocs.end(),
+ llvm::sort(Relocs,
[](const ELFRelocationEntry &A, const ELFRelocationEntry &B) {
return A.Offset < B.Offset;
});
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 54239c228d9..af17bb5f165 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -1401,7 +1401,7 @@ class BitPermutationSelector {
for (auto &I : ValueRots) {
ValueRotsVec.push_back(I.second);
}
- llvm::sort(ValueRotsVec.begin(), ValueRotsVec.end());
+ llvm::sort(ValueRotsVec);
}
// In 64-bit mode, rlwinm and friends have a rotation operator that
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index d69a2793710..d97b13a8d69 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -118,16 +118,15 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
// registers), by weight next, and then by position.
// TODO: Investigate more intelligent sorting heuristics. For starters, we
// should try to coalesce adjacent live intervals before non-adjacent ones.
- llvm::sort(SortedIntervals.begin(), SortedIntervals.end(),
- [MRI](LiveInterval *LHS, LiveInterval *RHS) {
- if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg))
- return MRI->isLiveIn(LHS->reg);
- if (LHS->weight != RHS->weight)
- return LHS->weight > RHS->weight;
- if (LHS->empty() || RHS->empty())
- return !LHS->empty() && RHS->empty();
- return *LHS < *RHS;
- });
+ llvm::sort(SortedIntervals, [MRI](LiveInterval *LHS, LiveInterval *RHS) {
+ if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg))
+ return MRI->isLiveIn(LHS->reg);
+ if (LHS->weight != RHS->weight)
+ return LHS->weight > RHS->weight;
+ if (LHS->empty() || RHS->empty())
+ return !LHS->empty() && RHS->empty();
+ return *LHS < *RHS;
+ });
LLVM_DEBUG(dbgs() << "Coloring register intervals:\n");
SmallVector<unsigned, 16> SlotMapping(SortedIntervals.size(), -1u);
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 7979315a2ae..14e4c455a08 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -828,7 +828,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
"split above!");
// Sort and unique the codes to minimize them.
- llvm::sort(UncondCodeSeq.begin(), UncondCodeSeq.end());
+ llvm::sort(UncondCodeSeq);
UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
UncondCodeSeq.end());
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index b87c149a36d..fff8a66d0e7 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -151,7 +151,7 @@ static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
Offset,
FramePtr));
}
- llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
+ llvm::sort(SpillList, CompareSSIOffset);
}
/// Creates an ordered list of EH info register 'spills'.
@@ -170,7 +170,7 @@ static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
SpillList.push_back(
StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[1]),
TL->getExceptionSelectorRegister(PersonalityFn)));
- llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
+ llvm::sort(SpillList, CompareSSIOffset);
}
static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 1c93ba8fa14..7455cd997ad 100644
--- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -129,7 +129,7 @@ createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
do {
SmallVector<WeakTrackingVH, 8> WUsers(CE->user_begin(), CE->user_end());
- llvm::sort(WUsers.begin(), WUsers.end());
+ llvm::sort(WUsers);
WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
while (!WUsers.empty())
if (WeakTrackingVH WU = WUsers.pop_back_val()) {
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 9d92806a92e..4357948d5ab 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -49,7 +49,7 @@ public:
BlockToIndexMapping(Function &F) {
for (BasicBlock &BB : F)
V.push_back(&BB);
- llvm::sort(V.begin(), V.end());
+ llvm::sort(V);
}
size_t blockToIndex(BasicBlock *BB) const {
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 4f757188470..2d29e93b1ca 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1997,7 +1997,7 @@ bool LowerTypeTestsModule::lower() {
}
Sets.emplace_back(I, MaxUniqueId);
}
- llvm::sort(Sets.begin(), Sets.end(),
+ llvm::sort(Sets,
[](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
return S1.second < S2.second;
@@ -2022,12 +2022,12 @@ bool LowerTypeTestsModule::lower() {
// Order type identifiers by unique ID for determinism. This ordering is
// stable as there is a one-to-one mapping between metadata and unique IDs.
- llvm::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) {
+ llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
});
// Same for the branch funnels.
- llvm::sort(ICallBranchFunnels.begin(), ICallBranchFunnels.end(),
+ llvm::sort(ICallBranchFunnels,
[&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
return F1->UniqueId < F2->UniqueId;
});
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 41ed0614af0..182202fda05 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -681,13 +681,12 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
Sum += NameFS.second.getEntrySamples();
R.push_back(&NameFS.second);
}
- llvm::sort(R.begin(), R.end(),
- [](const FunctionSamples *L, const FunctionSamples *R) {
- if (L->getEntrySamples() != R->getEntrySamples())
- return L->getEntrySamples() > R->getEntrySamples();
- return FunctionSamples::getGUID(L->getName()) <
- FunctionSamples::getGUID(R->getName());
- });
+ llvm::sort(R, [](const FunctionSamples *L, const FunctionSamples *R) {
+ if (L->getEntrySamples() != R->getEntrySamples())
+ return L->getEntrySamples() > R->getEntrySamples();
+ return FunctionSamples::getGUID(L->getName()) <
+ FunctionSamples::getGUID(R->getName());
+ });
}
return R;
}
@@ -1174,13 +1173,12 @@ static SmallVector<InstrProfValueData, 2> SortCallTargets(
SmallVector<InstrProfValueData, 2> R;
for (auto I = M.begin(); I != M.end(); ++I)
R.push_back({FunctionSamples::getGUID(I->getKey()), I->getValue()});
- llvm::sort(R.begin(), R.end(),
- [](const InstrProfValueData &L, const InstrProfValueData &R) {
- if (L.Count == R.Count)
- return L.Value > R.Value;
- else
- return L.Count > R.Count;
- });
+ llvm::sort(R, [](const InstrProfValueData &L, const InstrProfValueData &R) {
+ if (L.Count == R.Count)
+ return L.Value > R.Value;
+ else
+ return L.Count > R.Count;
+ });
return R;
}
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index d031d675642..3043df9cca7 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -748,11 +748,9 @@ private:
// TODO: Remove fully-redundant expressions.
// Get instruction from the Map, assume that all the Instructions
// with same VNs have same rank (this is an approximation).
- llvm::sort(Ranks.begin(), Ranks.end(),
- [this, &Map](const VNType &r1, const VNType &r2) {
- return (rank(*Map.lookup(r1).begin()) <
- rank(*Map.lookup(r2).begin()));
- });
+ llvm::sort(Ranks, [this, &Map](const VNType &r1, const VNType &r2) {
+ return (rank(*Map.lookup(r1).begin()) < rank(*Map.lookup(r2).begin()));
+ });
// - Sort VNs according to their rank, and start with lowest ranked VN
// - Take a VN and for each instruction with same VN
diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index 3737831e59b..187c668c338 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -239,7 +239,7 @@ public:
SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
- llvm::sort(Ops.begin(), Ops.end());
+ llvm::sort(Ops);
for (auto &P : Ops) {
Blocks.push_back(P.first);
Values.push_back(P.second);
@@ -762,7 +762,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
}
if (Preds.size() < 2)
return 0;
- llvm::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds);
unsigned NumOrigPreds = Preds.size();
// We can only sink instructions through unconditional branches.
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 9106459cfa9..cbbd7b861d8 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -698,9 +698,8 @@ bool GuardWideningImpl::combineRangeChecks(
// CurrentChecks.size() will typically be 3 here, but so far there has been
// no need to hard-code that fact.
- llvm::sort(CurrentChecks.begin(), CurrentChecks.end(),
- [&](const GuardWideningImpl::RangeCheck &LHS,
- const GuardWideningImpl::RangeCheck &RHS) {
+ llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
+ const GuardWideningImpl::RangeCheck &RHS) {
return LHS.getOffsetValue().slt(RHS.getOffsetValue());
});
diff --git a/llvm/lib/Transforms/Scalar/LoopSink.cpp b/llvm/lib/Transforms/Scalar/LoopSink.cpp
index 67bcbdca185..db502e1c5db 100644
--- a/llvm/lib/Transforms/Scalar/LoopSink.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopSink.cpp
@@ -208,11 +208,9 @@ static bool sinkInstruction(Loop &L, Instruction &I,
SmallVector<BasicBlock *, 2> SortedBBsToSinkInto;
SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(),
BBsToSinkInto.end());
- llvm::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(),
- [&](BasicBlock *A, BasicBlock *B) {
- return LoopBlockNumber.find(A)->second <
- LoopBlockNumber.find(B)->second;
- });
+ llvm::sort(SortedBBsToSinkInto, [&](BasicBlock *A, BasicBlock *B) {
+ return LoopBlockNumber.find(A)->second < LoopBlockNumber.find(B)->second;
+ });
BasicBlock *MoveBB = *SortedBBsToSinkInto.begin();
// FIXME: Optimize the efficiency for cloned value replacement. The current
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index fa83b48210b..857b83da96d 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1487,7 +1487,7 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
- llvm::sort(Key.begin(), Key.end());
+ llvm::sort(Key);
return Uniquifier.count(Key);
}
@@ -1511,7 +1511,7 @@ bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
- llvm::sort(Key.begin(), Key.end());
+ llvm::sort(Key);
if (!Uniquifier.insert(Key).second)
return false;
@@ -4238,7 +4238,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for
// uniquifying.
- llvm::sort(Key.begin(), Key.end());
+ llvm::sort(Key);
std::pair<BestFormulaeTy::const_iterator, bool> P =
BestFormulae.insert(std::make_pair(Key, FIdx));
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 1e4bbd4c472..f68662b488c 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -465,10 +465,9 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
#endif // MERGEICMPS_DOT_ON
// Reorder blocks by LHS. We can do that without changing the
// semantics because we are only accessing dereferencable memory.
- llvm::sort(Comparisons_.begin(), Comparisons_.end(),
- [](const BCECmpBlock &a, const BCECmpBlock &b) {
- return a.Lhs() < b.Lhs();
- });
+ llvm::sort(Comparisons_, [](const BCECmpBlock &a, const BCECmpBlock &b) {
+ return a.Lhs() < b.Lhs();
+ });
#ifdef MERGEICMPS_DOT_ON
errs() << "AFTER REORDERING:\n\n";
dump();
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index f3cbd87cae6..ed9f868af61 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -959,8 +959,7 @@ static bool isCopyOfAPHI(const Value *V) {
// order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks.
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
- llvm::sort(Ops.begin(), Ops.end(),
- [&](const ValPair &P1, const ValPair &P2) {
+ llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
return BlockInstRange.lookup(P1.second).first <
BlockInstRange.lookup(P2.second).first;
});
@@ -3955,7 +3954,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
// Sort the whole thing.
- llvm::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
+ llvm::sort(DFSOrderedSet);
for (auto &VD : DFSOrderedSet) {
int MemberDFSIn = VD.DFSIn;
int MemberDFSOut = VD.DFSOut;
@@ -4118,7 +4117,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
// If we have possible dead stores to look at, try to eliminate them.
if (CC->getStoreCount() > 0) {
convertClassToLoadsAndStores(*CC, PossibleDeadStores);
- llvm::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
+ llvm::sort(PossibleDeadStores);
ValueDFSStack EliminationStack;
for (auto &VD : PossibleDeadStores) {
int MemberDFSIn = VD.DFSIn;
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 8f30bccf48f..7f9aad24883 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -524,7 +524,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
};
// We need the order of list to be stable so that naming ends up stable
// when we split edges. This makes test cases much easier to write.
- llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
+ llvm::sort(PollLocations, OrderByBBName);
// We can sometimes end up with duplicate poll locations. This happens if
// a single loop is visited more than once. The fact this happens seems
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 5d3e928d509..5e23a8a3dcd 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1825,7 +1825,7 @@ static void relocationViaAlloca(
}
}
- llvm::sort(Uses.begin(), Uses.end());
+ llvm::sort(Uses);
auto Last = std::unique(Uses.begin(), Uses.end());
Uses.erase(Last, Uses.end());
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 8b591de1e99..6e991409bf0 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1060,7 +1060,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
- llvm::sort(Slices.begin(), Slices.end());
+ llvm::sort(Slices);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1906,7 +1906,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
"All non-integer types eliminated!");
return RHSTy->getNumElements() < LHSTy->getNumElements();
};
- llvm::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
+ llvm::sort(CandidateTys, RankVectorTypes);
CandidateTys.erase(
std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
CandidateTys.end());
@@ -4221,7 +4221,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
}
if (!IsSorted)
- llvm::sort(AS.begin(), AS.end());
+ llvm::sort(AS);
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 57799c8cd8c..17035f469da 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -1265,11 +1265,10 @@ static void buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks,
// matter as we're just trying to build up the map from inside-out; we use
// the map in a more stably ordered way below.
auto OrderedClonedExitsInLoops = ClonedExitsInLoops;
- llvm::sort(OrderedClonedExitsInLoops.begin(), OrderedClonedExitsInLoops.end(),
- [&](BasicBlock *LHS, BasicBlock *RHS) {
- return ExitLoopMap.lookup(LHS)->getLoopDepth() <
- ExitLoopMap.lookup(RHS)->getLoopDepth();
- });
+ llvm::sort(OrderedClonedExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
+ return ExitLoopMap.lookup(LHS)->getLoopDepth() <
+ ExitLoopMap.lookup(RHS)->getLoopDepth();
+ });
// Populate the existing ExitLoopMap with everything reachable from each
// exit, starting from the inner most exit.
diff --git a/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp b/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
index 8382220fc9e..b45b422b34b 100644
--- a/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
+++ b/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
@@ -161,7 +161,7 @@ void ImportedFunctionsInliningStatistics::dump(const bool Verbose) {
void ImportedFunctionsInliningStatistics::calculateRealInlines() {
// Removing duplicated Callers.
- llvm::sort(NonImportedCallers.begin(), NonImportedCallers.end());
+ llvm::sort(NonImportedCallers);
NonImportedCallers.erase(
std::unique(NonImportedCallers.begin(), NonImportedCallers.end()),
NonImportedCallers.end());
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index e99ecfef19c..d019a44fc70 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -372,7 +372,7 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
Case.getCaseSuccessor()));
- llvm::sort(Cases.begin(), Cases.end(), CaseCmp());
+ llvm::sort(Cases, CaseCmp());
// Merge case into clusters
if (Cases.size() >= 2) {
diff --git a/llvm/lib/Transforms/Utils/PredicateInfo.cpp b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
index e56ccef1289..9d9624850fb 100644
--- a/llvm/lib/Transforms/Utils/PredicateInfo.cpp
+++ b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
@@ -569,7 +569,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
auto Comparator = [&](const Value *A, const Value *B) {
return valueComesBefore(OI, A, B);
};
- llvm::sort(OpsToRename.begin(), OpsToRename.end(), Comparator);
+ llvm::sort(OpsToRename, Comparator);
ValueDFS_Compare Compare(OI);
// Compute liveness, and rename in O(uses) per Op.
for (auto *Op : OpsToRename) {
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 1e9193c04ff..6773ad7bfc7 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -477,7 +477,7 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
- llvm::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
+ llvm::sort(StoresByIndex, less_first());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
@@ -638,10 +638,9 @@ void PromoteMem2Reg::run() {
SmallVector<BasicBlock *, 32> PHIBlocks;
IDF.calculate(PHIBlocks);
if (PHIBlocks.size() > 1)
- llvm::sort(PHIBlocks.begin(), PHIBlocks.end(),
- [this](BasicBlock *A, BasicBlock *B) {
- return BBNumbers.lookup(A) < BBNumbers.lookup(B);
- });
+ llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
+ return BBNumbers.lookup(A) < BBNumbers.lookup(B);
+ });
unsigned CurrentVersion = 0;
for (BasicBlock *BB : PHIBlocks)
@@ -752,7 +751,7 @@ void PromoteMem2Reg::run() {
// Ok, now we know that all of the PHI nodes are missing entries for some
// basic blocks. Start by sorting the incoming predecessors for efficient
// access.
- llvm::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds);
// Now we loop through all BB's which have entries in SomePHI and remove
// them from the Preds list.
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 493d1688226..1e93b873f37 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5521,7 +5521,7 @@ static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
SmallVector<int64_t,4> Values;
for (auto &C : SI->cases())
Values.push_back(C.getCaseValue()->getValue().getSExtValue());
- llvm::sort(Values.begin(), Values.end());
+ llvm::sort(Values);
// If the switch is already dense, there's nothing useful to do here.
if (isSwitchDense(Values))
diff --git a/llvm/lib/Transforms/Utils/SplitModule.cpp b/llvm/lib/Transforms/Utils/SplitModule.cpp
index f8d758c5498..5db4d2e4df9 100644
--- a/llvm/lib/Transforms/Utils/SplitModule.cpp
+++ b/llvm/lib/Transforms/Utils/SplitModule.cpp
@@ -181,14 +181,12 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
std::make_pair(std::distance(GVtoClusterMap.member_begin(I),
GVtoClusterMap.member_end()), I));
- llvm::sort(Sets.begin(), Sets.end(),
- [](const SortType &a, const SortType &b) {
- if (a.first == b.first)
- return a.second->getData()->getName() >
- b.second->getData()->getName();
- else
- return a.first > b.first;
- });
+ llvm::sort(Sets, [](const SortType &a, const SortType &b) {
+ if (a.first == b.first)
+ return a.second->getData()->getName() > b.second->getData()->getName();
+ else
+ return a.first > b.first;
+ });
for (auto &I : Sets) {
unsigned CurrentClusterID = BalancinQueue.top().first;
diff --git a/llvm/tools/dsymutil/DwarfLinker.cpp b/llvm/tools/dsymutil/DwarfLinker.cpp
index cc4a1419420..2c5cce50132 100644
--- a/llvm/tools/dsymutil/DwarfLinker.cpp
+++ b/llvm/tools/dsymutil/DwarfLinker.cpp
@@ -484,7 +484,7 @@ bool DwarfLinker::RelocationManager::findValidRelocs(
// the file, this allows us to just keep an index in the relocation
// array that we advance during our walk, rather than resorting to
// some associative container. See DwarfLinker::NextValidReloc.
- llvm::sort(ValidRelocs.begin(), ValidRelocs.end());
+ llvm::sort(ValidRelocs);
return true;
}
diff --git a/llvm/tools/dsymutil/DwarfStreamer.cpp b/llvm/tools/dsymutil/DwarfStreamer.cpp
index 6a0554bb729..ef798be7bdf 100644
--- a/llvm/tools/dsymutil/DwarfStreamer.cpp
+++ b/llvm/tools/dsymutil/DwarfStreamer.cpp
@@ -320,7 +320,7 @@ void DwarfStreamer::emitUnitRangesEntries(CompileUnit &Unit,
// The object addresses where sorted, but again, the linked
// addresses might end up in a different order.
- llvm::sort(Ranges.begin(), Ranges.end());
+ llvm::sort(Ranges);
if (!Ranges.empty()) {
MS->SwitchSection(MC->getObjectFileInfo()->getDwarfARangesSection());
diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp
index 892adc3b9dd..bec89fef98c 100644
--- a/llvm/tools/llvm-config/llvm-config.cpp
+++ b/llvm/tools/llvm-config/llvm-config.cpp
@@ -523,7 +523,7 @@ int main(int argc, char **argv) {
if (DyLibExists && !sys::fs::exists(path)) {
Components =
GetAllDyLibComponents(IsInDevelopmentTree, true, DirSep);
- llvm::sort(Components.begin(), Components.end());
+ llvm::sort(Components);
break;
}
}
diff --git a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
index e3fa5c3d741..672fbb93104 100644
--- a/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
+++ b/llvm/tools/llvm-dwarfdump/llvm-dwarfdump.cpp
@@ -358,7 +358,7 @@ static void filterByAccelName(ArrayRef<std::string> Names, DWARFContext &DICtx,
getDies(DICtx, DICtx.getAppleNamespaces(), Name, Dies);
getDies(DICtx, DICtx.getDebugNames(), Name, Dies);
}
- llvm::sort(Dies.begin(), Dies.end());
+ llvm::sort(Dies);
Dies.erase(std::unique(Dies.begin(), Dies.end()), Dies.end());
for (DWARFDie Die : Dies)
diff --git a/llvm/tools/llvm-exegesis/lib/Analysis.cpp b/llvm/tools/llvm-exegesis/lib/Analysis.cpp
index 1da36b245a4..805bd52daf1 100644
--- a/llvm/tools/llvm-exegesis/lib/Analysis.cpp
+++ b/llvm/tools/llvm-exegesis/lib/Analysis.cpp
@@ -668,10 +668,9 @@ void distributePressure(float RemainingPressure,
llvm::SmallVector<float, 32> &DensePressure) {
// Find the number of subunits with minimal pressure (they are at the
// front).
- llvm::sort(Subunits.begin(), Subunits.end(),
- [&DensePressure](const uint16_t A, const uint16_t B) {
- return DensePressure[A] < DensePressure[B];
- });
+ llvm::sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
+ return DensePressure[A] < DensePressure[B];
+ });
const auto getPressureForSubunit = [&DensePressure,
&Subunits](size_t I) -> float & {
return DensePressure[Subunits[I]];
@@ -718,11 +717,10 @@ std::vector<std::pair<uint16_t, float>> computeIdealizedProcResPressure(
llvm::SmallVector<llvm::MCWriteProcResEntry, 8> WPRS) {
// DensePressure[I] is the port pressure for Proc Resource I.
llvm::SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
- llvm::sort(WPRS.begin(), WPRS.end(),
- [](const llvm::MCWriteProcResEntry &A,
- const llvm::MCWriteProcResEntry &B) {
- return A.ProcResourceIdx < B.ProcResourceIdx;
- });
+ llvm::sort(WPRS, [](const llvm::MCWriteProcResEntry &A,
+ const llvm::MCWriteProcResEntry &B) {
+ return A.ProcResourceIdx < B.ProcResourceIdx;
+ });
for (const llvm::MCWriteProcResEntry &WPR : WPRS) {
// Get units for the entry.
const llvm::MCProcResourceDesc *const ProcResDesc =
diff --git a/llvm/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp b/llvm/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp
index bd66c712f85..47b5153b966 100644
--- a/llvm/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp
+++ b/llvm/tools/llvm-mca/lib/HardwareUnits/RegisterFile.cpp
@@ -260,10 +260,9 @@ void RegisterFile::collectWrites(SmallVectorImpl<WriteRef> &Writes,
}
// Remove duplicate entries and resize the input vector.
- llvm::sort(Writes.begin(), Writes.end(),
- [](const WriteRef &Lhs, const WriteRef &Rhs) {
- return Lhs.getWriteState() < Rhs.getWriteState();
- });
+ llvm::sort(Writes, [](const WriteRef &Lhs, const WriteRef &Rhs) {
+ return Lhs.getWriteState() < Rhs.getWriteState();
+ });
auto It = std::unique(Writes.begin(), Writes.end());
Writes.resize(std::distance(Writes.begin(), It));
diff --git a/llvm/tools/llvm-mca/lib/InstrBuilder.cpp b/llvm/tools/llvm-mca/lib/InstrBuilder.cpp
index 6e1146e3a9d..4ada7df1e11 100644
--- a/llvm/tools/llvm-mca/lib/InstrBuilder.cpp
+++ b/llvm/tools/llvm-mca/lib/InstrBuilder.cpp
@@ -64,7 +64,7 @@ static void initializeUsedResources(InstrDesc &ID,
// Sort elements by mask popcount, so that we prioritize resource units over
// resource groups, and smaller groups over larger groups.
- llvm::sort(Worklist.begin(), Worklist.end(),
+ llvm::sort(Worklist,
[](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
unsigned popcntA = countPopulation(A.first);
unsigned popcntB = countPopulation(B.first);
diff --git a/llvm/tools/llvm-nm/llvm-nm.cpp b/llvm/tools/llvm-nm/llvm-nm.cpp
index 37c1bf85809..9e4acf664fc 100644
--- a/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -709,7 +709,7 @@ static void sortAndPrintSymbolList(SymbolicFile &Obj, bool printName,
if (ReverseSort)
Cmp = [=](const NMSymbol &A, const NMSymbol &B) { return Cmp(B, A); };
- llvm::sort(SymbolList.begin(), SymbolList.end(), Cmp);
+ llvm::sort(SymbolList, Cmp);
}
if (!PrintFileName) {
diff --git a/llvm/tools/llvm-objdump/COFFDump.cpp b/llvm/tools/llvm-objdump/COFFDump.cpp
index a67ff9819e4..3503eda42f2 100644
--- a/llvm/tools/llvm-objdump/COFFDump.cpp
+++ b/llvm/tools/llvm-objdump/COFFDump.cpp
@@ -454,7 +454,7 @@ static bool getPDataSection(const COFFObjectFile *Obj,
Rels.push_back(Reloc);
// Sort relocations by address.
- llvm::sort(Rels.begin(), Rels.end(), RelocAddressLess);
+ llvm::sort(Rels, RelocAddressLess);
ArrayRef<uint8_t> Contents;
error(Obj->getSectionContents(Pdata, Contents));
diff --git a/llvm/tools/llvm-objdump/MachODump.cpp b/llvm/tools/llvm-objdump/MachODump.cpp
index c8c63ceac7f..9d72d0f4b6a 100644
--- a/llvm/tools/llvm-objdump/MachODump.cpp
+++ b/llvm/tools/llvm-objdump/MachODump.cpp
@@ -6922,7 +6922,7 @@ static void DisassembleMachO(StringRef Filename, MachOObjectFile *MachOOF,
BaseSegmentAddress);
// Sort the symbols by address, just in case they didn't come in that way.
- llvm::sort(Symbols.begin(), Symbols.end(), SymbolSorter());
+ llvm::sort(Symbols, SymbolSorter());
// Build a data in code table that is sorted on by the address of each entry.
uint64_t BaseAddress = 0;
diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 68e03d474b2..64c3823295c 100644
--- a/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -1447,8 +1447,8 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
}
}
- llvm::sort(DataMappingSymsAddr.begin(), DataMappingSymsAddr.end());
- llvm::sort(TextMappingSymsAddr.begin(), TextMappingSymsAddr.end());
+ llvm::sort(DataMappingSymsAddr);
+ llvm::sort(TextMappingSymsAddr);
if (Obj->isELF() && Obj->getArch() == Triple::amdgcn) {
// AMDGPU disassembler uses symbolizer for printing labels
@@ -1473,7 +1473,7 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
}
// Sort relocations by address.
- llvm::sort(Rels.begin(), Rels.end(), RelocAddressLess);
+ llvm::sort(Rels, RelocAddressLess);
StringRef SegmentName = "";
if (const MachOObjectFile *MachO = dyn_cast<const MachOObjectFile>(Obj)) {
diff --git a/llvm/tools/llvm-pdbutil/DumpOutputStyle.cpp b/llvm/tools/llvm-pdbutil/DumpOutputStyle.cpp
index 812aab5bbc2..4e80f413415 100644
--- a/llvm/tools/llvm-pdbutil/DumpOutputStyle.cpp
+++ b/llvm/tools/llvm-pdbutil/DumpOutputStyle.cpp
@@ -1118,7 +1118,7 @@ Error DumpOutputStyle::dumpStringTableFromPdb() {
std::vector<uint32_t> SortedIDs(IS->name_ids().begin(),
IS->name_ids().end());
- llvm::sort(SortedIDs.begin(), SortedIDs.end());
+ llvm::sort(SortedIDs);
for (uint32_t I : SortedIDs) {
auto ES = IS->getStringForID(I);
llvm::SmallString<32> Str;
diff --git a/llvm/tools/llvm-pdbutil/PrettyTypeDumper.cpp b/llvm/tools/llvm-pdbutil/PrettyTypeDumper.cpp
index a5a88aa1071..9ffdcfdf956 100644
--- a/llvm/tools/llvm-pdbutil/PrettyTypeDumper.cpp
+++ b/llvm/tools/llvm-pdbutil/PrettyTypeDumper.cpp
@@ -130,7 +130,7 @@ filterAndSortClassDefs(LinePrinter &Printer, Enumerator &E,
}
if (Comp)
- llvm::sort(Filtered.begin(), Filtered.end(), Comp);
+ llvm::sort(Filtered, Comp);
return Filtered;
}
diff --git a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp
index e90783c80aa..5783661a0b6 100644
--- a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp
+++ b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp
@@ -1216,8 +1216,7 @@ static void dumpPretty(StringRef Path) {
std::vector<std::unique_ptr<PDBSymbolFunc>> Funcs;
while (auto Func = Functions->getNext())
Funcs.push_back(std::move(Func));
- llvm::sort(Funcs.begin(), Funcs.end(),
- opts::pretty::compareFunctionSymbols);
+ llvm::sort(Funcs, opts::pretty::compareFunctionSymbols);
for (const auto &Func : Funcs) {
Printer.NewLine();
Dumper.start(*Func, FunctionDumper::PointerType::None);
@@ -1235,8 +1234,7 @@ static void dumpPretty(StringRef Path) {
std::vector<std::unique_ptr<PDBSymbolData>> Datas;
while (auto Var = Vars->getNext())
Datas.push_back(std::move(Var));
- llvm::sort(Datas.begin(), Datas.end(),
- opts::pretty::compareDataSymbols);
+ llvm::sort(Datas, opts::pretty::compareDataSymbols);
for (const auto &Var : Datas)
Dumper.start(*Var);
}
diff --git a/llvm/tools/llvm-readobj/COFFDumper.cpp b/llvm/tools/llvm-readobj/COFFDumper.cpp
index 8da5fc4234e..fe31c36b602 100644
--- a/llvm/tools/llvm-readobj/COFFDumper.cpp
+++ b/llvm/tools/llvm-readobj/COFFDumper.cpp
@@ -613,8 +613,7 @@ void COFFDumper::cacheRelocations() {
RelocMap[Section].push_back(Reloc);
// Sort relocations by address.
- llvm::sort(RelocMap[Section].begin(), RelocMap[Section].end(),
- relocAddressLess);
+ llvm::sort(RelocMap[Section], relocAddressLess);
}
}
diff --git a/llvm/tools/llvm-xray/xray-account.cpp b/llvm/tools/llvm-xray/xray-account.cpp
index 77de1de496f..93bb271b328 100644
--- a/llvm/tools/llvm-xray/xray-account.cpp
+++ b/llvm/tools/llvm-xray/xray-account.cpp
@@ -282,79 +282,76 @@ void LatencyAccountant::exportStats(const XRayFileHeader &Header, F Fn) const {
// Sort the data according to user-provided flags.
switch (AccountSortOutput) {
case SortField::FUNCID:
- llvm::sort(Results.begin(), Results.end(),
- [](const TupleType &L, const TupleType &R) {
- if (AccountSortOrder == SortDirection::ASCENDING)
- return std::get<0>(L) < std::get<0>(R);
- if (AccountSortOrder == SortDirection::DESCENDING)
- return std::get<0>(L) > std::get<0>(R);
- llvm_unreachable("Unknown sort direction");
- });
+ llvm::sort(Results, [](const TupleType &L, const TupleType &R) {
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return std::get<0>(L) < std::get<0>(R);
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return std::get<0>(L) > std::get<0>(R);
+ llvm_unreachable("Unknown sort direction");
+ });
break;
case SortField::COUNT:
- llvm::sort(Results.begin(), Results.end(),
- [](const TupleType &L, const TupleType &R) {
- if (AccountSortOrder == SortDirection::ASCENDING)
- return std::get<1>(L) < std::get<1>(R);
- if (AccountSortOrder == SortDirection::DESCENDING)
- return std::get<1>(L) > std::get<1>(R);
- llvm_unreachable("Unknown sort direction");
- });
+ llvm::sort(Results, [](const TupleType &L, const TupleType &R) {
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return std::get<1>(L) < std::get<1>(R);
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return std::get<1>(L) > std::get<1>(R);
+ llvm_unreachable("Unknown sort direction");
+ });
break;
default:
// Here we need to look into the ResultRow for the rest of the data that
// we want to sort by.
- llvm::sort(Results.begin(), Results.end(),
- [&](const TupleType &L, const TupleType &R) {
- auto &LR = std::get<2>(L);
- auto &RR = std::get<2>(R);
- switch (AccountSortOutput) {
- case SortField::COUNT:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Count < RR.Count;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Count > RR.Count;
- llvm_unreachable("Unknown sort direction");
- case SortField::MIN:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Min < RR.Min;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Min > RR.Min;
- llvm_unreachable("Unknown sort direction");
- case SortField::MED:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Median < RR.Median;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Median > RR.Median;
- llvm_unreachable("Unknown sort direction");
- case SortField::PCT90:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Pct90 < RR.Pct90;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Pct90 > RR.Pct90;
- llvm_unreachable("Unknown sort direction");
- case SortField::PCT99:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Pct99 < RR.Pct99;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Pct99 > RR.Pct99;
- llvm_unreachable("Unknown sort direction");
- case SortField::MAX:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Max < RR.Max;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Max > RR.Max;
- llvm_unreachable("Unknown sort direction");
- case SortField::SUM:
- if (AccountSortOrder == SortDirection::ASCENDING)
- return LR.Sum < RR.Sum;
- if (AccountSortOrder == SortDirection::DESCENDING)
- return LR.Sum > RR.Sum;
- llvm_unreachable("Unknown sort direction");
- default:
- llvm_unreachable("Unsupported sort order");
- }
- });
+ llvm::sort(Results, [&](const TupleType &L, const TupleType &R) {
+ auto &LR = std::get<2>(L);
+ auto &RR = std::get<2>(R);
+ switch (AccountSortOutput) {
+ case SortField::COUNT:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Count < RR.Count;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Count > RR.Count;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::MIN:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Min < RR.Min;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Min > RR.Min;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::MED:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Median < RR.Median;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Median > RR.Median;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::PCT90:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Pct90 < RR.Pct90;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Pct90 > RR.Pct90;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::PCT99:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Pct99 < RR.Pct99;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Pct99 > RR.Pct99;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::MAX:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Max < RR.Max;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Max > RR.Max;
+ llvm_unreachable("Unknown sort direction");
+ case SortField::SUM:
+ if (AccountSortOrder == SortDirection::ASCENDING)
+ return LR.Sum < RR.Sum;
+ if (AccountSortOrder == SortDirection::DESCENDING)
+ return LR.Sum > RR.Sum;
+ llvm_unreachable("Unknown sort direction");
+ default:
+ llvm_unreachable("Unsupported sort order");
+ }
+ });
break;
}
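
Conceptually, the range form used at these call sites is a thin wrapper that forwards the whole container, plus an optional comparator, to the underlying sort. A rough self-contained sketch of such a wrapper, under that assumption (the demo namespace and names are hypothetical and are not LLVM's actual STLExtras code):

#include <algorithm>
#include <iterator>
#include <vector>

namespace demo {
// Forward a whole range, and an optional comparator, to std::sort.
template <typename R> void sort(R &&Range) {
  std::sort(std::begin(Range), std::end(Range));
}
template <typename R, typename Compare> void sort(R &&Range, Compare C) {
  std::sort(std::begin(Range), std::end(Range), C);
}
} // namespace demo

int main() {
  std::vector<int> V = {3, 1, 2};
  demo::sort(V);                                     // default operator<
  demo::sort(V, [](int A, int B) { return A > B; }); // custom comparator
}
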
diff --git a/llvm/tools/yaml2obj/yaml2macho.cpp b/llvm/tools/yaml2obj/yaml2macho.cpp
index 23fe946f1da..1f36431c05d 100644
--- a/llvm/tools/yaml2obj/yaml2macho.cpp
+++ b/llvm/tools/yaml2obj/yaml2macho.cpp
@@ -417,10 +417,9 @@ Error MachOWriter::writeLinkEditData(raw_ostream &OS) {
}
}
- llvm::sort(WriteQueue.begin(), WriteQueue.end(),
- [](const writeOperation &a, const writeOperation &b) {
- return a.first < b.first;
- });
+ llvm::sort(WriteQueue, [](const writeOperation &a, const writeOperation &b) {
+ return a.first < b.first;
+ });
for (auto writeOp : WriteQueue) {
ZeroToOffset(OS, writeOp.first);
diff --git a/llvm/unittests/ADT/StringMapTest.cpp b/llvm/unittests/ADT/StringMapTest.cpp
index 1f5c4f03164..a44bbbfee09 100644
--- a/llvm/unittests/ADT/StringMapTest.cpp
+++ b/llvm/unittests/ADT/StringMapTest.cpp
@@ -279,7 +279,7 @@ TEST_F(StringMapTest, IterMapKeys) {
Map["D"] = 3;
auto Keys = to_vector<4>(Map.keys());
- llvm::sort(Keys.begin(), Keys.end());
+ llvm::sort(Keys);
SmallVector<StringRef, 4> Expected = {"A", "B", "C", "D"};
EXPECT_EQ(Expected, Keys);
@@ -293,7 +293,7 @@ TEST_F(StringMapTest, IterSetKeys) {
Set.insert("D");
auto Keys = to_vector<4>(Set.keys());
- llvm::sort(Keys.begin(), Keys.end());
+ llvm::sort(Keys);
SmallVector<StringRef, 4> Expected = {"A", "B", "C", "D"};
EXPECT_EQ(Expected, Keys);
diff --git a/llvm/unittests/Analysis/LazyCallGraphTest.cpp b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
index 6279ebb9b5a..5e6dd1aee5a 100644
--- a/llvm/unittests/Analysis/LazyCallGraphTest.cpp
+++ b/llvm/unittests/Analysis/LazyCallGraphTest.cpp
@@ -264,7 +264,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
for (LazyCallGraph::Edge &E : A1.populate())
Nodes.push_back(E.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ("a2", Nodes[0]);
EXPECT_EQ("b2", Nodes[1]);
EXPECT_EQ("c3", Nodes[2]);
@@ -279,7 +279,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
for (LazyCallGraph::Edge &E : B1.populate())
Nodes.push_back(E.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ("b2", Nodes[0]);
EXPECT_EQ("d3", Nodes[1]);
Nodes.clear();
@@ -293,7 +293,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
for (LazyCallGraph::Edge &E : C1.populate())
Nodes.push_back(E.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ("c2", Nodes[0]);
EXPECT_EQ("d2", Nodes[1]);
Nodes.clear();
@@ -323,7 +323,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
ASSERT_EQ(1, D.size());
for (LazyCallGraph::Node &N : *D.begin())
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("d1", Nodes[0]);
EXPECT_EQ("d2", Nodes[1]);
@@ -339,7 +339,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
ASSERT_EQ(1, C.size());
for (LazyCallGraph::Node &N : *C.begin())
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("c1", Nodes[0]);
EXPECT_EQ("c2", Nodes[1]);
@@ -355,7 +355,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
ASSERT_EQ(1, B.size());
for (LazyCallGraph::Node &N : *B.begin())
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("b1", Nodes[0]);
EXPECT_EQ("b2", Nodes[1]);
@@ -373,7 +373,7 @@ TEST(LazyCallGraphTest, BasicGraphFormation) {
ASSERT_EQ(1, A.size());
for (LazyCallGraph::Node &N : *A.begin())
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("a1", Nodes[0]);
EXPECT_EQ("a2", Nodes[1]);
@@ -477,7 +477,7 @@ TEST(LazyCallGraphTest, InnerSCCFormation) {
LazyCallGraph::SCC &D = *J++;
for (LazyCallGraph::Node &N : D)
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("d1", Nodes[0]);
EXPECT_EQ("d2", Nodes[1]);
@@ -487,7 +487,7 @@ TEST(LazyCallGraphTest, InnerSCCFormation) {
LazyCallGraph::SCC &B = *J++;
for (LazyCallGraph::Node &N : B)
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("b1", Nodes[0]);
EXPECT_EQ("b2", Nodes[1]);
@@ -497,7 +497,7 @@ TEST(LazyCallGraphTest, InnerSCCFormation) {
LazyCallGraph::SCC &C = *J++;
for (LazyCallGraph::Node &N : C)
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("c1", Nodes[0]);
EXPECT_EQ("c2", Nodes[1]);
@@ -507,7 +507,7 @@ TEST(LazyCallGraphTest, InnerSCCFormation) {
LazyCallGraph::SCC &A = *J++;
for (LazyCallGraph::Node &N : A)
Nodes.push_back(N.getFunction().getName());
- llvm::sort(Nodes.begin(), Nodes.end());
+ llvm::sort(Nodes);
EXPECT_EQ(3u, Nodes.size());
EXPECT_EQ("a1", Nodes[0]);
EXPECT_EQ("a2", Nodes[1]);
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index 06d301ea9bf..505e185190f 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1363,10 +1363,9 @@ TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
- llvm::sort(Defs.begin(), Defs.end(),
- [](const MemoryDef *LHS, const MemoryDef *RHS) {
- return LHS->getID() < RHS->getID();
- });
+ llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
+ return LHS->getID() < RHS->getID();
+ });
};
auto SortedUserList = [&](const MemoryDef *MD) {
diff --git a/llvm/unittests/Support/Path.cpp b/llvm/unittests/Support/Path.cpp
index 851a4af438f..cba6ce94c44 100644
--- a/llvm/unittests/Support/Path.cpp
+++ b/llvm/unittests/Support/Path.cpp
@@ -895,8 +895,8 @@ TEST_F(FileSystemTest, BrokenSymlinkDirectoryIteration) {
ASSERT_NO_ERROR(ec);
VisitedNonBrokenSymlinks.push_back(path::filename(i->path()));
}
- llvm::sort(VisitedNonBrokenSymlinks.begin(), VisitedNonBrokenSymlinks.end());
- llvm::sort(VisitedBrokenSymlinks.begin(), VisitedBrokenSymlinks.end());
+ llvm::sort(VisitedNonBrokenSymlinks);
+ llvm::sort(VisitedBrokenSymlinks);
v_t ExpectedNonBrokenSymlinks = {"b", "d"};
ASSERT_EQ(ExpectedNonBrokenSymlinks.size(), VisitedNonBrokenSymlinks.size());
ASSERT_TRUE(std::equal(VisitedNonBrokenSymlinks.begin(),
@@ -922,8 +922,8 @@ TEST_F(FileSystemTest, BrokenSymlinkDirectoryIteration) {
ASSERT_NO_ERROR(ec);
VisitedNonBrokenSymlinks.push_back(path::filename(i->path()));
}
- llvm::sort(VisitedNonBrokenSymlinks.begin(), VisitedNonBrokenSymlinks.end());
- llvm::sort(VisitedBrokenSymlinks.begin(), VisitedBrokenSymlinks.end());
+ llvm::sort(VisitedNonBrokenSymlinks);
+ llvm::sort(VisitedBrokenSymlinks);
ExpectedNonBrokenSymlinks = {"b", "bb", "d", "da", "dd", "ddd", "ddd"};
ASSERT_EQ(ExpectedNonBrokenSymlinks.size(), VisitedNonBrokenSymlinks.size());
ASSERT_TRUE(std::equal(VisitedNonBrokenSymlinks.begin(),
@@ -949,8 +949,8 @@ TEST_F(FileSystemTest, BrokenSymlinkDirectoryIteration) {
ASSERT_NO_ERROR(ec);
VisitedNonBrokenSymlinks.push_back(path::filename(i->path()));
}
- llvm::sort(VisitedNonBrokenSymlinks.begin(), VisitedNonBrokenSymlinks.end());
- llvm::sort(VisitedBrokenSymlinks.begin(), VisitedBrokenSymlinks.end());
+ llvm::sort(VisitedNonBrokenSymlinks);
+ llvm::sort(VisitedBrokenSymlinks);
ExpectedNonBrokenSymlinks = {"a", "b", "ba", "bb", "bc", "c", "d", "da", "dd",
"ddd", "e"};
ASSERT_EQ(ExpectedNonBrokenSymlinks.size(), VisitedNonBrokenSymlinks.size());
diff --git a/llvm/utils/TableGen/CTagsEmitter.cpp b/llvm/utils/TableGen/CTagsEmitter.cpp
index a0f83f1c991..bd596bcb47a 100644
--- a/llvm/utils/TableGen/CTagsEmitter.cpp
+++ b/llvm/utils/TableGen/CTagsEmitter.cpp
@@ -73,7 +73,7 @@ void CTagsEmitter::run(raw_ostream &OS) {
for (const auto &D : Defs)
Tags.push_back(Tag(D.first, locate(D.second.get())));
// Emit tags.
- llvm::sort(Tags.begin(), Tags.end());
+ llvm::sort(Tags);
OS << "!_TAG_FILE_FORMAT\t1\t/original ctags format/\n";
OS << "!_TAG_FILE_SORTED\t1\t/0=unsorted, 1=sorted, 2=foldcase/\n";
for (const Tag &T : Tags)
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index 4a409f173d6..ed68b09c265 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -1318,7 +1318,7 @@ std::string PatternToMatch::getPredicateCheck() const {
SmallVector<const Predicate*,4> PredList;
for (const Predicate &P : Predicates)
PredList.push_back(&P);
- llvm::sort(PredList.begin(), PredList.end(), deref<llvm::less>());
+ llvm::sort(PredList, deref<llvm::less>());
std::string Check;
for (unsigned i = 0, e = PredList.size(); i != e; ++i) {
@@ -3742,7 +3742,7 @@ std::vector<Predicate> CodeGenDAGPatterns::makePredList(ListInit *L) {
}
// Sort so that different orders get canonicalized to the same string.
- llvm::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds);
return Preds;
}
diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/CodeGenRegisters.cpp
index b0d13b7d38f..e70a79f08af 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -725,7 +725,7 @@ struct TupleExpander : SetTheory::Expander {
//===----------------------------------------------------------------------===//
static void sortAndUniqueRegisters(CodeGenRegister::Vec &M) {
- llvm::sort(M.begin(), M.end(), deref<llvm::less>());
+ llvm::sort(M, deref<llvm::less>());
M.erase(std::unique(M.begin(), M.end(), deref<llvm::equal>()), M.end());
}
@@ -997,7 +997,7 @@ CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
for (auto &RC : RegClasses)
if (SuperRegRCsBV[RC.EnumValue])
SuperRegRCs.emplace_back(&RC);
- llvm::sort(SuperRegRCs.begin(), SuperRegRCs.end(), SizeOrder);
+ llvm::sort(SuperRegRCs, SizeOrder);
assert(SuperRegRCs.front() == BiggestSuperRegRC && "Biggest class wasn't first");
// Find all the subreg classes and order them by size too.
@@ -1008,7 +1008,7 @@ CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
if (SuperRegClassesBV.any())
SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
}
- llvm::sort(SuperRegClasses.begin(), SuperRegClasses.end(),
+ llvm::sort(SuperRegClasses,
[&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
const std::pair<CodeGenRegisterClass *, BitVector> &B) {
return SizeOrder(A.first, B.first);
@@ -1073,7 +1073,7 @@ void CodeGenRegisterClass::buildRegUnitSet(const CodeGenRegBank &RegBank,
if (!RU.Artificial)
TmpUnits.push_back(*UnitI);
}
- llvm::sort(TmpUnits.begin(), TmpUnits.end());
+ llvm::sort(TmpUnits);
std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
std::back_inserter(RegUnits));
}
@@ -1093,7 +1093,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
// Read in the user-defined (named) sub-register indices.
// More indices will be synthesized later.
std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
- llvm::sort(SRIs.begin(), SRIs.end(), LessRecord());
+ llvm::sort(SRIs, LessRecord());
for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
getSubRegIdx(SRIs[i]);
// Build composite maps from ComposedOf fields.
@@ -1102,7 +1102,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
// Read in the register definitions.
std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
- llvm::sort(Regs.begin(), Regs.end(), LessRecordRegister());
+ llvm::sort(Regs, LessRecordRegister());
// Assign the enumeration values.
for (unsigned i = 0, e = Regs.size(); i != e; ++i)
getReg(Regs[i]);
@@ -1113,7 +1113,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
for (Record *R : Tups) {
std::vector<Record *> TupRegs = *Sets.expand(R);
- llvm::sort(TupRegs.begin(), TupRegs.end(), LessRecordRegister());
+ llvm::sort(TupRegs, LessRecordRegister());
for (Record *RC : TupRegs)
getReg(RC);
}
diff --git a/llvm/utils/TableGen/CodeGenSchedule.cpp b/llvm/utils/TableGen/CodeGenSchedule.cpp
index 625944bcfe9..57b59108528 100644
--- a/llvm/utils/TableGen/CodeGenSchedule.cpp
+++ b/llvm/utils/TableGen/CodeGenSchedule.cpp
@@ -365,7 +365,7 @@ processSTIPredicate(STIPredicateFunction &Fn,
// Sort OpcodeMappings elements based on their CPU and predicate masks.
// As a last resort, order elements by opcode identifier.
- llvm::sort(OpcodeMappings.begin(), OpcodeMappings.end(),
+ llvm::sort(OpcodeMappings,
[&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
unsigned LhsIdx = Opcode2Index[Lhs.first];
unsigned RhsIdx = Opcode2Index[Rhs.first];
@@ -496,7 +496,7 @@ void CodeGenSchedModels::collectOptionalProcessorInfo() {
/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
- llvm::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
+ llvm::sort(ProcRecords, LessRecordFieldName());
// Reserve space because we can. Reallocation would be ok.
ProcModels.reserve(ProcRecords.size()+1);
@@ -615,7 +615,7 @@ void CodeGenSchedModels::collectSchedRW() {
// Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
// for the loop below that initializes Alias vectors.
RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
- llvm::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
+ llvm::sort(AliasDefs, LessRecord());
for (Record *ADef : AliasDefs) {
Record *MatchDef = ADef->getValueAsDef("MatchRW");
Record *AliasDef = ADef->getValueAsDef("AliasRW");
@@ -633,12 +633,12 @@ void CodeGenSchedModels::collectSchedRW() {
}
// Sort and add the SchedReadWrites directly referenced by instructions or
// itinerary resources. Index reads and writes in separate domains.
- llvm::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
+ llvm::sort(SWDefs, LessRecord());
for (Record *SWDef : SWDefs) {
assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite");
SchedWrites.emplace_back(SchedWrites.size(), SWDef);
}
- llvm::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
+ llvm::sort(SRDefs, LessRecord());
for (Record *SRDef : SRDefs) {
assert(!getSchedRWIdx(SRDef, /*IsRead-*/true) && "duplicate SchedWrite");
SchedReads.emplace_back(SchedReads.size(), SRDef);
@@ -858,7 +858,7 @@ void CodeGenSchedModels::collectSchedClasses() {
}
// Create classes for InstRW defs.
RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
- llvm::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
+ llvm::sort(InstRWDefs, LessRecord());
LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
for (Record *RWDef : InstRWDefs)
createInstRWClass(RWDef);
@@ -1162,7 +1162,7 @@ void CodeGenSchedModels::collectProcItins() {
// Gather the read/write types for each itinerary class.
void CodeGenSchedModels::collectProcItinRW() {
RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
- llvm::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
+ llvm::sort(ItinRWDefs, LessRecord());
for (Record *RWDef : ItinRWDefs) {
if (!RWDef->getValueInit("SchedModel")->isComplete())
PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/CodeGenTarget.cpp
index cb73ca83c9b..2766fcca161 100644
--- a/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -278,7 +278,7 @@ CodeGenRegBank &CodeGenTarget::getRegBank() const {
void CodeGenTarget::ReadRegAltNameIndices() const {
RegAltNameIndices = Records.getAllDerivedDefinitions("RegAltNameIndex");
- llvm::sort(RegAltNameIndices.begin(), RegAltNameIndices.end(), LessRecord());
+ llvm::sort(RegAltNameIndices, LessRecord());
}
/// getRegisterByName - If there is a register with the specific AsmName,
@@ -303,7 +303,7 @@ std::vector<ValueTypeByHwMode> CodeGenTarget::getRegisterVTs(Record *R)
}
// Remove duplicates.
- llvm::sort(Result.begin(), Result.end());
+ llvm::sort(Result);
Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
return Result;
}
@@ -314,7 +314,7 @@ void CodeGenTarget::ReadLegalValueTypes() const {
LegalValueTypes.insert(LegalValueTypes.end(), RC.VTs.begin(), RC.VTs.end());
// Remove duplicates.
- llvm::sort(LegalValueTypes.begin(), LegalValueTypes.end());
+ llvm::sort(LegalValueTypes);
LegalValueTypes.erase(std::unique(LegalValueTypes.begin(),
LegalValueTypes.end()),
LegalValueTypes.end());
@@ -513,7 +513,7 @@ CodeGenIntrinsicTable::CodeGenIntrinsicTable(const RecordKeeper &RC,
if (isTarget == TargetOnly)
Intrinsics.push_back(CodeGenIntrinsic(Defs[I]));
}
- llvm::sort(Intrinsics.begin(), Intrinsics.end(),
+ llvm::sort(Intrinsics,
[](const CodeGenIntrinsic &LHS, const CodeGenIntrinsic &RHS) {
return std::tie(LHS.TargetPrefix, LHS.Name) <
std::tie(RHS.TargetPrefix, RHS.Name);
@@ -709,6 +709,6 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
Properties = parseSDPatternOperatorProperties(R);
// Sort the argument attributes for later benefit.
- llvm::sort(ArgumentAttributes.begin(), ArgumentAttributes.end());
+ llvm::sort(ArgumentAttributes);
}
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 03e2f381d9c..18a3afeb453 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -2613,7 +2613,7 @@ public:
std::vector<unsigned> MergeInsnIDs;
for (const auto &IDMatcherPair : Rule.defined_insn_vars())
MergeInsnIDs.push_back(IDMatcherPair.second);
- llvm::sort(MergeInsnIDs.begin(), MergeInsnIDs.end());
+ llvm::sort(MergeInsnIDs);
for (const auto &MergeInsnID : MergeInsnIDs)
Table << MatchTable::IntValue(MergeInsnID);
Table << MatchTable::NamedValue("GIU_MergeMemOperands_EndOfList")
@@ -2812,7 +2812,7 @@ void RuleMatcher::emit(MatchTable &Table) {
InsnIDs.push_back(Pair.second);
}
- llvm::sort(InsnIDs.begin(), InsnIDs.end());
+ llvm::sort(InsnIDs);
for (const auto &InsnID : InsnIDs) {
// Reject the difficult cases until we have a more accurate check.
@@ -4288,11 +4288,11 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
std::vector<Record *> ComplexPredicates =
RK.getAllDerivedDefinitions("GIComplexOperandMatcher");
- llvm::sort(ComplexPredicates.begin(), ComplexPredicates.end(), orderByName);
+ llvm::sort(ComplexPredicates, orderByName);
std::vector<Record *> CustomRendererFns =
RK.getAllDerivedDefinitions("GICustomOperandRenderer");
- llvm::sort(CustomRendererFns.begin(), CustomRendererFns.end(), orderByName);
+ llvm::sort(CustomRendererFns, orderByName);
unsigned MaxTemporaries = 0;
for (const auto &Rule : Rules)
@@ -4371,7 +4371,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
std::vector<LLTCodeGen> TypeObjects;
for (const auto &Ty : KnownTypes)
TypeObjects.push_back(Ty);
- llvm::sort(TypeObjects.begin(), TypeObjects.end());
+ llvm::sort(TypeObjects);
OS << "// LLT Objects.\n"
<< "enum {\n";
for (const auto &TypeObject : TypeObjects) {
diff --git a/llvm/utils/TableGen/InfoByHwMode.cpp b/llvm/utils/TableGen/InfoByHwMode.cpp
index 7d1f71cc264..086e12dafd7 100644
--- a/llvm/utils/TableGen/InfoByHwMode.cpp
+++ b/llvm/utils/TableGen/InfoByHwMode.cpp
@@ -84,7 +84,7 @@ void ValueTypeByHwMode::writeToStream(raw_ostream &OS) const {
std::vector<const PairType*> Pairs;
for (const auto &P : Map)
Pairs.push_back(&P);
- llvm::sort(Pairs.begin(), Pairs.end(), deref<std::less<PairType>>());
+ llvm::sort(Pairs, deref<std::less<PairType>>());
OS << '{';
for (unsigned i = 0, e = Pairs.size(); i != e; ++i) {
@@ -176,7 +176,7 @@ void RegSizeInfoByHwMode::writeToStream(raw_ostream &OS) const {
std::vector<const PairType*> Pairs;
for (const auto &P : Map)
Pairs.push_back(&P);
- llvm::sort(Pairs.begin(), Pairs.end(), deref<std::less<PairType>>());
+ llvm::sort(Pairs, deref<std::less<PairType>>());
OS << '{';
for (unsigned i = 0, e = Pairs.size(); i != e; ++i) {
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index 100399f52ff..ef0428eeed0 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -140,7 +140,7 @@ void SubtargetEmitter::Enumeration(raw_ostream &OS) {
// Get all records of class and sort
std::vector<Record*> DefList =
Records.getAllDerivedDefinitions("SubtargetFeature");
- llvm::sort(DefList.begin(), DefList.end(), LessRecord());
+ llvm::sort(DefList, LessRecord());
unsigned N = DefList.size();
if (N == 0)
@@ -179,7 +179,7 @@ unsigned SubtargetEmitter::FeatureKeyValues(raw_ostream &OS) {
if (FeatureList.empty())
return 0;
- llvm::sort(FeatureList.begin(), FeatureList.end(), LessRecordFieldName());
+ llvm::sort(FeatureList, LessRecordFieldName());
// Begin feature table
OS << "// Sorted (by key) array of values for CPU features.\n"
@@ -229,7 +229,7 @@ unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
// Gather and sort processor information
std::vector<Record*> ProcessorList =
Records.getAllDerivedDefinitions("Processor");
- llvm::sort(ProcessorList.begin(), ProcessorList.end(), LessRecordFieldName());
+ llvm::sort(ProcessorList, LessRecordFieldName());
// Begin processor table
OS << "// Sorted (by key) array of values for CPU subtype.\n"
@@ -1182,7 +1182,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
WriteIDs.push_back(SchedModels.getSchedRWIdx(VW, /*IsRead=*/false));
}
}
- llvm::sort(WriteIDs.begin(), WriteIDs.end());
+ llvm::sort(WriteIDs);
for(unsigned W : WriteIDs) {
MCReadAdvanceEntry RAEntry;
RAEntry.UseIdx = UseIdx;
@@ -1200,8 +1200,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
// compression.
//
// WritePrecRes entries are sorted by ProcResIdx.
- llvm::sort(WriteProcResources.begin(), WriteProcResources.end(),
- LessWriteProcResources());
+ llvm::sort(WriteProcResources, LessWriteProcResources());
SCDesc.NumWriteProcResEntries = WriteProcResources.size();
std::vector<MCWriteProcResEntry>::iterator WPRPos =
@@ -1413,7 +1412,7 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
// Gather and sort processor information
std::vector<Record*> ProcessorList =
Records.getAllDerivedDefinitions("Processor");
- llvm::sort(ProcessorList.begin(), ProcessorList.end(), LessRecordFieldName());
+ llvm::sort(ProcessorList, LessRecordFieldName());
// Begin processor table
OS << "\n";
@@ -1479,7 +1478,7 @@ static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
// stream.
std::vector<Record *> Prologs =
Records.getAllDerivedDefinitions("PredicateProlog");
- llvm::sort(Prologs.begin(), Prologs.end(), LessRecord());
+ llvm::sort(Prologs, LessRecord());
for (Record *P : Prologs)
Stream << P->getValueAsString("Code") << '\n';
@@ -1717,7 +1716,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
unsigned NumProcs) {
std::vector<Record*> Features =
Records.getAllDerivedDefinitions("SubtargetFeature");
- llvm::sort(Features.begin(), Features.end(), LessRecord());
+ llvm::sort(Features, LessRecord());
OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
<< "// subtarget options.\n"
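For readers skimming the hunks above, the following standalone sketch is not part of the patch; it only illustrates the shape of the mechanical rewrite applied throughout, assuming llvm/ADT/STLExtras.h from rL342102 or later is available. The container name and comparator here are made up for the example.

    // Illustrative only: the range-based llvm::sort wrapper used by this patch.
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include <functional>

    static void sortExample() {
      llvm::SmallVector<int, 8> Values = {3, 1, 2};

      // Old form replaced in every hunk above: explicit iterator pair.
      //   llvm::sort(Values.begin(), Values.end(), std::less<int>());

      // New form: pass the range directly; the wrapper forwards to std::sort
      // (and, in expensive-checks builds, may shuffle first to expose
      // comparators that are not strict weak orderings).
      llvm::sort(Values, std::less<int>());

      // The comparator can also be omitted, as in many of the hunks above.
      llvm::sort(Values);
    }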