author     Mandeep Singh Grang <mgrang@codeaurora.org>   2018-04-13 19:47:57 +0000
committer  Mandeep Singh Grang <mgrang@codeaurora.org>   2018-04-13 19:47:57 +0000
commit     636d94db3b52c9284fa7d50979306da49f1ba703 (patch)
tree       9a006fc491701953160917a7ec1878b68fb1fff4 /llvm/lib
parent     db456efc8ed89c3534398f37fb364180e328d966 (diff)
[Transforms] Change std::sort to llvm::sort in response to r327219
Summary:
r327219 added wrappers to std::sort which randomly shuffle the container
before sorting. This helps uncover non-determinism caused by the undefined
sorting order of objects having the same key. To make use of that
infrastructure we need to invoke llvm::sort instead of std::sort.

Note: This patch is one of a series of patches to replace *all* uses of
std::sort with llvm::sort. Refer to the comments section of D44363 for a
list of all the required patches.

Reviewers: kcc, pcc, danielcdh, jmolloy, sanjoy, dberlin, ruiu

Reviewed By: ruiu

Subscribers: ruiu, llvm-commits

Differential Revision: https://reviews.llvm.org/D45142

llvm-svn: 330059
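For context, the shuffling wrapper added by r327219 conceptually looks like the
sketch below. This is an illustrative reconstruction, not the exact code in
llvm/include/llvm/ADT/STLExtras.h; the EXPENSIVE_CHECKS guard and the choice of
random engine are assumptions made for the example. The idea is that, with
expensive checks enabled, equal-keyed elements land in a different pre-sort
order on every run, so code that silently depends on that order becomes visibly
non-deterministic.

  // Sketch of a shuffle-then-sort wrapper (assumed names, not LLVM's exact code).
  #include <algorithm>
  #include <random>

  namespace llvm {
  template <typename IteratorTy>
  inline void sort(IteratorTy Start, IteratorTy End) {
  #ifdef EXPENSIVE_CHECKS
    // Shuffle first so results that rely on the relative order of equal keys
    // differ from run to run and the non-determinism is exposed.
    std::mt19937 Generator(std::random_device{}());
    std::shuffle(Start, End, Generator);
  #endif
    std::sort(Start, End);
  }

  template <typename IteratorTy, typename Compare>
  inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
  #ifdef EXPENSIVE_CHECKS
    std::mt19937 Generator(std::random_device{}());
    std::shuffle(Start, End, Generator);
  #endif
    std::sort(Start, End, Comp);
  }
  } // namespace llvm

Call sites only need the namespace swap, which is exactly what every hunk below does.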
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroFrame.cpp                       |  2
-rw-r--r--  llvm/lib/Transforms/IPO/LowerTypeTests.cpp                         | 12
-rw-r--r--  llvm/lib/Transforms/IPO/SampleProfile.cpp                          | 22
-rw-r--r--  llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp              |  2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp          | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstantHoisting.cpp                    |  4
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNHoist.cpp                            | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNSink.cpp                             |  6
-rw-r--r--  llvm/lib/Transforms/Scalar/GuardWidening.cpp                       |  6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopSink.cpp                            |  8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp                  |  6
-rw-r--r--  llvm/lib/Transforms/Scalar/MergeICmps.cpp                          |  8
-rw-r--r--  llvm/lib/Transforms/Scalar/NewGVN.cpp                              | 15
-rw-r--r--  llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp                     |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp             |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                                |  8
-rw-r--r--  llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp                  | 11
-rw-r--r--  llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp  |  7
-rw-r--r--  llvm/lib/Transforms/Utils/LowerSwitch.cpp                          |  2
-rw-r--r--  llvm/lib/Transforms/Utils/PredicateInfo.cpp                        |  2
-rw-r--r--  llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp              | 12
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp                          |  2
-rw-r--r--  llvm/lib/Transforms/Utils/SplitModule.cpp                          | 14
23 files changed, 89 insertions, 84 deletions
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 4d7fe644629..f19f6a50ddd 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -48,7 +48,7 @@ public:
BlockToIndexMapping(Function &F) {
for (BasicBlock &BB : F)
V.push_back(&BB);
- std::sort(V.begin(), V.end());
+ llvm::sort(V.begin(), V.end());
}
size_t blockToIndex(BasicBlock *BB) const {
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index b29eeeacfec..b568445b724 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1869,11 +1869,11 @@ bool LowerTypeTestsModule::lower() {
}
Sets.emplace_back(I, MaxIndex);
}
- std::sort(Sets.begin(), Sets.end(),
- [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
- const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
- return S1.second < S2.second;
- });
+ llvm::sort(Sets.begin(), Sets.end(),
+ [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
+ const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
+ return S1.second < S2.second;
+ });
// For each disjoint set we found...
for (const auto &S : Sets) {
@@ -1894,7 +1894,7 @@ bool LowerTypeTestsModule::lower() {
// Order type identifiers by global index for determinism. This ordering is
// stable as there is a one-to-one mapping between metadata and indices.
- std::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) {
+ llvm::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) {
return TypeIdInfo[M1].Index < TypeIdInfo[M2].Index;
});
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index fd5fce42620..6444a3185f6 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -679,10 +679,10 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
Sum += NameFS.second.getEntrySamples();
R.push_back(&NameFS.second);
}
- std::sort(R.begin(), R.end(),
- [](const FunctionSamples *L, const FunctionSamples *R) {
- return L->getEntrySamples() > R->getEntrySamples();
- });
+ llvm::sort(R.begin(), R.end(),
+ [](const FunctionSamples *L, const FunctionSamples *R) {
+ return L->getEntrySamples() > R->getEntrySamples();
+ });
}
return R;
}
@@ -1170,13 +1170,13 @@ static SmallVector<InstrProfValueData, 2> SortCallTargets(
SmallVector<InstrProfValueData, 2> R;
for (auto I = M.begin(); I != M.end(); ++I)
R.push_back({Function::getGUID(I->getKey()), I->getValue()});
- std::sort(R.begin(), R.end(),
- [](const InstrProfValueData &L, const InstrProfValueData &R) {
- if (L.Count == R.Count)
- return L.Value > R.Value;
- else
- return L.Count > R.Count;
- });
+ llvm::sort(R.begin(), R.end(),
+ [](const InstrProfValueData &L, const InstrProfValueData &R) {
+ if (L.Count == R.Count)
+ return L.Value > R.Value;
+ else
+ return L.Count > R.Count;
+ });
return R;
}
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 68b4d32d06a..44bb67902c8 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -272,7 +272,7 @@ namespace {
write(Len);
write(Number);
- std::sort(
+ llvm::sort(
SortedLinesByFile.begin(), SortedLinesByFile.end(),
[](StringMapEntry<GCOVLines> *LHS, StringMapEntry<GCOVLines> *RHS) {
return LHS->getKey() < RHS->getKey();
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index d950e2e730f..4f0abc6bb78 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -659,11 +659,11 @@ void SanitizerCoverageModule::InjectTraceForSwitch(
C = ConstantExpr::getCast(CastInst::ZExt, It.getCaseValue(), Int64Ty);
Initializers.push_back(C);
}
- std::sort(Initializers.begin() + 2, Initializers.end(),
- [](const Constant *A, const Constant *B) {
- return cast<ConstantInt>(A)->getLimitedValue() <
- cast<ConstantInt>(B)->getLimitedValue();
- });
+ llvm::sort(Initializers.begin() + 2, Initializers.end(),
+ [](const Constant *A, const Constant *B) {
+ return cast<ConstantInt>(A)->getLimitedValue() <
+ cast<ConstantInt>(B)->getLimitedValue();
+ });
ArrayType *ArrayOfInt64Ty = ArrayType::get(Int64Ty, Initializers.size());
GlobalVariable *GV = new GlobalVariable(
*CurModule, ArrayOfInt64Ty, false, GlobalVariable::InternalLinkage,
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 598c024b4b3..4b53628e442 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -571,8 +571,8 @@ void ConstantHoistingPass::findAndMakeBaseConstant(
/// rematerialized with an add from a common base constant.
void ConstantHoistingPass::findBaseConstants() {
// Sort the constants by value and type. This invalidates the mapping!
- std::sort(ConstCandVec.begin(), ConstCandVec.end(),
- [](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
+ llvm::sort(ConstCandVec.begin(), ConstCandVec.end(),
+ [](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
if (LHS.ConstInt->getType() != RHS.ConstInt->getType())
return LHS.ConstInt->getType()->getBitWidth() <
RHS.ConstInt->getType()->getBitWidth();
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 66bd27f48b6..663375b1416 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -748,11 +748,11 @@ private:
// TODO: Remove fully-redundant expressions.
// Get instruction from the Map, assume that all the Instructions
// with same VNs have same rank (this is an approximation).
- std::sort(Ranks.begin(), Ranks.end(),
- [this, &Map](const VNType &r1, const VNType &r2) {
- return (rank(*Map.lookup(r1).begin()) <
- rank(*Map.lookup(r2).begin()));
- });
+ llvm::sort(Ranks.begin(), Ranks.end(),
+ [this, &Map](const VNType &r1, const VNType &r2) {
+ return (rank(*Map.lookup(r1).begin()) <
+ rank(*Map.lookup(r2).begin()));
+ });
// - Sort VNs according to their rank, and start with lowest ranked VN
// - Take a VN and for each instruction with same VN
diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index c51738dc656..4368b68582e 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -239,7 +239,7 @@ public:
SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
- std::sort(Ops.begin(), Ops.end());
+ llvm::sort(Ops.begin(), Ops.end());
for (auto &P : Ops) {
Blocks.push_back(P.first);
Values.push_back(P.second);
@@ -361,7 +361,7 @@ public:
for (auto &U : I->uses())
op_push_back(U.getUser());
- std::sort(op_begin(), op_end());
+ llvm::sort(op_begin(), op_end());
}
void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
@@ -761,7 +761,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
}
if (Preds.size() < 2)
return 0;
- std::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds.begin(), Preds.end());
unsigned NumOrigPreds = Preds.size();
// We can only sink instructions through unconditional branches.
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 200df9ded40..e7697e7838e 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -555,9 +555,9 @@ bool GuardWideningImpl::combineRangeChecks(
// CurrentChecks.size() will typically be 3 here, but so far there has been
// no need to hard-code that fact.
- std::sort(CurrentChecks.begin(), CurrentChecks.end(),
- [&](const GuardWideningImpl::RangeCheck &LHS,
- const GuardWideningImpl::RangeCheck &RHS) {
+ llvm::sort(CurrentChecks.begin(), CurrentChecks.end(),
+ [&](const GuardWideningImpl::RangeCheck &LHS,
+ const GuardWideningImpl::RangeCheck &RHS) {
return LHS.getOffsetValue().slt(RHS.getOffsetValue());
});
diff --git a/llvm/lib/Transforms/Scalar/LoopSink.cpp b/llvm/lib/Transforms/Scalar/LoopSink.cpp
index 0cbea15a552..a2983e60b8f 100644
--- a/llvm/lib/Transforms/Scalar/LoopSink.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopSink.cpp
@@ -200,10 +200,10 @@ static bool sinkInstruction(Loop &L, Instruction &I,
SmallVector<BasicBlock *, 2> SortedBBsToSinkInto;
SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(),
BBsToSinkInto.end());
- std::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(),
- [&](BasicBlock *A, BasicBlock *B) {
- return *LoopBlockNumber.find(A) < *LoopBlockNumber.find(B);
- });
+ llvm::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(),
+ [&](BasicBlock *A, BasicBlock *B) {
+ return *LoopBlockNumber.find(A) < *LoopBlockNumber.find(B);
+ });
BasicBlock *MoveBB = *SortedBBsToSinkInto.begin();
// FIXME: Optimize the efficiency for cloned value replacement. The current
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 3b24fdbb3dd..ef4ae4af5cf 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1479,7 +1479,7 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
- std::sort(Key.begin(), Key.end());
+ llvm::sort(Key.begin(), Key.end());
return Uniquifier.count(Key);
}
@@ -1503,7 +1503,7 @@ bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying.
- std::sort(Key.begin(), Key.end());
+ llvm::sort(Key.begin(), Key.end());
if (!Uniquifier.insert(Key).second)
return false;
@@ -4220,7 +4220,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for
// uniquifying.
- std::sort(Key.begin(), Key.end());
+ llvm::sort(Key.begin(), Key.end());
std::pair<BestFormulaeTy::const_iterator, bool> P =
BestFormulae.insert(std::make_pair(Key, FIdx));
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 0bfa254fdb0..5635f54f491 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -439,10 +439,10 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi)
#endif // MERGEICMPS_DOT_ON
// Reorder blocks by LHS. We can do that without changing the
// semantics because we are only accessing dereferencable memory.
- std::sort(Comparisons_.begin(), Comparisons_.end(),
- [](const BCECmpBlock &a, const BCECmpBlock &b) {
- return a.Lhs() < b.Lhs();
- });
+ llvm::sort(Comparisons_.begin(), Comparisons_.end(),
+ [](const BCECmpBlock &a, const BCECmpBlock &b) {
+ return a.Lhs() < b.Lhs();
+ });
#ifdef MERGEICMPS_DOT_ON
errs() << "AFTER REORDERING:\n\n";
dump();
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 892bc7ffcee..e82f69a8aa9 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -958,7 +958,8 @@ static bool isCopyOfAPHI(const Value *V) {
// order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks.
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
- std::sort(Ops.begin(), Ops.end(), [&](const ValPair &P1, const ValPair &P2) {
+ llvm::sort(Ops.begin(), Ops.end(),
+ [&](const ValPair &P1, const ValPair &P2) {
return BlockInstRange.lookup(P1.second).first <
BlockInstRange.lookup(P2.second).first;
});
@@ -3423,10 +3424,10 @@ bool NewGVN::runGVN() {
for (auto &B : RPOT) {
auto *Node = DT->getNode(B);
if (Node->getChildren().size() > 1)
- std::sort(Node->begin(), Node->end(),
- [&](const DomTreeNode *A, const DomTreeNode *B) {
- return RPOOrdering[A] < RPOOrdering[B];
- });
+ llvm::sort(Node->begin(), Node->end(),
+ [&](const DomTreeNode *A, const DomTreeNode *B) {
+ return RPOOrdering[A] < RPOOrdering[B];
+ });
}
// Now a standard depth first ordering of the domtree is equivalent to RPO.
@@ -3948,7 +3949,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
// Sort the whole thing.
- std::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
+ llvm::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
for (auto &VD : DFSOrderedSet) {
int MemberDFSIn = VD.DFSIn;
int MemberDFSOut = VD.DFSOut;
@@ -4110,7 +4111,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
// If we have possible dead stores to look at, try to eliminate them.
if (CC->getStoreCount() > 0) {
convertClassToLoadsAndStores(*CC, PossibleDeadStores);
- std::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
+ llvm::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
ValueDFSStack EliminationStack;
for (auto &VD : PossibleDeadStores) {
int MemberDFSIn = VD.DFSIn;
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 016b5d19940..6b1ae2c3cab 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -522,7 +522,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
};
// We need the order of list to be stable so that naming ends up stable
// when we split edges. This makes test cases much easier to write.
- std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
+ llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
// We can sometimes end up with duplicate poll locations. This happens if
// a single loop is visited more than once. The fact this happens seems
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index f8adbc62f08..a97f6517cd5 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1823,7 +1823,7 @@ static void relocationViaAlloca(
}
}
- std::sort(Uses.begin(), Uses.end());
+ llvm::sort(Uses.begin(), Uses.end());
auto Last = std::unique(Uses.begin(), Uses.end());
Uses.erase(Last, Uses.end());
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 83e5ade9247..d5ed97f900f 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -273,7 +273,7 @@ public:
int OldSize = Slices.size();
Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
- std::sort(SliceI, Slices.end());
+ llvm::sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
}
@@ -1057,7 +1057,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
- std::sort(Slices.begin(), Slices.end());
+ llvm::sort(Slices.begin(), Slices.end());
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1891,7 +1891,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
"All non-integer types eliminated!");
return RHSTy->getNumElements() < LHSTy->getNumElements();
};
- std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
+ llvm::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
CandidateTys.erase(
std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
CandidateTys.end());
@@ -4152,7 +4152,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
}
if (!IsSorted)
- std::sort(AS.begin(), AS.end());
+ llvm::sort(AS.begin(), AS.end());
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index e3d2c1702df..bbe17ed627f 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -1128,11 +1128,12 @@ static Loop *buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks,
// matter as we're just trying to build up the map from inside-out; we use
// the map in a more stably ordered way below.
auto OrderedClonedExitsInLoops = ClonedExitsInLoops;
- std::sort(OrderedClonedExitsInLoops.begin(), OrderedClonedExitsInLoops.end(),
- [&](BasicBlock *LHS, BasicBlock *RHS) {
- return ExitLoopMap.lookup(LHS)->getLoopDepth() <
- ExitLoopMap.lookup(RHS)->getLoopDepth();
- });
+ llvm::sort(OrderedClonedExitsInLoops.begin(),
+ OrderedClonedExitsInLoops.end(),
+ [&](BasicBlock *LHS, BasicBlock *RHS) {
+ return ExitLoopMap.lookup(LHS)->getLoopDepth() <
+ ExitLoopMap.lookup(RHS)->getLoopDepth();
+ });
// Populate the existing ExitLoopMap with everything reachable from each
// exit, starting from the inner most exit.
diff --git a/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp b/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
index b8c12ad5ea8..8382220fc9e 100644
--- a/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
+++ b/llvm/lib/Transforms/Utils/ImportedFunctionsInliningStatistics.cpp
@@ -161,7 +161,7 @@ void ImportedFunctionsInliningStatistics::dump(const bool Verbose) {
void ImportedFunctionsInliningStatistics::calculateRealInlines() {
// Removing duplicated Callers.
- std::sort(NonImportedCallers.begin(), NonImportedCallers.end());
+ llvm::sort(NonImportedCallers.begin(), NonImportedCallers.end());
NonImportedCallers.erase(
std::unique(NonImportedCallers.begin(), NonImportedCallers.end()),
NonImportedCallers.end());
@@ -190,13 +190,14 @@ ImportedFunctionsInliningStatistics::getSortedNodes() {
for (const NodesMapTy::value_type& Node : NodesMap)
SortedNodes.push_back(&Node);
- std::sort(
+ llvm::sort(
SortedNodes.begin(), SortedNodes.end(),
[&](const SortedNodesTy::value_type &Lhs,
const SortedNodesTy::value_type &Rhs) {
if (Lhs->second->NumberOfInlines != Rhs->second->NumberOfInlines)
return Lhs->second->NumberOfInlines > Rhs->second->NumberOfInlines;
- if (Lhs->second->NumberOfRealInlines != Rhs->second->NumberOfRealInlines)
+ if (Lhs->second->NumberOfRealInlines !=
+ Rhs->second->NumberOfRealInlines)
return Lhs->second->NumberOfRealInlines >
Rhs->second->NumberOfRealInlines;
return Lhs->first() < Rhs->first();
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 80b943588a1..f18bd2539a7 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -382,7 +382,7 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
Case.getCaseSuccessor()));
- std::sort(Cases.begin(), Cases.end(), CaseCmp());
+ llvm::sort(Cases.begin(), Cases.end(), CaseCmp());
// Merge case into clusters
if (Cases.size() >= 2) {
diff --git a/llvm/lib/Transforms/Utils/PredicateInfo.cpp b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
index 3c236d6a7ba..2676f6673ac 100644
--- a/llvm/lib/Transforms/Utils/PredicateInfo.cpp
+++ b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
@@ -553,7 +553,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
auto Comparator = [&](const Value *A, const Value *B) {
return valueComesBefore(OI, A, B);
};
- std::sort(OpsToRename.begin(), OpsToRename.end(), Comparator);
+ llvm::sort(OpsToRename.begin(), OpsToRename.end(), Comparator);
ValueDFS_Compare Compare(OI);
// Compute liveness, and rename in O(uses) per Op.
for (auto *Op : OpsToRename) {
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index e55b530b254..f43c82f58dd 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -475,7 +475,7 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Sort the stores by their index, making it efficient to do a lookup with a
// binary search.
- std::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
+ llvm::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());
// Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any.
@@ -631,10 +631,10 @@ void PromoteMem2Reg::run() {
SmallVector<BasicBlock *, 32> PHIBlocks;
IDF.calculate(PHIBlocks);
if (PHIBlocks.size() > 1)
- std::sort(PHIBlocks.begin(), PHIBlocks.end(),
- [this](BasicBlock *A, BasicBlock *B) {
- return BBNumbers.lookup(A) < BBNumbers.lookup(B);
- });
+ llvm::sort(PHIBlocks.begin(), PHIBlocks.end(),
+ [this](BasicBlock *A, BasicBlock *B) {
+ return BBNumbers.lookup(A) < BBNumbers.lookup(B);
+ });
unsigned CurrentVersion = 0;
for (BasicBlock *BB : PHIBlocks)
@@ -740,7 +740,7 @@ void PromoteMem2Reg::run() {
// Ok, now we know that all of the PHI nodes are missing entries for some
// basic blocks. Start by sorting the incoming predecessors for efficient
// access.
- std::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds.begin(), Preds.end());
// Now we loop through all BB's which have entries in SomePHI and remove
// them from the Preds list.
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 389ec419b5c..19c403a48e2 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5519,7 +5519,7 @@ static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
SmallVector<int64_t,4> Values;
for (auto &C : SI->cases())
Values.push_back(C.getCaseValue()->getValue().getSExtValue());
- std::sort(Values.begin(), Values.end());
+ llvm::sort(Values.begin(), Values.end());
// If the switch is already dense, there's nothing useful to do here.
if (isSwitchDense(Values))
diff --git a/llvm/lib/Transforms/Utils/SplitModule.cpp b/llvm/lib/Transforms/Utils/SplitModule.cpp
index 7966fa43712..39a4e565c2e 100644
--- a/llvm/lib/Transforms/Utils/SplitModule.cpp
+++ b/llvm/lib/Transforms/Utils/SplitModule.cpp
@@ -180,12 +180,14 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
std::make_pair(std::distance(GVtoClusterMap.member_begin(I),
GVtoClusterMap.member_end()), I));
- std::sort(Sets.begin(), Sets.end(), [](const SortType &a, const SortType &b) {
- if (a.first == b.first)
- return a.second->getData()->getName() > b.second->getData()->getName();
- else
- return a.first > b.first;
- });
+ llvm::sort(Sets.begin(), Sets.end(),
+ [](const SortType &a, const SortType &b) {
+ if (a.first == b.first)
+ return a.second->getData()->getName() >
+ b.second->getData()->getName();
+ else
+ return a.first > b.first;
+ });
for (auto &I : Sets) {
unsigned CurrentClusterID = BalancinQueue.top().first;