Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp    4
-rw-r--r--  llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp                12
-rw-r--r--  llvm/lib/Transforms/Scalar/Float2Int.cpp                    4
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopDistribute.cpp               2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInterchange.cpp             15
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp          19
-rw-r--r--  llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp        7
-rw-r--r--  llvm/lib/Transforms/Scalar/StructurizeCFG.cpp              16
-rw-r--r--  llvm/lib/Transforms/Utils/InlineFunction.cpp                3
-rw-r--r--  llvm/lib/Transforms/Utils/LowerSwitch.cpp                   4
-rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp                     7
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp                  20
12 files changed, 49 insertions, 64 deletions
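
Every hunk below makes the same mechanical change: a hand-written reverse_iterator loop becomes a range-based for loop over the reverse() adapter from llvm/ADT/STLExtras.h. A minimal sketch of the before/after pattern, illustrative only and using a plain SmallVector<int> rather than the containers touched in this commit:

#include "llvm/ADT/STLExtras.h"      // llvm::reverse
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

static void visitBackwards(llvm::SmallVectorImpl<int> &Local) {
  // Before: explicit reverse iterators, dereferenced by hand on every use.
  for (llvm::SmallVectorImpl<int>::reverse_iterator It = Local.rbegin(),
                                                    E = Local.rend();
       It != E; ++It)
    llvm::outs() << *It << ' ';
  llvm::outs() << '\n';

  // After: reverse() wraps rbegin()/rend(), so the loop reads like any other
  // range-based for loop and the element is named directly.
  for (int V : llvm::reverse(Local))
    llvm::outs() << V << ' ';
  llvm::outs() << '\n';
}

Both loops visit the elements of Local from last to first; only the spelling changes.
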
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 287d5bfa260..a23b1ddf825 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -326,9 +326,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
const DataLayout &DL) {
SmallSet<Value*, 8> WriteTargets;
// Iterate from the end.
- for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
- E = Local.rend(); It != E; ++It) {
- Instruction *I = *It;
+ for (Instruction *I : reverse(Local)) {
if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
Value *Addr = Store->getPointerOperand();
if (!shouldInstrumentReadWriteFromAddress(Addr))
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 39e99a00b0c..3445596fb61 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -1460,17 +1460,13 @@ bool ObjCARCOpt::Visit(Function &F,
// Use reverse-postorder on the reverse CFG for bottom-up.
bool BottomUpNestingDetected = false;
- for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
- ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
- I != E; ++I)
- BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
+ for (BasicBlock *BB : reverse(ReverseCFGPostOrder))
+ BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
// Use reverse-postorder for top-down.
bool TopDownNestingDetected = false;
- for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
- PostOrder.rbegin(), E = PostOrder.rend();
- I != E; ++I)
- TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
+ for (BasicBlock *BB : reverse(PostOrder))
+ TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);
return TopDownNestingDetected && BottomUpNestingDetected;
}
diff --git a/llvm/lib/Transforms/Scalar/Float2Int.cpp b/llvm/lib/Transforms/Scalar/Float2Int.cpp
index bf37aa03eab..2ddc5436dbd 100644
--- a/llvm/lib/Transforms/Scalar/Float2Int.cpp
+++ b/llvm/lib/Transforms/Scalar/Float2Int.cpp
@@ -246,7 +246,7 @@ void Float2Int::walkBackwards(const SmallPtrSetImpl<Instruction*> &Roots) {
// Walk forwards down the list of seen instructions, so we visit defs before
// uses.
void Float2Int::walkForwards() {
- for (auto &It : make_range(SeenInsts.rbegin(), SeenInsts.rend())) {
+ for (auto &It : reverse(SeenInsts)) {
if (It.second != unknownRange())
continue;
@@ -511,7 +511,7 @@ Value *Float2Int::convert(Instruction *I, Type *ToTy) {
// Perform dead code elimination on the instructions we just modified.
void Float2Int::cleanup() {
- for (auto &I : make_range(ConvertedInsts.rbegin(), ConvertedInsts.rend()))
+ for (auto &I : reverse(ConvertedInsts))
I.first->eraseFromParent();
}
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index a62a44be4cc..464db8e1811 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -184,7 +184,7 @@ public:
// Delete the instructions backwards, as it has a reduced likelihood of
// having to update as many def-use and use-def chains.
- for (auto *Inst : make_range(Unused.rbegin(), Unused.rend())) {
+ for (auto *Inst : reverse(Unused)) {
if (!Inst->use_empty())
Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
Inst->eraseFromParent();
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 5dc0de25971..2f50e2dd77f 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -812,7 +812,6 @@ bool LoopInterchangeLegality::currentLimitations() {
// A[j+1][i+2] = A[j][i]+k;
// }
// }
- bool FoundInduction = false;
Instruction *InnerIndexVarInc = nullptr;
if (InnerInductionVar->getIncomingBlock(0) == InnerLoopPreHeader)
InnerIndexVarInc =
@@ -828,17 +827,17 @@ bool LoopInterchangeLegality::currentLimitations() {
// we do not have any instruction between the induction variable and branch
// instruction.
- for (auto I = InnerLoopLatch->rbegin(), E = InnerLoopLatch->rend();
- I != E && !FoundInduction; ++I) {
- if (isa<BranchInst>(*I) || isa<CmpInst>(*I) || isa<TruncInst>(*I))
+ bool FoundInduction = false;
+ for (const Instruction &I : reverse(*InnerLoopLatch)) {
+ if (isa<BranchInst>(I) || isa<CmpInst>(I) || isa<TruncInst>(I))
continue;
- const Instruction &Ins = *I;
// We found an instruction. If this is not induction variable then it is not
// safe to split this loop latch.
- if (!Ins.isIdenticalTo(InnerIndexVarInc))
+ if (!I.isIdenticalTo(InnerIndexVarInc))
return true;
- else
- FoundInduction = true;
+
+ FoundInduction = true;
+ break;
}
// The loop latch ended and we didn't find the induction variable return as
// current limitation.
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 190fc5a2dc4..252ab928c1c 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2736,34 +2736,31 @@ void LSRInstance::CollectChains() {
LatchPath.push_back(LoopHeader);
// Walk the instruction stream from the loop header to the loop latch.
- for (SmallVectorImpl<BasicBlock *>::reverse_iterator
- BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend();
- BBIter != BBEnd; ++BBIter) {
- for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end();
- I != E; ++I) {
+ for (BasicBlock *BB : reverse(LatchPath)) {
+ for (Instruction &I : *BB) {
// Skip instructions that weren't seen by IVUsers analysis.
- if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&*I))
+ if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
continue;
// Ignore users that are part of a SCEV expression. This way we only
// consider leaf IV Users. This effectively rediscovers a portion of
// IVUsers analysis but in program order this time.
- if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(&*I)))
+ if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
continue;
// Remove this instruction from any NearUsers set it may be in.
for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
ChainIdx < NChains; ++ChainIdx) {
- ChainUsersVec[ChainIdx].NearUsers.erase(&*I);
+ ChainUsersVec[ChainIdx].NearUsers.erase(&I);
}
// Search for operands that can be chained.
SmallPtrSet<Instruction*, 4> UniqueOperands;
- User::op_iterator IVOpEnd = I->op_end();
- User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE);
+ User::op_iterator IVOpEnd = I.op_end();
+ User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
while (IVOpIter != IVOpEnd) {
Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
if (UniqueOperands.insert(IVOpInst).second)
- ChainInstruction(&*I, IVOpInst, ChainUsersVec);
+ ChainInstruction(&I, IVOpInst, ChainUsersVec);
IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
}
} // Continue walking down the instructions.
diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index e4af9916880..eb67aea37fc 100644
--- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -377,11 +377,8 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
StoreInst *Store0) {
DEBUG(dbgs() << "can Sink? : "; Store0->dump(); dbgs() << "\n");
BasicBlock *BB0 = Store0->getParent();
- for (BasicBlock::reverse_iterator RBI = BB1->rbegin(), RBE = BB1->rend();
- RBI != RBE; ++RBI) {
- Instruction *Inst = &*RBI;
-
- auto *Store1 = dyn_cast<StoreInst>(Inst);
+ for (Instruction &Inst : reverse(*BB1)) {
+ auto *Store1 = dyn_cast<StoreInst>(&Inst);
if (!Store1)
continue;
diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index 802b3e476aa..ff46d81cb4d 100644
--- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -501,21 +501,21 @@ void StructurizeCFG::collectInfos() {
// Reset the visited nodes
Visited.clear();
- for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend();
- OI != OE; ++OI) {
+ for (RegionNode *RN : reverse(Order)) {
- DEBUG(dbgs() << "Visiting: " <<
- ((*OI)->isSubRegion() ? "SubRegion with entry: " : "") <<
- (*OI)->getEntry()->getName() << " Loop Depth: " << LI->getLoopDepth((*OI)->getEntry()) << "\n");
+ DEBUG(dbgs() << "Visiting: "
+ << (RN->isSubRegion() ? "SubRegion with entry: " : "")
+ << RN->getEntry()->getName() << " Loop Depth: "
+ << LI->getLoopDepth(RN->getEntry()) << "\n");
// Analyze all the conditions leading to a node
- gatherPredicates(*OI);
+ gatherPredicates(RN);
// Remember that we've seen this node
- Visited.insert((*OI)->getEntry());
+ Visited.insert(RN->getEntry());
// Find the last back edges
- analyzeLoops(*OI);
+ analyzeLoops(RN);
}
}
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 40f5d4ce361..a06c8499e19 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1287,8 +1287,7 @@ updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
// Starting from the top, rebuild the nodes to point to the new inlined-at
// location (then rebuilding the rest of the chain behind it) and update the
// map of already-constructed inlined-at nodes.
- for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
- InlinedAtLocations.rend())) {
+ for (const DILocation *MD : reverse(InlinedAtLocations)) {
Last = IANodes[MD] = DILocation::getDistinct(
Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
}
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 52beb154249..e73d28fd98e 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -192,8 +192,8 @@ static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
}
// Remove incoming values in the reverse order to prevent invalidating
// *successive* index.
- for (auto III = Indices.rbegin(), IIE = Indices.rend(); III != IIE; ++III)
- PN->removeIncomingValue(*III);
+ for (unsigned III : reverse(Indices))
+ PN->removeIncomingValue(III);
}
}
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index a52ea34e77b..7cff51e9866 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -469,10 +469,9 @@ MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock,
auto It = PerBlockAccesses.find(CurrNode->getBlock());
if (It != PerBlockAccesses.end()) {
auto &Accesses = It->second;
- for (auto RAI = Accesses->rbegin(), RAE = Accesses->rend(); RAI != RAE;
- ++RAI) {
- if (isa<MemoryDef>(*RAI) || isa<MemoryPhi>(*RAI))
- return &*RAI;
+ for (MemoryAccess &RA : reverse(*Accesses)) {
+ if (isa<MemoryDef>(RA) || isa<MemoryPhi>(RA))
+ return &RA;
}
}
CurrNode = CurrNode->getIDom();
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 7c0f85c2ca1..6e1ac2c9a69 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1534,25 +1534,25 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
// Look for a store to the same pointer in BrBB.
unsigned MaxNumInstToLookAt = 9;
- for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
- RI != RE && MaxNumInstToLookAt; ++RI) {
- Instruction *CurI = &*RI;
+ for (Instruction &CurI : reverse(*BrBB)) {
+ if (!MaxNumInstToLookAt)
+ break;
// Skip debug info.
if (isa<DbgInfoIntrinsic>(CurI))
continue;
--MaxNumInstToLookAt;
// Could be calling an instruction that effects memory like free().
- if (CurI->mayHaveSideEffects() && !isa<StoreInst>(CurI))
+ if (CurI.mayHaveSideEffects() && !isa<StoreInst>(CurI))
return nullptr;
- StoreInst *SI = dyn_cast<StoreInst>(CurI);
- // Found the previous store make sure it stores to the same location.
- if (SI && SI->getPointerOperand() == StorePtr)
- // Found the previous store, return its value operand.
- return SI->getValueOperand();
- else if (SI)
+ if (auto *SI = dyn_cast<StoreInst>(&CurI)) {
+ // Found the previous store make sure it stores to the same location.
+ if (SI->getPointerOperand() == StorePtr)
+ // Found the previous store, return its value operand.
+ return SI->getValueOperand();
return nullptr; // Unknown store.
+ }
}
return nullptr;
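
A closing note on the fixPhis() hunk in LowerSwitch.cpp above: the loop still walks the recorded indices in reverse even after the cleanup, because removing an incoming value shifts every later index down by one, so erasing in ascending order would invalidate the indices collected earlier. A standalone sketch of that invariant with a plain std::vector, illustrative only and not taken from the patch:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Values = {10, 20, 30, 40, 50};
  std::vector<unsigned> Indices = {1, 3}; // positions to erase, collected in ascending order

  // Erase from the highest index down, mirroring
  //   for (unsigned III : reverse(Indices)) PN->removeIncomingValue(III);
  // so the earlier (smaller) indices stay valid after each erase.
  for (auto It = Indices.rbegin(); It != Indices.rend(); ++It)
    Values.erase(Values.begin() + *It);

  for (int V : Values)
    std::printf("%d ", V);   // prints: 10 30 50
  std::printf("\n");
  return 0;
}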