Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp                     |  2
-rw-r--r--  llvm/lib/Transforms/IPO/MergeFunctions.cpp                | 20
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  |  8
-rw-r--r--  llvm/lib/Transforms/Scalar/IndVarSimplify.cpp             | 31
-rw-r--r--  llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp         | 22
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp         |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp            |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp         | 53
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnswitch.cpp               |  5
-rw-r--r--  llvm/lib/Transforms/Scalar/NaryReassociate.cpp            | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/Reassociate.cpp                |  4
-rw-r--r--  llvm/lib/Transforms/Utils/BasicBlockUtils.cpp             |  4
-rw-r--r--  llvm/lib/Transforms/Utils/CloneFunction.cpp               |  4
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp                       |  4
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnroll.cpp                  |  2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyIndVar.cpp              |  8
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp           | 27
17 files changed, 99 insertions, 109 deletions
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index bb2ebb46a4b..f277a51ae65 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -239,7 +239,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// we delete a constant array, we may also be holding a pointer to one of its
// elements (or an element of one of its elements if we're dealing with an
// array of arrays) in the worklist.
- SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
+ SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
while (!WorkList.empty()) {
Value *UV = WorkList.pop_back_val();
if (!UV)
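A minimal sketch of the idiom this hunk relies on, assuming an LLVM build environment (processUser is a hypothetical callback that may delete values still sitting in the queue):

    // Deletion-safe worklist: a WeakTrackingVH is set to null when its value
    // is deleted and retargeted when the value is RAUW'd, so stale queue
    // entries can be detected and skipped instead of dereferenced.
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Value.h"
    #include "llvm/IR/ValueHandle.h"
    using namespace llvm;

    static void visitUsersSafely(Value *V,
                                 function_ref<void(Value *)> ProcessUser) {
      SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
      while (!WorkList.empty()) {
        Value *UV = WorkList.pop_back_val();
        if (!UV)
          continue; // This user was deleted while queued.
        ProcessUser(UV); // May delete values still held in WorkList.
      }
    }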
diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 21bf149e7bc..0e478ba607b 100644
--- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -207,12 +207,12 @@ private:
/// A work queue of functions that may have been modified and should be
/// analyzed again.
- std::vector<WeakVH> Deferred;
+ std::vector<WeakTrackingVH> Deferred;
/// Checks the rules of the order relation introduced among the function set.
/// Returns true if the sanity check passed, and false if it failed.
#ifndef NDEBUG
- bool doSanityCheck(std::vector<WeakVH> &Worklist);
+ bool doSanityCheck(std::vector<WeakTrackingVH> &Worklist);
#endif
/// Insert a ComparableFunction into the FnTree, or merge it away if it's
@@ -286,7 +286,7 @@ ModulePass *llvm::createMergeFunctionsPass() {
}
#ifndef NDEBUG
-bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
+bool MergeFunctions::doSanityCheck(std::vector<WeakTrackingVH> &Worklist) {
if (const unsigned Max = NumFunctionsForSanityCheck) {
unsigned TripleNumber = 0;
bool Valid = true;
@@ -294,10 +294,12 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";
unsigned i = 0;
- for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
+ for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(),
+ E = Worklist.end();
I != E && i < Max; ++I, ++i) {
unsigned j = i;
- for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
+ for (std::vector<WeakTrackingVH>::iterator J = I; J != E && j < Max;
+ ++J, ++j) {
Function *F1 = cast<Function>(*I);
Function *F2 = cast<Function>(*J);
int Res1 = FunctionComparator(F1, F2, &GlobalNumbers).compare();
@@ -315,7 +317,7 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
continue;
unsigned k = j;
- for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
+ for (std::vector<WeakTrackingVH>::iterator K = J; K != E && k < Max;
++k, ++K, ++TripleNumber) {
if (K == J)
continue;
@@ -385,12 +387,12 @@ bool MergeFunctions::runOnModule(Module &M) {
// consider merging it. Otherwise it is dropped and never considered again.
if ((I != S && std::prev(I)->first == I->first) ||
(std::next(I) != IE && std::next(I)->first == I->first) ) {
- Deferred.push_back(WeakVH(I->second));
+ Deferred.push_back(WeakTrackingVH(I->second));
}
}
do {
- std::vector<WeakVH> Worklist;
+ std::vector<WeakTrackingVH> Worklist;
Deferred.swap(Worklist);
DEBUG(doSanityCheck(Worklist));
@@ -399,7 +401,7 @@ bool MergeFunctions::runOnModule(Module &M) {
DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
// Insert functions and merge them.
- for (WeakVH &I : Worklist) {
+ for (WeakTrackingVH &I : Worklist) {
if (!I)
continue;
Function *F = cast<Function>(I);
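The loop above drains a swap-and-refill worklist. A condensed sketch under the same LLVM-headers assumption (tryToProcess is a hypothetical callback that may merge functions away and push newly touched functions back onto Deferred):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/ValueHandle.h"
    #include <vector>
    using namespace llvm;

    static void drainDeferred(std::vector<WeakTrackingVH> &Deferred,
                              function_ref<void(Function *)> TryToProcess) {
      do {
        std::vector<WeakTrackingVH> Worklist;
        Deferred.swap(Worklist); // Work discovered below refills Deferred.
        for (WeakTrackingVH &VH : Worklist) {
          if (!VH)
            continue; // The function was merged away and deleted.
          TryToProcess(cast<Function>(VH));
        }
      } while (!Deferred.empty());
    }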
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 0f664463625..1eb98b18bfb 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1948,9 +1948,9 @@ static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
return isAllocLikeFn(V, TLI) && V != AI;
}
-static bool
-isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
- const TargetLibraryInfo *TLI) {
+static bool isAllocSiteRemovable(Instruction *AI,
+ SmallVectorImpl<WeakTrackingVH> &Users,
+ const TargetLibraryInfo *TLI) {
SmallVector<Instruction*, 4> Worklist;
Worklist.push_back(AI);
@@ -2034,7 +2034,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
// If we have a malloc call which is only used in any amount of comparisons
// to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate.
- SmallVector<WeakVH, 64> Users;
+ SmallVector<WeakTrackingVH, 64> Users;
if (isAllocSiteRemovable(&MI, Users, &TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
// Lowering all @llvm.objectsize calls first because they may
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index dcb2a4a0c6e..3953198fe60 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -97,7 +97,7 @@ class IndVarSimplify {
TargetLibraryInfo *TLI;
const TargetTransformInfo *TTI;
- SmallVector<WeakVH, 16> DeadInsts;
+ SmallVector<WeakTrackingVH, 16> DeadInsts;
bool Changed = false;
bool isValidRewrite(Value *FromVal, Value *ToVal);
@@ -415,8 +415,8 @@ void IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) {
Compare->getName());
// In the following deletions, PN may become dead and may be deleted.
- // Use a WeakVH to observe whether this happens.
- WeakVH WeakPH = PN;
+ // Use a WeakTrackingVH to observe whether this happens.
+ WeakTrackingVH WeakPH = PN;
// Delete the old floating point exit comparison. The branch starts using the
// new comparison.
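The WeakPH trick generalizes: capture a handle before a deletion that might cascade, then test the handle afterwards. A minimal sketch, assuming LLVM headers:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/ValueHandle.h"
    #include "llvm/Transforms/Utils/Local.h"
    using namespace llvm;

    // Returns true iff PN survived the recursive deletion rooted at OldCompare.
    static bool deleteCmpAndCheckPhi(PHINode *PN, Instruction *OldCompare) {
      WeakTrackingVH WeakPH = PN; // Nulled if PN dies in the cascade below.
      RecursivelyDeleteTriviallyDeadInstructions(OldCompare);
      return WeakPH != nullptr;
    }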
@@ -451,7 +451,7 @@ void IndVarSimplify::rewriteNonIntegerIVs(Loop *L) {
//
BasicBlock *Header = L->getHeader();
- SmallVector<WeakVH, 8> PHIs;
+ SmallVector<WeakTrackingVH, 8> PHIs;
for (BasicBlock::iterator I = Header->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I)
PHIs.push_back(PN);
@@ -901,7 +901,7 @@ class WidenIV {
PHINode *WidePhi;
Instruction *WideInc;
const SCEV *WideIncExpr;
- SmallVectorImpl<WeakVH> &DeadInsts;
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts;
SmallPtrSet<Instruction *,16> Widened;
SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
@@ -941,20 +941,13 @@ class WidenIV {
}
public:
- WidenIV(const WideIVInfo &WI, LoopInfo *LInfo,
- ScalarEvolution *SEv, DominatorTree *DTree,
- SmallVectorImpl<WeakVH> &DI, bool HasGuards) :
- OrigPhi(WI.NarrowIV),
- WideType(WI.WidestNativeType),
- LI(LInfo),
- L(LI->getLoopFor(OrigPhi->getParent())),
- SE(SEv),
- DT(DTree),
- HasGuards(HasGuards),
- WidePhi(nullptr),
- WideInc(nullptr),
- WideIncExpr(nullptr),
- DeadInsts(DI) {
+ WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
+ DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
+ bool HasGuards)
+ : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
+ L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
+ HasGuards(HasGuards), WidePhi(nullptr), WideInc(nullptr),
+ WideIncExpr(nullptr), DeadInsts(DI) {
assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended : ZeroExtended;
}
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index e6994896d3a..5e116ef2fe7 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -138,7 +138,7 @@ private:
// Tries to infer the specific address space of each address expression in
// Postorder.
- void inferAddressSpaces(ArrayRef<WeakVH> Postorder,
+ void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
ValueToAddrSpaceMapTy *InferredAddrSpace) const;
bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;
@@ -147,7 +147,7 @@ private:
// address spaces if InferredAddrSpace says so. Postorder is the postorder of
// all flat expressions in the use-def graph of function F.
bool
- rewriteWithNewAddressSpaces(ArrayRef<WeakVH> Postorder,
+ rewriteWithNewAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
const ValueToAddrSpaceMapTy &InferredAddrSpace,
Function *F) const;
@@ -162,7 +162,7 @@ private:
std::vector<std::pair<Value *, bool>> &PostorderStack,
DenseSet<Value *> &Visited) const;
- std::vector<WeakVH> collectFlatAddressExpressions(Function &F) const;
+ std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;
Value *cloneValueWithNewAddressSpace(
Value *V, unsigned NewAddrSpace,
@@ -303,7 +303,7 @@ void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
-std::vector<WeakVH>
+std::vector<WeakTrackingVH>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
// This function implements a non-recursive postorder traversal of a partial
// use-def graph of function F.
@@ -352,7 +352,7 @@ InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
}
}
- std::vector<WeakVH> Postorder; // The resultant postorder.
+ std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
while (!PostorderStack.empty()) {
Value *TopVal = PostorderStack.back().first;
// If the operands of the expression on the top are already explored,
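The traversal in this hunk is a standard two-phase, stack-based postorder. A generic sketch, assuming LLVM headers (Edges is a hypothetical function returning the operands to explore from a value):

    #include "llvm/ADT/DenseSet.h"
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Value.h"
    #include "llvm/IR/ValueHandle.h"
    #include <utility>
    #include <vector>
    using namespace llvm;

    static std::vector<WeakTrackingVH>
    postorderFrom(Value *Root,
                  function_ref<SmallVector<Value *, 4>(Value *)> Edges) {
      // Each stack entry pairs a value with an "operands explored" flag.
      std::vector<std::pair<Value *, bool>> Stack;
      Stack.emplace_back(Root, false);
      DenseSet<Value *> Visited;
      Visited.insert(Root);
      std::vector<WeakTrackingVH> Postorder;
      while (!Stack.empty()) {
        Value *V = Stack.back().first;
        if (Stack.back().second) {
          // Second visit: all operands are emitted, so emit V after them.
          Postorder.push_back(V);
          Stack.pop_back();
          continue;
        }
        Stack.back().second = true; // First visit: push unexplored operands.
        for (Value *Op : Edges(V))
          if (Visited.insert(Op).second)
            Stack.emplace_back(Op, false);
      }
      return Postorder;
    }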
@@ -583,7 +583,7 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
return false;
// Collects all flat address expressions in postorder.
- std::vector<WeakVH> Postorder = collectFlatAddressExpressions(F);
+ std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);
// Runs a data-flow analysis to refine the address spaces of every expression
// in Postorder.
@@ -596,9 +596,9 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
}
// Constants need to be tracked through RAUW to handle cases with nested
-// constant expressions, so wrap values in WeakVH.
+// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpaces::inferAddressSpaces(
- ArrayRef<WeakVH> Postorder,
+ ArrayRef<WeakTrackingVH> Postorder,
ValueToAddrSpaceMapTy *InferredAddrSpace) const {
SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
// Initially, all expressions are in the uninitialized address space.
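That comment is the crux of the whole rename: unlike a raw pointer, a WeakTrackingVH is retargeted by replaceAllUsesWith. A tiny illustration, assuming LLVM headers (Old and New must have the same type for RAUW to be legal):

    #include "llvm/IR/Value.h"
    #include "llvm/IR/ValueHandle.h"
    using namespace llvm;

    static Value *trackThroughRAUW(Value *Old, Value *New) {
      WeakTrackingVH VH = Old;
      Old->replaceAllUsesWith(New); // Retargets VH from Old to New.
      return VH;                    // Yields New, not the stale Old pointer.
    }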
@@ -810,8 +810,8 @@ static Value::use_iterator skipToNextUser(Value::use_iterator I,
}
bool InferAddressSpaces::rewriteWithNewAddressSpaces(
- ArrayRef<WeakVH> Postorder,
- const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
+ ArrayRef<WeakTrackingVH> Postorder,
+ const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
// For each address expression to be modified, creates a clone of it with its
// pointer operands converted to the new address space. Since the pointer
// operands are converted, the clone is naturally in the new address space by
@@ -841,7 +841,7 @@ bool InferAddressSpaces::rewriteWithNewAddressSpaces(
SmallVector<Instruction *, 16> DeadInstructions;
// Replaces the uses of the old address expressions with the new ones.
- for (const WeakVH &WVH : Postorder) {
+ for (const WeakTrackingVH &WVH : Postorder) {
assert(WVH && "value was unexpectedly deleted");
Value *V = WVH;
Value *NewV = ValueWithNewAddrSpace.lookup(V);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 5042fc18d7c..0f4a1b436ea 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -499,7 +499,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(
Instruction *Inst = &*I++;
// Look for memset instructions, which may be optimized to a larger memset.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
- WeakVH InstPtr(&*I);
+ WeakTrackingVH InstPtr(&*I);
if (!processLoopMemSet(MSI, BECount))
continue;
MadeChange = true;
diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
index a5a81c33a8e..35c05e84fd6 100644
--- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -40,7 +40,7 @@ static bool simplifyLoopCFG(Loop &L, DominatorTree &DT, LoopInfo &LI) {
bool Changed = false;
// Copy blocks into a temporary array to avoid iterator invalidation issues
// as we remove them.
- SmallVector<WeakVH, 16> Blocks(L.blocks());
+ SmallVector<WeakTrackingVH, 16> Blocks(L.blocks());
for (auto &Block : Blocks) {
// Attempt to merge blocks in the trivial case. Don't modify blocks which
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index af137f6faa6..ccedb98d7fa 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -900,7 +900,7 @@ static bool isHighCostExpansion(const SCEV *S,
/// If any of the instructions in the specified set are trivially dead, delete
/// them and see if this makes any of their operands subsequently dead.
static bool
-DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
+DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
bool Changed = false;
while (!DeadInsts.empty()) {
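A condensed sketch of this helper's shape, again assuming LLVM headers; handles nulled by an earlier recursive deletion are simply skipped:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/ValueHandle.h"
    #include "llvm/Transforms/Utils/Local.h"
    using namespace llvm;

    static bool deleteTriviallyDead(SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
      bool Changed = false;
      while (!DeadInsts.empty()) {
        Value *V = DeadInsts.pop_back_val();
        // dyn_cast_or_null skips entries nulled by a previous deletion.
        if (Instruction *I = dyn_cast_or_null<Instruction>(V))
          Changed |= RecursivelyDeleteTriviallyDeadInstructions(I);
      }
      return Changed;
    }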
@@ -1845,7 +1845,7 @@ class LSRInstance {
void FinalizeChain(IVChain &Chain);
void CollectChains();
void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts);
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts);
void CollectInterestingTypesAndFactors();
void CollectFixupsAndInitialFormulae();
@@ -1920,19 +1920,15 @@ class LSRInstance {
const LSRUse &LU,
SCEVExpander &Rewriter) const;
- Value *Expand(const LSRUse &LU, const LSRFixup &LF,
- const Formula &F,
- BasicBlock::iterator IP,
- SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const;
+ Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
+ BasicBlock::iterator IP, SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
- const Formula &F,
- SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const;
- void Rewrite(const LSRUse &LU, const LSRFixup &LF,
- const Formula &F,
+ const Formula &F, SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
+ void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const;
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);
public:
@@ -3014,7 +3010,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
/// Generate an add or subtract for each IVInc in a chain to materialize the IV
/// user's operand from the previous IV user's operand.
void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) {
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
// Find the new IVOperand for the head of the chain. It may have been replaced
// by LSR.
const IVInc &Head = Chain.Incs[0];
@@ -4759,12 +4755,10 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
/// Emit instructions for the leading candidate expression for this LSRUse (this
/// is called "expanding").
-Value *LSRInstance::Expand(const LSRUse &LU,
- const LSRFixup &LF,
- const Formula &F,
- BasicBlock::iterator IP,
+Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
+ const Formula &F, BasicBlock::iterator IP,
SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const {
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
if (LU.RigidFormula)
return LF.OperandValToReplace;
@@ -4939,12 +4933,9 @@ Value *LSRInstance::Expand(const LSRUse &LU,
/// Helper for Rewrite. PHI nodes are special because the use of their operands
/// effectively happens in their predecessor blocks, so the expression may need
/// to be expanded in multiple places.
-void LSRInstance::RewriteForPHI(PHINode *PN,
- const LSRUse &LU,
- const LSRFixup &LF,
- const Formula &F,
- SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const {
+void LSRInstance::RewriteForPHI(
+ PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
+ SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
DenseMap<BasicBlock *, Value *> Inserted;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
@@ -5016,11 +5007,9 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
/// Emit instructions for the leading candidate expression for this LSRUse (this
/// is called "expanding"), and update the UserInst to reference the newly
/// expanded value.
-void LSRInstance::Rewrite(const LSRUse &LU,
- const LSRFixup &LF,
- const Formula &F,
- SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const {
+void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
+ const Formula &F, SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
// First, find an insertion point that dominates UserInst. For PHI nodes,
// find the nearest block which dominates all the relevant uses.
if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
@@ -5058,7 +5047,7 @@ void LSRInstance::ImplementSolution(
const SmallVectorImpl<const Formula *> &Solution) {
// Keep track of instructions we may have made dead, so that
// we can remove them after we are done working.
- SmallVector<WeakVH, 16> DeadInsts;
+ SmallVector<WeakTrackingVH, 16> DeadInsts;
SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(),
"lsr");
@@ -5308,7 +5297,7 @@ static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
// Remove any extra phis created by processing inner loops.
Changed |= DeleteDeadPHIs(L->getHeader());
if (EnablePhiElim && L->isLoopSimplifyForm()) {
- SmallVector<WeakVH, 16> DeadInsts;
+ SmallVector<WeakTrackingVH, 16> DeadInsts;
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
SCEVExpander Rewriter(SE, DL, "lsr");
#ifndef NDEBUG
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 39289340e03..6ef1464e933 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -1231,11 +1231,12 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
LoopProcessWorklist.push_back(NewLoop);
redoLoop = true;
- // Keep a WeakVH holding onto LIC. If the first call to RewriteLoopBody
+ // Keep a WeakTrackingVH holding onto LIC. If the first call to
+ // RewriteLoopBody
// deletes the instruction (for example by simplifying a PHI that feeds into
// the condition that we're unswitching on), we don't rewrite the second
// iteration.
- WeakVH LICHandle(LIC);
+ WeakTrackingVH LICHandle(LIC);
// Now we rewrite the original code to know that the condition is true and the
// new code to know that the condition is false.
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index c5bf2f28d18..d0bfe360389 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -211,7 +211,8 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
Changed = true;
SE->forgetValue(&*I);
I->replaceAllUsesWith(NewI);
- // If SeenExprs constains I's WeakVH, that entry will be replaced with
+ // If SeenExprs contains I's WeakTrackingVH, that entry will be
+ // replaced with
// nullptr.
RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
I = NewI->getIterator();
@@ -219,7 +220,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
// Add the rewritten instruction to SeenExprs; the original instruction
// is deleted.
const SCEV *NewSCEV = SE->getSCEV(&*I);
- SeenExprs[NewSCEV].push_back(WeakVH(&*I));
+ SeenExprs[NewSCEV].push_back(WeakTrackingVH(&*I));
// Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
// is equivalent to I. However, ScalarEvolution::getSCEV may
// weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
@@ -239,7 +240,7 @@ bool NaryReassociatePass::doOneIteration(Function &F) {
//
// This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
if (NewSCEV != OldSCEV)
- SeenExprs[OldSCEV].push_back(WeakVH(&*I));
+ SeenExprs[OldSCEV].push_back(WeakTrackingVH(&*I));
}
}
}
@@ -494,7 +495,8 @@ NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
// future instruction either. Therefore, we pop it out of the stack. This
// optimization makes the algorithm O(n).
while (!Candidates.empty()) {
- // Candidates stores WeakVHs, so a candidate can be nullptr if it's removed
+ // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
+ // removed
// during rewriting.
if (Value *Candidate = Candidates.back()) {
Instruction *CandidateInstruction = cast<Instruction>(Candidate);
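The pop-as-you-go reasoning above, shown compactly. A sketch assuming LLVM headers and a candidate stack kept in dominating order (each entry dominates the ones pushed after it):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/ValueHandle.h"
    using namespace llvm;

    static Instruction *
    closestDominatingCandidate(SmallVectorImpl<WeakTrackingVH> &Candidates,
                               const DominatorTree &DT, Instruction *Dominatee) {
      while (!Candidates.empty()) {
        if (Value *C = Candidates.back()) {
          Instruction *CandI = cast<Instruction>(C);
          if (DT.dominates(CandI, Dominatee))
            return CandI; // Left on the stack for later queries.
        }
        // Null (deleted) or non-dominating entries cannot serve any later
        // query either, so pop them for good; this keeps the walk O(n).
        Candidates.pop_back();
      }
      return nullptr;
    }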
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 3dcab609078..ef29d414160 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -982,7 +982,7 @@ static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i,
/// Emit a tree of add instructions, summing Ops together
/// and returning the result. Insert the tree before I.
static Value *EmitAddTreeOfValues(Instruction *I,
- SmallVectorImpl<WeakVH> &Ops){
+ SmallVectorImpl<WeakTrackingVH> &Ops) {
if (Ops.size() == 1) return Ops.back();
Value *V1 = Ops.back();
@@ -1559,7 +1559,7 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I,
? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal)
: BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal);
- SmallVector<WeakVH, 4> NewMulOps;
+ SmallVector<WeakTrackingVH, 4> NewMulOps;
for (unsigned i = 0; i != Ops.size(); ++i) {
// Only try to remove factors from expressions we're allowed to.
BinaryOperator *BOp =
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 22af21d55c0..3d5cbfc93f2 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -78,8 +78,8 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB,
bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI) {
// Recursively deleting a PHI may cause multiple PHIs to be deleted
- // or RAUW'd undef, so use an array of WeakVH for the PHIs to delete.
- SmallVector<WeakVH, 8> PHIs;
+ // or RAUW'd undef, so use an array of WeakTrackingVH for the PHIs to delete.
+ SmallVector<WeakTrackingVH, 8> PHIs;
for (BasicBlock::iterator I = BB->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I)
PHIs.push_back(PN);
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 385c12302e0..d5124ac8901 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -245,7 +245,7 @@ namespace {
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
BasicBlock::const_iterator StartingInst,
std::vector<const BasicBlock*> &ToClone){
- WeakVH &BBEntry = VMap[BB];
+ WeakTrackingVH &BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -547,7 +547,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
// Make a second pass over the PHINodes now that all of them have been
// remapped into the new function, simplifying the PHINode and performing any
// recursive simplifications exposed. This will transparently update the
- // WeakVH in the VMap. Notably, we rely on that so that if we coalesce
+ // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
// two PHINodes, the iteration over the old PHIs remains valid, and the
// mapping will just map us to the new node (which may not even be a PHI
// node).
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 364b33d3c4e..ce6b703f352 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -562,7 +562,7 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
// that can be removed.
BB->removePredecessor(Pred, true);
- WeakVH PhiIt = &BB->front();
+ WeakTrackingVH PhiIt = &BB->front();
while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
Value *OldPhiIt = PhiIt;
@@ -1476,7 +1476,7 @@ BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
II->setAttributes(CI->getAttributes());
// Make sure that anything using the call now uses the invoke! This also
- // updates the CallGraph if present, because it uses a WeakVH.
+ // updates the CallGraph if present, because it uses a WeakTrackingVH.
CI->replaceAllUsesWith(II);
// Delete the original call
diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 85684a0a994..4ab4d7949d2 100644
--- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -757,7 +757,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
// Simplify any new induction variables in the partially unrolled loop.
if (SE && !CompletelyUnroll && Count > 1) {
- SmallVector<WeakVH, 16> DeadInsts;
+ SmallVector<WeakTrackingVH, 16> DeadInsts;
simplifyLoopIVs(L, SE, DT, LI, DeadInsts);
// Aggressively clean up dead instructions that simplifyLoopIVs already
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index a4cc6a031ad..02a5d3dbead 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -51,13 +51,13 @@ namespace {
ScalarEvolution *SE;
DominatorTree *DT;
- SmallVectorImpl<WeakVH> &DeadInsts;
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts;
bool Changed;
public:
SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
- LoopInfo *LI,SmallVectorImpl<WeakVH> &Dead)
+ LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead)
: L(Loop), LI(LI), SE(SE), DT(DT), DeadInsts(Dead), Changed(false) {
assert(LI && "IV simplification requires LoopInfo");
}
@@ -701,7 +701,7 @@ void IVVisitor::anchor() { }
/// Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
- LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+ LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
IVVisitor *V) {
SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, Dead);
SIV.simplifyUsers(CurrIV, V);
@@ -711,7 +711,7 @@ bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
/// Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
- LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead) {
+ LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead) {
bool Changed = false;
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
Changed |= simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, Dead);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 55494440470..f112c555205 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3899,11 +3899,13 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
}
/// \brief Check that the Values in the slice of the VL array are still present in
-/// the WeakVH array.
+/// the WeakTrackingVH array.
/// Vectorization of part of the VL array may cause later values in the VL array
-/// to become invalid. We track when this has happened in the WeakVH array.
-static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
- unsigned SliceBegin, unsigned SliceSize) {
+/// to become invalid. We track when this has happened in the WeakTrackingVH
+/// array.
+static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
+ ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
+ unsigned SliceSize) {
VL = VL.slice(SliceBegin, SliceSize);
VH = VH.slice(SliceBegin, SliceSize);
return !std::equal(VL.begin(), VL.end(), VH.begin());
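A sketch of how such a check guards a vectorization loop, assuming LLVM headers (tryToVectorize is a hypothetical callback; the real pass iterates more carefully over VF ranges):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/ValueHandle.h"
    #include <algorithm>
    using namespace llvm;

    static bool vectorizeSlices(ArrayRef<Value *> VL, unsigned VF,
                                function_ref<bool(ArrayRef<Value *>)> TryToVectorize) {
      // Snapshot the candidates: a successful vectorization below nulls or
      // retargets the corresponding handles, making the raw pointers stale.
      SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
      bool Changed = false;
      for (unsigned I = 0, E = VL.size(); I + VF <= E; ++I) {
        ArrayRef<Value *> Slice = VL.slice(I, VF);
        if (!std::equal(Slice.begin(), Slice.end(), TrackValues.begin() + I))
          continue; // Part of this slice was already consumed or deleted.
        Changed |= TryToVectorize(Slice);
      }
      return Changed;
    }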
@@ -3921,7 +3923,7 @@ bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
return false;
// Keep track of values that were deleted by vectorizing in the loop below.
- SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
+ SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());
bool Changed = false;
// Look for profitable vectorizable trees at all offsets, starting at zero.
@@ -4107,7 +4109,7 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
bool Changed = false;
// Keep track of values that were deleted by vectorizing in the loop below.
- SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
+ SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
unsigned NextInst = 0, MaxInst = VL.size();
for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
@@ -4734,7 +4736,7 @@ static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
namespace {
/// Tracks instructions and their children.
-class WeakVHWithLevel final : public CallbackVH {
+class WeakTrackingVHWithLevel final : public CallbackVH {
/// Operand index of the instruction currently being analyzed.
unsigned Level = 0;
/// Is this the instruction that should be vectorized, or are we now
@@ -4743,8 +4745,8 @@ class WeakVHWithLevel final : public CallbackVH {
bool IsInitial = true;
public:
- explicit WeakVHWithLevel() = default;
- WeakVHWithLevel(Value *V) : CallbackVH(V){};
+ explicit WeakTrackingVHWithLevel() = default;
+ WeakTrackingVHWithLevel(Value *V) : CallbackVH(V){};
/// Restart children analysis each time it is replaced by a new instruction.
void allUsesReplacedWith(Value *New) override {
setValPtr(New);
@@ -4771,7 +4773,7 @@ public:
cast<Instruction>(getValPtr())->getNumOperands() > Level);
return cast<Instruction>(getValPtr())->getOperand(Level++);
}
- virtual ~WeakVHWithLevel() = default;
+ virtual ~WeakTrackingVHWithLevel() = default;
};
} // namespace
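For contrast with the weak handles used everywhere else in this patch, the class above builds on CallbackVH, which delivers explicit notifications instead of updating silently. A minimal sketch of those hooks, assuming LLVM headers (the logging is purely illustrative):

    #include "llvm/IR/ValueHandle.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    namespace {
    class NotifyingVH final : public CallbackVH {
    public:
      NotifyingVH() = default;
      NotifyingVH(Value *V) : CallbackVH(V) {}
      // Called when the tracked value is RAUW'd; keep following it, the way
      // WeakTrackingVHWithLevel::allUsesReplacedWith does above.
      void allUsesReplacedWith(Value *New) override {
        errs() << "tracked value replaced\n";
        setValPtr(New);
      }
      // Called just before the tracked value is deleted.
      void deleted() override {
        errs() << "tracked value deleted\n";
        setValPtr(nullptr);
      }
    };
    } // namespace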
@@ -4793,7 +4795,7 @@ static bool canBeVectorized(
if (Root->getParent() != BB)
return false;
- SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
+ SmallVector<WeakTrackingVHWithLevel, 8> Stack(1, Root);
SmallSet<Value *, 8> VisitedInstrs;
bool Res = false;
while (!Stack.empty()) {
@@ -5069,7 +5071,8 @@ bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
// Some of the candidates may have already been vectorized after we
- // initially collected them. If so, the WeakVHs will have nullified the
+ // initially collected them. If so, the WeakTrackingVHs will have
+ // nullified the
// values, so remove them from the set of candidates.
Candidates.remove(nullptr);