| author | Sanjoy Das <sanjoy@playingwithpointers.com> | 2016-06-16 20:47:57 +0000 |
|---|---|---|
| committer | Sanjoy Das <sanjoy@playingwithpointers.com> | 2016-06-16 20:47:57 +0000 |
| commit | 07c6521aed9ae565dd3ee71245bd159fb3355ef1 | |
| tree | 7379564a86cd18106e6d78e5961069febbe35332 /llvm/lib/Transforms/Scalar/EarlyCSE.cpp | |
| parent | c19c3260df7c9745ecc3ac35b358ae0a50c2bbc2 | |
[EarlyCSE] Fold invariant loads
Redundant invariant loads can be CSE'ed with very little extra effort
over what early-cse already tracks, so it looks reasonable to make
early-cse handle this case.
llvm-svn: 272954
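To make the new behaviour concrete, here is a hypothetical IR snippet (not the regression test added with this commit; the function and callee names are made up). The first load carries `!invariant.load`, so its entry in `AvailableLoads` stays usable even after the call bumps the memory generation, and the second load can be CSE'd to it. The second load itself needs no metadata.

```llvm
define i32 @f(i32* %p) {
entry:
  ; Available load: tagged !invariant.load, so its AvailableLoads entry
  ; remains valid even though the call below bumps CurrentGeneration.
  %v1 = load i32, i32* %p, !invariant.load !0
  call void @may_write_memory()
  ; Redundant load: same pointer, no metadata required; with this patch
  ; EarlyCSE replaces %v2 with %v1.
  %v2 = load i32, i32* %p
  %r = add i32 %v1, %v2
  ret i32 %r
}

declare void @may_write_memory()

!0 = !{}
```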
Diffstat (limited to 'llvm/lib/Transforms/Scalar/EarlyCSE.cpp')
-rw-r--r-- | llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 30
1 file changed, 22 insertions, 8 deletions
```diff
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 4145bca367b..48253f62634 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -283,12 +283,14 @@ public:
     unsigned Generation;
     int MatchingId;
     bool IsAtomic;
+    bool IsInvariant;
     LoadValue()
-        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false) {}
+        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
+          IsInvariant(false) {}
     LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
-              bool IsAtomic)
+              bool IsAtomic, bool IsInvariant)
         : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
-          IsAtomic(IsAtomic) {}
+          IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
   };
 
   typedef RecyclingAllocator<BumpPtrAllocator,
                              ScopedHashTableVal<Value *, LoadValue>>
@@ -430,6 +432,11 @@ private:
       return true;
     }
 
+    bool isInvariantLoad() const {
+      if (auto *LI = dyn_cast<LoadInst>(Inst))
+        return LI->getMetadata(LLVMContext::MD_invariant_load);
+      return false;
+    }
+
     bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
       return (getPointerOperand() == Inst.getPointerOperand() &&
@@ -612,9 +619,16 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       }
 
       // If we have an available version of this load, and if it is the right
-      // generation, replace this instruction.
+      // generation or the load is known to be from an invariant location,
+      // replace this instruction.
+      //
+      // A dominating invariant load implies that the location loaded from is
+      // unchanging beginning at the point of the invariant load, so the load
+      // we're CSE'ing _away_ does not need to be invariant, only the available
+      // load we're CSE'ing _to_ does.
       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
-      if (InVal.DefInst != nullptr && InVal.Generation == CurrentGeneration &&
+      if (InVal.DefInst != nullptr &&
+          (InVal.Generation == CurrentGeneration || InVal.IsInvariant) &&
           InVal.MatchingId == MemInst.getMatchingId() &&
           // We don't yet handle removing loads with ordering of any kind.
           !MemInst.isVolatile() && MemInst.isUnordered() &&
@@ -637,7 +651,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       AvailableLoads.insert(
           MemInst.getPointerOperand(),
           LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
-                    MemInst.isAtomic()));
+                    MemInst.isAtomic(), MemInst.isInvariantLoad()));
       LastStore = nullptr;
       continue;
     }
@@ -749,7 +763,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       AvailableLoads.insert(
           MemInst.getPointerOperand(),
           LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
-                    MemInst.isAtomic()));
+                    MemInst.isAtomic(), false));
 
       // Remember that this was the last unordered store we saw for DSE. We
       // don't yet handle DSE on ordered or volatile stores since we don't
```
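The `false` passed in the last hunk is the store path: a value made available to later loads by a store is deliberately never treated as invariant, so forwarding it across a clobbering call is still blocked by the generation check. A hypothetical snippet (again not from the commit's tests, names made up); other passes such as GVN may still catch this, but EarlyCSE at this revision leaves the load in place:

```llvm
define i32 @g(i32* %p) {
entry:
  ; The store records an AvailableLoads entry with IsInvariant = false,
  ; so the generation bump caused by the call blocks store-to-load forwarding.
  store i32 42, i32* %p
  call void @may_write_memory()
  %v = load i32, i32* %p        ; not replaced with 42 by EarlyCSE
  ret i32 %v
}

declare void @may_write_memory()
```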