author     Eli Friedman <eli.friedman@gmail.com>    2016-08-08 04:10:22 +0000
committer  Eli Friedman <eli.friedman@gmail.com>    2016-08-08 04:10:22 +0000
commit     02419a98499704e98236cda2e9b1b3c996ad4908 (patch)
tree       f9c507ee5717bd9a24076682962c5fac36b7c944 /llvm/lib/Transforms/Scalar/JumpThreading.cpp
parent     d931b9f20060ce3751614e889fa4629db13e9e66 (diff)
[JumpThreading] Fix handling of aliasing metadata.
Summary:
The correctness fix here is that when we CSE a load with another load,
we need to combine the metadata on the two loads. This matches the
behavior of other passes, like instcombine and GVN.
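
For illustration, here is a minimal C++ sketch (a hypothetical helper, not code from this patch) of the CSE pattern the fix targets: when a redundant load is folded into an earlier equivalent load, the surviving load must keep only metadata that is valid for both, which is what combineMetadataForCSE (from llvm/Transforms/Utils/Local.h) does.

    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/Local.h"

    using namespace llvm;

    // Hypothetical helper: fold `Redundant` into the earlier, equivalent `Earlier`.
    // combineMetadataForCSE intersects or drops metadata such as !tbaa,
    // !alias.scope, !noalias and !range so the kept load does not carry tags
    // that were only valid for one of the two loads. This is the same
    // two-argument call the patch adds in SimplifyPartiallyRedundantLoad.
    static void foldRedundantLoad(LoadInst *Earlier, LoadInst *Redundant) {
      combineMetadataForCSE(Earlier, Redundant);
      Redundant->replaceAllUsesWith(Earlier);
      Redundant->eraseFromParent();
    }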
There's also a minor optimization improvement here: for load PRE, the
aliasing metadata on the inserted load should be the same as the
metadata on the original load. Not sure why the old code was throwing
it away.
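
For the PRE case, a minimal sketch (hypothetical helper name, using the AAMDNodes accessors as they existed around this commit) of keeping the original load's aliasing metadata on a newly inserted predecessor load instead of discarding it:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Metadata.h"

    using namespace llvm;

    // Hypothetical helper: give a load inserted for PRE the same AA metadata
    // (!tbaa, !alias.scope, !noalias) as the original load it stands in for,
    // since both read the same location.
    static void copyAliasMetadata(const LoadInst *OrigLI, LoadInst *NewLI) {
      AAMDNodes AATags;
      OrigLI->getAAMetadata(AATags);
      NewLI->setAAMetadata(AATags);
    }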
Issue found by inspection.
Differential Revision: http://reviews.llvm.org/D21460
llvm-svn: 277977
Diffstat (limited to 'llvm/lib/Transforms/Scalar/JumpThreading.cpp')
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpThreading.cpp  22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index d1769fc3ebb..4093cc57dfc 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -951,12 +951,17 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
   // Scan a few instructions up from the load, to see if it is obviously live at
   // the entry to its block.
   BasicBlock::iterator BBIt(LI);
-
+  bool IsLoadCSE;
   if (Value *AvailableVal =
-      FindAvailableLoadedValue(LI, LoadBB, BBIt, DefMaxInstsToScan)) {
+      FindAvailableLoadedValue(LI, LoadBB, BBIt, DefMaxInstsToScan, nullptr, &IsLoadCSE)) {
     // If the value of the load is locally available within the block, just use
     // it.  This frequently occurs for reg2mem'd allocas.
 
+    if (IsLoadCSE) {
+      LoadInst *NLI = cast<LoadInst>(AvailableVal);
+      combineMetadataForCSE(NLI, LI);
+    };
+
     // If the returned value is the load itself, replace with an undef. This can
     // only happen in dead loops.
     if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
@@ -983,6 +988,7 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
   typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
   AvailablePredsTy AvailablePreds;
   BasicBlock *OneUnavailablePred = nullptr;
+  SmallVector<LoadInst*, 8> CSELoads;
 
   // If we got here, the loaded value is transparent through to the start of the
   // block.  Check to see if it is available in any of the predecessor blocks.
@@ -993,17 +999,17 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
 
     // Scan the predecessor to see if the value is available in the pred.
     BBIt = PredBB->end();
-    AAMDNodes ThisAATags;
     Value *PredAvailable = FindAvailableLoadedValue(LI, PredBB, BBIt,
                                                     DefMaxInstsToScan,
-                                                    nullptr, &ThisAATags);
+                                                    nullptr,
+                                                    &IsLoadCSE);
     if (!PredAvailable) {
       OneUnavailablePred = PredBB;
       continue;
     }
 
-    // If AA tags disagree or are not present, forget about them.
-    if (AATags != ThisAATags) AATags = AAMDNodes();
+    if (IsLoadCSE)
+      CSELoads.push_back(cast<LoadInst>(PredAvailable));
 
     // If so, this load is partially redundant.  Remember this info so that we
     // can create a PHI node.
@@ -1101,6 +1107,10 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
     PN->addIncoming(PredV, I->first);
   }
 
+  for (LoadInst *PredLI : CSELoads) {
+    combineMetadataForCSE(PredLI, LI);
+  }
+
   LI->replaceAllUsesWith(PN);
   LI->eraseFromParent();