Diffstat (limited to 'llvm/lib/Transforms/Scalar/JumpThreading.cpp')
 llvm/lib/Transforms/Scalar/JumpThreading.cpp | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index de1d7d50384..b9e717cf763 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -924,8 +924,8 @@ bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
 /// important optimization that encourages jump threading, and needs to be run
 /// interlaced with other jump threading tasks.
 bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
-  // Don't hack volatile/atomic loads.
-  if (!LI->isSimple()) return false;
+  // Don't hack volatile and ordered loads.
+  if (!LI->isUnordered()) return false;
 
   // If the load is defined in a block with exactly one predecessor, it can't be
   // partially redundant.
@@ -1055,9 +1055,10 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
   if (UnavailablePred) {
     assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
-    LoadInst *NewVal = new LoadInst(LoadedPtr, LI->getName()+".pr", false,
-                                    LI->getAlignment(),
-                                    UnavailablePred->getTerminator());
+    LoadInst *NewVal =
+        new LoadInst(LoadedPtr, LI->getName() + ".pr", false,
+                     LI->getAlignment(), LI->getOrdering(), LI->getSynchScope(),
+                     UnavailablePred->getTerminator());
     NewVal->setDebugLoc(LI->getDebugLoc());
     if (AATags)
       NewVal->setAAMetadata(AATags);
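
Note (not part of the commit): the guard change in the first hunk widens the set of loads the pass will consider. isSimple() rejects every atomic load, while isUnordered() still rejects volatile loads and any ordering of monotonic or stronger, but additionally admits atomic loads with unordered ordering. A minimal sketch of that distinction, assuming only the LoadInst API visible in this revision (eligibleForLoadPRE is a hypothetical name, not a function in the pass):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch of the predicate the patch switches to. isSimple() means
// !isVolatile() && !isAtomic(); isUnordered() also accepts the case where
// the ordering is Unordered, so a load such as
//   %v = load atomic i32, i32* %p unordered, align 4
// is now eligible for the partial-redundancy rewrite.
static bool eligibleForLoadPRE(const LoadInst *LI) {
  return LI->isUnordered();
}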
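
The second hunk keeps that widening sound: the load speculated into the unavailable predecessor now carries the original load's atomic ordering and synchronization scope instead of silently becoming a plain load. A hedged, self-contained sketch of the same construction (speculateLoadInPred, LoadedPtr, and InsertPt are illustrative names, not identifiers from the pass), using the LoadInst constructor that the added lines call:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Clone LI's memory semantics onto a new load of LoadedPtr placed at
// InsertPt: non-volatile (guaranteed by the isUnordered() guard), same
// alignment, same atomic ordering, same synchronization scope.
static LoadInst *speculateLoadInPred(LoadInst *LI, Value *LoadedPtr,
                                     Instruction *InsertPt) {
  LoadInst *NewVal =
      new LoadInst(LoadedPtr, LI->getName() + ".pr", /*isVolatile=*/false,
                   LI->getAlignment(), LI->getOrdering(), LI->getSynchScope(),
                   InsertPt);
  NewVal->setDebugLoc(LI->getDebugLoc());
  return NewVal;
}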