diff options
| author | David Majnemer <david.majnemer@gmail.com> | 2015-07-14 06:19:58 +0000 |
|---|---|---|
| committer | David Majnemer <david.majnemer@gmail.com> | 2015-07-14 06:19:58 +0000 |
| commit | 62690b195209faa25cf2f98ccb0669bc821b0cfe (patch) | |
| tree | 682ecc04ff35f874a13476efe457a4e85d417fef /llvm | |
| parent | 4ca1903696af3408a27f9f5766b71ed7b0dbf996 (diff) | |
| download | bcm5719-llvm-62690b195209faa25cf2f98ccb0669bc821b0cfe.tar.gz bcm5719-llvm-62690b195209faa25cf2f98ccb0669bc821b0cfe.zip | |
[SROA] Don't de-atomic volatile loads and stores
Volatile loads and stores are made visible in global state regardless of
what memory is involved. It is not correct to disregard the ordering
and synchronization scope because it is possible to synchronize with
memory operations performed by hardware.
This partially addresses PR23737.
llvm-svn: 242126
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Transforms/Scalar/SROA.cpp | 21 | ||||
| -rw-r--r-- | llvm/test/Transforms/SROA/basictest.ll | 11 |
2 files changed, 26 insertions, 6 deletions
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 056dd11b5ab..d1a0a82b9b0 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -2593,13 +2593,21 @@ private: V = rewriteIntegerLoad(LI); } else if (NewBeginOffset == NewAllocaBeginOffset && canConvertValue(DL, NewAllocaTy, LI.getType())) { - V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), LI.isVolatile(), - LI.getName()); + LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + LI.isVolatile(), LI.getName()); + if (LI.isVolatile()) + NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); + + V = NewLI; } else { Type *LTy = TargetTy->getPointerTo(); - V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), - getSliceAlign(TargetTy), LI.isVolatile(), - LI.getName()); + LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), + getSliceAlign(TargetTy), + LI.isVolatile(), LI.getName()); + if (LI.isVolatile()) + NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); + + V = NewLI; IsPtrAdjusted = true; } V = convertValue(DL, IRB, V, TargetTy); @@ -2722,7 +2730,8 @@ private: NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), SI.isVolatile()); } - (void)NewSI; + if (SI.isVolatile()) + NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope()); Pass.DeadInsts.insert(&SI); deleteIfTriviallyDead(OldOp); diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll index a59192d718c..7c8955b28fa 100644 --- a/llvm/test/Transforms/SROA/basictest.ll +++ b/llvm/test/Transforms/SROA/basictest.ll @@ -1595,3 +1595,14 @@ entry: store i32 %load, i32* %a.gep1 ret void } + +define void @PR23737() { +; CHECK-LABEL: @PR23737( +; CHECK: store atomic volatile {{.*}} seq_cst +; CHECK: load atomic volatile {{.*}} seq_cst +entry: + %ptr = alloca i64, align 8 + store atomic volatile i64 0, i64* %ptr seq_cst, align 8 + %load = load atomic volatile i64, i64* %ptr seq_cst, 
align 8
+  ret void
+}

