author | Evgeniy Stepanov <eugeni.stepanov@gmail.com> | 2013-09-24 11:20:27 +0000 |
---|---|---|
committer | Evgeniy Stepanov <eugeni.stepanov@gmail.com> | 2013-09-24 11:20:27 +0000 |
commit | 5522a70674adec940d82ec7e929045613cd98283 (patch) | |
tree | 9aac09784f210939438a22a1a19a068501768e65 /llvm/lib/Transforms | |
parent | fd538dc745fb7fc5572cbc63bfb3061a020ef414 (diff) | |
download | bcm5719-llvm-5522a70674adec940d82ec7e929045613cd98283.tar.gz bcm5719-llvm-5522a70674adec940d82ec7e929045613cd98283.zip |
[msan] Handling of atomic load/store, atomic rmw, cmpxchg.
llvm-svn: 191287
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r-- | llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 98 |
1 file changed, 96 insertions, 2 deletions
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index cab7a7a0190..eafa2b6165e 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -66,6 +66,31 @@
 /// avoids storing origin to memory when a fully initialized value is stored.
 /// This way it avoids needless overwritting origin of the 4-byte region on
 /// a short (i.e. 1 byte) clean store, and it is also good for performance.
+///
+/// Atomic handling.
+///
+/// Ideally, every atomic store of application value should update the
+/// corresponding shadow location in an atomic way. Unfortunately, atomic store
+/// of two disjoint locations can not be done without severe slowdown.
+///
+/// Therefore, we implement an approximation that may err on the safe side.
+/// In this implementation, every atomically accessed location in the program
+/// may only change from (partially) uninitialized to fully initialized, but
+/// not the other way around. We load the shadow _after_ the application load,
+/// and we store the shadow _before_ the app store. Also, we always store clean
+/// shadow (if the application store is atomic). This way, if the store-load
+/// pair constitutes a happens-before arc, shadow store and load are correctly
+/// ordered such that the load will get either the value that was stored, or
+/// some later value (which is always clean).
+///
+/// This does not work very well with Compare-And-Swap (CAS) and
+/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
+/// must store the new shadow before the app operation, and load the shadow
+/// after the app operation. Computers don't work this way. Current
+/// implementation ignores the load aspect of CAS/RMW, always returning a clean
+/// value. It implements the store part as a simple atomic store by storing a
+/// clean shadow.
+
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "msan"
@@ -487,7 +512,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       IRBuilder<> IRB(&I);
       Value *Val = I.getValueOperand();
       Value *Addr = I.getPointerOperand();
-      Value *Shadow = getShadow(Val);
+      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
       Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
 
       StoreInst *NewSI =
@@ -498,6 +523,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       if (ClCheckAccessAddress)
         insertCheck(Addr, &I);
 
+      if (I.isAtomic())
+        I.setOrdering(addReleaseOrdering(I.getOrdering()));
+
       if (MS.TrackOrigins) {
         unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
         if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
@@ -876,6 +904,38 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
   }
 
+  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
+    switch (a) {
+      case NotAtomic:
+        return NotAtomic;
+      case Unordered:
+      case Monotonic:
+      case Release:
+        return Release;
+      case Acquire:
+      case AcquireRelease:
+        return AcquireRelease;
+      case SequentiallyConsistent:
+        return SequentiallyConsistent;
+    }
+  }
+
+  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
+    switch (a) {
+      case NotAtomic:
+        return NotAtomic;
+      case Unordered:
+      case Monotonic:
+      case Acquire:
+        return Acquire;
+      case Release:
+      case AcquireRelease:
+        return AcquireRelease;
+      case SequentiallyConsistent:
+        return SequentiallyConsistent;
+    }
+  }
+
   // ------------------- Visitors.
 
   /// \brief Instrument LoadInst
@@ -884,7 +944,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   /// Optionally, checks that the load address is fully defined.
   void visitLoadInst(LoadInst &I) {
     assert(I.getType()->isSized() && "Load type must have size");
-    IRBuilder<> IRB(&I);
+    IRBuilder<> IRB(I.getNextNode());
     Type *ShadowTy = getShadowTy(&I);
     Value *Addr = I.getPointerOperand();
     if (LoadShadow) {
@@ -898,6 +958,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (ClCheckAccessAddress)
       insertCheck(I.getPointerOperand(), &I);
 
+    if (I.isAtomic())
+      I.setOrdering(addAcquireOrdering(I.getOrdering()));
+
     if (MS.TrackOrigins) {
       if (LoadShadow) {
         unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
@@ -917,6 +980,37 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     StoreList.push_back(&I);
   }
 
+  void handleCASOrRMW(Instruction &I) {
+    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
+
+    IRBuilder<> IRB(&I);
+    Value *Addr = I.getOperand(0);
+    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
+
+    if (ClCheckAccessAddress)
+      insertCheck(Addr, &I);
+
+    // Only test the conditional argument of cmpxchg instruction.
+    // The other argument can potentially be uninitialized, but we can not
+    // detect this situation reliably without possible false positives.
+    if (isa<AtomicCmpXchgInst>(I))
+      insertCheck(I.getOperand(1), &I);
+
+    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
+
+    setShadow(&I, getCleanShadow(&I));
+  }
+
+  void visitAtomicRMWInst(AtomicRMWInst &I) {
+    handleCASOrRMW(I);
+    I.setOrdering(addReleaseOrdering(I.getOrdering()));
+  }
+
+  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+    handleCASOrRMW(I);
+    I.setOrdering(addReleaseOrdering(I.getOrdering()));
+  }
+
   // Vector manipulation.
   void visitExtractElementInst(ExtractElementInst &I) {
     insertCheck(I.getOperand(1), &I);
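
Note (not part of the patch): the approximation described in the comment block above can be illustrated at the user level. The sketch below is not output of the pass; `shadow_x` is a hypothetical stand-in for the shadow memory MSan actually maintains. The clean shadow is stored before the application's atomic store and the shadow is loaded after the application's atomic load, with orderings strengthened to at least release/acquire, so a happens-before arc between the application accesses carries over to the shadow accesses.

```cpp
#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> x{0};
std::atomic<int> shadow_x{-1};  // -1 models a poisoned (uninitialized) shadow

void writer() {
  // Shadow store first, always clean, at least Release.
  shadow_x.store(0, std::memory_order_release);
  // Application store second.
  x.store(42, std::memory_order_release);
}

void reader() {
  // Application load first.
  int v = x.load(std::memory_order_acquire);
  // Shadow load second, at least Acquire.
  int sv = shadow_x.load(std::memory_order_acquire);
  // If the app load observed the app store, the happens-before arc guarantees
  // the shadow load sees the clean shadow (or a later, also clean, value).
  if (v == 42)
    assert(sv == 0);
}

int main() {
  std::thread t1(writer), t2(reader);
  t1.join();
  t2.join();
  return 0;
}
```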
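The ordering adjustment itself can be read off addReleaseOrdering/addAcquireOrdering in the patch. As a rough illustration only, the same mapping expressed over std::memory_order (with consume treated like acquire, an assumption not present in the patch) looks like this:

```cpp
#include <atomic>
#include <cassert>

// Strengthen an ordering so the shadow store has at least release semantics,
// mirroring MemorySanitizerVisitor::addReleaseOrdering above.
std::memory_order add_release(std::memory_order mo) {
  switch (mo) {
  case std::memory_order_relaxed:
  case std::memory_order_release:
    return std::memory_order_release;
  case std::memory_order_consume:  // treated like acquire here (assumption)
  case std::memory_order_acquire:
  case std::memory_order_acq_rel:
    return std::memory_order_acq_rel;
  case std::memory_order_seq_cst:
    return std::memory_order_seq_cst;
  }
  return std::memory_order_seq_cst;  // unreachable
}

// Strengthen an ordering so the shadow load has at least acquire semantics,
// mirroring addAcquireOrdering.
std::memory_order add_acquire(std::memory_order mo) {
  switch (mo) {
  case std::memory_order_relaxed:
  case std::memory_order_consume:
  case std::memory_order_acquire:
    return std::memory_order_acquire;
  case std::memory_order_release:
  case std::memory_order_acq_rel:
    return std::memory_order_acq_rel;
  case std::memory_order_seq_cst:
    return std::memory_order_seq_cst;
  }
  return std::memory_order_seq_cst;  // unreachable
}

int main() {
  // A relaxed application store gets a release-ordered shadow store...
  assert(add_release(std::memory_order_relaxed) == std::memory_order_release);
  // ...and a relaxed application load gets an acquire-ordered shadow load.
  assert(add_acquire(std::memory_order_relaxed) == std::memory_order_acquire);
  return 0;
}
```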