diff options
-rw-r--r--  llvm/include/llvm/Analysis/AliasAnalysis.h          |  8 ++++++++
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp                 |  8 ++++----
-rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp             | 26 +++++++++++++++++++---
-rw-r--r--  llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll |  1 -
4 files changed, 36 insertions(+), 7 deletions(-)
diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 484df2f55d9..1b8b9751faa 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -524,6 +524,14 @@ public:
   /// Check whether or not an instruction may read or write the specified
   /// memory location.
   ///
+  /// Note explicitly that getModRefInfo considers the effects of reading and
+  /// writing the memory location, and not the effect of ordering relative to
+  /// other instructions. Thus, a volatile load is considered to be Ref,
+  /// because it does not actually write memory, it just can't be reordered
+  /// relative to other volatiles (or removed). Atomic ordered loads/stores are
+  /// considered ModRef ATM because conservatively, the visible effect appears
+  /// as if memory was written, not just an ordering constraint.
+  ///
   /// An instruction that doesn't read or write memory may be trivially LICM'd
   /// for example.
   ///
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index fbc030b26aa..4c6423d5c17 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -332,8 +332,8 @@ FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
 
 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                     const MemoryLocation &Loc) {
-  // Be conservative in the face of volatile/atomic.
-  if (!L->isUnordered())
+  // Be conservative in the face of atomic.
+  if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
     return MRI_ModRef;
 
   // If the load address doesn't alias the given address, it doesn't read
@@ -347,8 +347,8 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
 
 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                     const MemoryLocation &Loc) {
-  // Be conservative in the face of volatile/atomic.
-  if (!S->isUnordered())
+  // Be conservative in the face of atomic.
+  if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
     return MRI_ModRef;
 
   if (Loc.Ptr) {
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index bbf8eff8b73..d1cf6f4d655 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -849,7 +849,8 @@ struct RenamePassData {
 } // anonymous namespace
 
 namespace llvm {
-/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no longer does caching on its own,
+/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
+/// longer does caching on its own,
 /// but the name has been retained for the moment.
 class MemorySSA::CachingWalker final : public MemorySSAWalker {
   ClobberWalker Walker;
@@ -1456,6 +1457,19 @@ MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
   return NewAccess;
 }
 
+// Return true if the instruction has ordering constraints.
+// Note specifically that this only considers stores and loads
+// because others are still considered ModRef by getModRefInfo.
+static inline bool isOrdered(const Instruction *I) {
+  if (auto *SI = dyn_cast<StoreInst>(I)) {
+    if (!SI->isUnordered())
+      return true;
+  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
+    if (!LI->isUnordered())
+      return true;
+  }
+  return false;
+}
 /// \brief Helper function to create new memory accesses
 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
   // The assume intrinsic has a control dependency which we model by claiming
@@ -1468,7 +1482,15 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
 
   // Find out what affect this instruction has on memory.
   ModRefInfo ModRef = AA->getModRefInfo(I);
-  bool Def = bool(ModRef & MRI_Mod);
+  // The isOrdered check is used to ensure that volatiles end up as defs
+  // (atomics end up as ModRef right now anyway). Until we separate the
+  // ordering chain from the memory chain, this enables people to see at least
+  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
+  // will still give an answer that bypasses other volatile loads. TODO:
+  // Separate memory aliasing and ordering into two different chains so that we
+  // can precisely represent both "what memory will this read/write/is clobbered
+  // by" and "what instructions can I move this past".
+  bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
   bool Use = bool(ModRef & MRI_Ref);
 
   // It's possible for an instruction to not modify memory at all. During
diff --git a/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll b/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
index 8c74f8b28ef..46d29bad0f4 100644
--- a/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
+++ b/llvm/test/Transforms/NewGVN/volatile-nonvolatile.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; RUN: opt -tbaa -newgvn -S < %s | FileCheck %s
 
 %struct.t = type { i32* }