author     Philip Reames <listmail@philipreames.com>  2015-01-26 18:54:27 +0000
committer  Philip Reames <listmail@philipreames.com>  2015-01-26 18:54:27 +0000
commit     a7ad6a589c30bf316b2e0e9456f521de12d7a679 (patch)
tree       bcca7a98a004d17ec779b0d3eb722e211ee1f742 /llvm/lib
parent     805bc02c2b9500e6ae62dac5a075eb732ac83597 (diff)
Refine memory dependence's notion of volatile semantics
According to my reading of the LangRef, volatiles are ordered only with respect to other volatiles. It is entirely legal and profitable to forward unrelated loads over a volatile load. This patch implements this for GVN by refining the transition rules MemoryDependenceAnalysis uses when encountering a volatile.

The added test cases show where the extra flexibility is profitable for local dependence optimizations. I have a related change (227110) which will extend this to non-local dependence (i.e. PRE), but that's essentially orthogonal to the semantic change in this patch. I have tested the two together and can confirm that PRE works over a volatile load with both changes. I will be submitting a PRE w/volatiles test case separately in the near future.

Differential Revision: http://reviews.llvm.org/D6901

llvm-svn: 227112
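As a source-level illustration of the forwarding this enables (a hypothetical C++ sketch, not one of the test cases added by this patch; it assumes p and q are known not to alias, since may-aliasing accesses are still reported as clobbers by the ordinary alias checks):

    // Hypothetical example; assumes p and q do not alias.
    int f(int *p, volatile int *q) {
      int a = *p; // ordinary load
      int b = *q; // volatile load: must be kept, and stays ordered
                  // against other volatile accesses
      int c = *p; // before this patch: the volatile load above was
                  // treated as a clobber, blocking forwarding;
                  // after: GVN forwards 'a' and this reload folds away
      return a + b + c;
    }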
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 0f3307c21b1..40f5f70084b 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -362,6 +362,17 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
}
}
+static bool isVolatile(Instruction *Inst) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ return LI->isVolatile();
+ else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ return SI->isVolatile();
+ else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
+ return AI->isVolatile();
+ return false;
+}
+
+
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases with
/// read-only operations. If isLoad is false, this routine ignores may-aliases
@@ -448,12 +459,26 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// does not alias with when this atomic load indicates that another thread may
// be accessing the location.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+
+ // While volatile accesses cannot be eliminated, they do not have to clobber
+ // non-aliasing locations; normal accesses, for example, can be safely
+ // reordered with volatile accesses.
+ if (LI->isVolatile()) {
+ if (!QueryInst)
+ // Original QueryInst *may* be volatile
+ return MemDepResult::getClobber(LI);
+ if (isVolatile(QueryInst))
+ // Ordering required if QueryInst is itself volatile
+ return MemDepResult::getClobber(LI);
+ // Otherwise, volatile doesn't imply any special ordering
+ }
+
// Atomic loads have complications involved.
// A Monotonic (or higher) load is OK if the query inst is itself not atomic.
// An Acquire (or higher) load sets the HasSeenAcquire flag, so that any
// release store will know to return getClobber.
// FIXME: This is overly conservative.
- if (!LI->isUnordered()) {
+ if (LI->isAtomic() && LI->getOrdering() > Unordered) {
if (!QueryInst)
return MemDepResult::getClobber(LI);
if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
@@ -470,13 +495,6 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
HasSeenAcquire = true;
}
- // FIXME: this is overly conservative.
- // While volatile access cannot be eliminated, they do not have to clobber
- // non-aliasing locations, as normal accesses can for example be reordered
- // with volatile accesses.
- if (LI->isVolatile())
- return MemDepResult::getClobber(LI);
-
AliasAnalysis::Location LoadLoc = AA->getLocation(LI);
// If we found a pointer, check if it could be the same as our pointer.
@@ -890,14 +908,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
// Doing so would require piping through the QueryInst all the way through.
// TODO: volatiles can't be elided, but they can be reordered with other
// non-volatile accesses.
- auto isVolatile = [](Instruction *Inst) {
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- return LI->isVolatile();
- } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- return SI->isVolatile();
- }
- return false;
- };
+
// We currently give up on any instruction which is ordered, but we do handle
// atomic instructions which are unordered.
// TODO: Handle ordered instructions
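Restated outside the diff, the scan's behavior on reaching a volatile load reduces to a three-way decision on the query instruction. A minimal standalone paraphrase of that rule (the types below are stubs invented for illustration, not the LLVM classes):

    // Stub paraphrase of the new transition rule; not the LLVM sources.
    struct Instr { bool isVolatileAccess; };
    enum class Dep { Clobber, KeepScanning };

    // queryInst == nullptr models the "no query instruction" case.
    Dep transitionAtVolatileLoad(const Instr *queryInst) {
      if (!queryInst)
        return Dep::Clobber;    // query may itself be volatile: stay conservative
      if (queryInst->isVolatileAccess)
        return Dep::Clobber;    // volatile-volatile ordering must be preserved
      return Dep::KeepScanning; // fall through to the ordinary alias checks
    }

The asymmetry is the point of the patch: only a query known to be non-volatile may scan past a volatile load, which is also why the hoisted isVolatile helper recognizes cmpxchg in addition to the loads and stores the removed lambda handled.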