summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
authorPhilip Reames <listmail@philipreames.com>2016-05-06 18:17:13 +0000
committerPhilip Reames <listmail@philipreames.com>2016-05-06 18:17:13 +0000
commitae8997f496ed9a0c1548129983a7ad974939553c (patch)
treea15e8415d1044a8001e0bd09fa3247a2618827ea /llvm/lib
parent31407ba009c80d56e83b2b0e6d72c1ae9aebc430 (diff)
downloadbcm5719-llvm-ae8997f496ed9a0c1548129983a7ad974939553c.tar.gz
bcm5719-llvm-ae8997f496ed9a0c1548129983a7ad974939553c.zip
[GVN] Do local FRE for unordered atomic loads
This patch is the first in a small series teaching GVN to optimize unordered loads aggressively. This change just handles block local FRE because that's the simplest thing which lets me test MDA, and the AvailableValue pieces. Somewhat surprisingly, MDA appears fine and only a couple of small changes are needed in GVN. Once this is in, I'll tackle non-local FRE and PRE. The former looks like a natural extension of this, the latter will require a couple of minor changes. Differential Revision: http://reviews.llvm.org/D19440 llvm-svn: 268770
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Transforms/Scalar/GVN.cpp25
1 files changed, 21 insertions, 4 deletions
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 9565dc8c3dc..116dc69b372 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1219,6 +1219,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
assert((DepInfo.isDef() || DepInfo.isClobber()) &&
"expected a local dependence");
+ assert(LI->isUnordered() && "rules below are incorrect for ordered access");
const DataLayout &DL = LI->getModule()->getDataLayout();
@@ -1227,7 +1228,8 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (Address) {
+ // Can't forward from non-atomic to atomic without violating memory model.
+ if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
int Offset =
AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
if (Offset != -1) {
@@ -1244,7 +1246,8 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
- if (DepLI != LI && Address) {
+ // Can't forward from non-atomic to atomic without violating memory model.
+ if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
int Offset =
AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
@@ -1258,7 +1261,7 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (Address) {
+ if (Address && !LI->isAtomic()) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
DepMI, DL);
if (Offset != -1) {
@@ -1304,6 +1307,10 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
LI->getType(), DL))
return false;
+ // Can't forward from non-atomic to atomic without violating memory model.
+ if (S->isAtomic() < LI->isAtomic())
+ return false;
+
Res = AvailableValue::get(S->getValueOperand());
return true;
}
@@ -1316,6 +1323,10 @@ bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
return false;
+ // Can't forward from non-atomic to atomic without violating memory model.
+ if (LD->isAtomic() < LI->isAtomic())
+ return false;
+
Res = AvailableValue::getLoad(LD);
return true;
}
@@ -1587,6 +1598,11 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeAddress))
return false;
+ // This code hasn't been audited for atomic, ordered, or volatile memory
+ // access.
+ if (!LI->isSimple())
+ return false;
+
// Step 1: Find the non-local dependencies of the load.
LoadDepVect Deps;
MD->getNonLocalPointerDependency(LI, Deps);
@@ -1755,7 +1771,8 @@ bool GVN::processLoad(LoadInst *L) {
if (!MD)
return false;
- if (!L->isSimple())
+ // This code hasn't been audited for ordered or volatile memory access
+ if (!L->isUnordered())
return false;
if (L->use_empty()) {
OpenPOWER on IntegriCloud