author     George Burgess IV <george.burgess.iv@gmail.com>   2016-08-03 19:57:02 +0000
committer  George Burgess IV <george.burgess.iv@gmail.com>   2016-08-03 19:57:02 +0000
commit     024f3d2683e77428955a3151ba42ab53b031e4c0 (patch)
tree       3c134c15d6583b21d02efa562fa8a3855e30007e /llvm
parent     2e9675ff5247da937b1594f06fd5b8bbab7c5bf1 (diff)
[MSSA] Add special handling for invariant/constant loads.
This is a follow-up to r277637. It teaches MemorySSA that invariant
loads (and loads of provably constant memory) can never be clobbered,
so their defining access is always liveOnEntry.
llvm-svn: 277640
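
For context, !invariant.load is the load metadata this patch keys on: it promises that the loaded location is never mutated while it is dereferenceable, so no MemoryDef can clobber the load. Below is a minimal sketch, not part of this commit, of tagging a load with that metadata through the C++ API; the helper name createInvariantLoad is hypothetical, and the caller is assumed to supply the IRBuilder, context, type, and pointer.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"

    using namespace llvm;

    // Hypothetical helper: emit a load and mark it !invariant.load, the
    // metadata this patch special-cases. (Loads of provably constant memory
    // are instead detected via AliasAnalysis::pointsToConstantMemory.)
    static LoadInst *createInvariantLoad(IRBuilder<> &B, LLVMContext &Ctx,
                                         Type *Ty, Value *Ptr) {
      LoadInst *LI = B.CreateLoad(Ty, Ptr); // older LLVM: B.CreateLoad(Ptr)
      // !invariant.load takes an empty metadata node; its presence is what
      // isUseTriviallyOptimizableToLiveOnEntry checks for below.
      LI->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
      return LI;
    }

The unit test in the diff below uses the same setMetadata call, but only after MemorySSA has been built, so that the walker rather than the use optimizer is exercised.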
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp                23
-rw-r--r--  llvm/test/Transforms/Util/MemorySSA/load-invariant.ll  18
-rw-r--r--  llvm/unittests/Transforms/Utils/MemorySSA.cpp          33
3 files changed, 73 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index 16ece32a296..f90ab6eac2b 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -170,6 +170,7 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
   }
 };
 }
+
 namespace {
 struct UpwardsMemoryQuery {
   // True if our original query started off as a call
@@ -251,6 +252,17 @@ static Reorderability getLoadReorderability(const LoadInst *Use,
   return Result;
 }
 
+static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
+                                                   const Instruction *I) {
+  // If the memory can't be changed, then loads of the memory can't be
+  // clobbered.
+  //
+  // FIXME: We should handle invariant groups, as well. It's a bit harder,
+  // because we need to pay close attention to invariant group barriers.
+  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
+                              AA.pointsToConstantMemory(I));
+}
+
 static bool instructionClobbersQuery(MemoryDef *MD,
                                      const MemoryLocation &UseLoc,
                                      const Instruction *UseInst,
@@ -1332,6 +1344,11 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
       continue;
     }
 
+    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
+      MU->setDefiningAccess(MSSA->getLiveOnEntryDef());
+      continue;
+    }
+
     MemoryLocOrCall UseMLOC(MU);
     auto &LocInfo = LocStackInfo[UseMLOC];
     // If the pop epoch changed, it means we've removed stuff from top of
@@ -2246,6 +2263,12 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
   if (auto *CacheResult = Cache.lookup(StartingAccess, Q.StartingLoc, Q.IsCall))
     return CacheResult;
 
+  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
+    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
+    Cache.insert(StartingAccess, LiveOnEntry, Q.StartingLoc, Q.IsCall);
+    return LiveOnEntry;
+  }
+
   // Start with the thing we already think clobbers this location
   MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
diff --git a/llvm/test/Transforms/Util/MemorySSA/load-invariant.ll b/llvm/test/Transforms/Util/MemorySSA/load-invariant.ll
index e387ff4c530..3c55db11597 100644
--- a/llvm/test/Transforms/Util/MemorySSA/load-invariant.ll
+++ b/llvm/test/Transforms/Util/MemorySSA/load-invariant.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
 ; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
 ; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output < %s 2>&1 | FileCheck %s
 ;
@@ -12,6 +11,7 @@
 
 declare void @clobberAllTheThings()
 
+; CHECK-LABEL: define i32 @foo
 define i32 @foo() {
 ; CHECK: 1 = MemoryDef(liveOnEntry)
 ; CHECK-NEXT: call void @clobberAllTheThings()
@@ -22,4 +22,20 @@ define i32 @foo() {
   ret i32 %1
 }
 
+; CHECK-LABEL: define i32 @bar
+define i32 @bar(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @clobberAllTheThings()
+  call void @clobberAllTheThings()
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %1 = load atomic i32
+  %1 = load atomic i32, i32* %a acquire, align 4, !invariant.load !0
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+  %2 = load i32, i32* %a, align 4
+  ret i32 %2
+}
+
 !0 = !{}
diff --git a/llvm/unittests/Transforms/Utils/MemorySSA.cpp b/llvm/unittests/Transforms/Utils/MemorySSA.cpp
index 16264b68669..956cc00089d 100644
--- a/llvm/unittests/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/unittests/Transforms/Utils/MemorySSA.cpp
@@ -418,3 +418,36 @@ TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
   MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
   EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
 }
+
+// Test that our walker properly handles loads with the invariant group
+// attribute. It's a bit hacky, since we add the invariant attribute *after*
+// building MSSA. Otherwise, the use optimizer will optimize it for us, which
+// isn't what we want.
+// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
+TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Constant *One = ConstantInt::get(Int8, 1);
+  Value *AllocA = B.CreateAlloca(Int8, One, "");
+
+  Instruction *Store = B.CreateStore(One, AllocA);
+  Instruction *Load = B.CreateLoad(AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
+  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
+  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
+
+  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
+  // flexible to future changes.
+  Walker->invalidateInfo(LoadMA);
+  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
+
+  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
+  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
+}
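
To illustrate the effect, here is a hedged usage sketch, not part of the commit: with this change applied, asking the caching walker for the clobber of such a load yields the live-on-entry def directly. The helper name isTriviallyLiveOnEntry is hypothetical; at the time of this commit the header lived at llvm/Transforms/Utils/MemorySSA.h (it later moved to llvm/Analysis/MemorySSA.h).

    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/MemorySSA.h"

    using namespace llvm;

    // Hypothetical helper: query the walker for the clobbering access of a
    // load. With r277640 applied, a load tagged !invariant.load (or one that
    // AliasAnalysis proves reads constant memory) resolves straight to the
    // live-on-entry def, with no upward walk through MemoryDefs.
    static bool isTriviallyLiveOnEntry(MemorySSA &MSSA, LoadInst *LI) {
      MemorySSAWalker *Walker = MSSA.getWalker();
      MemoryAccess *Clobber =
          Walker->getClobberingMemoryAccess(MSSA.getMemoryAccess(LI));
      return MSSA.isLiveOnEntryDef(Clobber);
    }

Note that the getClobberingMemoryAccess path in the patch also inserts the liveOnEntry answer into the walker's cache, so repeated queries for the same load hit the cache rather than re-running the check.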