author     Max Kazantsev <max.kazantsev@azul.com>    2017-10-11 08:10:43 +0000
committer  Max Kazantsev <max.kazantsev@azul.com>    2017-10-11 08:10:43 +0000
commit     3b81809e06da76b644a842caaccb1591900801e3 (patch)
tree       94215c65f7de873daa2948a03d53ddaec4afd763 /llvm/lib/Transforms
parent     4a6d5b72af56742e56684d52a2c80068597be4bc (diff)
[GVN] Prevent LoadPRE from hoisting across instructions that don't pass control flow to successors
This patch fixes a miscompile that happens when PRE hoists loads across guards and other instructions that don't always pass control flow to their successors. PRE is now prohibited from hoisting across such instructions, because there is no guarantee that a load placed after such an instruction is still valid before it. For example, a load from under a guard may be invalid before the guard in the following case:

  int array[LEN];
  ...
  guard(0 <= index && index < LEN);
  use(array[index]);

Differential Revision: https://reviews.llvm.org/D37460

llvm-svn: 315440
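As an informal illustration of the bug (not part of the patch; guard() stands in for the lowering of llvm.experimental.guard and all names are made up), hoisting the load above the guard turns a well-defined access into a potentially out-of-bounds one:

  // Hypothetical guard: deoptimizes instead of returning when Cond is false,
  // so control only falls through to the next statement when Cond holds.
  void guard(bool Cond);

  int loadUnderGuard(const int *Array, int Index, int Len) {
    guard(0 <= Index && Index < Len); // implicit control flow
    return Array[Index];              // only executed for in-bounds Index
  }

  // What an over-eager LoadPRE would effectively produce: the access is
  // speculated above the guard and may read out of bounds.
  int loadUnderGuardMiscompiled(const int *Array, int Index, int Len) {
    int Value = Array[Index];         // may access invalid memory
    guard(0 <= Index && Index < Len);
    return Value;
  }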
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp  77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index c3cc2375e3b..135b33d84be 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -38,6 +38,7 @@
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
@@ -1048,7 +1049,32 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// backwards through predecessors if needed.
BasicBlock *LoadBB = LI->getParent();
BasicBlock *TmpBB = LoadBB;
+ bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);
+ // Check that there are no implicit control flow instructions above our load
+ // in its block. If there is an instruction that doesn't always pass
+ // execution to the following instruction, then moving through it may become
+ // invalid. For example:
+ //
+ // int arr[LEN];
+ // int index = ???;
+ // ...
+ // guard(0 <= index && index < LEN);
+ // use(arr[index]);
+ //
+ // It is illegal to move the array access to any point above the guard,
+ // because if the index is out of bounds we should deoptimize rather than
+ // access the array.
+ // Check that there is no guard in this block above our instruction.
+ if (!IsSafeToSpeculativelyExecute) {
+ auto It = FirstImplicitControlFlowInsts.find(TmpBB);
+ if (It != FirstImplicitControlFlowInsts.end()) {
+ assert(It->second->getParent() == TmpBB &&
+ "Implicit control flow map broken?");
+ if (OI->dominates(It->second, LI))
+ return false;
+ }
+ }
while (TmpBB->getSinglePredecessor()) {
TmpBB = TmpBB->getSinglePredecessor();
if (TmpBB == LoadBB) // Infinite (unreachable) loop.
@@ -1063,6 +1089,11 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// which it was not previously executed.
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
return false;
+
+ // Check that there is no implicit control flow in a block above.
+ if (!IsSafeToSpeculativelyExecute &&
+ FirstImplicitControlFlowInsts.count(TmpBB))
+ return false;
}
assert(TmpBB);
@@ -1979,6 +2010,8 @@ bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
TLI = &RunTLI;
VN.setAliasAnalysis(&RunAA);
MD = RunMD;
+ OrderedInstructions OrderedInstrs(DT);
+ OI = &OrderedInstrs;
VN.setMemDep(MD);
ORE = RunORE;
@@ -2068,6 +2101,9 @@ bool GVN::processBlock(BasicBlock *BB) {
DEBUG(verifyRemoved(*I));
(*I)->eraseFromParent();
}
+
+ if (!InstrsToErase.empty())
+ OI->invalidateBlock(BB);
InstrsToErase.clear();
if (AtStart)
@@ -2263,6 +2299,7 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
MD->removeInstruction(CurInst);
DEBUG(verifyRemoved(CurInst));
CurInst->eraseFromParent();
+ OI->invalidateBlock(CurrentBlock);
++NumGVNInstr;
return true;
@@ -2329,6 +2366,7 @@ bool GVN::iterateOnFunction(Function &F) {
// RPOT walks the graph in its constructor and will not be invalidated during
// processBlock.
ReversePostOrderTraversal<Function *> RPOT(&F);
+ fillImplicitControlFlowInfo(RPOT);
for (BasicBlock *BB : RPOT)
Changed |= processBlock(BB);
@@ -2340,6 +2378,45 @@ void GVN::cleanupGlobalSets() {
LeaderTable.clear();
BlockRPONumber.clear();
TableAllocator.Reset();
+ FirstImplicitControlFlowInsts.clear();
+}
+
+void
+GVN::fillImplicitControlFlowInfo(ReversePostOrderTraversal<Function *> &RPOT) {
+ auto MayNotTransferExecutionToSuccessor = [&](const Instruction *I) {
+ // If a block's instruction doesn't always pass control to its successor
+ // instruction, mark the block as having implicit control flow. We use this
+ // information to avoid wrong assumptions of the sort "if A is executed and
+ // B post-dominates A, then B is also executed". This is not true if there
+ // is an implicit control flow instruction (e.g. a guard) between them.
+ //
+ // TODO: Currently, isGuaranteedToTransferExecutionToSuccessor returns false
+ // for volatile stores and loads because they can trap. The discussion on
+ // whether or not it is correct is still ongoing. We might want to get rid
+ // of this logic in the future. Anyway, trapping instructions shouldn't
+ // introduce implicit control flow, so we explicitly allow them here. This
+ // must be removed once isGuaranteedToTransferExecutionToSuccessor is fixed.
+ if (isGuaranteedToTransferExecutionToSuccessor(I))
+ return false;
+ if (auto *LI = dyn_cast<LoadInst>(I)) {
+ assert(LI->isVolatile() && "Non-volatile load should transfer execution"
+ " to successor!");
+ return false;
+ }
+ if (auto *SI = dyn_cast<StoreInst>(I)) {
+ assert(SI->isVolatile() && "Non-volatile store should transfer execution"
+ " to successor!");
+ return false;
+ }
+ return true;
+ };
+
+ for (BasicBlock *BB : RPOT)
+ for (auto &I : *BB)
+ if (MayNotTransferExecutionToSuccessor(&I)) {
+ FirstImplicitControlFlowInsts[BB] = &I;
+ break;
+ }
}
/// Verify that the specified instruction does not occur in our
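For reference, the bookkeeping this patch adds can be reproduced in isolation roughly as follows. This is a standalone sketch, not the committed code: computeFirstImplicitControlFlow is a made-up name that mirrors GVN::fillImplicitControlFlowInfo by recording, for each block, the first instruction that isGuaranteedToTransferExecutionToSuccessor rejects, while letting volatile loads and stores through as the TODO in the patch explains.

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/ADT/PostOrderIterator.h"
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Map each block to its first instruction that may not transfer execution
  // to the next instruction (e.g. a guard call). Blocks without such an
  // instruction are simply absent from the map.
  static DenseMap<const BasicBlock *, const Instruction *>
  computeFirstImplicitControlFlow(Function &F) {
    DenseMap<const BasicBlock *, const Instruction *> Result;
    ReversePostOrderTraversal<Function *> RPOT(&F);
    for (BasicBlock *BB : RPOT)
      for (const Instruction &I : *BB) {
        if (isGuaranteedToTransferExecutionToSuccessor(&I))
          continue;
        // Volatile loads/stores may trap, but trapping alone is not implicit
        // control flow, so skip them (see the TODO above).
        if (isa<LoadInst>(I) || isa<StoreInst>(I))
          continue;
        Result[BB] = &I;
        break;
      }
    return Result;
  }

A client pass would then consult this map before hoisting, much as the PerformLoadPRE hunk above does with FirstImplicitControlFlowInsts and OI->dominates.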