Diffstat (limited to 'llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp')
-rw-r--r--  llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp  29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index e47b636348e..2d0cb6fbf21 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -54,6 +54,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -113,6 +114,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
ScalarEvolution *SE = nullptr;
DominatorTree *DT = nullptr;
LoopInfo *LI = nullptr;
+ TargetLibraryInfo *TLI = nullptr;
PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
: FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
@@ -131,6 +133,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
for (Loop *I : *LI) {
runOnLoopAndSubLoops(I);
}
@@ -141,6 +144,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
// We no longer modify the IR at all in this pass. Thus all
// analysis are preserved.
AU.setPreservesAll();
@@ -165,6 +169,7 @@ struct PlaceSafepoints : public FunctionPass {
// We modify the graph wholesale (inlining, block insertion, etc). We
// preserve nothing at the moment. We could potentially preserve dom tree
// if that was worth doing
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
}
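
For readers less familiar with the legacy pass manager, the pattern the hunks above follow is: declare the dependency in getAnalysisUsage() and fetch the computed result with getAnalysis<>() in runOnFunction(). Below is a minimal standalone sketch of that pattern, assuming the pre-LLVM-10 TargetLibraryInfoWrapperPass::getTLI() signature used in this patch; ExampleTLIUserPass is a hypothetical name, not part of the patch.

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct ExampleTLIUserPass : public FunctionPass {
  static char ID;
  ExampleTLIUserPass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    // Fetch the analysis result that getAnalysisUsage() requested below.
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    (void)TLI; // a real pass would thread TLI into its helpers, as the diff does
    return false; // the IR is not modified in this sketch
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.setPreservesAll();
  }
};
} // end anonymous namespace
char ExampleTLIUserPass::ID = 0;

Because PlaceBackedgeSafepointsImpl only inspects the IR, it can both require the new analysis and keep AU.setPreservesAll(), exactly as its hunk above does; PlaceSafepoints, which rewrites the function, only adds the requirement.
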
@@ -174,10 +179,11 @@ struct PlaceSafepoints : public FunctionPass {
// callers job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
- std::vector<CallSite> &ParsePointsNeeded /*rval*/);
+ std::vector<CallSite> &ParsePointsNeeded /*rval*/,
+ const TargetLibraryInfo &TLI);
-static bool needsStatepoint(const CallSite &CS) {
- if (callsGCLeafFunction(CS))
+static bool needsStatepoint(const CallSite &CS, const TargetLibraryInfo &TLI) {
+ if (callsGCLeafFunction(CS, TLI))
return false;
if (CS.isCall()) {
CallInst *call = cast<CallInst>(CS.getInstruction());
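
The point of threading TLI into needsStatepoint() is that callsGCLeafFunction() can now use TargetLibraryInfo to recognize well-known library calls as GC-leaf, so they no longer force a statepoint. A caller-side sketch of that check, assuming the post-patch callsGCLeafFunction(CS, TLI) overload declared in llvm/Transforms/Utils/Local.h and the CallSite-era API; blockOnlyCallsGCLeaves is a hypothetical helper introduced here for illustration.

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Returns true if every call in BB is one the GC can ignore, i.e. no
// call in the block would require a statepoint of its own.
static bool blockOnlyCallsGCLeaves(BasicBlock &BB,
                                   const TargetLibraryInfo &TLI) {
  for (Instruction &I : BB) {
    CallSite CS(&I);
    if (!CS)
      continue; // not a call or invoke
    // With TLI, callsGCLeafFunction can treat recognized libcalls as
    // leaves, in addition to intrinsics and callees explicitly marked
    // as GC-leaf functions.
    if (!callsGCLeafFunction(CS, TLI))
      return false;
  }
  return true;
}
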
@@ -194,7 +200,8 @@ static bool needsStatepoint(const CallSite &CS) {
/// answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
BasicBlock *Pred,
- DominatorTree &DT) {
+ DominatorTree &DT,
+ const TargetLibraryInfo &TLI) {
// In general, we're looking for any cut of the graph which ensures
// there's a call safepoint along every edge between Header and Pred.
// For the moment, we look only for the 'cuts' that consist of a single call
@@ -217,7 +224,7 @@ static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
// unconditional poll. In practice, this is only a theoretical concern
// since we don't have any methods with conditional-only safepoint
// polls.
- if (needsStatepoint(CS))
+ if (needsStatepoint(CS, TLI))
return true;
}
@@ -321,7 +328,7 @@ bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
continue;
}
if (CallSafepointsEnabled &&
- containsUnconditionalCallSafepoint(L, Header, Pred, *DT)) {
+ containsUnconditionalCallSafepoint(L, Header, Pred, *DT, *TLI)) {
// Note: This is only semantically legal since we won't do any further
// IPO or inlining before the actual call insertion.. If we hadn't, we
// might latter loose this call safepoint.
@@ -472,6 +479,9 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
if (!shouldRewriteFunction(F))
return false;
+ const TargetLibraryInfo &TLI =
+ getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+
bool Modified = false;
// In various bits below, we rely on the fact that uses are reachable from
@@ -578,7 +588,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
// safepoint polls themselves.
for (Instruction *PollLocation : PollsNeeded) {
std::vector<CallSite> RuntimeCalls;
- InsertSafepointPoll(PollLocation, RuntimeCalls);
+ InsertSafepointPoll(PollLocation, RuntimeCalls, TLI);
ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
RuntimeCalls.end());
}
@@ -610,7 +620,8 @@ INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
static void
InsertSafepointPoll(Instruction *InsertBefore,
- std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
+ std::vector<CallSite> &ParsePointsNeeded /*rval*/,
+ const TargetLibraryInfo &TLI) {
BasicBlock *OrigBB = InsertBefore->getParent();
Module *M = InsertBefore->getModule();
assert(M && "must be part of a module");
@@ -669,7 +680,7 @@ InsertSafepointPoll(Instruction *InsertBefore,
assert(ParsePointsNeeded.empty());
for (auto *CI : Calls) {
// No safepoint needed or wanted
- if (!needsStatepoint(CI))
+ if (!needsStatepoint(CI, TLI))
continue;
// These are likely runtime calls. Should we assert that via calling
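
The final hunk keeps, out of all the calls produced by inlining the poll body, only those that still have to become statepoints. A stand-alone sketch of that filtering step, assuming a predicate with the same shape as the file's static needsStatepoint(CS, TLI); collectParsePoints and NeedsStatepointFn are hypothetical names introduced here for illustration.

#include <vector>
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

using NeedsStatepointFn = bool (*)(const CallSite &,
                                   const TargetLibraryInfo &);

// Keep only the calls for which the predicate says a statepoint is
// still required; GC-leaf calls, inline asm, and existing statepoint
// machinery are dropped.
static void collectParsePoints(const std::vector<CallInst *> &Calls,
                               const TargetLibraryInfo &TLI,
                               NeedsStatepointFn NeedsStatepoint,
                               std::vector<CallSite> &ParsePointsNeeded) {
  for (CallInst *CI : Calls) {
    CallSite CS(CI);
    if (!NeedsStatepoint(CS, TLI))
      continue; // no safepoint needed or wanted
    ParsePointsNeeded.push_back(CS);
  }
}
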