author     Reid Kleckner <reid@kleckner.net>  2015-04-14 20:38:14 +0000
committer  Reid Kleckner <reid@kleckner.net>  2015-04-14 20:38:14 +0000
commit     223de262b93b2a720638c07fa7364dcaa7c56039 (patch)
tree       a78edc7f54861b36cf67749273bf24b14188be3d /llvm/lib/Analysis
parent     16d8e44da56335664599f6f6c97e0f35f7313193 (diff)
[Inliner] Don't inline functions with frameescape calls
Inlining such intrinsics is very difficult, since you need to simultaneously transform many calls to llvm.framerecover and potentially duplicate the functions containing them. Normally this intrinsic isn't added until EH preparation, which is part of the backend pass pipeline after inlining. However, if it were to get fed through the inliner, this change ensures that it doesn't break the code.

llvm-svn: 234937
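To make the pairing concrete, here is a minimal LLVM IR sketch of why inlining is hard (assuming the 2015-era intrinsic signatures, before the rename to llvm.localescape/llvm.localrecover; function names are illustrative). The recover side names the parent function directly, so inlining the parent would invalidate every such reference:

    ; The parent escapes a local so an outlined handler can find it later.
    define void @parent() {
    entry:
      %x = alloca i32
      call void (...) @llvm.frameescape(i32* %x)
      ret void
    }

    ; The handler recovers the escaped alloca by index, naming @parent and
    ; taking @parent's frame pointer. If @parent were inlined elsewhere, this
    ; call (and any duplicate of the handler) would have to be rewritten to
    ; describe the new, merged frame.
    define void @handler(i8* %fp) {
    entry:
      %p = call i8* @llvm.framerecover(i8* bitcast (void ()* @parent to i8*),
                                       i8* %fp, i32 0)
      ret void
    }

    declare void @llvm.frameescape(...)
    declare i8* @llvm.framerecover(i8*, i8*, i32)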
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/IPA/InlineCost.cpp | 27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Analysis/IPA/InlineCost.cpp b/llvm/lib/Analysis/IPA/InlineCost.cpp
index aa015647926..cacf70d041b 100644
--- a/llvm/lib/Analysis/IPA/InlineCost.cpp
+++ b/llvm/lib/Analysis/IPA/InlineCost.cpp
@@ -64,6 +64,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool ContainsNoDuplicateCall;
bool HasReturn;
bool HasIndirectBr;
+ bool HasFrameEscape;
/// Number of bytes allocated statically by the callee.
uint64_t AllocatedSize;
@@ -148,12 +149,12 @@ public:
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
- AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
- FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
- NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
- NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
- NumInstructionsSimplified(0), SROACostSavings(0),
- SROACostSavingsLost(0) {}
+ HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
+ NumVectorInstructions(0), FiftyPercentVectorBonus(0),
+ TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
+ NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
+ NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
+ SROACostSavings(0), SROACostSavingsLost(0) {}
bool analyzeCall(CallSite CS);
@@ -743,6 +744,9 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
case Intrinsic::memmove:
// SROA can usually chew through these intrinsics, but they aren't free.
return false;
+ case Intrinsic::frameescape:
+ HasFrameEscape = true;
+ return false;
}
}
@@ -941,7 +945,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
// If the visit this instruction detected an uninlinable pattern, abort.
if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
- HasIndirectBr)
+ HasIndirectBr || HasFrameEscape)
return false;
// If the caller is a recursive function then we don't want to inline
@@ -1171,7 +1175,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// returns false, and we can bail on out.
if (!analyzeBlock(BB, EphValues)) {
if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
- HasIndirectBr)
+ HasIndirectBr || HasFrameEscape)
return false;
// If the caller is a recursive function then we don't want to inline
@@ -1372,6 +1376,13 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
if (!ReturnsTwice && CS.isCall() &&
cast<CallInst>(CS.getInstruction())->canReturnTwice())
return false;
+
+ // Disallow inlining functions that call @llvm.frameescape. Doing this
+ // correctly would require major changes to the inliner.
+ if (CS.getCalledFunction() &&
+ CS.getCalledFunction()->getIntrinsicID() ==
+ llvm::Intrinsic::frameescape)
+ return false;
}
}
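As a hedged usage sketch of the new behavior (names illustrative, not from the commit's tests): a callee that contains a frameescape call is now reported as non-viable, so a call site like the one below is left alone by the inliner.

    ; @uses_escape contains llvm.frameescape, so after this patch both the
    ; cost model (via HasFrameEscape) and isInlineViable refuse to inline it.
    define void @uses_escape() {
    entry:
      %a = alloca i32
      call void (...) @llvm.frameescape(i32* %a)
      ret void
    }

    define void @caller() {
    entry:
      call void @uses_escape()   ; stays as an outgoing call
      ret void
    }

    declare void @llvm.frameescape(...)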