author     Alexey Samsonov <vonosmas@gmail.com>  2015-07-29 19:36:08 +0000
committer  Alexey Samsonov <vonosmas@gmail.com>  2015-07-29 19:36:08 +0000
commit     869a5ff37fe76de8f8d519655a9fd313d30ab02d (patch)
tree       5cac4f771f52358fb63146819913a8bfa1481756 /llvm/lib
parent     8bc43a142ba4c852820ec1e285f6ead6229baa7c (diff)
[ASan] Disable dynamic alloca and UAR detection in presence of returns_twice calls.
Summary:
returns_twice functions (most importantly, setjmp) are optimization-hostile: if a local variable is promoted to a register and is changed between the setjmp() and longjmp() calls, that update is undone. This is why "man setjmp" advises marking all such locals as "volatile".

That is not enough for ASan, though: when it replaces a static alloca with a dynamic one (optionally called if UAR mode is enabled), it adds a whole lot of SSA values and computations of local variable addresses that can involve virtual registers, and cause unexpected behavior when these registers are restored from the buffer saved at setjmp.

To fix this, just disable the dynamic alloca and UAR tricks whenever we see a returns_twice call in the function.

Reviewers: rnk

Subscribers: llvm-commits, kcc

Differential Revision: http://reviews.llvm.org/D11495

llvm-svn: 243561
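The setjmp/longjmp hazard described above can be reproduced with a small, self-contained program. This sketch is not part of the patch, and whether the stale value is actually observed depends on optimization level and register allocation:

```cpp
// Minimal sketch of the setjmp/longjmp hazard (not from this commit).
// If `x` is kept in a register, the assignment made after setjmp() may be
// undone when longjmp() transfers control back; declaring `x` as
// `volatile int` is the fix that "man setjmp" recommends.
#include <csetjmp>
#include <cstdio>

static std::jmp_buf Env;

int main() {
  int x = 0; // not volatile: value after the second return is indeterminate
  if (setjmp(Env) == 0) {
    x = 42;               // update may live only in a register
    std::longjmp(Env, 1); // "returns twice" from the setjmp above
  }
  std::printf("%d\n", x); // may print 0 or 42 depending on optimization
  return 0;
}
```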
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 47b736f3cd1..071cd796b66 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -568,7 +568,8 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
   AllocaForValueMapTy AllocaForValue;
 
-  bool HasNonEmptyInlineAsm;
+  bool HasNonEmptyInlineAsm = false;
+  bool HasReturnsTwiceCall = false;
   std::unique_ptr<CallInst> EmptyInlineAsm;
 
   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
@@ -580,7 +581,6 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
         IntptrPtrTy(PointerType::get(IntptrTy, 0)),
         Mapping(ASan.Mapping),
         StackAlignment(1 << Mapping.Scale),
-        HasNonEmptyInlineAsm(false),
         EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
 
   bool runOnFunction() {
@@ -682,9 +682,13 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
     AllocaPoisonCallVec.push_back(APC);
   }
 
-  void visitCallInst(CallInst &CI) {
-    HasNonEmptyInlineAsm |=
-        CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get());
+  void visitCallSite(CallSite CS) {
+    Instruction *I = CS.getInstruction();
+    if (CallInst *CI = dyn_cast<CallInst>(I)) {
+      HasNonEmptyInlineAsm |=
+          CI->isInlineAsm() && !CI->isIdenticalTo(EmptyInlineAsm.get());
+      HasReturnsTwiceCall |= CI->canReturnTwice();
+    }
   }
 
   // ---------------------- Helpers.
@@ -1820,10 +1824,15 @@ void FunctionStackPoisoner::poisonStack() {
   uint64_t LocalStackSize = L.FrameSize;
   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                        LocalStackSize <= kMaxStackMallocSize;
-  // Don't do dynamic alloca or stack malloc in presence of inline asm:
-  // too often it makes assumptions on which registers are available.
-  bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
-  DoStackMalloc &= !HasNonEmptyInlineAsm;
+  bool DoDynamicAlloca = ClDynamicAllocaStack;
+  // Don't do dynamic alloca or stack malloc if:
+  // 1) There is inline asm: too often it makes assumptions on which registers
+  //    are available.
+  // 2) There is a returns_twice call (typically setjmp), which is
+  //    optimization-hostile, and doesn't play well with introduced indirect
+  //    register-relative calculation of local variable addresses.
+  DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
+  DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
 
   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
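For illustration only, a hypothetical function of the kind this change affects (the names DemoFrame and MayLongjmp are made up, not from the patch): its setjmp() call is treated as returns_twice, so with this patch ASan keeps the ordinary static frame alloca for it and skips the stack-malloc (use-after-return) and dynamic-alloca instrumentation:

```cpp
// Hypothetical example, not from the commit: under -fsanitize=address the
// frame of DemoFrame() keeps a plain static alloca (with redzones), because
// the returns_twice call (setjmp) disables the fake-stack and dynamic-alloca
// paths introduced by FunctionStackPoisoner.
#include <csetjmp>

static std::jmp_buf Env;

static void MayLongjmp(char *Buf) {
  Buf[0] = 'x';
  std::longjmp(Env, 1); // second return from the setjmp() below
}

static int DemoFrame() {
  char Buffer[64] = {};   // local array: still poisoned via the static frame
  if (setjmp(Env) == 0)   // returns_twice call seen by visitCallSite()
    MayLongjmp(Buffer);
  return Buffer[0];
}

int main() { return DemoFrame(); }
```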