| field | value |
|---|---|
| author | Anna Zaks <ganna@apple.com>, 2015-06-25 23:35:45 +0000 |
| committer | Anna Zaks <ganna@apple.com>, 2015-06-25 23:35:45 +0000 |
| commit | 4f652b69b110de48e07ea334f4d41f83bc4784b2 |
| tree | a81cca9d27e52355252e8f4a22aaa09e9324a3bf /llvm |
| parent | f799edef28f95d3cd20863fdf9825b421f6c6baf |
[asan] Don't run stack malloc on functions containing inline assembly.
Stack malloc can make LLVM run out of registers even on 64-bit platforms when the function's inline assembly already claims most of the general-purpose registers. For example, the following test case fails to compile on Darwin.
clang -cc1 -O0 -triple x86_64-apple-macosx10.10.0 -emit-obj -fsanitize=address -mstackrealign -o ~/tmp/ex.o -x c ex.c
error: inline assembly requires more registers than available
void TestInlineAssembly(const unsigned char *S, unsigned int pS, unsigned char *D, unsigned int pD, unsigned int h) {
unsigned int sr = 4, pDiffD = pD - 5;
unsigned int pDiffS = (pS << 1) - 5;
char flagSA = ((pS & 15) == 0),
flagDA = ((pD & 15) == 0);
asm volatile (
"mov %0, %%"PTR_REG("si")"\n"
"mov %2, %%"PTR_REG("cx")"\n"
"mov %1, %%"PTR_REG("di")"\n"
"mov %8, %%"PTR_REG("ax")"\n"
:
: "m" (S), "m" (D), "m" (pS), "m" (pDiffS), "m" (pDiffD), "m" (sr), "m" (flagSA), "m" (flagDA), "m" (h)
: "%"PTR_REG("si"), "%"PTR_REG("di"), "%"PTR_REG("ax"), "%"PTR_REG("cx"), "%"PTR_REG("dx"), "memory"
);
}
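Note that the repro as quoted won't compile on its own: PTR_REG is a helper macro from the original source file that the commit message doesn't show. A definition along the following lines (an assumption, not part of the commit) makes the snippet self-contained; it picks the 64-bit "r" or 32-bit "e" register-name prefix and relies on adjacent string-literal concatenation, so PTR_REG("si") expands to "rsi" on x86-64:

/* Hypothetical PTR_REG definition -- not part of the commit message.
   PTR_REG("si") becomes "r" "si", i.e. "rsi", on x86-64, and "esi"
   on 32-bit x86, via C string-literal concatenation. */
#if defined(__x86_64__)
#define PTR_REG(reg) "r" reg
#else
#define PTR_REG(reg) "e" reg
#endif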
http://reviews.llvm.org/D10719
llvm-svn: 240722
Diffstat (limited to 'llvm'):

| file | lines changed |
|---|---|
| llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 7 |
| llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll | 56 |

2 files changed, 59 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 2dd2fe6211c..ff4368198be 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1753,11 +1753,10 @@ void FunctionStackPoisoner::poisonStack() {
   uint64_t LocalStackSize = L.FrameSize;
   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                        LocalStackSize <= kMaxStackMallocSize;
-  // Don't do dynamic alloca in presence of inline asm: too often it makes
-  // assumptions on which registers are available. Don't do stack malloc in the
-  // presence of inline asm on 32-bit platforms for the same reason.
+  // Don't do dynamic alloca or stack malloc in presence of inline asm:
+  // too often it makes assumptions on which registers are available.
   bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
-  DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
+  DoStackMalloc &= !HasNonEmptyInlineAsm;
 
   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
diff --git a/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
new file mode 100644
index 00000000000..7827f3fbf27
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -asan -S -o %t.ll
+; RUN: FileCheck %s < %t.ll
+
+; Don't do stack malloc on functions containing inline assembly on 64-bit
+; platforms. It makes LLVM run out of registers.
+
+; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h)
+; CHECK: %MyAlloca
+; CHECK-NOT: call {{.*}} @__asan_stack_malloc
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h) #0 {
+entry:
+  %S.addr = alloca i8*, align 8
+  %pS.addr = alloca i32, align 4
+  %D.addr = alloca i8*, align 8
+  %pD.addr = alloca i32, align 4
+  %h.addr = alloca i32, align 4
+  %sr = alloca i32, align 4
+  %pDiffD = alloca i32, align 4
+  %pDiffS = alloca i32, align 4
+  %flagSA = alloca i8, align 1
+  %flagDA = alloca i8, align 1
+  store i8* %S, i8** %S.addr, align 8
+  store i32 %pS, i32* %pS.addr, align 4
+  store i8* %D, i8** %D.addr, align 8
+  store i32 %pD, i32* %pD.addr, align 4
+  store i32 %h, i32* %h.addr, align 4
+  store i32 4, i32* %sr, align 4
+  %0 = load i32, i32* %pD.addr, align 4
+  %sub = sub i32 %0, 5
+  store i32 %sub, i32* %pDiffD, align 4
+  %1 = load i32, i32* %pS.addr, align 4
+  %shl = shl i32 %1, 1
+  %sub1 = sub i32 %shl, 5
+  store i32 %sub1, i32* %pDiffS, align 4
+  %2 = load i32, i32* %pS.addr, align 4
+  %and = and i32 %2, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  %conv2 = trunc i32 %conv to i8
+  store i8 %conv2, i8* %flagSA, align 1
+  %3 = load i32, i32* %pD.addr, align 4
+  %and3 = and i32 %3, 15
+  %cmp4 = icmp eq i32 %and3, 0
+  %conv5 = zext i1 %cmp4 to i32
+  %conv6 = trunc i32 %conv5 to i8
+  store i8 %conv6, i8* %flagDA, align 1
+  call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** %S.addr, i8** %D.addr, i32* %pS.addr, i32* %pDiffS, i32* %pDiffD, i32* %sr, i8* %flagSA, i8* %flagDA, i32* %h.addr) #1
+  ret void
+}
+
+attributes #0 = { nounwind sanitize_address }
+attributes #1 = { nounwind }

