| author | NAKAMURA Takumi <geek4civic@gmail.com> | 2014-06-25 12:41:52 +0000 |
|---|---|---|
| committer | NAKAMURA Takumi <geek4civic@gmail.com> | 2014-06-25 12:41:52 +0000 |
| commit | 1db5995d1419a5476f8f3974bdd1af20605885bc (patch) | |
| tree | 3d60e8f0750bedc9a0d86c6382600b963e330ad6 /llvm/test/CodeGen | |
| parent | 3cf5c9de141a0456b85bbfaf3ab055c22256c5fc (diff) | |
Re-apply r211399, "Generate native unwind info on Win64" with a fix to ignore SEH pseudo ops in X86 JIT emitter.
--
This patch enables LLVM to emit Win64-native unwind info rather than
DWARF CFI. It handles all corner cases (I hope), including stack
realignment.
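
For illustration, this is roughly the prologue/epilogue shape the new
win64_eh.ll test below expects for a function with a 4000-byte frame (a
condensed sketch of the foo1 case; the .seh_* directives replace the old
.cfi_* annotations):

```asm
foo1:
        .seh_proc foo1          # begins the .pdata/.xdata records for foo1
        subq    $4000, %rsp
        .seh_stackalloc 4000    # one unwind code for the fixed allocation,
        .seh_endprologue        # instead of .cfi_def_cfa_offset updates
        # ...function body...
        addq    $4000, %rsp
        retq
        .seh_endproc
```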
Because the unwind info is not flexible enough to describe stack frames
with a gap of unknown size in the middle, such as the one caused by
stack realignment, I modified the register spilling code to place all spills
into fixed frame slots, so that they can be accessed relative to the
frame pointer.
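
The foo5 case in the new win64_eh.ll test shows the interaction: once %rsp
is realigned, its distance from the return address is unknown, so unwind
offsets are anchored to %rbp via .seh_setframe and the XMM callee-saves go
to fixed %rbp-relative slots. A condensed sketch of that prologue (the
%rdi/%rbx pushes from the full test are omitted, so the literal offsets
here are illustrative):

```asm
        pushq   %rbp
        .seh_pushreg 5           # rbp is register number 5 in the SEH encoding
        movq    %rsp, %rbp       # fixed frame established before realignment
        andq    $-64, %rsp       # realignment opens a gap of unknown size
        subq    $128, %rsp
        .seh_stackalloc 48
        .seh_setframe 5, 64      # unwind offsets are now %rbp-relative
        movaps  %xmm6, -48(%rbp) # spill lands in a fixed frame slot, not at (%rsp)
        .seh_savexmm 6, 16
        .seh_endprologue
```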
Patch by Vadim Chugunov!
Reviewed By: rnk
Differential Revision: http://reviews.llvm.org/D4081
llvm-svn: 211691
Diffstat (limited to 'llvm/test/CodeGen')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/2007-05-05-Personality.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll | 4 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx-intel-ocl.ll | 62 |
| -rw-r--r-- | llvm/test/CodeGen/X86/gcc_except_table.ll | 10 |
| -rw-r--r-- | llvm/test/CodeGen/X86/win64_eh.ll | 170 |
5 files changed, 214 insertions, 38 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/2007-05-05-Personality.ll b/llvm/test/CodeGen/X86/2007-05-05-Personality.ll
index 5b8fe72b5d0..b99c58c6e4a 100644
--- a/llvm/test/CodeGen/X86/2007-05-05-Personality.ll
+++ b/llvm/test/CodeGen/X86/2007-05-05-Personality.ll
@@ -1,12 +1,14 @@
 ; RUN: llc < %s -mtriple=i686-pc-linux-gnu -o - | FileCheck %s --check-prefix=LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=LIN
 ; RUN: llc < %s -mtriple=i386-pc-mingw32 -o - | FileCheck %s --check-prefix=WIN
 ; RUN: llc < %s -mtriple=i686-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-gnu -o - | FileCheck %s --check-prefix=WIN64
 
 ; LIN: .cfi_personality 0, __gnat_eh_personality
 ; LIN: .cfi_lsda 0, .Lexception0
 ; WIN: .cfi_personality 0, ___gnat_eh_personality
 ; WIN: .cfi_lsda 0, Lexception0
+; WIN64: .seh_handler __gnat_eh_personality
+; WIN64: .seh_handlerdata
 
 @error = external global i8
 
@@ -15,7 +17,7 @@ entry:
   invoke void @raise()
       to label %eh_then unwind label %unwind
 
-unwind: ; preds = %entry
+unwind:                                           ; preds = %entry
   %eh_ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*)
               catch i8* @error
   %eh_select = extractvalue { i8*, i32 } %eh_ptr, 1
diff --git a/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
index 1259cf47b2b..dfb98bb1ab3 100644
--- a/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
+++ b/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mcpu=generic -mtriple=x86_64-mingw32 < %s | FileCheck %s
 ; CHECK: subq $40, %rsp
-; CHECK: movaps %xmm8, (%rsp)
-; CHECK: movaps %xmm7, 16(%rsp)
+; CHECK: movaps %xmm8, 16(%rsp)
+; CHECK: movaps %xmm7, (%rsp)
 
 define i32 @a() nounwind {
 entry:
diff --git a/llvm/test/CodeGen/X86/avx-intel-ocl.ll b/llvm/test/CodeGen/X86/avx-intel-ocl.ll
index 7337815a39a..3e051bff768 100644
--- a/llvm/test/CodeGen/X86/avx-intel-ocl.ll
+++ b/llvm/test/CodeGen/X86/avx-intel-ocl.ll
@@ -7,21 +7,21 @@ declare <16 x float> @func_float16_ptr(<16 x float>, <16 x float> *)
 declare <16 x float> @func_float16(<16 x float>, <16 x float>)
 declare i32 @func_int(i32, i32)
 
-; WIN64: testf16_inp
+; WIN64-LABEL: testf16_inp
 ; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
 ; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
 ; WIN64: leaq {{.*}}(%rsp), %rcx
 ; WIN64: call
 ; WIN64: ret
 
-; X32: testf16_inp
+; X32-LABEL: testf16_inp
 ; X32: movl %eax, (%esp)
 ; X32: vaddps {{.*}}, {{%ymm[0-1]}}
 ; X32: vaddps {{.*}}, {{%ymm[0-1]}}
 ; X32: call
 ; X32: ret
 
-; X64: testf16_inp
+; X64-LABEL: testf16_inp
 ; X64: vaddps {{.*}}, {{%ymm[0-1]}}
 ; X64: vaddps {{.*}}, {{%ymm[0-1]}}
 ; X64: leaq {{.*}}(%rsp), %rdi
@@ -41,14 +41,14 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
 ;test calling conventions - preserved registers
 
 ; preserved ymm6-ymm15
-; WIN64: testf16_regs
+; WIN64-LABEL: testf16_regs
 ; WIN64: call
 ; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
 ; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
 ; WIN64: ret
 
 ; preserved ymm8-ymm15
-; X64: testf16_regs
+; X64-LABEL: testf16_regs
 ; X64: call
 ; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
 ; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
@@ -65,28 +65,30 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
 }
 
 ; test calling conventions - prolog and epilog
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
-; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rsp).*}} # 32-byte Spill
+; WIN64-LABEL: test_prolog_epilog
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
+; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
 ; WIN64: call
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-; WIN64: vmovaps {{.*(%rsp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
-
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+; WIN64: vmovaps {{.*(%rbp).*}}, {{%ymm([6-9]|1[0-5])}} # 32-byte Reload
+
+; X64-LABEL: test_prolog_epilog
 ; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
 ; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
 ; X64: vmovups {{%ymm([8-9]|1[0-5])}}, {{.*}}(%rsp) ## 32-byte Folded Spill
@@ -111,12 +113,14 @@ define intel_ocl_bicc <16 x float> @test_prolog_epilog(<16 x float> %a, <16 x fl
 
 ; test functions with integer parameters
 ; pass parameters on stack for 32-bit platform
+; X32-LABEL: test_int
 ; X32: movl {{.*}}, 4(%esp)
 ; X32: movl {{.*}}, (%esp)
 ; X32: call
 ; X32: addl {{.*}}, %eax
 
 ; pass parameters in registers for 64-bit platform
+; X64-LABEL: test_int
 ; X64: leal {{.*}}, %edi
 ; X64: movl {{.*}}, %esi
 ; X64: call
@@ -128,21 +132,21 @@ define i32 @test_int(i32 %a, i32 %b) nounwind {
   ret i32 %c
 }
 
-; WIN64: test_float4
+; WIN64-LABEL: test_float4
 ; WIN64-NOT: vzeroupper
 ; WIN64: call
 ; WIN64-NOT: vzeroupper
 ; WIN64: call
 ; WIN64: ret
 
-; X64: test_float4
+; X64-LABEL: test_float4
 ; X64-NOT: vzeroupper
 ; X64: call
 ; X64-NOT: vzeroupper
 ; X64: call
 ; X64: ret
 
-; X32: test_float4
+; X32-LABEL: test_float4
 ; X32: vzeroupper
 ; X32: call
 ; X32: vzeroupper
diff --git a/llvm/test/CodeGen/X86/gcc_except_table.ll b/llvm/test/CodeGen/X86/gcc_except_table.ll
index 8c328ec58f9..a732eb1efbd 100644
--- a/llvm/test/CodeGen/X86/gcc_except_table.ll
+++ b/llvm/test/CodeGen/X86/gcc_except_table.ll
@@ -13,14 +13,14 @@ define i32 @main() uwtable optsize ssp {
 ; APPLE: GCC_except_table0:
 ; APPLE: Lexception0:
 
-; MINGW64: .cfi_startproc
-; MINGW64: .cfi_personality 0, __gxx_personality_v0
-; MINGW64: .cfi_lsda 0, .Lexception0
-; MINGW64: .cfi_def_cfa_offset 16
+; MINGW64: .seh_proc
+; MINGW64: .seh_handler __gxx_personality_v0
+; MINGW64: .seh_setframe 5, 0
 ; MINGW64: callq _Unwind_Resume
-; MINGW64: .cfi_endproc
+; MINGW64: .seh_handlerdata
 ; MINGW64: GCC_except_table0:
 ; MINGW64: Lexception0:
+; MINGW64: .seh_endproc
 
 ; MINGW32: .cfi_startproc
 ; MINGW32: .cfi_personality 0, ___gxx_personality_v0
diff --git a/llvm/test/CodeGen/X86/win64_eh.ll b/llvm/test/CodeGen/X86/win64_eh.ll
new file mode 100644
index 00000000000..f1f874eb2f5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/win64_eh.ll
@@ -0,0 +1,170 @@
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-win32 | FileCheck %s -check-prefix=WIN64
+; RUN: llc < %s -O0 -mcpu=corei7 -mtriple=x86_64-pc-mingw32 | FileCheck %s -check-prefix=WIN64
+
+; Check function without prolog
+define void @foo0() uwtable {
+entry:
+  ret void
+}
+; WIN64-LABEL: foo0:
+; WIN64: .seh_proc foo0
+; WIN64: .seh_endprologue
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a small stack allocation
+define void @foo1() uwtable {
+entry:
+  %baz = alloca [2000 x i16], align 2
+  ret void
+}
+; WIN64-LABEL: foo1:
+; WIN64: .seh_proc foo1
+; WIN64: subq $4000, %rsp
+; WIN64: .seh_stackalloc 4000
+; WIN64: .seh_endprologue
+; WIN64: addq $4000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+; Checks a stack allocation requiring call to __chkstk/___chkstk_ms
+define void @foo2() uwtable {
+entry:
+  %baz = alloca [4000 x i16], align 2
+  ret void
+}
+; WIN64-LABEL: foo2:
+; WIN64: .seh_proc foo2
+; WIN64: movabsq $8000, %rax
+; WIN64: callq {{__chkstk|___chkstk_ms}}
+; WIN64: subq %rax, %rsp
+; WIN64: .seh_stackalloc 8000
+; WIN64: .seh_endprologue
+; WIN64: addq $8000, %rsp
+; WIN64: ret
+; WIN64: .seh_endproc
+
+
+; Checks stack push
+define i32 @foo3(i32 %f_arg, i32 %e_arg, i32 %d_arg, i32 %c_arg, i32 %b_arg, i32 %a_arg) uwtable {
+entry:
+  %a = alloca i32
+  %b = alloca i32
+  %c = alloca i32
+  %d = alloca i32
+  %e = alloca i32
+  %f = alloca i32
+  store i32 %a_arg, i32* %a
+  store i32 %b_arg, i32* %b
+  store i32 %c_arg, i32* %c
+  store i32 %d_arg, i32* %d
+  store i32 %e_arg, i32* %e
+  store i32 %f_arg, i32* %f
+  %tmp = load i32* %a
+  %tmp1 = mul i32 %tmp, 2
+  %tmp2 = load i32* %b
+  %tmp3 = mul i32 %tmp2, 3
+  %tmp4 = add i32 %tmp1, %tmp3
+  %tmp5 = load i32* %c
+  %tmp6 = mul i32 %tmp5, 5
+  %tmp7 = add i32 %tmp4, %tmp6
+  %tmp8 = load i32* %d
+  %tmp9 = mul i32 %tmp8, 7
+  %tmp10 = add i32 %tmp7, %tmp9
+  %tmp11 = load i32* %e
+  %tmp12 = mul i32 %tmp11, 11
+  %tmp13 = add i32 %tmp10, %tmp12
+  %tmp14 = load i32* %f
+  %tmp15 = mul i32 %tmp14, 13
+  %tmp16 = add i32 %tmp13, %tmp15
+  ret i32 %tmp16
+}
+; WIN64-LABEL: foo3:
+; WIN64: .seh_proc foo3
+; WIN64: pushq %rsi
+; WIN64: .seh_pushreg 6
+; WIN64: subq $24, %rsp
+; WIN64: .seh_stackalloc 24
+; WIN64: .seh_endprologue
+; WIN64: addq $24, %rsp
+; WIN64: popq %rsi
+; WIN64: ret
+; WIN64: .seh_endproc
+
+
+; Check emission of eh handler and handler data
+declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
+declare void @_d_eh_resume_unwind(i8*)
+
+declare i32 @bar()
+
+define i32 @foo4() #0 {
+entry:
+  %step = alloca i32, align 4
+  store i32 0, i32* %step
+  %tmp = load i32* %step
+
+  %tmp1 = invoke i32 @bar()
+          to label %finally unwind label %landingpad
+
+finally:
+  store i32 1, i32* %step
+  br label %endtryfinally
+
+landingpad:
+  %landing_pad = landingpad { i8*, i32 } personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality
+          cleanup
+  %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
+  store i32 2, i32* %step
+  call void @_d_eh_resume_unwind(i8* %tmp3)
+  unreachable
+
+endtryfinally:
+  %tmp10 = load i32* %step
+  ret i32 %tmp10
+}
+; WIN64-LABEL: foo4:
+; WIN64: .seh_proc foo4
+; WIN64: .seh_handler _d_eh_personality, @unwind, @except
+; WIN64: subq $56, %rsp
+; WIN64: .seh_stackalloc 56
+; WIN64: .seh_endprologue
+; WIN64: addq $56, %rsp
+; WIN64: ret
+; WIN64: .seh_handlerdata
+; WIN64: .seh_endproc
+
+
+; Check stack re-alignment and xmm spilling
+define void @foo5() uwtable {
+entry:
+  %s = alloca i32, align 64
+  call void asm sideeffect "", "~{rbx},~{rdi},~{xmm6},~{xmm7}"()
+  ret void
+}
+; WIN64-LABEL: foo5:
+; WIN64: .seh_proc foo5
+; WIN64: pushq %rbp
+; WIN64: .seh_pushreg 5
+; WIN64: movq %rsp, %rbp
+; WIN64: pushq %rdi
+; WIN64: .seh_pushreg 7
+; WIN64: pushq %rbx
+; WIN64: .seh_pushreg 3
+; WIN64: andq $-64, %rsp
+; WIN64: subq $128, %rsp
+; WIN64: .seh_stackalloc 48
+; WIN64: .seh_setframe 5, 64
+; WIN64: movaps %xmm7, -32(%rbp) # 16-byte Spill
+; WIN64: movaps %xmm6, -48(%rbp) # 16-byte Spill
+; WIN64: .seh_savexmm 6, 16
+; WIN64: .seh_savexmm 7, 32
+; WIN64: .seh_endprologue
+; WIN64: movaps -48(%rbp), %xmm6 # 16-byte Reload
+; WIN64: movaps -32(%rbp), %xmm7 # 16-byte Reload
+; WIN64: leaq -16(%rbp), %rsp
+; WIN64: popq %rbx
+; WIN64: popq %rdi
+; WIN64: popq %rbp
+; WIN64: retq
+; WIN64: .seh_endproc
```

