author     Reid Kleckner <reid@kleckner.net>   2014-08-29 21:42:08 +0000
committer  Reid Kleckner <reid@kleckner.net>   2014-08-29 21:42:08 +0000
commit     16e5541211b527b4403f471834e890c7ce82a49d (patch)
tree       28e912f0f187377cbde6f05edefc9d378249a198 /llvm/test/CodeGen/X86/musttail-varargs.ll
parent     329d4a2b292ec19e2bb0eeb03889c7a6757e137d (diff)
musttail: Forward regparms of variadic functions on x86_64
Summary:
If a variadic function body contains a musttail call, then we copy all
of the remaining register parameters into virtual registers in the
function prologue. We track the virtual registers through the function
body, and add them as additional registers to pass to the call. Because
this is all done in virtual registers, the register allocator usually
gives us good code. If the function does a call, however, it will have
to spill and reload all argument registers (ew).

Forwarding regparms on x86_32 is not implemented because most compilers
don't support varargs in 32-bit with regparms.

Reviewers: majnemer

Subscribers: aemerson, llvm-commits

Differential Revision: http://reviews.llvm.org/D5060

llvm-svn: 216780
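One plausible source-level scenario that produces thunks like the ones
tested below is a this-adjusting thunk for a variadic virtual method;
perfectly forwarding an unknown variadic argument pack is exactly what
musttail exists for. The C++ sketch below is an illustration with
invented names, not code from this commit; Clang emits the thunk
itself, so the source only shows a class layout that forces one.

// Hypothetical illustration (invented names, not from this commit).
// A sits at a non-zero offset inside C, so calling log() through an A*
// dispatches through a this-adjusting thunk. Because log() is variadic,
// the thunk cannot re-marshal the "..." arguments; it must forward the
// incoming argument registers untouched and tail-call C::log -- the
// musttail pattern modeled by @f_thunk and @g_thunk in the test below.
struct B { virtual ~B() {} };
struct A { virtual void log(const char *fmt, ...); };
struct C : B, A {
  void log(const char *fmt, ...) override; // thunk emitted for the A-in-C vtable
};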
Diffstat (limited to 'llvm/test/CodeGen/X86/musttail-varargs.ll')
-rw-r--r--  llvm/test/CodeGen/X86/musttail-varargs.ll  | 119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/musttail-varargs.ll b/llvm/test/CodeGen/X86/musttail-varargs.ll
new file mode 100644
index 00000000000..1e99c141c03
--- /dev/null
+++ b/llvm/test/CodeGen/X86/musttail-varargs.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
+; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
+
+; Test that we actually spill and reload all arguments in the variadic argument
+; pack. Doing a normal call will clobber all argument registers, and we will
+; spill around it. A simple adjustment should not require any XMM spills.
+
+declare void(i8*, ...)* @get_f(i8* %this)
+
+define void @f_thunk(i8* %this, ...) {
+ %fptr = call void(i8*, ...)*(i8*)* @get_f(i8* %this)
+ musttail call void (i8*, ...)* %fptr(i8* %this, ...)
+ ret void
+}
+
+; Save and restore 6 GPRs, 8 XMMs, and AL around the call.
+
+; LINUX-LABEL: f_thunk:
+; LINUX-DAG: movq %rdi, {{.*}}
+; LINUX-DAG: movq %rsi, {{.*}}
+; LINUX-DAG: movq %rdx, {{.*}}
+; LINUX-DAG: movq %rcx, {{.*}}
+; LINUX-DAG: movq %r8, {{.*}}
+; LINUX-DAG: movq %r9, {{.*}}
+; LINUX-DAG: movb %al, {{.*}}
+; LINUX-DAG: movaps %xmm0, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm1, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm2, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm3, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm4, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm5, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm6, {{[0-9]*}}(%rsp)
+; LINUX-DAG: movaps %xmm7, {{[0-9]*}}(%rsp)
+; LINUX: callq get_f
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm0
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm1
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm2
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm3
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm4
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm5
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm6
+; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm7
+; LINUX-DAG: movq {{.*}}, %rdi
+; LINUX-DAG: movq {{.*}}, %rsi
+; LINUX-DAG: movq {{.*}}, %rdx
+; LINUX-DAG: movq {{.*}}, %rcx
+; LINUX-DAG: movq {{.*}}, %r8
+; LINUX-DAG: movq {{.*}}, %r9
+; LINUX-DAG: movb {{.*}}, %al
+; LINUX: jmpq *{{.*}} # TAILCALL
+
+; WINDOWS-LABEL: f_thunk:
+; WINDOWS-NOT: mov{{.}}ps
+; WINDOWS-DAG: movq %rdx, {{.*}}
+; WINDOWS-DAG: movq %rcx, {{.*}}
+; WINDOWS-DAG: movq %r8, {{.*}}
+; WINDOWS-DAG: movq %r9, {{.*}}
+; WINDOWS-NOT: mov{{.}}ps
+; WINDOWS: callq get_f
+; WINDOWS-NOT: mov{{.}}ps
+; WINDOWS-DAG: movq {{.*}}, %rdx
+; WINDOWS-DAG: movq {{.*}}, %rcx
+; WINDOWS-DAG: movq {{.*}}, %r8
+; WINDOWS-DAG: movq {{.*}}, %r9
+; WINDOWS-NOT: mov{{.}}ps
+; WINDOWS: jmpq *{{.*}} # TAILCALL
+
+; This thunk shouldn't require any spills and reloads, assuming the register
+; allocator knows what it's doing.
+
+define void @g_thunk(i8* %fptr_i8, ...) {
+ %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
+ musttail call void (i8*, ...)* %fptr(i8* %fptr_i8, ...)
+ ret void
+}
+
+; LINUX-LABEL: g_thunk:
+; LINUX-NOT: movq
+; LINUX: jmpq *%rdi # TAILCALL
+
+; WINDOWS-LABEL: g_thunk:
+; WINDOWS-NOT: movq
+; WINDOWS: jmpq *%rcx # TAILCALL
+
+; Do a simple multi-exit multi-bb test.
+
+%struct.Foo = type { i1, i8*, i8* }
+
+@g = external global i32
+
+define void @h_thunk(%struct.Foo* %this, ...) {
+ %cond_p = getelementptr %struct.Foo* %this, i32 0, i32 0
+ %cond = load i1* %cond_p
+ br i1 %cond, label %then, label %else
+
+then:
+ %a_p = getelementptr %struct.Foo* %this, i32 0, i32 1
+ %a_i8 = load i8** %a_p
+ %a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
+ musttail call void (%struct.Foo*, ...)* %a(%struct.Foo* %this, ...)
+ ret void
+
+else:
+ %b_p = getelementptr %struct.Foo* %this, i32 0, i32 2
+ %b_i8 = load i8** %b_p
+ %b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
+ store i32 42, i32* @g
+ musttail call void (%struct.Foo*, ...)* %b(%struct.Foo* %this, ...)
+ ret void
+}
+
+; LINUX-LABEL: h_thunk:
+; LINUX: jne
+; LINUX: jmpq *{{.*}} # TAILCALL
+; LINUX: jmpq *{{.*}} # TAILCALL
+; WINDOWS-LABEL: h_thunk:
+; WINDOWS: jne
+; WINDOWS: jmpq *{{.*}} # TAILCALL
+; WINDOWS: jmpq *{{.*}} # TAILCALL
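
A note on the %al and XMM checks above: under the SysV x86-64 calling
convention, the caller of a variadic function reports the number of
vector argument registers it used in %al. A forwarding thunk cannot
know that count, so @f_thunk must preserve the incoming %al and all
eight XMM argument registers across the call to @get_f. Windows x64
has no such %al protocol and passes variadic floating-point arguments
in integer registers, which is why the WINDOWS checks assert that no
movaps/movups instructions appear. The caller below is a hypothetical
illustration with an invented name, not part of the test:

// Hypothetical caller (invented name, illustration only). The two
// doubles travel in %xmm0 and %xmm1, so the compiler sets %al to 2
// (e.g. "movb $2, %al") immediately before the call.
extern "C" void logf_impl(const char *fmt, ...);
void caller() {
  logf_impl("%f %f", 1.0, 2.0);
}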