diff options
| author | Charles Davis <cdavis5x@gmail.com> | 2015-08-25 23:27:41 +0000 |
|---|---|---|
| committer | Charles Davis <cdavis5x@gmail.com> | 2015-08-25 23:27:41 +0000 |
| commit | 119525914c1b2700011555da404167ddd032c890 (patch) | |
| tree | 8ed66d047d247c9b811e2a5ca3cc244df6893ade /llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll | |
| parent | 54be3b1f03dde06f0c4e97ca5f82e44e6b530823 (diff) | |
| download | bcm5719-llvm-119525914c1b2700011555da404167ddd032c890.tar.gz bcm5719-llvm-119525914c1b2700011555da404167ddd032c890.zip | |
Make variable argument intrinsics behave correctly in a Win64 CC function.
Summary:
This change makes the variable argument intrinsics, `llvm.va_start` and
`llvm.va_copy`, and the `va_arg` instruction behave as they do on Windows
inside a `CallingConv::X86_64_Win64` function. It's needed for a Clang patch
of mine that adds support for GCC's `__builtin_ms_va_list` constructs.
Reviewers: nadav, asl, eugenis
CC: llvm-commits
Differential Revision: http://llvm-reviews.chandlerc.com/D1622
llvm-svn: 245990
Diffstat (limited to 'llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll | 108 |
1 file changed, 108 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll b/llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll new file mode 100644 index 00000000000..e3436521a5b --- /dev/null +++ b/llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll @@ -0,0 +1,108 @@ +; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-linux-gnu | FileCheck %s + +; Verify that the var arg parameters which are passed in registers are stored +; in home stack slots allocated by the caller and that AP is correctly +; calculated. +define x86_64_win64cc void @average_va(i32 %count, ...) nounwind { +entry: +; CHECK: pushq +; CHECK: movq %r9, 40(%rsp) +; CHECK: movq %r8, 32(%rsp) +; CHECK: movq %rdx, 24(%rsp) +; CHECK: leaq 24(%rsp), %rax + + %ap = alloca i8*, align 8 ; <i8**> [#uses=1] + %ap.0 = bitcast i8** %ap to i8* + call void @llvm.va_start(i8* %ap.0) + ret void +} + +declare void @llvm.va_start(i8*) nounwind +declare void @llvm.va_copy(i8*, i8*) nounwind +declare void @llvm.va_end(i8*) nounwind + +; CHECK-LABEL: f5: +; CHECK: pushq +; CHECK: leaq 56(%rsp), +define x86_64_win64cc i8** @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + call void @llvm.va_start(i8* %ap.0) + ret i8** %ap +} + +; CHECK-LABEL: f4: +; CHECK: pushq +; CHECK: leaq 48(%rsp), +define x86_64_win64cc i8** @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + call void @llvm.va_start(i8* %ap.0) + ret i8** %ap +} + +; CHECK-LABEL: f3: +; CHECK: pushq +; CHECK: leaq 40(%rsp), +define x86_64_win64cc i8** @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + call void @llvm.va_start(i8* %ap.0) + ret i8** %ap +} + +; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes +; are copied using va_copy. 
+ +; CHECK-LABEL: copy1: +; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]] +; CHECK: movq [[REG_copy1]], 8(%rsp) +; CHECK: movq [[REG_copy1]], (%rsp) +; CHECK: ret +define x86_64_win64cc void @copy1(i64 %a0, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %cp = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + %cp.0 = bitcast i8** %cp to i8* + call void @llvm.va_start(i8* %ap.0) + call void @llvm.va_copy(i8* %cp.0, i8* %ap.0) + ret void +} + +; CHECK-LABEL: copy4: +; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]] +; CHECK: movq [[REG_copy4]], 8(%rsp) +; CHECK: movq [[REG_copy4]], (%rsp) +; CHECK: ret +define x86_64_win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %cp = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + %cp.0 = bitcast i8** %cp to i8* + call void @llvm.va_start(i8* %ap.0) + call void @llvm.va_copy(i8* %cp.0, i8* %ap.0) + ret void +} + +; CHECK-LABEL: arg4: +; va_start: +; CHECK: leaq 48(%rsp), [[REG_arg4_1:%[a-z]+]] +; CHECK: movq [[REG_arg4_1]], (%rsp) +; va_arg: +; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]] +; CHECK: movq [[REG_arg4_2]], (%rsp) +; CHECK: movl 48(%rsp), %eax +; CHECK: ret +define x86_64_win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind { +entry: + %ap = alloca i8*, align 8 + %ap.0 = bitcast i8** %ap to i8* + call void @llvm.va_start(i8* %ap.0) + %tmp = va_arg i8** %ap, i32 + ret i32 %tmp +} |

