diff options
Diffstat (limited to 'llvm')
 -rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp |  4
 -rw-r--r--  llvm/test/CodeGen/X86/win64_vararg.ll   | 59
 2 files changed, 62 insertions, 1 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 174cc4602d9..a6e894b7cab 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -582,10 +582,12 @@ void X86TargetLowering::resetOperationActions() {
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  if (Subtarget->is64Bit()) {
+  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
+    // TargetInfo::X86_64ABIBuiltinVaList
     setOperationAction(ISD::VAARG           , MVT::Other, Custom);
     setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
   } else {
+    // TargetInfo::CharPtrBuiltinVaList
     setOperationAction(ISD::VAARG           , MVT::Other, Expand);
     setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
   }
diff --git a/llvm/test/CodeGen/X86/win64_vararg.ll b/llvm/test/CodeGen/X86/win64_vararg.ll
index 52bc50922c2..07b7826c000 100644
--- a/llvm/test/CodeGen/X86/win64_vararg.ll
+++ b/llvm/test/CodeGen/X86/win64_vararg.ll
@@ -18,6 +18,7 @@ entry:
 }
 
 declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_copy(i8*, i8*) nounwind
 
 ; CHECK: f5:
 ; CHECK: pushq
@@ -51,3 +52,61 @@ entry:
   call void @llvm.va_start(i8* %ap1)
   ret i8* %ap1
 }
+
+; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes
+; are copied using va_copy.
+; CHECK: copy4:
+; CHECK: subq $16
+; CHECK: leaq 56(%rsp), [[REGISTER:%[a-z]+]]
+; CHECK: movq [[REGISTER]], 8(%rsp)
+; CHECK: movq [[REGISTER]], (%rsp)
+; CHECK: addq $16
+; CHECK: ret
+define void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %cp = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  %cp1 = bitcast i8** %cp to i8*
+  call void @llvm.va_start(i8* %ap1)
+  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+  ret void
+}
+
+; CHECK: copy1:
+; CHECK: subq $16
+; CHECK: leaq 32(%rsp), [[REGISTER:%[a-z]+]]
+; CHECK: movq [[REGISTER]], 8(%rsp)
+; CHECK: movq [[REGISTER]], (%rsp)
+; CHECK: addq $16
+; CHECK: ret
+define void @copy1(i64 %a0, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %cp = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  %cp1 = bitcast i8** %cp to i8*
+  call void @llvm.va_start(i8* %ap1)
+  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+  ret void
+}
+
+; CHECK: arg4:
+; CHECK: pushq
+; va_start:
+; CHECK: leaq 48(%rsp), [[REG1:%[a-z]+]]
+; CHECK: movq [[REG1]], (%rsp)
+; va_arg:
+; CHECK: leaq 52(%rsp), [[REG2:%[a-z]+]]
+; CHECK: movq [[REG2]], (%rsp)
+; CHECK: movl 48(%rsp), %eax
+; CHECK: popq
+; CHECK: ret
+define i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %tmp = va_arg i8** %ap, i32
+  ret i32 %tmp
+}

