summary refs log tree commit diff stats
path: root/llvm/test
diff options
context:
space:
mode:
author    Igor Breger <igor.breger@intel.com>  2017-08-30 15:10:15 +0000
committer Igor Breger <igor.breger@intel.com>  2017-08-30 15:10:15 +0000
commit    36d447d8a84533cf58d5a039a4945f8d17173ef9 (patch)
tree      aadb20d87c6893f0e4cbc80f13a5f3e279c9ad04 /llvm/test
parent    1e34508bccd7ab464347e3ad332ae264bdd4dc3a (diff)
download  bcm5719-llvm-36d447d8a84533cf58d5a039a4945f8d17173ef9.tar.gz
download  bcm5719-llvm-36d447d8a84533cf58d5a039a4945f8d17173ef9.zip
[GlobalISel][X86] Support variadic function call.
Summary: Support variadic function call. Port the implementation from X86FastISel.

Reviewers: zvi, guyblank, oren_ben_simhon

Reviewed By: guyblank

Subscribers: rovka, kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D37261

llvm-svn: 312130
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/callingconv.ll74
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll89
2 files changed, 163 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
index 7ffae938dd5..eb7eef7fb5d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -356,3 +356,77 @@ define void @test_abi_exts_call(i8* %addr) {
call void @take_char(i8 zeroext %val)
ret void
}
+
+declare void @variadic_callee(i8*, ...)
+define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
+; X32-LABEL: test_variadic_call_1:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi10:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: movl 16(%esp), %eax
+; X32-NEXT: movl 20(%esp), %ecx
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl (%ecx), %ecx
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: movl %ecx, 4(%esp)
+; X32-NEXT: calll variadic_callee
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_variadic_call_1:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi8:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: movq (%rdi), %rdi
+; X64-NEXT: movl (%rsi), %esi
+; X64-NEXT: movb $0, %al
+; X64-NEXT: callq variadic_callee
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+
+ %addr = load i8*, i8** %addr_ptr
+ %val = load i32, i32* %val_ptr
+ call void (i8*, ...) @variadic_callee(i8* %addr, i32 %val)
+ ret void
+}
+
+define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
+; X32-LABEL: test_variadic_call_2:
+; X32: # BB#0:
+; X32-NEXT: subl $12, %esp
+; X32-NEXT: .Lcfi11:
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: movl 16(%esp), %eax
+; X32-NEXT: movl 20(%esp), %ecx
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl (%ecx), %edx
+; X32-NEXT: movl 4(%ecx), %ecx
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: movl $4, %eax
+; X32-NEXT: leal (%esp,%eax), %eax
+; X32-NEXT: movl %edx, 4(%esp)
+; X32-NEXT: movl %ecx, 4(%eax)
+; X32-NEXT: calll variadic_callee
+; X32-NEXT: addl $12, %esp
+; X32-NEXT: retl
+;
+; X64-LABEL: test_variadic_call_2:
+; X64: # BB#0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: .Lcfi9:
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: movq (%rdi), %rdi
+; X64-NEXT: movq (%rsi), %rcx
+; X64-NEXT: movb $1, %al
+; X64-NEXT: movq %rcx, %xmm0
+; X64-NEXT: callq variadic_callee
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+
+ %addr = load i8*, i8** %addr_ptr
+ %val = load double, double* %val_ptr
+ call void (i8*, ...) @variadic_callee(i8* %addr, double %val)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index 03b765cf48e..6e38e557d24 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -697,3 +697,92 @@ define void @test_abi_exts_call(i8* %addr) {
ret void
}
+declare void @variadic_callee(i8*, ...)
+define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
+; ALL-LABEL: name: test_variadic_call_1
+
+; X32: fixedStack:
+; X32-NEXT: - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: 0,
+; X32-NEXT: isImmutable: true, isAliased: false, callee-saved-register: '' }
+; X32-NEXT: - { id: 1, type: default, offset: 0, size: 4, alignment: 16, stack-id: 0,
+; X32-NEXT: isImmutable: true, isAliased: false, callee-saved-register: '' }
+; X32: %2(p0) = G_FRAME_INDEX %fixed-stack.1
+; X32-NEXT: %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+; X32-NEXT: %3(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: %4(p0) = G_LOAD %0(p0) :: (load 4 from %ir.addr_ptr)
+; X32-NEXT: %5(s32) = G_LOAD %1(p0) :: (load 4 from %ir.val_ptr)
+; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %6(p0) = COPY %esp
+; X32-NEXT: %7(s32) = G_CONSTANT i32 0
+; X32-NEXT: %8(p0) = G_GEP %6, %7(s32)
+; X32-NEXT: G_STORE %4(p0), %8(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: %9(p0) = COPY %esp
+; X32-NEXT: %10(s32) = G_CONSTANT i32 4
+; X32-NEXT: %11(p0) = G_GEP %9, %10(s32)
+; X32-NEXT: G_STORE %5(s32), %11(p0) :: (store 4 into stack + 4, align 0)
+; X32-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: %0(p0) = COPY %rdi
+; X64-NEXT: %1(p0) = COPY %rsi
+; X64-NEXT: %2(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr_ptr)
+; X64-NEXT: %3(s32) = G_LOAD %1(p0) :: (load 4 from %ir.val_ptr)
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %rdi = COPY %2(p0)
+; X64-NEXT: %esi = COPY %3(s32)
+; X64-NEXT: %al = MOV8ri 0
+; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit %al
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ %addr = load i8*, i8** %addr_ptr
+ %val = load i32, i32* %val_ptr
+ call void (i8*, ...) @variadic_callee(i8* %addr, i32 %val)
+ ret void
+}
+
+define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
+; ALL-LABEL: name: test_variadic_call_2
+
+; X32: fixedStack:
+; X32-NEXT: - { id: 0, type: default, offset: 4, size: 4, alignment: 4, stack-id: 0,
+; X32-NEXT: isImmutable: true, isAliased: false, callee-saved-register: '' }
+; X32-NEXT: - { id: 1, type: default, offset: 0, size: 4, alignment: 16, stack-id: 0,
+; X32-NEXT: isImmutable: true, isAliased: false, callee-saved-register: '' }
+; X32: %2(p0) = G_FRAME_INDEX %fixed-stack.1
+; X32-NEXT: %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+; X32-NEXT: %3(p0) = G_FRAME_INDEX %fixed-stack.0
+; X32-NEXT: %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+; X32-NEXT: %4(p0) = G_LOAD %0(p0) :: (load 4 from %ir.addr_ptr)
+; X32-NEXT: %5(s64) = G_LOAD %1(p0) :: (load 8 from %ir.val_ptr, align 4)
+; X32-NEXT: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: %6(p0) = COPY %esp
+; X32-NEXT: %7(s32) = G_CONSTANT i32 0
+; X32-NEXT: %8(p0) = G_GEP %6, %7(s32)
+; X32-NEXT: G_STORE %4(p0), %8(p0) :: (store 4 into stack, align 0)
+; X32-NEXT: %9(p0) = COPY %esp
+; X32-NEXT: %10(s32) = G_CONSTANT i32 4
+; X32-NEXT: %11(p0) = G_GEP %9, %10(s32)
+; X32-NEXT: G_STORE %5(s64), %11(p0) :: (store 8 into stack + 4, align 0)
+; X32-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit %esp
+; X32-NEXT: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
+; X32-NEXT: RET 0
+
+; X64: %1(p0) = COPY %rsi
+; X64-NEXT: %2(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr_ptr)
+; X64-NEXT: %3(s64) = G_LOAD %1(p0) :: (load 8 from %ir.val_ptr)
+; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: %rdi = COPY %2(p0)
+; X64-NEXT: %xmm0 = COPY %3(s64)
+; X64-NEXT: %al = MOV8ri 1
+; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %rdi, implicit %xmm0, implicit %al
+; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
+; X64-NEXT: RET 0
+
+ %addr = load i8*, i8** %addr_ptr
+ %val = load double, double* %val_ptr
+ call void (i8*, ...) @variadic_callee(i8* %addr, double %val)
+ ret void
+}
OpenPOWER on IntegriCloud