Diffstat (limited to 'llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll')
 llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll | 195 +++++++
 1 file changed, 195 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
new file mode 100644
index 00000000000..120beadbc32
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc %s -stop-after=irtranslator -verify-machineinstrs -mtriple aarch64-apple-darwin -global-isel -o - 2>&1 | FileCheck %s --check-prefixes=DARWIN,COMMON
+; RUN: llc %s -stop-after=irtranslator -verify-machineinstrs -mtriple aarch64-windows -global-isel -o - 2>&1 | FileCheck %s --check-prefixes=WINDOWS,COMMON
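+;
+; The CHECK lines below are autogenerated; after changing the IR they can be
+; refreshed with update_mir_test_checks.py. A minimal sketch of the usual
+; invocation (the --llc-binary path is an assumption about your build layout):
+;   llvm/utils/update_mir_test_checks.py --llc-binary build/bin/llc \
+;     llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll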
+
+declare void @simple_fn()
+define void @tail_call() {
+ ; COMMON-LABEL: name: tail_call
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: TCRETURNdi @simple_fn, 0, csr_aarch64_aapcs, implicit $sp
+ tail call void @simple_fn()
+ ret void
+}
+
+; We should get a TCRETURNri here.
+; FIXME: We don't need the COPY.
+define void @indirect_tail_call(void()* %func) {
+ ; COMMON-LABEL: name: indirect_tail_call
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x0
+ ; COMMON: [[COPY:%[0-9]+]]:tcgpr64(p0) = COPY $x0
+ ; COMMON: TCRETURNri [[COPY]](p0), 0, csr_aarch64_aapcs, implicit $sp
+ tail call void %func()
+ ret void
+}
+
+declare void @outgoing_args_fn(i32)
+; Right now, callees with outgoing arguments should not be tail called.
+; TODO: Support this.
+define void @test_outgoing_args(i32 %a) {
+ ; COMMON-LABEL: name: test_outgoing_args
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $w0
+ ; COMMON: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[COPY]](s32)
+ ; COMMON: BL @outgoing_args_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @outgoing_args_fn(i32 %a)
+ ret void
+}
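+
+; Once outgoing arguments are supported, a plausible lowering (a sketch, not
+; generated by llc; modeled on the TCRETURNdi form above) would pass the
+; argument in $w0 and mark it as an implicit use of the tail call:
+;   $w0 = COPY [[COPY]](s32)
+;   TCRETURNdi @outgoing_args_fn, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0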
+
+; Right now, we don't want to tail call callees with nonvoid return types, since
+; call lowering will insert COPYs after the call.
+; TODO: Support this.
+declare i32 @nonvoid_ret()
+define i32 @test_nonvoid_ret() {
+ ; COMMON-LABEL: name: test_nonvoid_ret
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @nonvoid_ret, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit-def $w0
+ ; COMMON: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[COPY]](s32)
+ ; COMMON: RET_ReallyLR implicit $w0
+ %call = tail call i32 @nonvoid_ret()
+ ret i32 %call
+}
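+
+; A tail-called version (again a sketch, not generated by llc) would need no
+; post-call COPYs at all, since the callee's return value in $w0 directly
+; becomes this function's return value:
+;   TCRETURNdi @nonvoid_ret, 0, csr_aarch64_aapcs, implicit $sp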
+
+; We don't want to handle swifterror at all right now, since lowerCall will
+; insert a COPY after the call.
+; TODO: Support this.
+%swift_error = type {i64, i8}
+define float @swifterror(%swift_error** swifterror %ptr) {
+ ; COMMON-LABEL: name: swifterror
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x21
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x21
+ ; COMMON: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]](p0)
+ ; COMMON: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY1]]
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $x21 = COPY [[COPY2]](p0)
+ ; COMMON: BL @swifterror, csr_aarch64_aapcs_swifterror, implicit-def $lr, implicit $sp, implicit $x21, implicit-def $s0, implicit-def $x21
+ ; COMMON: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; COMMON: [[COPY4:%[0-9]+]]:gpr64all = COPY $x21
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $s0 = COPY [[COPY3]](s32)
+ ; COMMON: $x21 = COPY [[COPY4]]
+ ; COMMON: RET_ReallyLR implicit $s0, implicit $x21
+ %call = tail call float @swifterror(%swift_error** swifterror %ptr)
+ ret float %call
+}
+
+define swiftcc float @swifterror_swiftcc(%swift_error** swifterror %ptr) {
+ ; COMMON-LABEL: name: swifterror_swiftcc
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x21
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x21
+ ; COMMON: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]](p0)
+ ; COMMON: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY1]]
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $x21 = COPY [[COPY2]](p0)
+ ; COMMON: BL @swifterror_swiftcc, csr_aarch64_aapcs_swifterror, implicit-def $lr, implicit $sp, implicit $x21, implicit-def $s0, implicit-def $x21
+ ; COMMON: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; COMMON: [[COPY4:%[0-9]+]]:gpr64all = COPY $x21
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $s0 = COPY [[COPY3]](s32)
+ ; COMMON: $x21 = COPY [[COPY4]]
+ ; COMMON: RET_ReallyLR implicit $s0, implicit $x21
+ %call = tail call swiftcc float @swifterror_swiftcc(%swift_error** swifterror %ptr)
+ ret float %call
+}
+
+; Right now, this should not be tail called.
+; TODO: Support this.
+declare void @varargs(i32, double, i64, ...)
+define void @test_varargs() {
+ ; COMMON-LABEL: name: test_varargs
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+ ; COMMON: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; COMMON: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[C]](s32)
+ ; COMMON: $d0 = COPY [[C1]](s64)
+ ; COMMON: $x1 = COPY [[C2]](s64)
+ ; COMMON: BL @varargs, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12)
+ ret void
+}
+
+; ghccc is an unsupported calling convention for tail calls. Make sure we
+; never tail call a function that uses it.
+declare ghccc void @bad_call_conv_fn()
+define void @test_bad_call_conv() {
+ ; COMMON-LABEL: name: test_bad_call_conv
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @bad_call_conv_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call ghccc void @bad_call_conv_fn()
+ ret void
+}
+
+; Shouldn't tail call when the caller has byval arguments.
+define void @test_byval(i8* byval %ptr) {
+ ; COMMON-LABEL: name: test_byval
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; COMMON: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @simple_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @simple_fn()
+ ret void
+}
+
+; Shouldn't tail call when the caller has inreg arguments.
+define void @test_inreg(i8* inreg %ptr) {
+ ; COMMON-LABEL: name: test_inreg
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x0
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @simple_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @simple_fn()
+ ret void
+}
+
+; Shouldn't tail call extern_weak functions when the OS doesn't support it.
+; Windows does support tail calling extern_weak functions, so we should be
+; able to tail call there.
+declare extern_weak void @extern_weak_fn()
+define void @test_extern_weak() {
+ ; DARWIN-LABEL: name: test_extern_weak
+ ; DARWIN: bb.1 (%ir-block.0):
+ ; DARWIN: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; DARWIN: BL @extern_weak_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; DARWIN: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; DARWIN: RET_ReallyLR
+ ; WINDOWS-LABEL: name: test_extern_weak
+ ; WINDOWS: bb.1 (%ir-block.0):
+ ; WINDOWS: TCRETURNdi @extern_weak_fn, 0, csr_aarch64_aapcs, implicit $sp
+ tail call void @extern_weak_fn()
+ ret void
+}
+
+; Right now, calls with mismatched calling conventions should not be tail
+; called.
+; TODO: Support this.
+declare fastcc void @fast_fn()
+define void @test_mismatched_caller() {
+ ; COMMON-LABEL: name: test_mismatched_caller
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @fast_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call fastcc void @fast_fn()
+ ret void
+}