author     Jessica Paquette <jpaquette@apple.com>  2019-09-05 20:18:34 +0000
committer  Jessica Paquette <jpaquette@apple.com>  2019-09-05 20:18:34 +0000
commit     20e866709882dbad73d9cbb99f056cd6ea113498 (patch)
tree       95c22040ef9077d06762aeb46975ae32732913b1 /llvm/test
parent     31817731167135870259ef1e7387746345b96a2f (diff)
Recommit "[AArch64][GlobalISel] Teach AArch64CallLowering to handle basic sibling calls"
Recommit basic sibling call lowering (https://reviews.llvm.org/D67189).

The issue was that if you have a return type other than void, call lowering will emit COPYs to get the return value after the call. Disallow sibling calls other than ones that return void for now. Also proactively disable swifterror tail calls for now, since there's a similar issue with COPYs there.

Update call-translator-tail-call.ll to include test cases for each of these things.

llvm-svn: 371114
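As a quick illustration of the restriction described above (a minimal sketch, not part of the commit; the function names @f and @g are hypothetical), a void tail call is eligible for sibling-call lowering, while a value-returning one is not yet, since the IRTranslator must emit COPYs for the result after the call:

; Hypothetical example: @f may be lowered as a sibling call (TCRETURNdi),
; while @g still gets normal call lowering, because its i32 result
; requires COPYs after the BL.
declare void @f()
declare i32 @g()

define void @sibling_call_ok() {
  tail call void @f()
  ret void
}

define i32 @sibling_call_not_yet() {
  %r = tail call i32 @g()
  ret i32 %r
}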
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll     | 195
-rw-r--r--  llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll |   3
2 files changed, 198 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
new file mode 100644
index 00000000000..120beadbc32
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc %s -stop-after=irtranslator -verify-machineinstrs -mtriple aarch64-apple-darwin -global-isel -o - 2>&1 | FileCheck %s --check-prefixes=DARWIN,COMMON
+; RUN: llc %s -stop-after=irtranslator -verify-machineinstrs -mtriple aarch64-windows -global-isel -o - 2>&1 | FileCheck %s --check-prefixes=WINDOWS,COMMON
+
+declare void @simple_fn()
+define void @tail_call() {
+ ; COMMON-LABEL: name: tail_call
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: TCRETURNdi @simple_fn, 0, csr_aarch64_aapcs, implicit $sp
+ tail call void @simple_fn()
+ ret void
+}
+
+; We should get a TCRETURNri here.
+; FIXME: We don't need the COPY.
+define void @indirect_tail_call(void()* %func) {
+ ; COMMON-LABEL: name: indirect_tail_call
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x0
+ ; COMMON: [[COPY:%[0-9]+]]:tcgpr64(p0) = COPY $x0
+ ; COMMON: TCRETURNri [[COPY]](p0), 0, csr_aarch64_aapcs, implicit $sp
+ tail call void %func()
+ ret void
+}
+
+declare void @outgoing_args_fn(i32)
+; Right now, calls with outgoing arguments should not be tail called.
+; TODO: Support this.
+define void @test_outgoing_args(i32 %a) {
+ ; COMMON-LABEL: name: test_outgoing_args
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $w0
+ ; COMMON: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[COPY]](s32)
+ ; COMMON: BL @outgoing_args_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @outgoing_args_fn(i32 %a)
+ ret void
+}
+
+; Right now, we don't want to tail call callees with nonvoid return types, since
+; call lowering will insert COPYs after the call.
+; TODO: Support this.
+declare i32 @nonvoid_ret()
+define i32 @test_nonvoid_ret() {
+ ; COMMON-LABEL: name: test_nonvoid_ret
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @nonvoid_ret, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit-def $w0
+ ; COMMON: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[COPY]](s32)
+ ; COMMON: RET_ReallyLR implicit $w0
+ %call = tail call i32 @nonvoid_ret()
+ ret i32 %call
+}
+
+; Don't want to handle swifterror at all right now, since lowerCall will
+; insert a COPY after the call.
+; TODO: Support this.
+%swift_error = type {i64, i8}
+define float @swifterror(%swift_error** swifterror %ptr) {
+ ; COMMON-LABEL: name: swifterror
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x21
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x21
+ ; COMMON: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]](p0)
+ ; COMMON: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY1]]
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $x21 = COPY [[COPY2]](p0)
+ ; COMMON: BL @swifterror, csr_aarch64_aapcs_swifterror, implicit-def $lr, implicit $sp, implicit $x21, implicit-def $s0, implicit-def $x21
+ ; COMMON: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; COMMON: [[COPY4:%[0-9]+]]:gpr64all = COPY $x21
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $s0 = COPY [[COPY3]](s32)
+ ; COMMON: $x21 = COPY [[COPY4]]
+ ; COMMON: RET_ReallyLR implicit $s0, implicit $x21
+ %call = tail call float @swifterror(%swift_error** swifterror %ptr)
+ ret float %call
+}
+
+define swiftcc float @swifterror_swiftcc(%swift_error** swifterror %ptr) {
+ ; COMMON-LABEL: name: swifterror_swiftcc
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x21
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x21
+ ; COMMON: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]](p0)
+ ; COMMON: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY1]]
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $x21 = COPY [[COPY2]](p0)
+ ; COMMON: BL @swifterror_swiftcc, csr_aarch64_aapcs_swifterror, implicit-def $lr, implicit $sp, implicit $x21, implicit-def $s0, implicit-def $x21
+ ; COMMON: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; COMMON: [[COPY4:%[0-9]+]]:gpr64all = COPY $x21
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $s0 = COPY [[COPY3]](s32)
+ ; COMMON: $x21 = COPY [[COPY4]]
+ ; COMMON: RET_ReallyLR implicit $s0, implicit $x21
+ %call = tail call swiftcc float @swifterror_swiftcc(%swift_error** swifterror %ptr)
+ ret float %call
+}
+
+; Right now, this should not be tail called.
+; TODO: Support this.
+declare void @varargs(i32, double, i64, ...)
+define void @test_varargs() {
+ ; COMMON-LABEL: name: test_varargs
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+ ; COMMON: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; COMMON: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: $w0 = COPY [[C]](s32)
+ ; COMMON: $d0 = COPY [[C1]](s64)
+ ; COMMON: $x1 = COPY [[C2]](s64)
+ ; COMMON: BL @varargs, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12)
+ ret void
+}
+
+; Unsupported calling convention for tail calls. Make sure we never tail call
+; it.
+declare ghccc void @bad_call_conv_fn()
+define void @test_bad_call_conv() {
+ ; COMMON-LABEL: name: test_bad_call_conv
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @bad_call_conv_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call ghccc void @bad_call_conv_fn()
+ ret void
+}
+
+; Shouldn't tail call when the caller has byval arguments.
+define void @test_byval(i8* byval %ptr) {
+ ; COMMON-LABEL: name: test_byval
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; COMMON: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @simple_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @simple_fn()
+ ret void
+}
+
+; Shouldn't tail call when the caller has inreg arguments.
+define void @test_inreg(i8* inreg %ptr) {
+ ; COMMON-LABEL: name: test_inreg
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: liveins: $x0
+ ; COMMON: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @simple_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call void @simple_fn()
+ ret void
+}
+
+; Shouldn't tail call extern_weak functions when the OS doesn't support it.
+; Windows supports this, so we should be able to tail call there.
+declare extern_weak void @extern_weak_fn()
+define void @test_extern_weak() {
+ ; DARWIN-LABEL: name: test_extern_weak
+ ; DARWIN: bb.1 (%ir-block.0):
+ ; DARWIN: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; DARWIN: BL @extern_weak_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; DARWIN: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; DARWIN: RET_ReallyLR
+ ; WINDOWS-LABEL: name: test_extern_weak
+ ; WINDOWS: bb.1 (%ir-block.0):
+ ; WINDOWS: TCRETURNdi @extern_weak_fn, 0, csr_aarch64_aapcs, implicit $sp
+ tail call void @extern_weak_fn()
+ ret void
+}
+
+; Right now, calls with mismatched calling conventions should not be tail called.
+; TODO: Support this.
+declare fastcc void @fast_fn()
+define void @test_mismatched_caller() {
+ ; COMMON-LABEL: name: test_mismatched_caller
+ ; COMMON: bb.1 (%ir-block.0):
+ ; COMMON: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: BL @fast_fn, csr_aarch64_aapcs, implicit-def $lr, implicit $sp
+ ; COMMON: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; COMMON: RET_ReallyLR
+ tail call fastcc void @fast_fn()
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll b/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
index d7e3748d22a..3fb9e320f89 100644
--- a/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
+++ b/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
@@ -1,4 +1,7 @@
; RUN: llc -mtriple aarch64--none-eabi -mattr=+bti < %s | FileCheck %s
+; RUN: llc -mtriple aarch64--none-eabi -global-isel -global-isel-abort=2 -pass-remarks-missed=gisel* -mattr=+bti %s -verify-machineinstrs -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,FALLBACK
+
+; FALLBACK: remark: <unknown>:0:0: unable to translate instruction: call: ' tail call void %p()' (in function: bti_enabled)
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-arm-none-eabi"