author    Tim Northover <tnorthover@apple.com>  2019-09-12 10:22:23 +0000
committer Tim Northover <tnorthover@apple.com>  2019-09-12 10:22:23 +0000
commit f1c28929125400a1680868f7c6eea720de256779 (patch)
tree   0ef7b4fac0adf3a403b55134b9df4a89616efaaa /llvm/test/CodeGen/AArch64/sibling-call.ll
parent 98534843fb4c14ebe8022143cdcfc2a4ea8d2d02 (diff)
AArch64: support arm64_32, an ILP32 slice for watchOS.
This is the main CodeGen patch to support the arm64_32 watchOS ABI in LLVM. FastISel is mostly disabled for now since it would generate incorrect code for ILP32.

llvm-svn: 371722
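A note on the hunks below: the leading dummy array in these declarations exists to occupy all eight integer argument registers (x0-x7), forcing the trailing i64 parameters onto the stack, which is the situation the sibling-call checks exercise. The patch widens the filler elements from i32 to i64, presumably so each filler is a full 64-bit value and the test's stack layout stays uniform once an ILP32 slice is in play. As a minimal, self-contained sketch of the pattern under test (hypothetical function names, not part of this patch, reusing the test's own RUN line):

; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-ldst-opt=0 | FileCheck %s
declare void @sketch_callee([8 x i64], i64)

define void @sketch_caller([8 x i64], i64 %a) {
; CHECK-LABEL: sketch_caller:
; Caller and callee have identical stack-argument areas, so the caller's
; incoming slot can be reused for the 42 and the call can be lowered to a
; sibling call: a plain branch (b) rather than a branch-and-link (bl).
  tail call void @sketch_callee([8 x i64] undef, i64 42)
  ret void
; CHECK: str {{x[0-9]+}}, [sp]
; CHECK-NEXT: b sketch_callee
}

This mirrors caller_to8_from8 in the diff; conversely, when the callee needs more stack-argument space than the caller received (caller_to16_from8), the transformation is unsafe and a normal bl is emitted.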
Diffstat (limited to 'llvm/test/CodeGen/AArch64/sibling-call.ll')
-rw-r--r--  llvm/test/CodeGen/AArch64/sibling-call.ll  |  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll
index be59f27fa85..a9e0225187e 100644
--- a/llvm/test/CodeGen/AArch64/sibling-call.ll
+++ b/llvm/test/CodeGen/AArch64/sibling-call.ll
@@ -1,8 +1,8 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-ldst-opt=0 | FileCheck %s
declare void @callee_stack0()
-declare void @callee_stack8([8 x i32], i64)
-declare void @callee_stack16([8 x i32], i64, i64)
+declare void @callee_stack8([8 x i64], i64)
+declare void @callee_stack16([8 x i64], i64, i64)
define void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
@@ -12,7 +12,7 @@ define void @caller_to0_from0() nounwind {
; CHECK-NEXT: b callee_stack0
}
-define void @caller_to0_from8([8 x i32], i64) nounwind{
+define void @caller_to0_from8([8 x i64], i64) nounwind{
; CHECK-LABEL: caller_to0_from8:
; CHECK-NEXT: // %bb.
@@ -26,51 +26,51 @@ define void @caller_to8_from0() {
; Caller isn't going to clean up any extra stack we allocate, so it
; can't be a tail call.
- tail call void @callee_stack8([8 x i32] undef, i64 42)
+ tail call void @callee_stack8([8 x i64] undef, i64 42)
ret void
; CHECK: bl callee_stack8
}
-define void @caller_to8_from8([8 x i32], i64 %a) {
+define void @caller_to8_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to8_from8:
; CHECK-NOT: sub sp, sp,
; This should reuse our stack area for the 42
- tail call void @callee_stack8([8 x i32] undef, i64 42)
+ tail call void @callee_stack8([8 x i64] undef, i64 42)
ret void
; CHECK: str {{x[0-9]+}}, [sp]
; CHECK-NEXT: b callee_stack8
}
-define void @caller_to16_from8([8 x i32], i64 %a) {
+define void @caller_to16_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to16_from8:
; Shouldn't be a tail call: we can't use SP+8 because our caller might
; have something there. This may sound obvious but implementation does
; some funky aligning.
- tail call void @callee_stack16([8 x i32] undef, i64 undef, i64 undef)
+ tail call void @callee_stack16([8 x i64] undef, i64 undef, i64 undef)
; CHECK: bl callee_stack16
ret void
}
-define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
+define void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: caller_to8_from24:
; CHECK-NOT: sub sp, sp
; Reuse our area, putting "42" at incoming sp
- tail call void @callee_stack8([8 x i32] undef, i64 42)
+ tail call void @callee_stack8([8 x i64] undef, i64 42)
ret void
; CHECK: str {{x[0-9]+}}, [sp]
; CHECK-NEXT: b callee_stack8
}
-define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
+define void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
; CHECK-LABEL: caller_to16_from16:
; CHECK-NOT: sub sp, sp,
; Here we want to make sure that both loads happen before the stores:
; otherwise either %a or %b will be wrongly clobbered.
- tail call void @callee_stack16([8 x i32] undef, i64 %b, i64 %a)
+ tail call void @callee_stack16([8 x i64] undef, i64 %b, i64 %a)
ret void
; CHECK: ldr [[VAL0:x[0-9]+]],