summaryrefslogtreecommitdiffstats
path: root/llvm/test
diff options
context:
space:
mode:
authorHans Wennborg <hans@hanshq.net>2016-04-07 00:05:49 +0000
committerHans Wennborg <hans@hanshq.net>2016-04-07 00:05:49 +0000
commitab16be799c24f3d10e71463b0a943723f9972d3c (patch)
tree976540ab68147897ad374803c88fd2cc62f13797 /llvm/test
parent4c20bef1ef1d29e8824dbf68ee91f072dd1b2f09 (diff)
downloadbcm5719-llvm-ab16be799c24f3d10e71463b0a943723f9972d3c.tar.gz
bcm5719-llvm-ab16be799c24f3d10e71463b0a943723f9972d3c.zip
Re-commit r265039 "[X86] Merge adjacent stack adjustments in eliminateCallFramePseudoInstr (PR27140)"
Third time's the charm? The previous attempt (r265345) caused ASan test failures on X86, as broken CFI caused stack traces to not work. This version of the patch makes sure not to merge with stack adjustments that have CFI, and to not add merged instructions' offsets to the CFI about to be generated. This is already covered by the lit tests; I just got the expectations wrong previously. llvm-svn: 265623
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-push.ll2
-rw-r--r--llvm/test/CodeGen/X86/force-align-stack-alloca.ll14
-rw-r--r--llvm/test/CodeGen/X86/localescape.ll3
-rw-r--r--llvm/test/CodeGen/X86/memset-2.ll3
-rw-r--r--llvm/test/CodeGen/X86/movtopush.ll64
-rw-r--r--llvm/test/CodeGen/X86/push-cfi-debug.ll2
-rw-r--r--llvm/test/CodeGen/X86/push-cfi.ll2
8 files changed, 75 insertions, 17 deletions
diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
index eae0ec21c09..acd32e49e60 100644
--- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
+++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
@@ -1,6 +1,6 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=i686-unknown-linux -relocation-model=static -stats 2>&1 | \
-; RUN: grep asm-printer | grep 15
+; RUN: grep asm-printer | grep 14
;
; It's possible to schedule this in 14 instructions by avoiding
; callee-save registers, but the scheduler isn't currently that
diff --git a/llvm/test/CodeGen/X86/fold-push.ll b/llvm/test/CodeGen/X86/fold-push.ll
index eaf91351021..9d3afd1c449 100644
--- a/llvm/test/CodeGen/X86/fold-push.ll
+++ b/llvm/test/CodeGen/X86/fold-push.ll
@@ -14,7 +14,7 @@ define void @test(i32 %a, i32 %b) optsize nounwind {
; SLM: movl (%esp), [[RELOAD:%e..]]
; SLM-NEXT: pushl [[RELOAD]]
; CHECK: calll
-; CHECK-NEXT: addl $4, %esp
+; CHECK-NEXT: addl $8, %esp
%c = add i32 %a, %b
call void @foo(i32 %c)
call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
diff --git a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
index d0cf3417008..8d42680e199 100644
--- a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
+++ b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
@@ -32,15 +32,21 @@ define i64 @g(i32 %i) nounwind {
; CHECK: movl %{{...}}, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
;
-; Next we set up the memset call, and then undo it.
+; Next we set up the memset call.
; CHECK: subl $20, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
+; CHECK: pushl
+; CHECK: pushl
+; CHECK: pushl
; CHECK: calll memset
-; CHECK-NEXT: addl $32, %esp
+;
+; Deallocating 32 bytes of outgoing call frame for memset and
+; allocating 28 bytes for calling f yields a 4-byte adjustment:
+; CHECK-NEXT: addl $4, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
;
-; Next we set up the call to 'f'.
-; CHECK: subl $28, %esp
+; And move on to call 'f', and then restore the stack.
+; CHECK: pushl
; CHECK-NOT: {{[^ ,]*}}, %esp
; CHECK: calll f
; CHECK-NEXT: addl $32, %esp
diff --git a/llvm/test/CodeGen/X86/localescape.ll b/llvm/test/CodeGen/X86/localescape.ll
index 07c3b7f4a35..10ab8dd9672 100644
--- a/llvm/test/CodeGen/X86/localescape.ll
+++ b/llvm/test/CodeGen/X86/localescape.ll
@@ -137,6 +137,5 @@ define void @alloc_func_no_frameaddr() {
; X86: movl $13, (%esp)
; X86: pushl $0
; X86: calll _print_framealloc_from_fp
-; X86: addl $4, %esp
-; X86: addl $8, %esp
+; X86: addl $12, %esp
; X86: retl
diff --git a/llvm/test/CodeGen/X86/memset-2.ll b/llvm/test/CodeGen/X86/memset-2.ll
index d7983b136bf..e9253d36ed4 100644
--- a/llvm/test/CodeGen/X86/memset-2.ll
+++ b/llvm/test/CodeGen/X86/memset-2.ll
@@ -6,8 +6,7 @@ declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
define fastcc void @t1() nounwind {
; CHECK-LABEL: t1:
; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: subl $12, %esp
-; CHECK-NEXT: subl $4, %esp
+; CHECK-NEXT: subl $16, %esp
; CHECK-NEXT: pushl $188
; CHECK-NEXT: pushl $0
; CHECK-NEXT: pushl $0
diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index 5dd465ef5a3..d715ccfa8c6 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -2,6 +2,7 @@
; RUN: llc < %s -mtriple=i686-windows -no-x86-call-frame-opt | FileCheck %s -check-prefix=NOPUSH
; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-windows -stackrealign -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
+; RUN: llc < %s -mtriple=i686-pc-linux | FileCheck %s -check-prefix=LINUX
%class.Class = type { i32 }
%struct.s = type { i64 }
@@ -12,6 +13,10 @@ declare x86_thiscallcc void @thiscall(%class.Class* %class, i32 %a, i32 %b, i32
declare void @oneparam(i32 %a)
declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d)
+declare void @inalloca(<{ %struct.s }>* inalloca)
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
; We should get pushes for x86, even though there is a reserved call frame.
; Make sure we don't touch x86-64, and that turning it off works.
@@ -223,8 +228,7 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $20, %esp
+; NORMAL-NEXT: subl $4, %esp
; NORMAL-NEXT: movl 20(%esp), [[E1:%e..]]
; NORMAL-NEXT: movl 24(%esp), [[E2:%e..]]
; NORMAL-NEXT: movl [[E2]], 4(%esp)
@@ -261,7 +265,7 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll *16(%esp)
-; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: addl $24, %esp
define void @test10() optsize {
%stack_fptr = alloca void (i32, i32, i32, i32)*
store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
@@ -314,8 +318,7 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll _good
-; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $20, %esp
+; NORMAL-NEXT: subl $4, %esp
; NORMAL: movl $8, 16(%esp)
; NORMAL-NEXT: movl $7, 12(%esp)
; NORMAL-NEXT: movl $6, 8(%esp)
@@ -358,3 +361,54 @@ entry:
call void @good(i32 %val1, i32 %val2, i32 %val3, i32 %add)
ret i32* %ptr3
}
+
+; Make sure to fold adjacent stack adjustments.
+; LINUX-LABEL: pr27140:
+; LINUX: subl $12, %esp
+; LINUX: .cfi_def_cfa_offset 16
+; LINUX-NOT: sub
+; LINUX: pushl $4
+; LINUX: .cfi_adjust_cfa_offset 4
+; LINUX: pushl $3
+; LINUX: .cfi_adjust_cfa_offset 4
+; LINUX: pushl $2
+; LINUX: .cfi_adjust_cfa_offset 4
+; LINUX: pushl $1
+; LINUX: .cfi_adjust_cfa_offset 4
+; LINUX: calll good
+; LINUX: addl $28, %esp
+; LINUX: .cfi_adjust_cfa_offset -16
+; LINUX-NOT: add
+; LINUX: retl
+define void @pr27140() optsize {
+entry:
+ tail call void @good(i32 1, i32 2, i32 3, i32 4)
+ ret void
+}
+
+; Check that a stack restore (leal -4(%ebp), %esp) doesn't get merged with a
+; stack adjustment (addl $12, %esp). Just because it's a lea doesn't mean it's
+; simply decreasing the stack pointer.
+; NORMAL-LABEL: test14:
+; NORMAL: calll _B_func
+; NORMAL: leal -4(%ebp), %esp
+; NORMAL-NOT: %esp
+; NORMAL: retl
+%struct.A = type { i32, i32 }
+%struct.B = type { i8 }
+declare x86_thiscallcc %struct.B* @B_ctor(%struct.B* returned, %struct.A* byval)
+declare void @B_func(%struct.B* sret, %struct.B*, i32)
+define void @test14(%struct.A* %a) {
+entry:
+ %ref.tmp = alloca %struct.B, align 1
+ %agg.tmp = alloca i64, align 4
+ %tmpcast = bitcast i64* %agg.tmp to %struct.A*
+ %tmp = alloca %struct.B, align 1
+ %0 = bitcast %struct.A* %a to i64*
+ %1 = load i64, i64* %0, align 4
+ store i64 %1, i64* %agg.tmp, align 4
+ %call = call x86_thiscallcc %struct.B* @B_ctor(%struct.B* %ref.tmp, %struct.A* byval %tmpcast)
+ %2 = getelementptr inbounds %struct.B, %struct.B* %tmp, i32 0, i32 0
+ call void @B_func(%struct.B* sret %tmp, %struct.B* %ref.tmp, i32 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/push-cfi-debug.ll b/llvm/test/CodeGen/X86/push-cfi-debug.ll
index 1dfe64e6980..66023992d55 100644
--- a/llvm/test/CodeGen/X86/push-cfi-debug.ll
+++ b/llvm/test/CodeGen/X86/push-cfi-debug.ll
@@ -23,7 +23,7 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0
; CHECK: .cfi_adjust_cfa_offset 4
; CHECK: calll stdfoo
; CHECK: .cfi_adjust_cfa_offset -8
-; CHECK: addl $8, %esp
+; CHECK: addl $20, %esp
; CHECK: .cfi_adjust_cfa_offset -8
define void @test1() #0 !dbg !4 {
entry:
diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll
index 6389708f42c..f0772fc28c6 100644
--- a/llvm/test/CodeGen/X86/push-cfi.ll
+++ b/llvm/test/CodeGen/X86/push-cfi.ll
@@ -82,7 +82,7 @@ cleanup:
; LINUX-NEXT: Ltmp{{[0-9]+}}:
; LINUX-NEXT: .cfi_adjust_cfa_offset 4
; LINUX-NEXT: call
-; LINUX-NEXT: addl $16, %esp
+; LINUX-NEXT: addl $28, %esp
; LINUX: .cfi_adjust_cfa_offset -16
; DARWIN-NOT: .cfi_escape
; DARWIN-NOT: pushl
OpenPOWER on IntegriCloud