author    Hans Wennborg <hans@hanshq.net>  2016-03-31 20:27:30 +0000
committer Hans Wennborg <hans@hanshq.net>  2016-03-31 20:27:30 +0000
commit    132cd621216b7b09a170b9f10aa7494a87ce82fc (patch)
tree      8b059861d4ec1b20a379309c2c0993dde1742325 /llvm/test/CodeGen
parent    aab59b7a2871f79c80c77aa21054c331722b521a (diff)
Revert r265039 "[X86] Merge adjacent stack adjustments in eliminateCallFramePseudoInstr (PR27140)"
I think it might have caused these build breakages:
http://lab.llvm.org:8011/builders/clang-x86-win2008-selfhost/builds/7234/steps/build%20stage%202/logs/stdio
http://lab.llvm.org:8011/builders/sanitizer-windows/builds/19566/steps/run%20tests/logs/stdio

llvm-svn: 265046
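For reference, the effect of the reverted merge can be seen in the movtopush.ll expectations restored below. A minimal sketch of the two forms, illustrative only and not taken from actual compiler output:

    # Without the merge (the behavior this revert restores):
    calll _good
    addl $16, %esp      # tear down the 16-byte call frame used by the pushes
    subl $20, %esp      # immediately allocate a 20-byte frame for the next call

    # With r265039's merge, the two adjacent adjustments fold into one:
    calll _good
    subl $4, %esp       # folded: 20 - 16 = 4 bytes still to allocate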
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll     2
-rw-r--r--  llvm/test/CodeGen/X86/fold-push.ll                  2
-rw-r--r--  llvm/test/CodeGen/X86/force-align-stack-alloca.ll  14
-rw-r--r--  llvm/test/CodeGen/X86/localescape.ll                3
-rw-r--r--  llvm/test/CodeGen/X86/memset-2.ll                   2
-rw-r--r--  llvm/test/CodeGen/X86/movtopush.ll                 33
-rw-r--r--  llvm/test/CodeGen/X86/push-cfi-debug.ll             4
-rw-r--r--  llvm/test/CodeGen/X86/push-cfi.ll                   4
8 files changed, 18 insertions, 46 deletions
diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
index acd32e49e60..eae0ec21c09 100644
--- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
+++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
@@ -1,6 +1,6 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=i686-unknown-linux -relocation-model=static -stats 2>&1 | \
-; RUN: grep asm-printer | grep 14
+; RUN: grep asm-printer | grep 15
;
; It's possible to schedule this in 14 instructions by avoiding
; callee-save registers, but the scheduler isn't currently that
diff --git a/llvm/test/CodeGen/X86/fold-push.ll b/llvm/test/CodeGen/X86/fold-push.ll
index 9d3afd1c449..eaf91351021 100644
--- a/llvm/test/CodeGen/X86/fold-push.ll
+++ b/llvm/test/CodeGen/X86/fold-push.ll
@@ -14,7 +14,7 @@ define void @test(i32 %a, i32 %b) optsize nounwind {
; SLM: movl (%esp), [[RELOAD:%e..]]
; SLM-NEXT: pushl [[RELOAD]]
; CHECK: calll
-; CHECK-NEXT: addl $8, %esp
+; CHECK-NEXT: addl $4, %esp
%c = add i32 %a, %b
call void @foo(i32 %c)
call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
diff --git a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
index 8d42680e199..d0cf3417008 100644
--- a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
+++ b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
@@ -32,21 +32,15 @@ define i64 @g(i32 %i) nounwind {
; CHECK: movl %{{...}}, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
;
-; Next we set up the memset call.
+; Next we set up the memset call, and then undo it.
; CHECK: subl $20, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
-; CHECK: pushl
-; CHECK: pushl
-; CHECK: pushl
; CHECK: calll memset
-;
-; Deallocating 32 bytes of outgoing call frame for memset and
-; allocating 28 bytes for calling f yields a 4-byte adjustment:
-; CHECK-NEXT: addl $4, %esp
+; CHECK-NEXT: addl $32, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
;
-; And move on to call 'f', and then restore the stack.
-; CHECK: pushl
+; Next we set up the call to 'f'.
+; CHECK: subl $28, %esp
; CHECK-NOT: {{[^ ,]*}}, %esp
; CHECK: calll f
; CHECK-NEXT: addl $32, %esp
diff --git a/llvm/test/CodeGen/X86/localescape.ll b/llvm/test/CodeGen/X86/localescape.ll
index 10ab8dd9672..07c3b7f4a35 100644
--- a/llvm/test/CodeGen/X86/localescape.ll
+++ b/llvm/test/CodeGen/X86/localescape.ll
@@ -137,5 +137,6 @@ define void @alloc_func_no_frameaddr() {
; X86: movl $13, (%esp)
; X86: pushl $0
; X86: calll _print_framealloc_from_fp
-; X86: addl $12, %esp
+; X86: addl $4, %esp
+; X86: addl $8, %esp
; X86: retl
diff --git a/llvm/test/CodeGen/X86/memset-2.ll b/llvm/test/CodeGen/X86/memset-2.ll
index e9a7b566b1d..7f37b62a28b 100644
--- a/llvm/test/CodeGen/X86/memset-2.ll
+++ b/llvm/test/CodeGen/X86/memset-2.ll
@@ -5,7 +5,7 @@ declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
define fastcc void @t1() nounwind {
; CHECK-LABEL: t1:
-; CHECK: subl $16, %esp
+; CHECK: subl $12, %esp
; CHECK: pushl $188
; CHECK-NEXT: pushl $0
; CHECK-NEXT: pushl $0
diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index ceefb0aa58d..5dd465ef5a3 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -2,7 +2,6 @@
; RUN: llc < %s -mtriple=i686-windows -no-x86-call-frame-opt | FileCheck %s -check-prefix=NOPUSH
; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-windows -stackrealign -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
-; RUN: llc < %s -mtriple=i686-pc-linux | FileCheck %s -check-prefix=LINUX
%class.Class = type { i32 }
%struct.s = type { i64 }
@@ -224,7 +223,8 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
-; NORMAL-NEXT: subl $4, %esp
+; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: subl $20, %esp
; NORMAL-NEXT: movl 20(%esp), [[E1:%e..]]
; NORMAL-NEXT: movl 24(%esp), [[E2:%e..]]
; NORMAL-NEXT: movl [[E2]], 4(%esp)
@@ -261,7 +261,7 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll *16(%esp)
-; NORMAL-NEXT: addl $24, %esp
+; NORMAL-NEXT: addl $16, %esp
define void @test10() optsize {
%stack_fptr = alloca void (i32, i32, i32, i32)*
store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
@@ -314,7 +314,8 @@ entry:
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll _good
-; NORMAL-NEXT: subl $4, %esp
+; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: subl $20, %esp
; NORMAL: movl $8, 16(%esp)
; NORMAL-NEXT: movl $7, 12(%esp)
; NORMAL-NEXT: movl $6, 8(%esp)
@@ -357,27 +358,3 @@ entry:
call void @good(i32 %val1, i32 %val2, i32 %val3, i32 %add)
ret i32* %ptr3
}
-
-; Make sure to fold adjacent stack adjustments.
-; LINUX-LABEL: pr27140:
-; LINUX: subl $12, %esp
-; LINUX: .cfi_def_cfa_offset 16
-; LINUX-NOT: sub
-; LINUX: pushl $4
-; LINUX: .cfi_adjust_cfa_offset 4
-; LINUX: pushl $3
-; LINUX: .cfi_adjust_cfa_offset 4
-; LINUX: pushl $2
-; LINUX: .cfi_adjust_cfa_offset 4
-; LINUX: pushl $1
-; LINUX: .cfi_adjust_cfa_offset 4
-; LINUX: calll good
-; LINUX: addl $28, %esp
-; LINUX: .cfi_adjust_cfa_offset -28
-; LINUX-NOT: add
-; LINUX: retl
-define void @pr27140() optsize {
-entry:
- tail call void @good(i32 1, i32 2, i32 3, i32 4)
- ret void
-}
diff --git a/llvm/test/CodeGen/X86/push-cfi-debug.ll b/llvm/test/CodeGen/X86/push-cfi-debug.ll
index ee4cb8e0e60..cc00fab525a 100644
--- a/llvm/test/CodeGen/X86/push-cfi-debug.ll
+++ b/llvm/test/CodeGen/X86/push-cfi-debug.ll
@@ -23,8 +23,8 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0
; CHECK: .cfi_adjust_cfa_offset 4
; CHECK: calll stdfoo
; CHECK: .cfi_adjust_cfa_offset -8
-; CHECK: addl $20, %esp
-; CHECK: .cfi_adjust_cfa_offset -20
+; CHECK: addl $8, %esp
+; CHECK: .cfi_adjust_cfa_offset -8
define void @test1() #0 !dbg !4 {
entry:
tail call void @foo(i32 1, i32 2) #1, !dbg !10
diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll
index 5498af51f23..6389708f42c 100644
--- a/llvm/test/CodeGen/X86/push-cfi.ll
+++ b/llvm/test/CodeGen/X86/push-cfi.ll
@@ -82,8 +82,8 @@ cleanup:
; LINUX-NEXT: Ltmp{{[0-9]+}}:
; LINUX-NEXT: .cfi_adjust_cfa_offset 4
; LINUX-NEXT: call
-; LINUX-NEXT: addl $28, %esp
-; LINUX: .cfi_adjust_cfa_offset -28
+; LINUX-NEXT: addl $16, %esp
+; LINUX: .cfi_adjust_cfa_offset -16
; DARWIN-NOT: .cfi_escape
; DARWIN-NOT: pushl
define void @test2_nofp() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {