Diffstat (limited to 'llvm/test/CodeGen')
 llvm/test/CodeGen/X86/atomic-fp.ll                     |  65
 llvm/test/CodeGen/X86/atomic-load-store-wide.ll        |  58
 llvm/test/CodeGen/X86/atomic-mi.ll                     | 431
 llvm/test/CodeGen/X86/atomic-non-integer.ll            |  54
 llvm/test/CodeGen/X86/misched_phys_reg_assign_order.ll |   2
5 files changed, 307 insertions, 303 deletions
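These test updates track a change in how 32-bit x86 lowers 64-bit atomic loads: instead of a zero-comparand lock cmpxchg8b, the backend can bounce the value through the x87 unit with fildll/fistpll and a stack round-trip. A minimal sketch of the IR pattern the updated CHECK lines cover (function name hypothetical):

define i64 @load_i64(i64* %ptr) {
  ; On 32-bit x86 with x87 but no SSE, this 8-byte atomic load may now
  ; lower to fildll/fistpll plus a stack spill/reload rather than a
  ; zero-comparand lock cmpxchg8b.
  %val = load atomic i64, i64* %ptr seq_cst, align 8
  ret i64 %val
}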
diff --git a/llvm/test/CodeGen/X86/atomic-fp.ll b/llvm/test/CodeGen/X86/atomic-fp.ll
index 23b5b1ecfe1..faeba24abdc 100644
--- a/llvm/test/CodeGen/X86/atomic-fp.ll
+++ b/llvm/test/CodeGen/X86/atomic-fp.ll
@@ -77,14 +77,13 @@ define void @fadd_64r(double* %loc, double %val) nounwind {
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: pushl %esi
; X86-NOSSE-NEXT: andl $-8, %esp
-; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: subl $24, %esp
; X86-NOSSE-NEXT: movl 8(%ebp), %esi
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b (%esi)
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esi)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: faddl 12(%ebp)
@@ -283,13 +282,12 @@ define void @fadd_64g() nounwind {
; X86-NOSSE-NEXT: movl %esp, %ebp
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: andl $-8, %esp
-; X86-NOSSE-NEXT: subl $24, %esp
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b glob64
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll glob64
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fld1
; X86-NOSSE-NEXT: faddl {{[0-9]+}}(%esp)
@@ -484,13 +482,12 @@ define void @fadd_64imm() nounwind {
; X86-NOSSE-NEXT: movl %esp, %ebp
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: andl $-8, %esp
-; X86-NOSSE-NEXT: subl $24, %esp
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b -559038737
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll -559038737
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fld1
; X86-NOSSE-NEXT: faddl {{[0-9]+}}(%esp)
@@ -691,13 +688,12 @@ define void @fadd_64stack() nounwind {
; X86-NOSSE-NEXT: movl %esp, %ebp
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: andl $-8, %esp
-; X86-NOSSE-NEXT: subl $32, %esp
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b (%esp)
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fld1
; X86-NOSSE-NEXT: faddl {{[0-9]+}}(%esp)
@@ -831,15 +827,14 @@ define void @fadd_array(i64* %arg, double %arg1, i64 %arg2) nounwind {
; X86-NOSSE-NEXT: pushl %edi
; X86-NOSSE-NEXT: pushl %esi
; X86-NOSSE-NEXT: andl $-8, %esp
-; X86-NOSSE-NEXT: subl $24, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
; X86-NOSSE-NEXT: movl 20(%ebp), %esi
; X86-NOSSE-NEXT: movl 8(%ebp), %edi
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b (%edi,%esi,8)
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%edi,%esi,8)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: faddl 12(%ebp)
diff --git a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
index d1b280661a3..7be6cb2738c 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -45,22 +45,21 @@ define i64 @test2(i64* %ptr) {
;
; NOSSE-LABEL: test2:
; NOSSE: # %bb.0:
-; NOSSE-NEXT: pushl %ebx
+; NOSSE-NEXT: pushl %ebp
; NOSSE-NEXT: .cfi_def_cfa_offset 8
-; NOSSE-NEXT: pushl %esi
-; NOSSE-NEXT: .cfi_def_cfa_offset 12
-; NOSSE-NEXT: .cfi_offset %esi, -12
-; NOSSE-NEXT: .cfi_offset %ebx, -8
-; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
-; NOSSE-NEXT: xorl %eax, %eax
-; NOSSE-NEXT: xorl %edx, %edx
-; NOSSE-NEXT: xorl %ecx, %ecx
-; NOSSE-NEXT: xorl %ebx, %ebx
-; NOSSE-NEXT: lock cmpxchg8b (%esi)
-; NOSSE-NEXT: popl %esi
-; NOSSE-NEXT: .cfi_def_cfa_offset 8
-; NOSSE-NEXT: popl %ebx
-; NOSSE-NEXT: .cfi_def_cfa_offset 4
+; NOSSE-NEXT: .cfi_offset %ebp, -8
+; NOSSE-NEXT: movl %esp, %ebp
+; NOSSE-NEXT: .cfi_def_cfa_register %ebp
+; NOSSE-NEXT: andl $-8, %esp
+; NOSSE-NEXT: subl $8, %esp
+; NOSSE-NEXT: movl 8(%ebp), %eax
+; NOSSE-NEXT: fildll (%eax)
+; NOSSE-NEXT: fistpll (%esp)
+; NOSSE-NEXT: movl (%esp), %eax
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; NOSSE-NEXT: movl %ebp, %esp
+; NOSSE-NEXT: popl %ebp
+; NOSSE-NEXT: .cfi_def_cfa %esp, 4
; NOSSE-NEXT: retl
%val = load atomic i64, i64* %ptr seq_cst, align 8
ret i64 %val
@@ -102,22 +101,21 @@ define i64 @test4(i64* %ptr) {
;
; NOSSE-LABEL: test4:
; NOSSE: # %bb.0:
-; NOSSE-NEXT: pushl %ebx
-; NOSSE-NEXT: .cfi_def_cfa_offset 8
-; NOSSE-NEXT: pushl %esi
-; NOSSE-NEXT: .cfi_def_cfa_offset 12
-; NOSSE-NEXT: .cfi_offset %esi, -12
-; NOSSE-NEXT: .cfi_offset %ebx, -8
-; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
-; NOSSE-NEXT: xorl %eax, %eax
-; NOSSE-NEXT: xorl %edx, %edx
-; NOSSE-NEXT: xorl %ecx, %ecx
-; NOSSE-NEXT: xorl %ebx, %ebx
-; NOSSE-NEXT: lock cmpxchg8b (%esi)
-; NOSSE-NEXT: popl %esi
+; NOSSE-NEXT: pushl %ebp
; NOSSE-NEXT: .cfi_def_cfa_offset 8
-; NOSSE-NEXT: popl %ebx
-; NOSSE-NEXT: .cfi_def_cfa_offset 4
+; NOSSE-NEXT: .cfi_offset %ebp, -8
+; NOSSE-NEXT: movl %esp, %ebp
+; NOSSE-NEXT: .cfi_def_cfa_register %ebp
+; NOSSE-NEXT: andl $-8, %esp
+; NOSSE-NEXT: subl $8, %esp
+; NOSSE-NEXT: movl 8(%ebp), %eax
+; NOSSE-NEXT: fildll (%eax)
+; NOSSE-NEXT: fistpll (%esp)
+; NOSSE-NEXT: movl (%esp), %eax
+; NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; NOSSE-NEXT: movl %ebp, %esp
+; NOSSE-NEXT: popl %ebp
+; NOSSE-NEXT: .cfi_def_cfa %esp, 4
; NOSSE-NEXT: retl
%val = load atomic volatile i64, i64* %ptr seq_cst, align 8
ret i64 %val
diff --git a/llvm/test/CodeGen/X86/atomic-mi.ll b/llvm/test/CodeGen/X86/atomic-mi.ll
index 492d7ae8f2d..f660d3311fd 100644
--- a/llvm/test/CodeGen/X86/atomic-mi.ll
+++ b/llvm/test/CodeGen/X86/atomic-mi.ll
@@ -331,20 +331,22 @@ define void @add_64i(i64* %p) {
;
; X32-LABEL: add_64i:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl $2, %ebx
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl (%esi), %eax
@@ -355,10 +357,11 @@ define void @add_64i(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB14_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'addq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -375,22 +378,24 @@ define void @add_64r(i64* %p, i64 %v) {
;
; X32-LABEL: add_64r:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl 12(%ebp), %ebx
+; X32-NEXT: adcl 16(%ebp), %ecx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
@@ -399,10 +404,11 @@ define void @add_64r(i64* %p, i64 %v) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB15_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'addq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -565,22 +571,24 @@ define void @sub_64r(i64* %p, i64 %v) {
;
; X32-LABEL: sub_64r:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: subl 12(%ebp), %ebx
+; X32-NEXT: sbbl 16(%ebp), %ecx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
@@ -589,10 +597,11 @@ define void @sub_64r(i64* %p, i64 %v) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB23_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'subq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -737,19 +746,21 @@ define void @and_64i(i64* %p) {
;
; X32-LABEL: and_64i:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
; X32-NEXT: andl $2, %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
@@ -760,10 +771,11 @@ define void @and_64i(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB31_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'andq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -780,22 +792,24 @@ define void @and_64r(i64* %p, i64 %v) {
;
; X32-LABEL: and_64r:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: andl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: andl 16(%ebp), %ecx
+; X32-NEXT: andl 12(%ebp), %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
@@ -804,10 +818,11 @@ define void @and_64r(i64* %p, i64 %v) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB32_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'andq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -973,20 +988,22 @@ define void @or_64i(i64* %p) {
;
; X32-LABEL: or_64i:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl $2, %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
@@ -996,10 +1013,11 @@ define void @or_64i(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB41_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'orq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -1016,22 +1034,24 @@ define void @or_64r(i64* %p, i64 %v) {
;
; X32-LABEL: or_64r:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: orl 16(%ebp), %ecx
+; X32-NEXT: orl 12(%ebp), %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
@@ -1040,10 +1060,11 @@ define void @or_64r(i64* %p, i64 %v) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB42_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'orq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -1209,20 +1230,22 @@ define void @xor_64i(i64* %p) {
;
; X32-LABEL: xor_64i:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: xorl $2, %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
@@ -1232,10 +1255,11 @@ define void @xor_64i(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB51_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'xorq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -1252,22 +1276,24 @@ define void @xor_64r(i64* %p, i64 %v) {
;
; X32-LABEL: xor_64r:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: xorl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: xorl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: xorl 16(%ebp), %ecx
+; X32-NEXT: xorl 12(%ebp), %ebx
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
@@ -1276,10 +1302,11 @@ define void @xor_64r(i64* %p, i64 %v) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB52_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'xorq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -1406,20 +1433,22 @@ define void @inc_64(i64* %p) {
;
; X32-LABEL: inc_64:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl $1, %ebx
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl (%esi), %eax
@@ -1430,10 +1459,11 @@ define void @inc_64(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB58_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; SLOW_INC-LABEL: inc_64:
@@ -1551,20 +1581,22 @@ define void @dec_64(i64* %p) {
;
; X32-LABEL: dec_64:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl $-1, %ebx
; X32-NEXT: adcl $-1, %ecx
; X32-NEXT: movl (%esi), %eax
@@ -1575,10 +1607,11 @@ define void @dec_64(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB63_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; SLOW_INC-LABEL: dec_64:
@@ -1681,20 +1714,22 @@ define void @not_64(i64* %p) {
;
; X32-LABEL: not_64:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%esi)
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: .cfi_offset %esi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
+; X32-NEXT: movl (%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: notl %ecx
; X32-NEXT: notl %ebx
; X32-NEXT: movl (%esi), %eax
@@ -1705,10 +1740,11 @@ define void @not_64(i64* %p) {
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB68_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'notq'.
%1 = load atomic i64, i64* %p acquire, align 8
@@ -1803,40 +1839,37 @@ define void @neg_64(i64* %p) {
;
; X32-LABEL: neg_64:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 12
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: .cfi_def_cfa_register %ebp
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
; X32-NEXT: .cfi_offset %esi, -16
-; X32-NEXT: .cfi_offset %edi, -12
-; X32-NEXT: .cfi_offset %ebx, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: movl 8(%ebp), %esi
+; X32-NEXT: fildll (%esi)
+; X32-NEXT: fistpll (%esp)
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: lock cmpxchg8b (%edi)
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: negl %ebx
-; X32-NEXT: sbbl %edx, %esi
-; X32-NEXT: movl (%edi), %eax
-; X32-NEXT: movl 4(%edi), %edx
+; X32-NEXT: subl (%esp), %ebx
+; X32-NEXT: sbbl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl (%esi), %eax
+; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB73_1: # %atomicrmw.start
; X32-NEXT: # =>This Inner Loop Header: Depth=1
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: lock cmpxchg8b (%edi)
+; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB73_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
+; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: popl %edi
-; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 4
+; X32-NEXT: popl %ebp
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; We do not check X86-32 as it cannot do 'negq'.
%1 = load atomic i64, i64* %p acquire, align 8
diff --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll
index a0ede060eb3..5d54eca05b3 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -448,28 +448,17 @@ define double @load_double(double* %fptr) {
;
; X86-NOSSE-LABEL: load_double:
; X86-NOSSE: # %bb.0:
-; X86-NOSSE-NEXT: pushl %ebx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: pushl %esi
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 12
-; X86-NOSSE-NEXT: subl $12, %esp
+; X86-NOSSE-NEXT: subl $20, %esp
; X86-NOSSE-NEXT: .cfi_def_cfa_offset 24
-; X86-NOSSE-NEXT: .cfi_offset %esi, -12
-; X86-NOSSE-NEXT: .cfi_offset %ebx, -8
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b (%esi)
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, (%esp)
; X86-NOSSE-NEXT: fldl (%esp)
-; X86-NOSSE-NEXT: addl $12, %esp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 12
-; X86-NOSSE-NEXT: popl %esi
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: popl %ebx
+; X86-NOSSE-NEXT: addl $20, %esp
; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
@@ -827,28 +816,17 @@ define double @load_double_seq_cst(double* %fptr) {
;
; X86-NOSSE-LABEL: load_double_seq_cst:
; X86-NOSSE: # %bb.0:
-; X86-NOSSE-NEXT: pushl %ebx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: pushl %esi
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 12
-; X86-NOSSE-NEXT: subl $12, %esp
+; X86-NOSSE-NEXT: subl $20, %esp
; X86-NOSSE-NEXT: .cfi_def_cfa_offset 24
-; X86-NOSSE-NEXT: .cfi_offset %esi, -12
-; X86-NOSSE-NEXT: .cfi_offset %ebx, -8
-; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOSSE-NEXT: xorl %eax, %eax
-; X86-NOSSE-NEXT: xorl %edx, %edx
-; X86-NOSSE-NEXT: xorl %ecx, %ecx
-; X86-NOSSE-NEXT: xorl %ebx, %ebx
-; X86-NOSSE-NEXT: lock cmpxchg8b (%esi)
-; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: movl %eax, (%esp)
; X86-NOSSE-NEXT: fldl (%esp)
-; X86-NOSSE-NEXT: addl $12, %esp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 12
-; X86-NOSSE-NEXT: popl %esi
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: popl %ebx
+; X86-NOSSE-NEXT: addl $20, %esp
; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/misched_phys_reg_assign_order.ll b/llvm/test/CodeGen/X86/misched_phys_reg_assign_order.ll
index d9548b98839..fd40d7f92e9 100644
--- a/llvm/test/CodeGen/X86/misched_phys_reg_assign_order.ll
+++ b/llvm/test/CodeGen/X86/misched_phys_reg_assign_order.ll
@@ -49,4 +49,4 @@ k.end: ; preds = %entry
declare i32 @m()
-attributes #0 = { "no-frame-pointer-elim-non-leaf" }
+attributes #0 = { noimplicitfloat "no-frame-pointer-elim-non-leaf" }
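The misched test presumably gains noimplicitfloat so that its 64-bit atomic load keeps the cmpxchg8b lowering its CHECK lines expect; with implicit FP use barred, the fildll/fistpll path shown above is unavailable. A hedged sketch of the attribute's effect (names hypothetical):

define void @keep_cmpxchg8b(i64* %p) #0 {
  ; With noimplicitfloat, the backend may not materialize x87/SSE
  ; instructions on its own, so this load stays on lock cmpxchg8b.
  %v = load atomic i64, i64* %p seq_cst, align 8
  ret void
}
attributes #0 = { noimplicitfloat }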