summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll | 4
-rw-r--r-- llvm/test/CodeGen/X86/Atomics-64.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/atomic-load-store-wide.ll | 2
-rw-r--r-- llvm/test/CodeGen/X86/atomic-minmax-i6432.ll | 65
-rw-r--r-- llvm/test/CodeGen/X86/atomic128.ll | 315
-rw-r--r-- llvm/test/CodeGen/X86/atomic16.ll | 77
-rw-r--r-- llvm/test/CodeGen/X86/atomic32.ll | 80
-rw-r--r-- llvm/test/CodeGen/X86/atomic64.ll | 41
-rw-r--r-- llvm/test/CodeGen/X86/atomic6432.ll | 92
-rw-r--r-- llvm/test/CodeGen/X86/atomic8.ll | 79
-rw-r--r-- llvm/test/CodeGen/X86/atomic_op.ll | 12
-rw-r--r-- llvm/test/CodeGen/X86/pr5145.ll | 16
12 files changed, 575 insertions, 210 deletions
diff --git a/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
index f9bf3109ea1..850f678c9c2 100644
--- a/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ b/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
@@ -11,9 +11,9 @@ entry:
; CHECK: movl 4([[REG]]), %edx
; CHECK: LBB0_1:
; CHECK: movl %eax, %ebx
-; CHECK: addl {{%[a-z]+}}, %ebx
+; CHECK: addl $1, %ebx
; CHECK: movl %edx, %ecx
-; CHECK: adcl {{%[a-z]+}}, %ecx
+; CHECK: adcl $0, %ecx
; CHECK: lock
; CHECK-NEXT: cmpxchg8b ([[REG]])
; CHECK-NEXT: jne
diff --git a/llvm/test/CodeGen/X86/Atomics-64.ll b/llvm/test/CodeGen/X86/Atomics-64.ll
index f9c25fc8226..c392e947407 100644
--- a/llvm/test/CodeGen/X86/Atomics-64.ll
+++ b/llvm/test/CodeGen/X86/Atomics-64.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=x86-64 > %t.x86-64
-; RUN: llc < %s -march=x86 > %t.x86
+; RUN: llc < %s -march=x86 -mattr=cx16 > %t.x86
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
diff --git a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
index 17e04f05903..7352d5a5800 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=corei7 -march=x86 -verify-machineinstrs | FileCheck %s
; 64-bit load/store on x86-32
; FIXME: The generated code can be substantially improved.
diff --git a/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll b/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
index 1cfbc49ab1c..ffb7a3fd6f6 100644
--- a/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
+++ b/llvm/test/CodeGen/X86/atomic-minmax-i6432.ll
@@ -1,6 +1,5 @@
-; RUN: llc -march=x86 -mattr=+cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
-; RUN: llc -march=x86 -mattr=-cmov -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=NOCMOV
-; RUN: llc -march=x86 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
+; RUN: llc -march=x86 -mattr=+cmov,cx16 -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
+; RUN: llc -march=x86 -mattr=cx16 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC
@sc64 = external global i64
@@ -9,87 +8,39 @@ define void @atomic_maxmin_i6432() {
%1 = atomicrmw max i64* @sc64, i64 5 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setl
-; LINUX: cmpl
-; LINUX: setl
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: cmpl
-; NOCMOV: setl
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setg
-; LINUX: cmpl
-; LINUX: setg
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: cmpl
-; NOCMOV: setg
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: setb
-; LINUX: cmpl
-; LINUX: setb
+; LINUX: seta
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: cmpl
-; NOCMOV: setb
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
-; LINUX: seta
-; LINUX: cmpl
-; LINUX: seta
+; LINUX: setb
; LINUX: cmovne
; LINUX: cmovne
; LINUX: lock
; LINUX-NEXT: cmpxchg8b
; LINUX: jne [[LABEL]]
-; NOCMOV: [[LABEL:.LBB[0-9]+_[0-9]+]]
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: cmpl
-; NOCMOV: seta
-; NOCMOV: jne
-; NOCMOV: jne
-; NOCMOV: lock
-; NOCMOV-NEXT: cmpxchg8b
-; NOCMOV: jne [[LABEL]]
ret void
}
@@ -98,8 +49,8 @@ define void @atomic_maxmin_i6432() {
define void @tf_bug(i8* %ptr) nounwind {
; PIC-LABEL: tf_bug:
-; PIC: movl _id-L1$pb(
-; PIC: movl (_id-L1$pb)+4(
+; PIC-DAG: movl _id-L1$pb(
+; PIC-DAG: movl (_id-L1$pb)+4(
%tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
%tmp2 = add i64 %tmp1, 1
%tmp3 = bitcast i8* %ptr to i64*
diff --git a/llvm/test/CodeGen/X86/atomic128.ll b/llvm/test/CodeGen/X86/atomic128.ll
new file mode 100644
index 00000000000..ddc53a53202
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomic128.ll
@@ -0,0 +1,315 @@
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
+
+@var = global i128 0
+
+define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+; CHECK-LABEL: val_compare_and_swap:
+; CHECK: movq %rsi, %rax
+; CHECK: movq %rcx, %rbx
+; CHECK: movq %r8, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+ %val = extractvalue { i128, i1 } %pair, 0
+ ret i128 %val
+}
+
+define void @fetch_and_nand(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_nand:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rdx, %rcx
+; CHECK: andq [[INCHI]], %rcx
+; CHECK: movq %rax, %rbx
+ ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
+; CHECK: andq %rsi, %rbx
+; CHECK: notq %rbx
+; CHECK: notq %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+ %val = atomicrmw nand i128* %p, i128 %bits release
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_or(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_or:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
+; CHECK: orq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: orq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw or i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_add(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_add:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
+; CHECK: addq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: adcq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw add i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_sub(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_sub:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: movq %rax, %rbx
+ ; INCLO equivalent comes in in %rsi, so it makes sense it stays there.
+; CHECK: subq %rsi, %rbx
+; CHECK: movq %rdx, %rcx
+; CHECK: sbbq [[INCHI]], %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw sub i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_min(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_min:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setle [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw min i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_max(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_max:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setae [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setge [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw max i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umin(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umin:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rsi, %rax
+; CHECK: setbe [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: setbe [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umin i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define void @fetch_and_umax(i128* %p, i128 %bits) {
+; CHECK-LABEL: fetch_and_umax:
+; CHECK-DAG: movq %rdx, [[INCHI:%[a-z0-9]+]]
+; CHECK-DAG: movq (%rdi), %rax
+; CHECK-DAG: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: cmpq %rax, %rsi
+; CHECK: setb [[CMP:%[a-z0-9]+]]
+; CHECK: cmpq [[INCHI]], %rdx
+; CHECK: seta [[HICMP:%[a-z0-9]+]]
+; CHECK: je [[USE_LO:.?LBB[0-9]+_[0-9]+]]
+
+; CHECK: movb [[HICMP]], [[CMP]]
+; CHECK: [[USE_LO]]:
+; CHECK: testb [[CMP]], [[CMP]]
+; CHECK: movq %rsi, %rbx
+; CHECK: cmovneq %rax, %rbx
+; CHECK: movq [[INCHI]], %rcx
+; CHECK: cmovneq %rdx, %rcx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+; CHECK: movq %rax, _var
+; CHECK: movq %rdx, _var+8
+
+ %val = atomicrmw umax i128* %p, i128 %bits seq_cst
+ store i128 %val, i128* @var, align 16
+ ret void
+}
+
+define i128 @atomic_load_seq_cst(i128* %p) {
+; CHECK-LABEL: atomic_load_seq_cst:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p seq_cst, align 16
+ ret i128 %r
+}
+
+define i128 @atomic_load_relaxed(i128* %p) {
+; CHECK-LABEL: atomic_load_relaxed:
+; CHECK: xorl %eax, %eax
+; CHECK: xorl %edx, %edx
+; CHECK: xorl %ebx, %ebx
+; CHECK: xorl %ecx, %ecx
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+
+ %r = load atomic i128* %p monotonic, align 16
+ ret i128 %r
+}
+
+define void @atomic_store_seq_cst(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_seq_cst:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p seq_cst, align 16
+ ret void
+}
+
+define void @atomic_store_release(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_release:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p release, align 16
+ ret void
+}
+
+define void @atomic_store_relaxed(i128* %p, i128 %in) {
+; CHECK-LABEL: atomic_store_relaxed:
+; CHECK: movq %rdx, %rcx
+; CHECK: movq %rsi, %rbx
+; CHECK: movq (%rdi), %rax
+; CHECK: movq 8(%rdi), %rdx
+
+; CHECK: [[LOOP:.?LBB[0-9]+_[0-9]+]]:
+; CHECK: lock
+; CHECK: cmpxchg16b (%rdi)
+; CHECK: jne [[LOOP]]
+
+ store atomic i128 %in, i128* %p unordered, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/atomic16.ll b/llvm/test/CodeGen/X86/atomic16.ll
index 45d3ff46a04..faaa4c49d39 100644
--- a/llvm/test/CodeGen/X86/atomic16.ll
+++ b/llvm/test/CodeGen/X86/atomic16.ll
@@ -4,8 +4,8 @@
@sc16 = external global i16
define void @atomic_fetch_add16() nounwind {
-; X64: atomic_fetch_add16
-; X32: atomic_fetch_add16
+; X64-LABEL: atomic_fetch_add16
+; X32-LABEL: atomic_fetch_add16
entry:
; 32-bit
%t1 = atomicrmw add i16* @sc16, i16 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub16() nounwind {
-; X64: atomic_fetch_sub16
-; X32: atomic_fetch_sub16
+; X64-LABEL: atomic_fetch_sub16
+; X32-LABEL: atomic_fetch_sub16
%t1 = atomicrmw sub i16* @sc16, i16 1 acquire
; X64: lock
; X64: decw
@@ -62,18 +62,18 @@ define void @atomic_fetch_sub16() nounwind {
}
define void @atomic_fetch_and16() nounwind {
-; X64: atomic_fetch_and16
-; X32: atomic_fetch_and16
+; X64-LABEL: atomic_fetch_and16
+; X32-LABEL: atomic_fetch_and16
%t1 = atomicrmw and i16* @sc16, i16 3 acquire
; X64: lock
; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: andw $3
%t2 = atomicrmw and i16* @sc16, i16 5 acquire
-; X64: andw
+; X64: andl
; X64: lock
; X64: cmpxchgw
-; X32: andw
+; X32: andl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
@@ -87,18 +87,18 @@ define void @atomic_fetch_and16() nounwind {
}
define void @atomic_fetch_or16() nounwind {
-; X64: atomic_fetch_or16
-; X32: atomic_fetch_or16
+; X64-LABEL: atomic_fetch_or16
+; X32-LABEL: atomic_fetch_or16
%t1 = atomicrmw or i16* @sc16, i16 3 acquire
; X64: lock
; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: orw $3
%t2 = atomicrmw or i16* @sc16, i16 5 acquire
-; X64: orw
+; X64: orl
; X64: lock
; X64: cmpxchgw
-; X32: orw
+; X32: orl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
@@ -112,18 +112,18 @@ define void @atomic_fetch_or16() nounwind {
}
define void @atomic_fetch_xor16() nounwind {
-; X64: atomic_fetch_xor16
-; X32: atomic_fetch_xor16
+; X64-LABEL: atomic_fetch_xor16
+; X32-LABEL: atomic_fetch_xor16
%t1 = atomicrmw xor i16* @sc16, i16 3 acquire
; X64: lock
; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
; X32: lock
; X32: xorw $3
%t2 = atomicrmw xor i16* @sc16, i16 5 acquire
-; X64: xorw
+; X64: xorl
; X64: lock
; X64: cmpxchgw
-; X32: xorw
+; X32: xorl
; X32: lock
; X32: cmpxchgw
%t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
@@ -137,15 +137,15 @@ define void @atomic_fetch_xor16() nounwind {
}
define void @atomic_fetch_nand16(i16 %x) nounwind {
-; X64: atomic_fetch_nand16
-; X32: atomic_fetch_nand16
+; X64-LABEL: atomic_fetch_nand16
+; X32-LABEL: atomic_fetch_nand16
%t1 = atomicrmw nand i16* @sc16, i16 %x acquire
-; X64: andw
-; X64: notw
+; X64: andl
+; X64: notl
; X64: lock
; X64: cmpxchgw
-; X32: andw
-; X32: notw
+; X32: andl
+; X32: notl
; X32: lock
; X32: cmpxchgw
ret void
@@ -155,12 +155,16 @@ define void @atomic_fetch_nand16(i16 %x) nounwind {
define void @atomic_fetch_max16(i16 %x) nounwind {
%t1 = atomicrmw max i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -171,12 +175,16 @@ define void @atomic_fetch_max16(i16 %x) nounwind {
define void @atomic_fetch_min16(i16 %x) nounwind {
%t1 = atomicrmw min i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movswl
+; X64: movswl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movswl
+; X32: movswl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -187,12 +195,16 @@ define void @atomic_fetch_min16(i16 %x) nounwind {
define void @atomic_fetch_umax16(i16 %x) nounwind {
%t1 = atomicrmw umax i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
@@ -203,11 +215,16 @@ define void @atomic_fetch_umax16(i16 %x) nounwind {
define void @atomic_fetch_umin16(i16 %x) nounwind {
%t1 = atomicrmw umin i16* @sc16, i16 %x acquire
-; X64: cmpw
+; X64: movzwl
+; X64: movzwl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgw
-; X32: cmpw
+
+; X32: movzwl
+; X32: movzwl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgw
diff --git a/llvm/test/CodeGen/X86/atomic32.ll b/llvm/test/CodeGen/X86/atomic32.ll
index 474c0e6a985..4f2cbe0ce2d 100644
--- a/llvm/test/CodeGen/X86/atomic32.ll
+++ b/llvm/test/CodeGen/X86/atomic32.ll
@@ -5,8 +5,8 @@
@sc32 = external global i32
define void @atomic_fetch_add32() nounwind {
-; X64: atomic_fetch_add32
-; X32: atomic_fetch_add32
+; X64-LABEL: atomic_fetch_add32:
+; X32-LABEL: atomic_fetch_add32:
entry:
; 32-bit
%t1 = atomicrmw add i32* @sc32, i32 1 acquire
@@ -35,8 +35,8 @@ entry:
}
define void @atomic_fetch_sub32() nounwind {
-; X64: atomic_fetch_sub32
-; X32: atomic_fetch_sub32
+; X64-LABEL: atomic_fetch_sub32:
+; X32-LABEL: atomic_fetch_sub32:
%t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
@@ -63,8 +63,8 @@ define void @atomic_fetch_sub32() nounwind {
}
define void @atomic_fetch_and32() nounwind {
-; X64: atomic_fetch_and32
-; X32: atomic_fetch_and32
+; X64-LABEL: atomic_fetch_and32:
+; X32-LABEL: atomic_fetch_and32:
%t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
@@ -88,8 +88,8 @@ define void @atomic_fetch_and32() nounwind {
}
define void @atomic_fetch_or32() nounwind {
-; X64: atomic_fetch_or32
-; X32: atomic_fetch_or32
+; X64-LABEL: atomic_fetch_or32:
+; X32-LABEL: atomic_fetch_or32:
%t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
@@ -113,8 +113,8 @@ define void @atomic_fetch_or32() nounwind {
}
define void @atomic_fetch_xor32() nounwind {
-; X64: atomic_fetch_xor32
-; X32: atomic_fetch_xor32
+; X64-LABEL: atomic_fetch_xor32:
+; X32-LABEL: atomic_fetch_xor32:
%t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
@@ -138,8 +138,8 @@ define void @atomic_fetch_xor32() nounwind {
}
define void @atomic_fetch_nand32(i32 %x) nounwind {
-; X64: atomic_fetch_nand32
-; X32: atomic_fetch_nand32
+; X64-LABEL: atomic_fetch_nand32:
+; X32-LABEL: atomic_fetch_nand32:
%t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
@@ -155,19 +155,22 @@ define void @atomic_fetch_nand32(i32 %x) nounwind {
}
define void @atomic_fetch_max32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_max32:
+; X32-LABEL: atomic_fetch_max32:
+
%t1 = atomicrmw max i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jl
+; NOCMOV: subl
+; NOCMOV: jge
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -177,19 +180,23 @@ define void @atomic_fetch_max32(i32 %x) nounwind {
}
define void @atomic_fetch_min32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_min32:
+; X32-LABEL: atomic_fetch_min32:
+; NOCMOV-LABEL: atomic_fetch_min32:
+
%t1 = atomicrmw min i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jg
+; NOCMOV: subl
+; NOCMOV: jle
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -199,19 +206,23 @@ define void @atomic_fetch_min32(i32 %x) nounwind {
}
define void @atomic_fetch_umax32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax32:
+; X32-LABEL: atomic_fetch_umax32:
+; NOCMOV-LABEL: atomic_fetch_umax32:
+
%t1 = atomicrmw umax i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: jb
+; NOCMOV: subl
+; NOCMOV: ja
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -221,19 +232,23 @@ define void @atomic_fetch_umax32(i32 %x) nounwind {
}
define void @atomic_fetch_umin32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin32:
+; X32-LABEL: atomic_fetch_umin32:
+; NOCMOV-LABEL: atomic_fetch_umin32:
+
%t1 = atomicrmw umin i32* @sc32, i32 %x acquire
-; X64: cmpl
+; X64: subl
; X64: cmov
; X64: lock
; X64: cmpxchgl
-; X32: cmpl
+; X32: subl
; X32: cmov
; X32: lock
; X32: cmpxchgl
-; NOCMOV: cmpl
-; NOCMOV: ja
+; NOCMOV: subl
+; NOCMOV: jb
; NOCMOV: lock
; NOCMOV: cmpxchgl
ret void
@@ -243,6 +258,9 @@ define void @atomic_fetch_umin32(i32 %x) nounwind {
}
define void @atomic_fetch_cmpxchg32() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg32:
+; X32-LABEL: atomic_fetch_cmpxchg32:
+
%t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
@@ -254,6 +272,9 @@ define void @atomic_fetch_cmpxchg32() nounwind {
}
define void @atomic_fetch_store32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_store32:
+; X32-LABEL: atomic_fetch_store32:
+
store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
@@ -265,6 +286,9 @@ define void @atomic_fetch_store32(i32 %x) nounwind {
}
define void @atomic_fetch_swap32(i32 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap32:
+; X32-LABEL: atomic_fetch_swap32:
+
%t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
diff --git a/llvm/test/CodeGen/X86/atomic64.ll b/llvm/test/CodeGen/X86/atomic64.ll
index 4f55edc0567..11b4e6864da 100644
--- a/llvm/test/CodeGen/X86/atomic64.ll
+++ b/llvm/test/CodeGen/X86/atomic64.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X64: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X64: lock
@@ -22,7 +23,8 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X64: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
; X64: lock
; X64: decq
@@ -40,7 +42,8 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X64: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
; X64: lock
; X64: andq $3
@@ -56,7 +59,8 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X64: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
; X64: lock
; X64: orq $3
@@ -72,7 +76,8 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X64: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X64: lock
; X64: xorq $3
@@ -88,8 +93,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X64: atomic_fetch_nand64
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64: andq
; X64: notq
@@ -107,8 +112,10 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -126,8 +133,10 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -145,8 +154,10 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -164,8 +175,10 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X64: cmpq
+; X64: subq
; X64: cmov
; X64: lock
; X64: cmpxchgq
@@ -183,6 +196,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X64: lock
; X64: cmpxchgq
@@ -194,6 +209,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT: lock
; X64: movq
@@ -205,6 +222,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT: lock
; X64: xchgq
diff --git a/llvm/test/CodeGen/X86/atomic6432.ll b/llvm/test/CodeGen/X86/atomic6432.ll
index c0f7267abe7..1c4b0f43bf7 100644
--- a/llvm/test/CodeGen/X86/atomic6432.ll
+++ b/llvm/test/CodeGen/X86/atomic6432.ll
@@ -3,7 +3,8 @@
@sc64 = external global i64
define void @atomic_fetch_add64() nounwind {
-; X32: atomic_fetch_add64
+; X64-LABEL: atomic_fetch_add64:
+; X32-LABEL: atomic_fetch_add64:
entry:
%t1 = atomicrmw add i64* @sc64, i64 1 acquire
; X32: addl
@@ -30,20 +31,21 @@ entry:
}
define void @atomic_fetch_sub64() nounwind {
-; X32: atomic_fetch_sub64
+; X64-LABEL: atomic_fetch_sub64:
+; X32-LABEL: atomic_fetch_sub64:
%t1 = atomicrmw sub i64* @sc64, i64 1 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-1
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t2 = atomicrmw sub i64* @sc64, i64 3 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-3
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw sub i64* @sc64, i64 5 acquire
-; X32: subl
-; X32: sbbl
+; X32: addl $-5
+; X32: adcl $-1
; X32: lock
; X32: cmpxchg8b
%t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire
@@ -56,15 +58,16 @@ define void @atomic_fetch_sub64() nounwind {
}
define void @atomic_fetch_and64() nounwind {
-; X32: atomic_fetch_and64
+; X64-LABEL: atomic_fetch_and64:
+; X32-LABEL: atomic_fetch_and64:
%t1 = atomicrmw and i64* @sc64, i64 3 acquire
-; X32: andl
-; X32: andl
+; X32: andl $3
+; X32-NOT: andl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw and i64* @sc64, i64 5 acquire
-; X32: andl
-; X32: andl
+ %t2 = atomicrmw and i64* @sc64, i64 4294967297 acquire
+; X32: andl $1
+; X32: andl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw and i64* @sc64, i64 %t2 acquire
@@ -77,15 +80,16 @@ define void @atomic_fetch_and64() nounwind {
}
define void @atomic_fetch_or64() nounwind {
-; X32: atomic_fetch_or64
+; X64-LABEL: atomic_fetch_or64:
+; X32-LABEL: atomic_fetch_or64:
%t1 = atomicrmw or i64* @sc64, i64 3 acquire
-; X32: orl
-; X32: orl
+; X32: orl $3
+; X32-NOT: orl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw or i64* @sc64, i64 5 acquire
-; X32: orl
-; X32: orl
+ %t2 = atomicrmw or i64* @sc64, i64 4294967297 acquire
+; X32: orl $1
+; X32: orl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw or i64* @sc64, i64 %t2 acquire
@@ -98,15 +102,16 @@ define void @atomic_fetch_or64() nounwind {
}
define void @atomic_fetch_xor64() nounwind {
-; X32: atomic_fetch_xor64
+; X64-LABEL: atomic_fetch_xor64:
+; X32-LABEL: atomic_fetch_xor64:
%t1 = atomicrmw xor i64* @sc64, i64 3 acquire
; X32: xorl
-; X32: xorl
+; X32-NOT: xorl
; X32: lock
; X32: cmpxchg8b
- %t2 = atomicrmw xor i64* @sc64, i64 5 acquire
-; X32: xorl
-; X32: xorl
+ %t2 = atomicrmw xor i64* @sc64, i64 4294967297 acquire
+; X32: xorl $1
+; X32: xorl $1
; X32: lock
; X32: cmpxchg8b
%t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire
@@ -119,7 +124,8 @@ define void @atomic_fetch_xor64() nounwind {
}
define void @atomic_fetch_nand64(i64 %x) nounwind {
-; X32: atomic_fetch_nand64
+; X64-LABEL: atomic_fetch_nand64:
+; X32-LABEL: atomic_fetch_nand64:
%t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32: andl
; X32: andl
@@ -132,10 +138,11 @@ define void @atomic_fetch_nand64(i64 %x) nounwind {
}
define void @atomic_fetch_max64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_max64:
+; X32-LABEL: atomic_fetch_max64:
%t1 = atomicrmw max i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -145,10 +152,11 @@ define void @atomic_fetch_max64(i64 %x) nounwind {
}
define void @atomic_fetch_min64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_min64:
+; X32-LABEL: atomic_fetch_min64:
%t1 = atomicrmw min i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -158,10 +166,11 @@ define void @atomic_fetch_min64(i64 %x) nounwind {
}
define void @atomic_fetch_umax64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax64:
+; X32-LABEL: atomic_fetch_umax64:
%t1 = atomicrmw umax i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -171,10 +180,11 @@ define void @atomic_fetch_umax64(i64 %x) nounwind {
}
define void @atomic_fetch_umin64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin64:
+; X32-LABEL: atomic_fetch_umin64:
%t1 = atomicrmw umin i64* @sc64, i64 %x acquire
-; X32: cmpl
-; X32: cmpl
-; X32: cmov
+; X32: subl
+; X32: subl
; X32: cmov
; X32: cmov
; X32: lock
@@ -184,6 +194,8 @@ define void @atomic_fetch_umin64(i64 %x) nounwind {
}
define void @atomic_fetch_cmpxchg64() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg64:
+; X32-LABEL: atomic_fetch_cmpxchg64:
%t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32: lock
; X32: cmpxchg8b
@@ -192,6 +204,8 @@ define void @atomic_fetch_cmpxchg64() nounwind {
}
define void @atomic_fetch_store64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_store64:
+; X32-LABEL: atomic_fetch_store64:
store atomic i64 %x, i64* @sc64 release, align 8
; X32: lock
; X32: cmpxchg8b
@@ -200,6 +214,8 @@ define void @atomic_fetch_store64(i64 %x) nounwind {
}
define void @atomic_fetch_swap64(i64 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap64:
+; X32-LABEL: atomic_fetch_swap64:
%t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32: lock
; X32: xchg8b
diff --git a/llvm/test/CodeGen/X86/atomic8.ll b/llvm/test/CodeGen/X86/atomic8.ll
index 203b26f0ab9..5eef9b295e8 100644
--- a/llvm/test/CodeGen/X86/atomic8.ll
+++ b/llvm/test/CodeGen/X86/atomic8.ll
@@ -4,8 +4,8 @@
@sc8 = external global i8
define void @atomic_fetch_add8() nounwind {
-; X64: atomic_fetch_add8
-; X32: atomic_fetch_add8
+; X64-LABEL: atomic_fetch_add8:
+; X32-LABEL: atomic_fetch_add8:
entry:
; 32-bit
%t1 = atomicrmw add i8* @sc8, i8 1 acquire
@@ -34,8 +34,8 @@ entry:
}
define void @atomic_fetch_sub8() nounwind {
-; X64: atomic_fetch_sub8
-; X32: atomic_fetch_sub8
+; X64-LABEL: atomic_fetch_sub8:
+; X32-LABEL: atomic_fetch_sub8:
%t1 = atomicrmw sub i8* @sc8, i8 1 acquire
; X64: lock
; X64: decb
@@ -62,8 +62,8 @@ define void @atomic_fetch_sub8() nounwind {
}
define void @atomic_fetch_and8() nounwind {
-; X64: atomic_fetch_and8
-; X32: atomic_fetch_and8
+; X64-LABEL: atomic_fetch_and8:
+; X32-LABEL: atomic_fetch_and8:
%t1 = atomicrmw and i8* @sc8, i8 3 acquire
; X64: lock
; X64: andb $3
@@ -87,8 +87,8 @@ define void @atomic_fetch_and8() nounwind {
}
define void @atomic_fetch_or8() nounwind {
-; X64: atomic_fetch_or8
-; X32: atomic_fetch_or8
+; X64-LABEL: atomic_fetch_or8:
+; X32-LABEL: atomic_fetch_or8:
%t1 = atomicrmw or i8* @sc8, i8 3 acquire
; X64: lock
; X64: orb $3
@@ -112,8 +112,8 @@ define void @atomic_fetch_or8() nounwind {
}
define void @atomic_fetch_xor8() nounwind {
-; X64: atomic_fetch_xor8
-; X32: atomic_fetch_xor8
+; X64-LABEL: atomic_fetch_xor8:
+; X32-LABEL: atomic_fetch_xor8:
%t1 = atomicrmw xor i8* @sc8, i8 3 acquire
; X64: lock
; X64: xorb $3
@@ -137,8 +137,8 @@ define void @atomic_fetch_xor8() nounwind {
}
define void @atomic_fetch_nand8(i8 %x) nounwind {
-; X64: atomic_fetch_nand8
-; X32: atomic_fetch_nand8
+; X64-LABEL: atomic_fetch_nand8:
+; X32-LABEL: atomic_fetch_nand8:
%t1 = atomicrmw nand i8* @sc8, i8 %x acquire
; X64: andb
; X64: notb
@@ -154,14 +154,18 @@ define void @atomic_fetch_nand8(i8 %x) nounwind {
}
define void @atomic_fetch_max8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_max8:
+; X32-LABEL: atomic_fetch_max8:
%t1 = atomicrmw max i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -170,14 +174,18 @@ define void @atomic_fetch_max8(i8 %x) nounwind {
}
define void @atomic_fetch_min8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_min8:
+; X32-LABEL: atomic_fetch_min8:
%t1 = atomicrmw min i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movsbl
+; X64: movsbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movsbl
+; X32: movsbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -186,14 +194,18 @@ define void @atomic_fetch_min8(i8 %x) nounwind {
}
define void @atomic_fetch_umax8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umax8:
+; X32-LABEL: atomic_fetch_umax8:
%t1 = atomicrmw umax i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -202,13 +214,18 @@ define void @atomic_fetch_umax8(i8 %x) nounwind {
}
define void @atomic_fetch_umin8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_umin8:
+; X32-LABEL: atomic_fetch_umin8:
%t1 = atomicrmw umin i8* @sc8, i8 %x acquire
-; X64: cmpb
-; X64: cmov
+; X64: movzbl
+; X64: movzbl
+; X64: subl
; X64: lock
; X64: cmpxchgb
-; X32: cmpb
-; X32: cmov
+
+; X32: movzbl
+; X32: movzbl
+; X32: subl
; X32: lock
; X32: cmpxchgb
ret void
@@ -217,6 +234,8 @@ define void @atomic_fetch_umin8(i8 %x) nounwind {
}
define void @atomic_fetch_cmpxchg8() nounwind {
+; X64-LABEL: atomic_fetch_cmpxchg8:
+; X32-LABEL: atomic_fetch_cmpxchg8:
%t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire acquire
; X64: lock
; X64: cmpxchgb
@@ -228,6 +247,8 @@ define void @atomic_fetch_cmpxchg8() nounwind {
}
define void @atomic_fetch_store8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_store8:
+; X32-LABEL: atomic_fetch_store8:
store atomic i8 %x, i8* @sc8 release, align 4
; X64-NOT: lock
; X64: movb
@@ -239,6 +260,8 @@ define void @atomic_fetch_store8(i8 %x) nounwind {
}
define void @atomic_fetch_swap8(i8 %x) nounwind {
+; X64-LABEL: atomic_fetch_swap8:
+; X32-LABEL: atomic_fetch_swap8:
%t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
; X64-NOT: lock
; X64: xchgb
diff --git a/llvm/test/CodeGen/X86/atomic_op.ll b/llvm/test/CodeGen/X86/atomic_op.ll
index cb639abadd6..d0ab28aa61f 100644
--- a/llvm/test/CodeGen/X86/atomic_op.ll
+++ b/llvm/test/CodeGen/X86/atomic_op.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+cmov,cx16 -verify-machineinstrs | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -110,19 +110,19 @@ entry:
%17 = extractvalue { i32, i1 } %pair17, 0
store i32 %17, i32* %old
; CHECK: movl [[R17atomic:.*]], %eax
- ; CHECK: movl $1401, %[[R17mask:[a-z]*]]
- ; CHECK: andl %eax, %[[R17mask]]
- ; CHECK: notl %[[R17mask]]
+ ; CHECK: movl %eax, %[[R17mask:[a-z]*]]
+ ; CHECK: notl %[[R17mask]]
+ ; CHECK: orl $-1402, %[[R17mask]]
; CHECK: lock
; CHECK: cmpxchgl %[[R17mask]], [[R17atomic]]
; CHECK: jne
; CHECK: movl %eax,
%18 = atomicrmw nand i32* %val2, i32 1401 monotonic
store i32 %18, i32* %old
- ; CHECK: andl
- ; CHECK: andl
; CHECK: notl
; CHECK: notl
+ ; CHECK: orl $252645135
+ ; CHECK: orl $252645135
; CHECK: lock
; CHECK: cmpxchg8b
%19 = atomicrmw nand i64* %temp64, i64 17361641481138401520 monotonic
diff --git a/llvm/test/CodeGen/X86/pr5145.ll b/llvm/test/CodeGen/X86/pr5145.ll
index d048db8a850..32a797ba138 100644
--- a/llvm/test/CodeGen/X86/pr5145.ll
+++ b/llvm/test/CodeGen/X86/pr5145.ll
@@ -5,29 +5,29 @@ define void @atomic_maxmin_i8() {
; CHECK: atomic_maxmin_i8
%1 = atomicrmw max i8* @sc8, i8 5 acquire
; CHECK: [[LABEL1:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovl
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL1]]
%2 = atomicrmw min i8* @sc8, i8 6 acquire
; CHECK: [[LABEL3:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovg
+; CHECK: movsbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL3]]
%3 = atomicrmw umax i8* @sc8, i8 7 acquire
; CHECK: [[LABEL5:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmovb
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL5]]
%4 = atomicrmw umin i8* @sc8, i8 8 acquire
; CHECK: [[LABEL7:\.?LBB[0-9]+_[0-9]+]]:
-; CHECK: cmpb
-; CHECK: cmova
+; CHECK: movzbl
+; CHECK: cmpl
; CHECK: lock
; CHECK-NEXT: cmpxchgb
; CHECK: jne [[LABEL7]]
OpenPOWER on IntegriCloud