summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/atomic8.ll
diff options
context:
space:
mode:
authorMichael Liao <michael.liao@intel.com>2012-09-20 03:06:15 +0000
committerMichael Liao <michael.liao@intel.com>2012-09-20 03:06:15 +0000
commit3237662b65a7fb6d829f0e60dd440c575ae0115e (patch)
tree3e640a81a590d57b0bcdd65e6a095ff59f6bf8fa /llvm/test/CodeGen/X86/atomic8.ll
parent51fc86ddfe621dad30742861d806471cbaaa6387 (diff)
downloadbcm5719-llvm-3237662b65a7fb6d829f0e60dd440c575ae0115e.tar.gz
bcm5719-llvm-3237662b65a7fb6d829f0e60dd440c575ae0115e.zip
Re-work X86 code generation of atomic ops with spin-loop
- Rewrite/merge pseudo-atomic instruction emitters to address the following issue: * Reduce one unnecessary load in spin-loop previously the spin-loop looks like thisMBB: newMBB: ld t1 = [bitinstr.addr] op t2 = t1, [bitinstr.val] not t3 = t2 (if Invert) mov EAX = t1 lcs dest = [bitinstr.addr], t3 [EAX is implicit] bz newMBB fallthrough -->nextMBB the 'ld' at the beginning of newMBB should be lifted out of the loop as lcs (or CMPXCHG on x86) will load the current memory value into EAX. This loop is refined as: thisMBB: EAX = LOAD [MI.addr] mainMBB: t1 = OP [MI.val], EAX LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] JNE mainMBB sinkMBB: * Remove immopc as, so far, all pseudo-atomic instructions have all-register form only, there is no immediate operand. * Remove unnecessary attributes/modifiers in pseudo-atomic instruction td * Fix issues in PR13458 - Add comprehensive tests on atomic ops on various data types. NOTE: Some of them are turned off due to missing functionality. - Revise tests due to the new spin-loop generated. llvm-svn: 164281
Diffstat (limited to 'llvm/test/CodeGen/X86/atomic8.ll')
-rw-r--r--llvm/test/CodeGen/X86/atomic8.ll251
1 files changed, 251 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/atomic8.ll b/llvm/test/CodeGen/X86/atomic8.ll
new file mode 100644
index 00000000000..035a28dbffa
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomic8.ll
@@ -0,0 +1,251 @@
+; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
+; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
+; XFAIL: *
+
+@sc8 = external global i8
+
+define void @atomic_fetch_add8() nounwind {
+; Covers i8 atomicrmw add lowering. The checks require: add-of-1 folds
+; to a locked incb, an immediate add to a locked "addb $3", and the add
+; whose result is consumed (%t3 feeds %t4) to a locked xaddb -- xadd is
+; needed there since it yields the previous memory value in a register.
+; X64: atomic_fetch_add8
+; X32: atomic_fetch_add8
+entry:
+; 32-bit
+ %t1 = atomicrmw add i8* @sc8, i8 1 acquire
+; X64: lock
+; X64: incb
+; X32: lock
+; X32: incb
+ %t2 = atomicrmw add i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: addb $3
+; X32: lock
+; X32: addb $3
+ %t3 = atomicrmw add i8* @sc8, i8 5 acquire
+; X64: lock
+; X64: xaddb
+; X32: lock
+; X32: xaddb
+ %t4 = atomicrmw add i8* @sc8, i8 %t3 acquire
+; X64: lock
+; X64: addb
+; X32: lock
+; X32: addb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_sub8() nounwind {
+; Mirrors atomic_fetch_add8 for atomicrmw sub: subtract-of-1 must fold
+; to a locked decb, an immediate to a locked "subb $3", and the op whose
+; result feeds %t4 to a locked xaddb (presumably an xadd of the negated
+; operand, so the old value is returned).
+; X64: atomic_fetch_sub8
+; X32: atomic_fetch_sub8
+ %t1 = atomicrmw sub i8* @sc8, i8 1 acquire
+; X64: lock
+; X64: decb
+; X32: lock
+; X32: decb
+ %t2 = atomicrmw sub i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: subb $3
+; X32: lock
+; X32: subb $3
+ %t3 = atomicrmw sub i8* @sc8, i8 5 acquire
+; X64: lock
+; X64: xaddb
+; X32: lock
+; X32: xaddb
+ %t4 = atomicrmw sub i8* @sc8, i8 %t3 acquire
+; X64: lock
+; X64: subb
+; X32: lock
+; X32: subb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_and8() nounwind {
+; i8 atomicrmw and: when the result is unused, a locked andb suffices;
+; when the result is needed (%t2 feeds %t3) the checks expect an andb
+; computing the new value inside a locked cmpxchgb retry loop, since
+; x86 has no and-and-fetch instruction.
+; X64: atomic_fetch_and8
+; X32: atomic_fetch_and8
+ %t1 = atomicrmw and i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: andb $3
+; X32: lock
+; X32: andb $3
+ %t2 = atomicrmw and i8* @sc8, i8 5 acquire
+; X64: andb
+; X64: lock
+; X64: cmpxchgb
+; X32: andb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw and i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: andb
+; X32: lock
+; X32: andb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_or8() nounwind {
+; i8 atomicrmw or: same structure as the 'and' test -- unused results
+; lower to a locked orb, while the used result (%t2 feeds %t3) goes
+; through an orb plus a locked cmpxchgb loop.
+; X64: atomic_fetch_or8
+; X32: atomic_fetch_or8
+ %t1 = atomicrmw or i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: orb $3
+; X32: lock
+; X32: orb $3
+ %t2 = atomicrmw or i8* @sc8, i8 5 acquire
+; X64: orb
+; X64: lock
+; X64: cmpxchgb
+; X32: orb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw or i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: orb
+; X32: lock
+; X32: orb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_xor8() nounwind {
+; i8 atomicrmw xor: same structure as the 'and'/'or' tests -- unused
+; results lower to a locked xorb; the used result (%t2 feeds %t3) goes
+; through an xorb plus a locked cmpxchgb loop.
+; X64: atomic_fetch_xor8
+; X32: atomic_fetch_xor8
+ %t1 = atomicrmw xor i8* @sc8, i8 3 acquire
+; X64: lock
+; X64: xorb $3
+; X32: lock
+; X32: xorb $3
+ %t2 = atomicrmw xor i8* @sc8, i8 5 acquire
+; X64: xorb
+; X64: lock
+; X64: cmpxchgb
+; X32: xorb
+; X32: lock
+; X32: cmpxchgb
+ %t3 = atomicrmw xor i8* @sc8, i8 %t2 acquire
+; X64: lock
+; X64: xorb
+; X32: lock
+; X32: xorb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_nand8(i8 %x) nounwind {
+; i8 atomicrmw nand: x86 has no nand instruction at all, so even with
+; an unused result the checks expect andb+notb computing the new value
+; inside a locked cmpxchgb retry loop.
+; X64: atomic_fetch_nand8
+; X32: atomic_fetch_nand8
+ %t1 = atomicrmw nand i8* @sc8, i8 %x acquire
+; X64: andb
+; X64: notb
+; X64: lock
+; X64: cmpxchgb
+; X32: andb
+; X32: notb
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_max8(i8 %x) nounwind {
+; Signed i8 max: expects a cmpb/cmov selecting the larger value, fed
+; into a locked cmpxchgb retry loop.
+; NOTE(review): unlike the earlier tests, there is no check for the
+; function label here; the file-level XFAIL suggests this lowering was
+; incomplete when added -- confirm against the commit message.
+ %t1 = atomicrmw max i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_min8(i8 %x) nounwind {
+; Signed i8 min: same expected shape as atomic_fetch_max8 -- a
+; cmpb/cmov select feeding a locked cmpxchgb retry loop.
+ %t1 = atomicrmw min i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umax8(i8 %x) nounwind {
+; Unsigned i8 max: same expected shape as the signed variants -- a
+; cmpb/cmov select (unsigned condition) feeding a locked cmpxchgb loop.
+ %t1 = atomicrmw umax i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_umin8(i8 %x) nounwind {
+; Unsigned i8 min: cmpb/cmov select feeding a locked cmpxchgb loop,
+; matching the other min/max tests above.
+ %t1 = atomicrmw umin i8* @sc8, i8 %x acquire
+; X64: cmpb
+; X64: cmov
+; X64: lock
+; X64: cmpxchgb
+; X32: cmpb
+; X32: cmov
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_cmpxchg8() nounwind {
+; A cmpxchg on i8 maps directly to a single lock-prefixed cmpxchgb --
+; no surrounding loop is required for the IR-level compare-exchange.
+ %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire
+; X64: lock
+; X64: cmpxchgb
+; X32: lock
+; X32: cmpxchgb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_store8(i8 %x) nounwind {
+; Release-ordered atomic store: the checks forbid a lock prefix and
+; require a plain movb -- a normal store is sufficient for release
+; semantics on x86.
+; NOTE(review): "align 4" on an i8 store looks copied from a wider
+; type's test; align 1 would be the natural alignment -- harmless
+; here, but worth confirming.
+ store atomic i8 %x, i8* @sc8 release, align 4
+; X64-NOT: lock
+; X64: movb
+; X32-NOT: lock
+; X32: movb
+ ret void
+; X64: ret
+; X32: ret
+}
+
+define void @atomic_fetch_swap8(i8 %x) nounwind {
+; atomicrmw xchg: xchgb with a memory operand asserts the bus lock
+; implicitly on x86, so the checks forbid an explicit lock prefix.
+ %t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
+; X64-NOT: lock
+; X64: xchgb
+; X32-NOT: lock
+; X32: xchgb
+ ret void
+; X64: ret
+; X32: ret
+}
OpenPOWER on IntegriCloud