| author | JF Bastien <jfb@google.com> | 2014-12-15 22:34:58 +0000 |
|---|---|---|
| committer | JF Bastien <jfb@google.com> | 2014-12-15 22:34:58 +0000 |
| commit | 388b8794c9cc83dcad29b34a751405eb4f5371d3 (patch) | |
| tree | 5ce800a9b481c3ee8f38a5aa27e1f2c4ad2eb748 /llvm/test/CodeGen/X86/atomic16.ll | |
| parent | 71e11a1d0d2c0419ff2edd1bea0c091441156f12 (diff) | |
| download | bcm5719-llvm-388b8794c9cc83dcad29b34a751405eb4f5371d3.tar.gz bcm5719-llvm-388b8794c9cc83dcad29b34a751405eb4f5371d3.zip | |
x86: Emit LOCK prefix after DATA16
Summary: x86 accepts the LOCK and DATA16 prefixes in either order, but GCC+GAS emit them in a different order than LLVM does. This change matches the order in which GAS emits the two prefixes when no semicolon is used in inline assembly (see the tc-i386.c comment above the LOCK_PREFIX define), and it helps simplify tooling that operates on the instruction's byte sequence (such as NaCl's validator). The change should have no performance impact.
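The prefix bytes in question are 0x66 (the operand-size override, which LLVM's MC layer calls DATA16) and 0xf0 (LOCK). The standalone GAS-syntax sketch below, with a made-up function name that is not part of the patch or the test, illustrates the ordering being matched:

```asm
        # Minimal illustration only: x86 decodes these legacy prefixes in
        # either order, so 66 f0 and f0 66 name the same locked 16-bit add.
        # GAS emits 0x66 (DATA16) before 0xf0 (LOCK) for the line below,
        # and after this change LLVM's integrated assembler does the same.
        .text
        .globl  lock_add16_demo
lock_add16_demo:                        # lock_add16_demo(uint16_t *p): p arrives in %rdi
        lock addw $3, (%rdi)            # encoding begins 66 f0 (DATA16, then LOCK)
        ret
```

With both assemblers agreeing, tools that scan raw instruction bytes, such as NaCl's validator, have a single canonical prefix order to match.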
Test Plan: ninja check
Reviewers: craig.topper, jvoung
Subscribers: jfb, llvm-commits
Differential Revision: http://reviews.llvm.org/D6630
llvm-svn: 224283
Diffstat (limited to 'llvm/test/CodeGen/X86/atomic16.ll')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/atomic16.ll | 24 |

1 file changed, 12 insertions, 12 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/atomic16.ll b/llvm/test/CodeGen/X86/atomic16.ll
index faaa4c49d39..f6892de43d8 100644
--- a/llvm/test/CodeGen/X86/atomic16.ll
+++ b/llvm/test/CodeGen/X86/atomic16.ll
@@ -15,17 +15,17 @@ entry:
 ; X32: incw
   %t2 = atomicrmw add i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: addw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: addw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: addw $3
   %t3 = atomicrmw add i16* @sc16, i16 5 acquire
 ; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xaddw
   %t4 = atomicrmw add i16* @sc16, i16 %t3 acquire
 ; X64: lock
-; X64: addw {{.*}} # encoding: [0xf0,0x66
+; X64: addw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: addw
   ret void
@@ -43,17 +43,17 @@ define void @atomic_fetch_sub16() nounwind {
 ; X32: decw
   %t2 = atomicrmw sub i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: subw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: subw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: subw $3
   %t3 = atomicrmw sub i16* @sc16, i16 5 acquire
 ; X64: lock
-; X64: xaddw {{.*}} # encoding: [0xf0,0x66
+; X64: xaddw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xaddw
   %t4 = atomicrmw sub i16* @sc16, i16 %t3 acquire
 ; X64: lock
-; X64: subw {{.*}} # encoding: [0xf0,0x66
+; X64: subw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: subw
   ret void
@@ -66,7 +66,7 @@ define void @atomic_fetch_and16() nounwind {
 ; X32-LABEL: atomic_fetch_and16
   %t1 = atomicrmw and i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: andw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: andw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: andw $3
   %t2 = atomicrmw and i16* @sc16, i16 5 acquire
@@ -78,7 +78,7 @@ define void @atomic_fetch_and16() nounwind {
 ; X32: cmpxchgw
   %t3 = atomicrmw and i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: andw {{.*}} # encoding: [0xf0,0x66
+; X64: andw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: andw
   ret void
@@ -91,7 +91,7 @@ define void @atomic_fetch_or16() nounwind {
 ; X32-LABEL: atomic_fetch_or16
   %t1 = atomicrmw or i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: orw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: orw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: orw $3
   %t2 = atomicrmw or i16* @sc16, i16 5 acquire
@@ -103,7 +103,7 @@ define void @atomic_fetch_or16() nounwind {
 ; X32: cmpxchgw
   %t3 = atomicrmw or i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: orw {{.*}} # encoding: [0xf0,0x66
+; X64: orw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: orw
   ret void
@@ -116,7 +116,7 @@ define void @atomic_fetch_xor16() nounwind {
 ; X32-LABEL: atomic_fetch_xor16
   %t1 = atomicrmw xor i16* @sc16, i16 3 acquire
 ; X64: lock
-; X64: xorw $3, {{.*}} # encoding: [0xf0,0x66
+; X64: xorw $3, {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xorw $3
   %t2 = atomicrmw xor i16* @sc16, i16 5 acquire
@@ -128,7 +128,7 @@ define void @atomic_fetch_xor16() nounwind {
 ; X32: cmpxchgw
   %t3 = atomicrmw xor i16* @sc16, i16 %t2 acquire
 ; X64: lock
-; X64: xorw {{.*}} # encoding: [0xf0,0x66
+; X64: xorw {{.*}} # encoding: [0x66,0xf0
 ; X32: lock
 ; X32: xorw
   ret void
```

