path: root/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
Diffstat (limited to 'llvm/test/CodeGen/X86/atomic-eflags-reuse.ll')
-rw-r--r--  llvm/test/CodeGen/X86/atomic-eflags-reuse.ll  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
index 260680eebf5..df4b00ddbe8 100644
--- a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -4,14 +4,14 @@
define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_slt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: cmovgl %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_slt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: cmovgl %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -25,14 +25,14 @@ entry:
define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_cmov_sge:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: cmovlel %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_cmov_sge:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: cmovlel %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -46,14 +46,14 @@ entry:
define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sle:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: cmovgel %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sle:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: cmovgel %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -67,14 +67,14 @@ entry:
define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmov_sgt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: cmovll %edx, %esi
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmov_sgt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: cmovll %edx, %esi
; SLOWINCDEC-NEXT: movl %esi, %eax
@@ -89,7 +89,7 @@ entry:
; FIXME: (setcc slt x, 0) gets combined into shr early.
define i8 @test_add_1_setcc_slt(i64* %p) #0 {
; CHECK-LABEL: test_add_1_setcc_slt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
@@ -104,13 +104,13 @@ entry:
define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_setcc_sgt:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: setge %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_setcc_sgt:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $-1, (%rdi)
; SLOWINCDEC-NEXT: setge %al
; SLOWINCDEC-NEXT: retq
@@ -123,10 +123,10 @@ entry:
define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-LABEL: test_add_1_brcond_sge:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock incq (%rdi)
; FASTINCDEC-NEXT: jle .LBB6_2
-; FASTINCDEC-NEXT: # BB#1: # %t
+; FASTINCDEC-NEXT: # %bb.1: # %t
; FASTINCDEC-NEXT: movl %esi, %eax
; FASTINCDEC-NEXT: retq
; FASTINCDEC-NEXT: .LBB6_2: # %f
@@ -134,10 +134,10 @@ define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_add_1_brcond_sge:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock addq $1, (%rdi)
; SLOWINCDEC-NEXT: jle .LBB6_2
-; SLOWINCDEC-NEXT: # BB#1: # %t
+; SLOWINCDEC-NEXT: # %bb.1: # %t
; SLOWINCDEC-NEXT: movl %esi, %eax
; SLOWINCDEC-NEXT: retq
; SLOWINCDEC-NEXT: .LBB6_2: # %f
@@ -158,7 +158,7 @@ f:
define i32 @test_add_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sle:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -174,7 +174,7 @@ entry:
define i32 @test_add_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: test_add_1_cmov_sgt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -192,7 +192,7 @@ entry:
define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: lock xaddq %rcx, (%rdi)
; CHECK-NEXT: testq %rcx, %rcx
@@ -209,7 +209,7 @@ entry:
define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
; CHECK-LABEL: test_sub_2_setcc_sgt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-2, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -225,7 +225,7 @@ entry:
define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
; CHECK-LABEL: test_add_1_cmov_cmov:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: testq %rax, %rax
@@ -240,13 +240,13 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_eq(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: sete %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_eq:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock subq $1, (%rdi)
; SLOWINCDEC-NEXT: sete %al
; SLOWINCDEC-NEXT: retq
@@ -259,13 +259,13 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_ne(i64* %p) #0 {
; FASTINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; FASTINCDEC: # BB#0: # %entry
+; FASTINCDEC: # %bb.0: # %entry
; FASTINCDEC-NEXT: lock decq (%rdi)
; FASTINCDEC-NEXT: setne %al
; FASTINCDEC-NEXT: retq
;
; SLOWINCDEC-LABEL: test_sub_1_cmp_1_setcc_ne:
-; SLOWINCDEC: # BB#0: # %entry
+; SLOWINCDEC: # %bb.0: # %entry
; SLOWINCDEC-NEXT: lock subq $1, (%rdi)
; SLOWINCDEC-NEXT: setne %al
; SLOWINCDEC-NEXT: retq
@@ -278,7 +278,7 @@ entry:
define i8 @test_sub_1_cmp_1_setcc_ugt(i64* %p) #0 {
; CHECK-LABEL: test_sub_1_cmp_1_setcc_ugt:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lock subq $1, (%rdi)
; CHECK-NEXT: seta %al
; CHECK-NEXT: retq
@@ -293,7 +293,7 @@ entry:
; comparison can be folded into the atomic subtract.
define i8 @test_sub_1_cmp_1_setcc_sle(i64* %p) #0 {
; CHECK-LABEL: test_sub_1_cmp_1_setcc_sle:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-1, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: cmpq $2, %rax
@@ -308,7 +308,7 @@ entry:
define i8 @test_sub_3_cmp_3_setcc_eq(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_eq:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lock subq $3, (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
@@ -323,7 +323,7 @@ entry:
; comparison can be folded into the atomic subtract.
define i8 @test_sub_3_cmp_3_setcc_uge(i64* %p) #0 {
; CHECK-LABEL: test_sub_3_cmp_3_setcc_uge:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq $-3, %rax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: cmpq $2, %rax