author    Chandler Carruth <chandlerc@gmail.com>  2018-07-14 09:32:37 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2018-07-14 09:32:37 +0000
commit    fb503ac0277ab6613d96c35f0eb6f789e585d3df (patch)
tree      f00c658efd574e1708029161ed152460cb6cf289 /llvm
parent    7426cf6717c341717f852142d8d11e81f0d67590 (diff)
[x86/SLH] Fix an issue where we wouldn't harden any loads if we found
no conditions.

This is only valid to do if we're hardening calls and rets with LFENCE,
which results in an LFENCE guarding the entire entry block for us.

llvm-svn: 337089
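
For readers skimming the hunk below, here is a minimal standalone C++ sketch of the early return this patch tightens. The types and the function name are illustrative stand-ins (assumptions), not the pass's real signatures; only the guarded condition mirrors the actual change.

#include <vector>

struct BlockCondInfo { /* per-block conditional-branch state (stand-in) */ };

// Hypothetical distillation of the relevant runOnMachineFunction logic.
bool hardenFunctionSketch(bool FenceCallAndRet,
                          const std::vector<BlockCondInfo> &Infos) {
  // Before this patch: `if (Infos.empty()) return true;` skipped load
  // hardening whenever no conditional blocks were found, even with no
  // entry LFENCE in place -- leaving loads unhardened.
  // After: only skip when FenceCallAndRet mode placed an LFENCE that
  // guards the entire entry block.
  if (FenceCallAndRet && Infos.empty())
    return true; // fences may already have changed the function

  // ... otherwise trace predicate state through the CFG and harden loads ...
  return false; // stand-in; the real pass reports whether it changed code
}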
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp |   6
-rw-r--r--  llvm/test/CodeGen/X86/speculative-load-hardening.ll | 155
2 files changed, 91 insertions, 70 deletions
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 2bbb187b293..f5933cbc8ca 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -367,9 +367,9 @@ bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
++NumLFENCEsInserted;
}
- // If we have no conditionals to protect in blocks, then all we needed to do
- // was protect the entry and so we're done.
- if (Infos.empty())
+ // If we guarded the entry with an LFENCE and have no conditionals to protect
+ // in blocks, then we're done.
+ if (FenceCallAndRet && Infos.empty())
// We may have changed the function's code at this point to insert fences.
return true;
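
Before the test diff: the first new test below, test_trivial_entry_load, pins down the hardened sequence for a lone entry-block load. As a reading aid, here is a hedged C++ rendering of what its X64 check lines encode; `Rsp` is an explicit stand-in for the stack pointer, since the real pattern lives in assembly, not C++.

#include <cstdint>

// Illustrative only: mirrors the X64 CHECK lines of test_trivial_entry_load.
int32_t hardenedEntryLoad(const int32_t *Ptr, uint64_t &Rsp) {
  // movq %rsp, %rcx ; sarq $63, %rcx
  // The predicate state rides in the stack pointer's high bit:
  // all-ones when mis-speculating, all-zeros on the correct path.
  int64_t PredState = static_cast<int64_t>(Rsp) >> 63;

  // movl (%rdi), %eax ; orl %ecx, %eax
  // OR the mask into the loaded value so a mis-speculated load yields
  // a harmless all-ones value rather than leaking a secret.
  int32_t Value = *Ptr | static_cast<int32_t>(PredState);

  // shlq $47, %rcx ; orq %rcx, %rsp
  // Re-stash the predicate state into %rsp's high bits before ret.
  Rsp |= static_cast<uint64_t>(PredState) << 47;
  return Value;
}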
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 4976086bbe0..e160ce1715d 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -8,6 +8,27 @@ declare void @leak(i32 %v1, i32 %v2)
declare void @sink(i32)
+define i32 @test_trivial_entry_load(i32* %ptr) nounwind {
+; X64-LABEL: test_trivial_entry_load:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movq %rsp, %rcx
+; X64-NEXT: movq $-1, %rax
+; X64-NEXT: sarq $63, %rcx
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: orl %ecx, %eax
+; X64-NEXT: shlq $47, %rcx
+; X64-NEXT: orq %rcx, %rsp
+; X64-NEXT: retq
+;
+; X64-LFENCE-LABEL: test_trivial_entry_load:
+; X64-LFENCE: # %bb.0: # %entry
+; X64-LFENCE-NEXT: movl (%rdi), %eax
+; X64-LFENCE-NEXT: retq
+entry:
+ %v = load i32, i32* %ptr
+ ret i32 %v
+}
+
define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2, i32** %ptr3) nounwind {
; X64-LABEL: test_basic_conditions:
; X64: # %bb.0: # %entry
@@ -18,33 +39,33 @@ define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr
; X64-NEXT: movq $-1, %rbx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: testl %edi, %edi
-; X64-NEXT: jne .LBB0_1
+; X64-NEXT: jne .LBB1_1
; X64-NEXT: # %bb.2: # %then1
; X64-NEXT: cmovneq %rbx, %rax
; X64-NEXT: testl %esi, %esi
-; X64-NEXT: je .LBB0_4
-; X64-NEXT: .LBB0_1:
+; X64-NEXT: je .LBB1_4
+; X64-NEXT: .LBB1_1:
; X64-NEXT: cmoveq %rbx, %rax
-; X64-NEXT: .LBB0_8: # %exit
+; X64-NEXT: .LBB1_8: # %exit
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r14
; X64-NEXT: popq %r15
; X64-NEXT: retq
-; X64-NEXT: .LBB0_4: # %then2
+; X64-NEXT: .LBB1_4: # %then2
; X64-NEXT: movq %r8, %r15
; X64-NEXT: cmovneq %rbx, %rax
; X64-NEXT: testl %edx, %edx
-; X64-NEXT: je .LBB0_6
+; X64-NEXT: je .LBB1_6
; X64-NEXT: # %bb.5: # %else3
; X64-NEXT: cmoveq %rbx, %rax
; X64-NEXT: movslq (%r9), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: leaq (%r15,%rcx,4), %r14
; X64-NEXT: movl %ecx, (%r15,%rcx,4)
-; X64-NEXT: jmp .LBB0_7
-; X64-NEXT: .LBB0_6: # %then3
+; X64-NEXT: jmp .LBB1_7
+; X64-NEXT: .LBB1_6: # %then3
; X64-NEXT: cmovneq %rbx, %rax
; X64-NEXT: movl (%rcx), %ecx
; X64-NEXT: addl (%r15), %ecx
@@ -61,38 +82,38 @@ define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr
; X64-NEXT: callq leak
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: sarq $63, %rax
-; X64-NEXT: .LBB0_7: # %merge
+; X64-NEXT: .LBB1_7: # %merge
; X64-NEXT: movslq (%r14), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: movl $0, (%r15,%rcx,4)
-; X64-NEXT: jmp .LBB0_8
+; X64-NEXT: jmp .LBB1_8
;
; X64-LFENCE-LABEL: test_basic_conditions:
; X64-LFENCE: # %bb.0: # %entry
; X64-LFENCE-NEXT: testl %edi, %edi
-; X64-LFENCE-NEXT: jne .LBB0_6
+; X64-LFENCE-NEXT: jne .LBB1_6
; X64-LFENCE-NEXT: # %bb.1: # %then1
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: testl %esi, %esi
-; X64-LFENCE-NEXT: je .LBB0_2
-; X64-LFENCE-NEXT: .LBB0_6: # %exit
+; X64-LFENCE-NEXT: je .LBB1_2
+; X64-LFENCE-NEXT: .LBB1_6: # %exit
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: retq
-; X64-LFENCE-NEXT: .LBB0_2: # %then2
+; X64-LFENCE-NEXT: .LBB1_2: # %then2
; X64-LFENCE-NEXT: pushq %r14
; X64-LFENCE-NEXT: pushq %rbx
; X64-LFENCE-NEXT: pushq %rax
; X64-LFENCE-NEXT: movq %r8, %rbx
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: testl %edx, %edx
-; X64-LFENCE-NEXT: je .LBB0_3
+; X64-LFENCE-NEXT: je .LBB1_3
; X64-LFENCE-NEXT: # %bb.4: # %else3
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: movslq (%r9), %rax
; X64-LFENCE-NEXT: leaq (%rbx,%rax,4), %r14
; X64-LFENCE-NEXT: movl %eax, (%rbx,%rax,4)
-; X64-LFENCE-NEXT: jmp .LBB0_5
-; X64-LFENCE-NEXT: .LBB0_3: # %then3
+; X64-LFENCE-NEXT: jmp .LBB1_5
+; X64-LFENCE-NEXT: .LBB1_3: # %then3
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: movl (%rcx), %eax
; X64-LFENCE-NEXT: addl (%rbx), %eax
@@ -102,7 +123,7 @@ define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr
; X64-LFENCE-NEXT: addl (%r14), %esi
; X64-LFENCE-NEXT: # kill: def $edi killed $edi killed $rdi
; X64-LFENCE-NEXT: callq leak
-; X64-LFENCE-NEXT: .LBB0_5: # %merge
+; X64-LFENCE-NEXT: .LBB1_5: # %merge
; X64-LFENCE-NEXT: movslq (%r14), %rax
; X64-LFENCE-NEXT: movl $0, (%rbx,%rax,4)
; X64-LFENCE-NEXT: addq $8, %rsp
@@ -164,21 +185,21 @@ define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
; X64-NEXT: movq $-1, %r15
; X64-NEXT: sarq $63, %rax
; X64-NEXT: testl %edi, %edi
-; X64-NEXT: je .LBB1_2
+; X64-NEXT: je .LBB2_2
; X64-NEXT: # %bb.1:
; X64-NEXT: cmoveq %r15, %rax
-; X64-NEXT: jmp .LBB1_5
-; X64-NEXT: .LBB1_2: # %l.header.preheader
+; X64-NEXT: jmp .LBB2_5
+; X64-NEXT: .LBB2_2: # %l.header.preheader
; X64-NEXT: movq %rcx, %r14
; X64-NEXT: movq %rdx, %r12
; X64-NEXT: movl %esi, %ebp
; X64-NEXT: cmovneq %r15, %rax
; X64-NEXT: xorl %ebx, %ebx
-; X64-NEXT: jmp .LBB1_3
+; X64-NEXT: jmp .LBB2_3
; X64-NEXT: .p2align 4, 0x90
-; X64-NEXT: .LBB1_6: # in Loop: Header=BB1_3 Depth=1
+; X64-NEXT: .LBB2_6: # in Loop: Header=BB2_3 Depth=1
; X64-NEXT: cmovgeq %r15, %rax
-; X64-NEXT: .LBB1_3: # %l.header
+; X64-NEXT: .LBB2_3: # %l.header
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movslq (%r12), %rcx
; X64-NEXT: orq %rax, %rcx
@@ -192,10 +213,10 @@ define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
; X64-NEXT: sarq $63, %rax
; X64-NEXT: incl %ebx
; X64-NEXT: cmpl %ebp, %ebx
-; X64-NEXT: jl .LBB1_6
+; X64-NEXT: jl .LBB2_6
; X64-NEXT: # %bb.4:
; X64-NEXT: cmovlq %r15, %rax
-; X64-NEXT: .LBB1_5: # %exit
+; X64-NEXT: .LBB2_5: # %exit
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: popq %rbx
@@ -213,7 +234,7 @@ define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
; X64-LFENCE-NEXT: pushq %rbx
; X64-LFENCE-NEXT: pushq %rax
; X64-LFENCE-NEXT: testl %edi, %edi
-; X64-LFENCE-NEXT: jne .LBB1_3
+; X64-LFENCE-NEXT: jne .LBB2_3
; X64-LFENCE-NEXT: # %bb.1: # %l.header.preheader
; X64-LFENCE-NEXT: movq %rcx, %r14
; X64-LFENCE-NEXT: movq %rdx, %r15
@@ -221,7 +242,7 @@ define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: xorl %ebx, %ebx
; X64-LFENCE-NEXT: .p2align 4, 0x90
-; X64-LFENCE-NEXT: .LBB1_2: # %l.header
+; X64-LFENCE-NEXT: .LBB2_2: # %l.header
; X64-LFENCE-NEXT: # =>This Inner Loop Header: Depth=1
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: movslq (%r15), %rax
@@ -229,8 +250,8 @@ define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
; X64-LFENCE-NEXT: callq sink
; X64-LFENCE-NEXT: incl %ebx
; X64-LFENCE-NEXT: cmpl %ebp, %ebx
-; X64-LFENCE-NEXT: jl .LBB1_2
-; X64-LFENCE-NEXT: .LBB1_3: # %exit
+; X64-LFENCE-NEXT: jl .LBB2_2
+; X64-LFENCE-NEXT: .LBB2_3: # %exit
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: addq $8, %rsp
; X64-LFENCE-NEXT: popq %rbx
@@ -270,11 +291,11 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-NEXT: movq $-1, %r12
; X64-NEXT: sarq $63, %rax
; X64-NEXT: testl %edi, %edi
-; X64-NEXT: je .LBB2_2
+; X64-NEXT: je .LBB3_2
; X64-NEXT: # %bb.1:
; X64-NEXT: cmoveq %r12, %rax
-; X64-NEXT: jmp .LBB2_10
-; X64-NEXT: .LBB2_2: # %l1.header.preheader
+; X64-NEXT: jmp .LBB3_10
+; X64-NEXT: .LBB3_2: # %l1.header.preheader
; X64-NEXT: movq %r8, %r14
; X64-NEXT: movq %rcx, %rbx
; X64-NEXT: movl %edx, %ebp
@@ -283,21 +304,21 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-NEXT: xorl %r13d, %r13d
; X64-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT: testl %r15d, %r15d
-; X64-NEXT: jg .LBB2_5
-; X64-NEXT: jmp .LBB2_4
+; X64-NEXT: jg .LBB3_5
+; X64-NEXT: jmp .LBB3_4
; X64-NEXT: .p2align 4, 0x90
-; X64-NEXT: .LBB2_12:
+; X64-NEXT: .LBB3_12:
; X64-NEXT: cmovgeq %r12, %rax
; X64-NEXT: testl %r15d, %r15d
-; X64-NEXT: jle .LBB2_4
-; X64-NEXT: .LBB2_5: # %l2.header.preheader
+; X64-NEXT: jle .LBB3_4
+; X64-NEXT: .LBB3_5: # %l2.header.preheader
; X64-NEXT: cmovleq %r12, %rax
; X64-NEXT: xorl %r15d, %r15d
-; X64-NEXT: jmp .LBB2_6
+; X64-NEXT: jmp .LBB3_6
; X64-NEXT: .p2align 4, 0x90
-; X64-NEXT: .LBB2_11: # in Loop: Header=BB2_6 Depth=1
+; X64-NEXT: .LBB3_11: # in Loop: Header=BB3_6 Depth=1
; X64-NEXT: cmovgeq %r12, %rax
-; X64-NEXT: .LBB2_6: # %l2.header
+; X64-NEXT: .LBB3_6: # %l2.header
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movslq (%rbx), %rcx
; X64-NEXT: orq %rax, %rcx
@@ -311,15 +332,15 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-NEXT: sarq $63, %rax
; X64-NEXT: incl %r15d
; X64-NEXT: cmpl %ebp, %r15d
-; X64-NEXT: jl .LBB2_11
+; X64-NEXT: jl .LBB3_11
; X64-NEXT: # %bb.7:
; X64-NEXT: cmovlq %r12, %rax
; X64-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %r15d # 4-byte Reload
-; X64-NEXT: jmp .LBB2_8
+; X64-NEXT: jmp .LBB3_8
; X64-NEXT: .p2align 4, 0x90
-; X64-NEXT: .LBB2_4:
+; X64-NEXT: .LBB3_4:
; X64-NEXT: cmovgq %r12, %rax
-; X64-NEXT: .LBB2_8: # %l1.latch
+; X64-NEXT: .LBB3_8: # %l1.latch
; X64-NEXT: movslq (%rbx), %rcx
; X64-NEXT: orq %rax, %rcx
; X64-NEXT: movq %rax, %rdx
@@ -332,10 +353,10 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-NEXT: sarq $63, %rax
; X64-NEXT: incl %r13d
; X64-NEXT: cmpl %r15d, %r13d
-; X64-NEXT: jl .LBB2_12
+; X64-NEXT: jl .LBB3_12
; X64-NEXT: # %bb.9:
; X64-NEXT: cmovlq %r12, %rax
-; X64-NEXT: .LBB2_10: # %exit
+; X64-NEXT: .LBB3_10: # %exit
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: addq $8, %rsp
@@ -357,7 +378,7 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-LFENCE-NEXT: pushq %rbx
; X64-LFENCE-NEXT: pushq %rax
; X64-LFENCE-NEXT: testl %edi, %edi
-; X64-LFENCE-NEXT: jne .LBB2_6
+; X64-LFENCE-NEXT: jne .LBB3_6
; X64-LFENCE-NEXT: # %bb.1: # %l1.header.preheader
; X64-LFENCE-NEXT: movq %r8, %r14
; X64-LFENCE-NEXT: movq %rcx, %rbx
@@ -366,19 +387,19 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: xorl %r12d, %r12d
; X64-LFENCE-NEXT: .p2align 4, 0x90
-; X64-LFENCE-NEXT: .LBB2_2: # %l1.header
+; X64-LFENCE-NEXT: .LBB3_2: # %l1.header
; X64-LFENCE-NEXT: # =>This Loop Header: Depth=1
-; X64-LFENCE-NEXT: # Child Loop BB2_4 Depth 2
+; X64-LFENCE-NEXT: # Child Loop BB3_4 Depth 2
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: testl %r15d, %r15d
-; X64-LFENCE-NEXT: jle .LBB2_5
+; X64-LFENCE-NEXT: jle .LBB3_5
; X64-LFENCE-NEXT: # %bb.3: # %l2.header.preheader
-; X64-LFENCE-NEXT: # in Loop: Header=BB2_2 Depth=1
+; X64-LFENCE-NEXT: # in Loop: Header=BB3_2 Depth=1
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: xorl %ebp, %ebp
; X64-LFENCE-NEXT: .p2align 4, 0x90
-; X64-LFENCE-NEXT: .LBB2_4: # %l2.header
-; X64-LFENCE-NEXT: # Parent Loop BB2_2 Depth=1
+; X64-LFENCE-NEXT: .LBB3_4: # %l2.header
+; X64-LFENCE-NEXT: # Parent Loop BB3_2 Depth=1
; X64-LFENCE-NEXT: # => This Inner Loop Header: Depth=2
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: movslq (%rbx), %rax
@@ -386,17 +407,17 @@ define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %pt
; X64-LFENCE-NEXT: callq sink
; X64-LFENCE-NEXT: incl %ebp
; X64-LFENCE-NEXT: cmpl %r13d, %ebp
-; X64-LFENCE-NEXT: jl .LBB2_4
-; X64-LFENCE-NEXT: .LBB2_5: # %l1.latch
-; X64-LFENCE-NEXT: # in Loop: Header=BB2_2 Depth=1
+; X64-LFENCE-NEXT: jl .LBB3_4
+; X64-LFENCE-NEXT: .LBB3_5: # %l1.latch
+; X64-LFENCE-NEXT: # in Loop: Header=BB3_2 Depth=1
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: movslq (%rbx), %rax
; X64-LFENCE-NEXT: movl (%r14,%rax,4), %edi
; X64-LFENCE-NEXT: callq sink
; X64-LFENCE-NEXT: incl %r12d
; X64-LFENCE-NEXT: cmpl %r15d, %r12d
-; X64-LFENCE-NEXT: jl .LBB2_2
-; X64-LFENCE-NEXT: .LBB2_6: # %exit
+; X64-LFENCE-NEXT: jl .LBB3_2
+; X64-LFENCE-NEXT: .LBB3_6: # %exit
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: addq $8, %rsp
; X64-LFENCE-NEXT: popq %rbx
@@ -454,7 +475,7 @@ define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) nounwind personality
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: cmpl $41, %edi
-; X64-NEXT: jg .LBB3_1
+; X64-NEXT: jg .LBB4_1
; X64-NEXT: # %bb.2: # %thrower
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rsi, %rbx
@@ -479,17 +500,17 @@ define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) nounwind personality
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: sarq $63, %rax
; X64-NEXT: .Ltmp1:
-; X64-NEXT: jmp .LBB3_3
-; X64-NEXT: .LBB3_1:
+; X64-NEXT: jmp .LBB4_3
+; X64-NEXT: .LBB4_1:
; X64-NEXT: cmovleq %rcx, %rax
-; X64-NEXT: .LBB3_3: # %exit
+; X64-NEXT: .LBB4_3: # %exit
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r14
; X64-NEXT: popq %rbp
; X64-NEXT: retq
-; X64-NEXT: .LBB3_4: # %lpad
+; X64-NEXT: .LBB4_4: # %lpad
; X64-NEXT: .Ltmp2:
; X64-NEXT: movq %rsp, %rcx
; X64-NEXT: sarq $63, %rcx
@@ -511,7 +532,7 @@ define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) nounwind personality
; X64-LFENCE-NEXT: pushq %r14
; X64-LFENCE-NEXT: pushq %rbx
; X64-LFENCE-NEXT: cmpl $41, %edi
-; X64-LFENCE-NEXT: jg .LBB3_2
+; X64-LFENCE-NEXT: jg .LBB4_2
; X64-LFENCE-NEXT: # %bb.1: # %thrower
; X64-LFENCE-NEXT: movq %rdx, %r14
; X64-LFENCE-NEXT: movq %rsi, %rbx
@@ -527,13 +548,13 @@ define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) nounwind personality
; X64-LFENCE-NEXT: movq %rax, %rdi
; X64-LFENCE-NEXT: callq __cxa_throw
; X64-LFENCE-NEXT: .Ltmp1:
-; X64-LFENCE-NEXT: .LBB3_2: # %exit
+; X64-LFENCE-NEXT: .LBB4_2: # %exit
; X64-LFENCE-NEXT: lfence
; X64-LFENCE-NEXT: popq %rbx
; X64-LFENCE-NEXT: popq %r14
; X64-LFENCE-NEXT: popq %rbp
; X64-LFENCE-NEXT: retq
-; X64-LFENCE-NEXT: .LBB3_3: # %lpad
+; X64-LFENCE-NEXT: .LBB4_3: # %lpad
; X64-LFENCE-NEXT: .Ltmp2:
; X64-LFENCE-NEXT: movl (%rax), %eax
; X64-LFENCE-NEXT: addl (%rbx), %eax