summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
authorPetar Jovanovic <petar.jovanovic@imgtec.com>2017-06-28 10:21:17 +0000
committerPetar Jovanovic <petar.jovanovic@imgtec.com>2017-06-28 10:21:17 +0000
commit7b3a38ec306c49861869dc9d2a2bd99af82b3280 (patch)
tree91bea90da1471b702f66e72f4ea3baf16e3a5329 /llvm/test/CodeGen
parent77b5536e4e7612c2a47e1f56253a0fc84f1beef9 (diff)
downloadbcm5719-llvm-7b3a38ec306c49861869dc9d2a2bd99af82b3280.tar.gz
bcm5719-llvm-7b3a38ec306c49861869dc9d2a2bd99af82b3280.zip
[X86] Correct dwarf unwind information in function epilogue
CFI instructions that set appropriate cfa offset and cfa register are now inserted in emitEpilogue() in X86FrameLowering.

Majority of the changes in this patch:
1. Ensure that CFI instructions do not affect code generation.
2. Enable maintaining correct information about cfa offset and cfa register in a function when basic blocks are reordered, merged, split, duplicated.

These changes are target independent and described below.

Changed CFI instructions so that they:
1. are duplicable
2. are not counted as instructions when tail duplicating or tail merging
3. can be compared as equal

Add information to each MachineBasicBlock about cfa offset and cfa register that are valid at its entry and exit (incoming and outgoing CFI info). Add support for updating this information when basic blocks are merged, split, duplicated, created.

Add a verification pass (CFIInfoVerifier) that checks that outgoing cfa offset and register of predecessor blocks match incoming values of their successors.

Incoming and outgoing CFI information is used by a late pass (CFIInstrInserter) that corrects CFA calculation rule for a basic block if needed. That means that additional CFI instructions get inserted at basic block beginning to correct the rule for calculating CFA. Having CFI instructions in function epilogue can cause incorrect CFA calculation rule for some basic blocks. This can happen if, due to basic block reordering, or the existence of multiple epilogue blocks, some of the blocks have wrong cfa offset and register values set by the epilogue block above them.

Patch by Violeta Vukobrat.

Differential Revision: https://reviews.llvm.org/D18046

llvm-svn: 306529
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll2
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll2
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll2
-rw-r--r--llvm/test/CodeGen/X86/O0-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/X86/avg.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512-vbroadcast.ll6
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll54
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-intrinsics.ll14
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll118
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll10
-rw-r--r--llvm/test/CodeGen/X86/emutls-pie.ll12
-rw-r--r--llvm/test/CodeGen/X86/emutls.ll32
-rw-r--r--llvm/test/CodeGen/X86/epilogue-cfi-fp.ll44
-rw-r--r--llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll50
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-store.ll40
-rw-r--r--llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll14
-rw-r--r--llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll8
-rw-r--r--llvm/test/CodeGen/X86/haddsub-2.ll48
-rw-r--r--llvm/test/CodeGen/X86/hipe-cc64.ll2
-rw-r--r--llvm/test/CodeGen/X86/imul.ll14
-rw-r--r--llvm/test/CodeGen/X86/legalize-shift-64.ll16
-rw-r--r--llvm/test/CodeGen/X86/load-combine.ll8
-rw-r--r--llvm/test/CodeGen/X86/masked_gather_scatter.ll74
-rw-r--r--llvm/test/CodeGen/X86/memset-nonzero.ll2
-rw-r--r--llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll72
-rw-r--r--llvm/test/CodeGen/X86/movtopush.ll4
-rw-r--r--llvm/test/CodeGen/X86/mul-constant-result.ll322
-rw-r--r--llvm/test/CodeGen/X86/mul-i256.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr21792.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr29112.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr30430.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr32241.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr32256.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr32329.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr32345.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr32451.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr9743.ll2
-rw-r--r--llvm/test/CodeGen/X86/push-cfi-debug.ll4
-rw-r--r--llvm/test/CodeGen/X86/push-cfi-obj.ll7
-rw-r--r--llvm/test/CodeGen/X86/push-cfi.ll3
-rw-r--r--llvm/test/CodeGen/X86/return-ext.ll6
-rw-r--r--llvm/test/CodeGen/X86/rtm.ll2
-rw-r--r--llvm/test/CodeGen/X86/setcc-lowering.ll2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-call-lowering.ll2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke.ll6
-rw-r--r--llvm/test/CodeGen/X86/throws-cfi-fp.ll103
-rw-r--r--llvm/test/CodeGen/X86/throws-cfi-no-fp.ll102
-rw-r--r--llvm/test/CodeGen/X86/vector-sext.ll26
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-avx512.ll10
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-v1.ll2
-rw-r--r--llvm/test/CodeGen/X86/wide-integer-cmp.ll6
-rw-r--r--llvm/test/CodeGen/X86/x86-framelowering-trap.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll2
55 files changed, 1058 insertions, 243 deletions
diff --git a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
index 6814ed1d894..4f0d7348da0 100644
--- a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
+++ b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
@@ -23,6 +23,8 @@ lpad: ; preds = %cont, %entry
}
; CHECK: lpad
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: Ltmp
declare i32 @__gxx_personality_v0(...)
diff --git a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index ba5de8eb5fc..3dda56bdc65 100644
--- a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -88,6 +88,8 @@ define void @full_test() {
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT: addl $60, %esp
+; X32-NEXT: .Lcfi1:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: full_test:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
index a5dc7906363..88ea96c43ba 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -23,6 +23,8 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
; X32-NEXT: addl 8(%ebp), %eax
; X32-NEXT: adcl 12(%ebp), %edx
; X32-NEXT: popl %ebp
+; X32-NEXT: .Lcfi3:
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
%ret = add i64 %arg1, %arg2
ret i64 %ret
diff --git a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll
index a9ec94defea..09ff60ed591 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll
@@ -19,6 +19,8 @@ define i32* @allocai32() {
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movl %esp, %eax
; X32-NEXT: popl %ecx
+; X32-NEXT: .Lcfi1:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X32ABI-LABEL: allocai32:
diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
index 5e375cc42e0..bd742c2c70a 100644
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -46,7 +46,9 @@
; CHECK-NEXT: Post-RA pseudo instruction expansion pass
; CHECK-NEXT: X86 pseudo instruction expansion pass
; CHECK-NEXT: Analyze Machine Code For Garbage Collection
+; CHECK-NEXT: Verify that corresponding in/out CFI info matches
; CHECK-NEXT: X86 vzeroupper inserter
+; CHECK-NEXT: CFI Instruction Inserter
; CHECK-NEXT: Contiguously Lay Out Funclets
; CHECK-NEXT: StackMap Liveness Analysis
; CHECK-NEXT: Live DEBUG_VALUE analysis
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index e5f7cc5c6dd..8ba0d0e275e 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -591,6 +591,8 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vmovups %ymm1, (%rax)
; AVX1-NEXT: addq $24, %rsp
+; AVX1-NEXT: .Lcfi1:
+; AVX1-NEXT: .cfi_def_cfa_offset 8
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
index 350c0d7873e..576a3afa6bb 100644
--- a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -414,6 +414,8 @@ define <16 x float> @broadcast_ss_spill(float %x) {
; ALL-NEXT: callq func_f32
; ALL-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload
; ALL-NEXT: addq $24, %rsp
+; ALL-NEXT: .Lcfi1:
+; ALL-NEXT: .cfi_def_cfa_offset 8
; ALL-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -427,13 +429,15 @@ define <8 x double> @broadcast_sd_spill(double %x) {
; ALL-LABEL: broadcast_sd_spill:
; ALL: # BB#0:
; ALL-NEXT: subq $24, %rsp
-; ALL-NEXT: .Lcfi1:
+; ALL-NEXT: .Lcfi2:
; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: callq func_f64
; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload
; ALL-NEXT: addq $24, %rsp
+; ALL-NEXT: .Lcfi3:
+; ALL-NEXT: .cfi_def_cfa_offset 8
; ALL-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index d56c4675b73..faf90a16d30 100644
--- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -289,6 +289,8 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi1:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1)
ret i64 %res
@@ -305,7 +307,7 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512F-32-LABEL: test_mask_pcmpeq_b:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $12, %esp
-; AVX512F-32-NEXT: .Lcfi1:
+; AVX512F-32-NEXT: .Lcfi2:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
@@ -313,6 +315,8 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi3:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
ret i64 %res
@@ -366,13 +370,15 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) {
; AVX512F-32-LABEL: test_pcmpgt_b:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $12, %esp
-; AVX512F-32-NEXT: .Lcfi2:
+; AVX512F-32-NEXT: .Lcfi4:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovq %k0, (%esp)
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi5:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1)
ret i64 %res
@@ -389,7 +395,7 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512F-32-LABEL: test_mask_pcmpgt_b:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $12, %esp
-; AVX512F-32-NEXT: .Lcfi3:
+; AVX512F-32-NEXT: .Lcfi6:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpcmpgtb %zmm1, %zmm0, %k0 {%k1}
@@ -397,6 +403,8 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) {
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi7:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask)
ret i64 %res
@@ -1593,7 +1601,7 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512F-32-LABEL: test_cmp_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $60, %esp
-; AVX512F-32-NEXT: .Lcfi4:
+; AVX512F-32-NEXT: .Lcfi8:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 64
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovq %k0, {{[0-9]+}}(%esp)
@@ -1624,6 +1632,8 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $60, %esp
+; AVX512F-32-NEXT: .Lcfi9:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1)
%res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1)
@@ -1673,17 +1683,17 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-LABEL: test_mask_cmp_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: pushl %ebx
-; AVX512F-32-NEXT: .Lcfi5:
+; AVX512F-32-NEXT: .Lcfi10:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: pushl %esi
-; AVX512F-32-NEXT: .Lcfi6:
+; AVX512F-32-NEXT: .Lcfi11:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 12
; AVX512F-32-NEXT: subl $60, %esp
-; AVX512F-32-NEXT: .Lcfi7:
+; AVX512F-32-NEXT: .Lcfi12:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 72
-; AVX512F-32-NEXT: .Lcfi8:
+; AVX512F-32-NEXT: .Lcfi13:
; AVX512F-32-NEXT: .cfi_offset %esi, -12
-; AVX512F-32-NEXT: .Lcfi9:
+; AVX512F-32-NEXT: .Lcfi14:
; AVX512F-32-NEXT: .cfi_offset %ebx, -8
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: movb %cl, %al
@@ -2426,8 +2436,14 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: addl %esi, %eax
; AVX512F-32-NEXT: adcxl %ecx, %edx
; AVX512F-32-NEXT: addl $60, %esp
+; AVX512F-32-NEXT: .Lcfi15:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 12
; AVX512F-32-NEXT: popl %esi
+; AVX512F-32-NEXT: .Lcfi16:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: popl %ebx
+; AVX512F-32-NEXT: .Lcfi17:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask)
%res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask)
@@ -2477,7 +2493,7 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512F-32-LABEL: test_ucmp_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $60, %esp
-; AVX512F-32-NEXT: .Lcfi10:
+; AVX512F-32-NEXT: .Lcfi18:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 64
; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512F-32-NEXT: kmovq %k0, {{[0-9]+}}(%esp)
@@ -2508,6 +2524,8 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) {
; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $60, %esp
+; AVX512F-32-NEXT: .Lcfi19:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1)
%res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1)
@@ -2557,17 +2575,17 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-LABEL: test_mask_x86_avx512_ucmp_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: pushl %ebx
-; AVX512F-32-NEXT: .Lcfi11:
+; AVX512F-32-NEXT: .Lcfi20:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: pushl %esi
-; AVX512F-32-NEXT: .Lcfi12:
+; AVX512F-32-NEXT: .Lcfi21:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 12
; AVX512F-32-NEXT: subl $60, %esp
-; AVX512F-32-NEXT: .Lcfi13:
+; AVX512F-32-NEXT: .Lcfi22:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 72
-; AVX512F-32-NEXT: .Lcfi14:
+; AVX512F-32-NEXT: .Lcfi23:
; AVX512F-32-NEXT: .cfi_offset %esi, -12
-; AVX512F-32-NEXT: .Lcfi15:
+; AVX512F-32-NEXT: .Lcfi24:
; AVX512F-32-NEXT: .cfi_offset %ebx, -8
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX512F-32-NEXT: movb %cl, %al
@@ -3310,8 +3328,14 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: addl %esi, %eax
; AVX512F-32-NEXT: adcxl %ecx, %edx
; AVX512F-32-NEXT: addl $60, %esp
+; AVX512F-32-NEXT: .Lcfi25:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 12
; AVX512F-32-NEXT: popl %esi
+; AVX512F-32-NEXT: .Lcfi26:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
; AVX512F-32-NEXT: popl %ebx
+; AVX512F-32-NEXT: .Lcfi27:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask)
%res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask)
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
index 5472f057ef2..2281c01a84f 100644
--- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1600,6 +1600,8 @@ define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) {
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi1:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.kunpck.dq(i64 %x0, i64 %x1)
ret i64 %res
@@ -1617,13 +1619,15 @@ define i64@test_int_x86_avx512_cvtb2mask_512(<64 x i8> %x0) {
; AVX512F-32-LABEL: test_int_x86_avx512_cvtb2mask_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $12, %esp
-; AVX512F-32-NEXT: .Lcfi1:
+; AVX512F-32-NEXT: .Lcfi2:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 16
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
; AVX512F-32-NEXT: kmovq %k0, (%esp)
; AVX512F-32-NEXT: movl (%esp), %eax
; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $12, %esp
+; AVX512F-32-NEXT: .Lcfi3:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8> %x0)
ret i64 %res
@@ -1801,7 +1805,7 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x
; AVX512F-32-LABEL: test_int_x86_avx512_ptestm_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $20, %esp
-; AVX512F-32-NEXT: .Lcfi2:
+; AVX512F-32-NEXT: .Lcfi4:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 24
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
@@ -1815,6 +1819,8 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x
; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $20, %esp
+; AVX512F-32-NEXT: .Lcfi5:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2)
%res1 = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1)
@@ -1866,7 +1872,7 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %
; AVX512F-32-LABEL: test_int_x86_avx512_ptestnm_b_512:
; AVX512F-32: # BB#0:
; AVX512F-32-NEXT: subl $20, %esp
-; AVX512F-32-NEXT: .Lcfi3:
+; AVX512F-32-NEXT: .Lcfi6:
; AVX512F-32-NEXT: .cfi_def_cfa_offset 24
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
@@ -1880,6 +1886,8 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %
; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax
; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx
; AVX512F-32-NEXT: addl $20, %esp
+; AVX512F-32-NEXT: .Lcfi7:
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 4
; AVX512F-32-NEXT: retl
%res = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2)
%res1 = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1)
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index 530e2c544cf..8e25f2fd3e7 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -33,6 +33,8 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd %xmm1, %xmm0 {%k1}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi1:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastd_epi32:
@@ -57,7 +59,7 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastd_epi32:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi1:
+; X32-NEXT: .Lcfi2:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -66,6 +68,8 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi3:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastd_epi32:
@@ -162,7 +166,7 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-LABEL: test_mm_mask_broadcastq_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi2:
+; X32-NEXT: .Lcfi4:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -171,6 +175,8 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64>
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm1, %xmm0 {%k1}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi5:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastq_epi64:
@@ -192,7 +198,7 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_maskz_broadcastq_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi3:
+; X32-NEXT: .Lcfi6:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -201,6 +207,8 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi7:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastq_epi64:
@@ -236,7 +244,7 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i
; X32-LABEL: test_mm256_mask_broadcastq_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi4:
+; X32-NEXT: .Lcfi8:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -245,6 +253,8 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm1, %ymm0 {%k1}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi9:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastq_epi64:
@@ -266,7 +276,7 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastq_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi5:
+; X32-NEXT: .Lcfi10:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -275,6 +285,8 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi11:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastq_epi64:
@@ -310,7 +322,7 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x
; X32-LABEL: test_mm_mask_broadcastsd_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi6:
+; X32-NEXT: .Lcfi12:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -319,6 +331,8 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi13:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastsd_pd:
@@ -340,7 +354,7 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_maskz_broadcastsd_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi7:
+; X32-NEXT: .Lcfi14:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -349,6 +363,8 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi15:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastsd_pd:
@@ -384,7 +400,7 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2
; X32-LABEL: test_mm256_mask_broadcastsd_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi8:
+; X32-NEXT: .Lcfi16:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -393,6 +409,8 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi17:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_broadcastsd_pd:
@@ -414,7 +432,7 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm256_maskz_broadcastsd_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi9:
+; X32-NEXT: .Lcfi18:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -423,6 +441,8 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi19:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_broadcastsd_pd:
@@ -458,7 +478,7 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl
; X32-LABEL: test_mm_mask_broadcastss_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi10:
+; X32-NEXT: .Lcfi20:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -467,6 +487,8 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi21:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_broadcastss_ps:
@@ -488,7 +510,7 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_broadcastss_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi11:
+; X32-NEXT: .Lcfi22:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -497,6 +519,8 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z}
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi23:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_broadcastss_ps:
@@ -584,7 +608,7 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-LABEL: test_mm_mask_movddup_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi12:
+; X32-NEXT: .Lcfi24:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -593,6 +617,8 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi25:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_movddup_pd:
@@ -614,7 +640,7 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) {
; X32-LABEL: test_mm_maskz_movddup_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi13:
+; X32-NEXT: .Lcfi26:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -623,6 +649,8 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi27:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_movddup_pd:
@@ -658,7 +686,7 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-LABEL: test_mm256_mask_movddup_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi14:
+; X32-NEXT: .Lcfi28:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -667,6 +695,8 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi29:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_movddup_pd:
@@ -688,7 +718,7 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) {
; X32-LABEL: test_mm256_maskz_movddup_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi15:
+; X32-NEXT: .Lcfi30:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -697,6 +727,8 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi31:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_movddup_pd:
@@ -732,7 +764,7 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-LABEL: test_mm_mask_movehdup_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi16:
+; X32-NEXT: .Lcfi32:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -741,6 +773,8 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = xmm1[1,1,3,3]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi33:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_movehdup_ps:
@@ -762,7 +796,7 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_movehdup_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi17:
+; X32-NEXT: .Lcfi34:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -771,6 +805,8 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi35:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_movehdup_ps:
@@ -858,7 +894,7 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-LABEL: test_mm_mask_moveldup_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi18:
+; X32-NEXT: .Lcfi36:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -867,6 +903,8 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = xmm1[0,0,2,2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi37:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_moveldup_ps:
@@ -888,7 +926,7 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) {
; X32-LABEL: test_mm_maskz_moveldup_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi19:
+; X32-NEXT: .Lcfi38:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -897,6 +935,8 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi39:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_moveldup_ps:
@@ -984,7 +1024,7 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64
; X32-LABEL: test_mm256_mask_permutex_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi20:
+; X32-NEXT: .Lcfi40:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -993,6 +1033,8 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi41:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_permutex_epi64:
@@ -1014,7 +1056,7 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_maskz_permutex_epi64:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi21:
+; X32-NEXT: .Lcfi42:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1023,6 +1065,8 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi43:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_permutex_epi64:
@@ -1058,7 +1102,7 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x
; X32-LABEL: test_mm256_mask_permutex_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi22:
+; X32-NEXT: .Lcfi44:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1067,6 +1111,8 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi45:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_permutex_pd:
@@ -1088,7 +1134,7 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
; X32-LABEL: test_mm256_maskz_permutex_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi23:
+; X32-NEXT: .Lcfi46:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1097,6 +1143,8 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) {
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi47:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_permutex_pd:
@@ -1132,7 +1180,7 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-LABEL: test_mm_mask_shuffle_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi24:
+; X32-NEXT: .Lcfi48:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -1141,6 +1189,8 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} = xmm1[1],xmm2[1]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi49:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_shuffle_pd:
@@ -1162,7 +1212,7 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou
; X32-LABEL: test_mm_maskz_shuffle_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi25:
+; X32-NEXT: .Lcfi50:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $3, %al
@@ -1171,6 +1221,8 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi51:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_shuffle_pd:
@@ -1206,7 +1258,7 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-LABEL: test_mm256_mask_shuffle_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi26:
+; X32-NEXT: .Lcfi52:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1215,6 +1267,8 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} = ymm1[1],ymm2[1],ymm1[2],ymm2[2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi53:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_shuffle_pd:
@@ -1236,7 +1290,7 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x
; X32-LABEL: test_mm256_maskz_shuffle_pd:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi27:
+; X32-NEXT: .Lcfi54:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1245,6 +1299,8 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[2],ymm1[2]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi55:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_shuffle_pd:
@@ -1280,7 +1336,7 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float>
; X32-LABEL: test_mm_mask_shuffle_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi28:
+; X32-NEXT: .Lcfi56:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1289,6 +1345,8 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float>
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[0,1],xmm2[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi57:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_shuffle_ps:
@@ -1310,7 +1368,7 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float
; X32-LABEL: test_mm_maskz_shuffle_ps:
; X32: # BB#0:
; X32-NEXT: pushl %eax
-; X32-NEXT: .Lcfi29:
+; X32-NEXT: .Lcfi58:
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: andb $15, %al
@@ -1319,6 +1377,8 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1],xmm1[0,0]
; X32-NEXT: popl %eax
+; X32-NEXT: .Lcfi59:
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskz_shuffle_ps:
diff --git a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll
index 38a461ff0be..98265fdc1cc 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll
@@ -13,6 +13,8 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) {
; CHECK-NEXT: callq func_f32
; CHECK-NEXT: vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload
; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -25,13 +27,15 @@ define <4 x float> @_128_broadcast_ss_spill(float %x) {
; CHECK-LABEL: _128_broadcast_ss_spill:
; CHECK: # BB#0:
; CHECK-NEXT: subq $24, %rsp
-; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .Lcfi2:
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: callq func_f32
; CHECK-NEXT: vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%a = fadd float %x, %x
call void @func_f32(float %a)
@@ -45,13 +49,15 @@ define <4 x double> @_256_broadcast_sd_spill(double %x) {
; CHECK-LABEL: _256_broadcast_sd_spill:
; CHECK: # BB#0:
; CHECK-NEXT: subq $24, %rsp
-; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .Lcfi4:
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vaddsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: callq func_f64
; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload
; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: .Lcfi5:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%a = fadd double %x, %x
call void @func_f64(double %a)
diff --git a/llvm/test/CodeGen/X86/emutls-pie.ll b/llvm/test/CodeGen/X86/emutls-pie.ll
index 5db8c888a4e..2ff66e8b917 100644
--- a/llvm/test/CodeGen/X86/emutls-pie.ll
+++ b/llvm/test/CodeGen/X86/emutls-pie.ll
@@ -18,13 +18,19 @@ define i32 @my_get_xyz() {
; X32-NEXT: calll my_emutls_get_address@PLT
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $8, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X64-LABEL: my_get_xyz:
; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi
; X64-NEXT: callq my_emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rcx
+; X64-NEXT: :
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
@@ -44,13 +50,19 @@ define i32 @f1() {
; X32-NEXT: calll __emutls_get_address@PLT
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $8, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X64-LABEL: f1:
; X64: leaq __emutls_v.i(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rcx
+; X64-NEXT: :
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll
index 9266fe962df..eaff77734a6 100644
--- a/llvm/test/CodeGen/X86/emutls.ll
+++ b/llvm/test/CodeGen/X86/emutls.ll
@@ -16,12 +16,16 @@ define i32 @my_get_xyz() {
; X32-NEXT: calll my_emutls_get_address
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X64-LABEL: my_get_xyz:
; X64: movl $my_emutls_v_xyz, %edi
; X64-NEXT: callq my_emutls_get_address
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rcx
+; X64-NEXT: :
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
@@ -45,12 +49,16 @@ define i32 @f1() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X64-LABEL: f1:
; X64: movl $__emutls_v.i1, %edi
; X64-NEXT: callq __emutls_get_address
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rcx
+; X64-NEXT: :
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
@@ -63,11 +71,15 @@ define i32* @f2() {
; X32: movl $__emutls_v.i1, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X64-LABEL: f2:
; X64: movl $__emutls_v.i1, %edi
; X64-NEXT: callq __emutls_get_address
; X64-NEXT: popq %rcx
+; X64-NEXT: :
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
@@ -92,6 +104,8 @@ define i32* @f4() {
; X32: movl $__emutls_v.i2, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -116,6 +130,8 @@ define i32* @f6() {
; X32: movl $__emutls_v.i3, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -128,6 +144,8 @@ define i32 @f7() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -140,6 +158,8 @@ define i32* @f8() {
; X32: movl $__emutls_v.i4, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -152,6 +172,8 @@ define i32 @f9() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -164,6 +186,8 @@ define i32* @f10() {
; X32: movl $__emutls_v.i5, (%esp)
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -176,6 +200,8 @@ define i16 @f11() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movzwl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -189,6 +215,8 @@ define i32 @f12() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movswl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -203,6 +231,8 @@ define i8 @f13() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movb (%eax), %al
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
@@ -216,6 +246,8 @@ define i32 @f14() {
; X32-NEXT: calll __emutls_get_address
; X32-NEXT: movsbl (%eax), %eax
; X32-NEXT: addl $12, %esp
+; X32-NEXT: :
+; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
entry:
diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll
new file mode 100644
index 00000000000..d964576d31c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll
@@ -0,0 +1,44 @@
+; RUN: llc -O0 %s -o - | FileCheck %s
+
+; ModuleID = 'epilogue-cfi-fp.c'
+source_filename = "epilogue-cfi-fp.c"
+target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+target triple = "i686-pc-linux"
+
+; Function Attrs: noinline nounwind
+define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) #0 {
+
+; CHECK-LABEL: foo:
+; CHECK: popl %ebp
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %esp, 4
+; CHECK-NEXT: retl
+
+entry:
+ %i.addr = alloca i32, align 4
+ %j.addr = alloca i32, align 4
+ %k.addr = alloca i32, align 4
+ %l.addr = alloca i32, align 4
+ %m.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ store i32 %j, i32* %j.addr, align 4
+ store i32 %k, i32* %k.addr, align 4
+ store i32 %l, i32* %l.addr, align 4
+ store i32 %m, i32* %m.addr, align 4
+ ret i32 0
+}
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5, !6, !7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "epilogue-cfi-fp.c", directory: "epilogue-dwarf/test")
+!2 = !{}
+!3 = !{i32 1, !"NumRegisterParameters", i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 4}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{i32 1, !"wchar_size", i32 4}
+!7 = !{i32 7, !"PIC Level", i32 2}
+
diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll
new file mode 100644
index 00000000000..275a9c8fc1a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll
@@ -0,0 +1,50 @@
+; RUN: llc -O0 < %s | FileCheck %s
+
+; ModuleID = 'epilogue-cfi-no-fp.c'
+source_filename = "epilogue-cfi-no-fp.c"
+target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+target triple = "i686-pc-linux"
+
+; Function Attrs: noinline nounwind
+define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) {
+; CHECK-LABEL: foo:
+; CHECK: addl $20, %esp
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 12
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 4
+; CHECK-NEXT: retl
+entry:
+ %i.addr = alloca i32, align 4
+ %j.addr = alloca i32, align 4
+ %k.addr = alloca i32, align 4
+ %l.addr = alloca i32, align 4
+ %m.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ store i32 %j, i32* %j.addr, align 4
+ store i32 %k, i32* %k.addr, align 4
+ store i32 %l, i32* %l.addr, align 4
+ store i32 %m, i32* %m.addr, align 4
+ ret i32 0
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5, !6, !7}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "epilogue-cfi-no-fp.c", directory: "epilogue-dwarf/test")
+!2 = !{}
+!3 = !{i32 1, !"NumRegisterParameters", i32 0}
+!4 = !{i32 2, !"Dwarf Version", i32 4}
+!5 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = !{i32 1, !"wchar_size", i32 4}
+!7 = !{i32 7, !"PIC Level", i32 2}
+
+
diff --git a/llvm/test/CodeGen/X86/fast-isel-store.ll b/llvm/test/CodeGen/X86/fast-isel-store.ll
index 528682bf70b..65fb1115e68 100644
--- a/llvm/test/CodeGen/X86/fast-isel-store.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-store.ll
@@ -376,6 +376,8 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE64-NEXT: movupd %xmm0, (%eax)
; SSE64-NEXT: movupd %xmm1, 16(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi1:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64:
@@ -407,7 +409,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-LABEL: test_store_4xf64_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi1:
+; SSE64-NEXT: .Lcfi2:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd {{[0-9]+}}(%esp), %xmm1
@@ -415,6 +417,8 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-NEXT: movapd %xmm0, (%eax)
; SSE64-NEXT: movapd %xmm1, 16(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi3:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64_aligned:
@@ -446,7 +450,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-LABEL: test_store_16xi32:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi2:
+; SSE64-NEXT: .Lcfi4:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -455,6 +459,8 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-NEXT: movups %xmm2, 32(%eax)
; SSE64-NEXT: movups %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi5:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32:
@@ -496,7 +502,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-LABEL: test_store_16xi32_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi3:
+; SSE64-NEXT: .Lcfi6:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -505,6 +511,8 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-NEXT: movaps %xmm2, 32(%eax)
; SSE64-NEXT: movaps %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi7:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32_aligned:
@@ -546,7 +554,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-LABEL: test_store_16xf32:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi4:
+; SSE64-NEXT: .Lcfi8:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -555,6 +563,8 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-NEXT: movups %xmm2, 32(%eax)
; SSE64-NEXT: movups %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi9:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32:
@@ -596,7 +606,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-LABEL: test_store_16xf32_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi5:
+; SSE64-NEXT: .Lcfi10:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -605,6 +615,8 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-NEXT: movaps %xmm2, 32(%eax)
; SSE64-NEXT: movaps %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi11:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32_aligned:
@@ -650,7 +662,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-LABEL: test_store_8xf64:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi6:
+; SSE64-NEXT: .Lcfi12:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -663,6 +675,8 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-NEXT: movupd %xmm2, 32(%eax)
; SSE64-NEXT: movupd %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi13:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64:
@@ -692,6 +706,8 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY64-NEXT: vmovupd %ymm1, 32(%eax)
; AVXONLY64-NEXT: movl %ebp, %esp
; AVXONLY64-NEXT: popl %ebp
+; AVXONLY64-NEXT: .Lcfi3:
+; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64:
@@ -727,7 +743,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-LABEL: test_store_8xf64_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi7:
+; SSE64-NEXT: .Lcfi14:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -740,6 +756,8 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-NEXT: movapd %xmm2, 32(%eax)
; SSE64-NEXT: movapd %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
+; SSE64-NEXT: .Lcfi15:
+; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64_aligned:
@@ -753,12 +771,12 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-LABEL: test_store_8xf64_aligned:
; AVXONLY64: # BB#0:
; AVXONLY64-NEXT: pushl %ebp
-; AVXONLY64-NEXT: .Lcfi3:
-; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
; AVXONLY64-NEXT: .Lcfi4:
+; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
+; AVXONLY64-NEXT: .Lcfi5:
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
; AVXONLY64-NEXT: movl %esp, %ebp
-; AVXONLY64-NEXT: .Lcfi5:
+; AVXONLY64-NEXT: .Lcfi6:
; AVXONLY64-NEXT: .cfi_def_cfa_register %ebp
; AVXONLY64-NEXT: andl $-32, %esp
; AVXONLY64-NEXT: subl $32, %esp
@@ -769,6 +787,8 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-NEXT: vmovapd %ymm1, 32(%eax)
; AVXONLY64-NEXT: movl %ebp, %esp
; AVXONLY64-NEXT: popl %ebp
+; AVXONLY64-NEXT: .Lcfi7:
+; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64_aligned:
diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll
index ab797e04b40..ef86f998fc0 100644
--- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll
+++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll
@@ -18,9 +18,14 @@ entry:
}
; CHECK-LABEL: noDebug
-; CHECK: addq $24, %rsp
+; CHECK: addq $16, %rsp
+; CHECK: addq $8, %rsp
; CHECK: popq %rbx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: popq %r14
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
@@ -41,9 +46,14 @@ entry:
; CHECK-LABEL: withDebug
; CHECK: #DEBUG_VALUE: test:j <- %RBX
-; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: addq $16, %rsp
+; CHECK: addq $8, %rsp
; CHECK: popq %rbx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: popq %r14
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)
diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
index f9ecf707810..593fb449082 100644
--- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
+++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
@@ -9,6 +9,8 @@ define i64 @fn1NoDebug(i64 %a) {
; CHECK-LABEL: fn1NoDebug
; CHECK: popq %rcx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: ret
define i64 @fn1WithDebug(i64 %a) !dbg !4 {
@@ -19,6 +21,8 @@ define i64 @fn1WithDebug(i64 %a) !dbg !4 {
; CHECK-LABEL: fn1WithDebug
; CHECK: popq %rcx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: ret
%struct.Buffer = type { i8, [63 x i8] }
@@ -33,6 +37,8 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) {
; CHECK-NOT: sub
; CHECK: mov
; CHECK-NEXT: pop
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: ret
define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 {
@@ -46,6 +52,8 @@ define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 {
; CHECK-NOT: sub
; CHECK: mov
; CHECK-NEXT: pop
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: ret
declare i64 @fn(i64, i64)
diff --git a/llvm/test/CodeGen/X86/haddsub-2.ll b/llvm/test/CodeGen/X86/haddsub-2.ll
index fd023d01803..428b4110219 100644
--- a/llvm/test/CodeGen/X86/haddsub-2.ll
+++ b/llvm/test/CodeGen/X86/haddsub-2.ll
@@ -736,11 +736,23 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) {
; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: .Lcfi12:
+; SSE3-NEXT: .cfi_def_cfa_offset 48
; SSE3-NEXT: popq %r12
+; SSE3-NEXT: .Lcfi13:
+; SSE3-NEXT: .cfi_def_cfa_offset 40
; SSE3-NEXT: popq %r13
+; SSE3-NEXT: .Lcfi14:
+; SSE3-NEXT: .cfi_def_cfa_offset 32
; SSE3-NEXT: popq %r14
+; SSE3-NEXT: .Lcfi15:
+; SSE3-NEXT: .cfi_def_cfa_offset 24
; SSE3-NEXT: popq %r15
+; SSE3-NEXT: .Lcfi16:
+; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: popq %rbp
+; SSE3-NEXT: .Lcfi17:
+; SSE3-NEXT: .cfi_def_cfa_offset 8
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_vphadd_w_test:
@@ -1263,34 +1275,34 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
; SSE3-LABEL: avx2_hadd_w:
; SSE3: # BB#0:
; SSE3-NEXT: pushq %rbp
-; SSE3-NEXT: .Lcfi12:
+; SSE3-NEXT: .Lcfi18:
; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: pushq %r15
-; SSE3-NEXT: .Lcfi13:
+; SSE3-NEXT: .Lcfi19:
; SSE3-NEXT: .cfi_def_cfa_offset 24
; SSE3-NEXT: pushq %r14
-; SSE3-NEXT: .Lcfi14:
+; SSE3-NEXT: .Lcfi20:
; SSE3-NEXT: .cfi_def_cfa_offset 32
; SSE3-NEXT: pushq %r13
-; SSE3-NEXT: .Lcfi15:
+; SSE3-NEXT: .Lcfi21:
; SSE3-NEXT: .cfi_def_cfa_offset 40
; SSE3-NEXT: pushq %r12
-; SSE3-NEXT: .Lcfi16:
+; SSE3-NEXT: .Lcfi22:
; SSE3-NEXT: .cfi_def_cfa_offset 48
; SSE3-NEXT: pushq %rbx
-; SSE3-NEXT: .Lcfi17:
+; SSE3-NEXT: .Lcfi23:
; SSE3-NEXT: .cfi_def_cfa_offset 56
-; SSE3-NEXT: .Lcfi18:
+; SSE3-NEXT: .Lcfi24:
; SSE3-NEXT: .cfi_offset %rbx, -56
-; SSE3-NEXT: .Lcfi19:
+; SSE3-NEXT: .Lcfi25:
; SSE3-NEXT: .cfi_offset %r12, -48
-; SSE3-NEXT: .Lcfi20:
+; SSE3-NEXT: .Lcfi26:
; SSE3-NEXT: .cfi_offset %r13, -40
-; SSE3-NEXT: .Lcfi21:
+; SSE3-NEXT: .Lcfi27:
; SSE3-NEXT: .cfi_offset %r14, -32
-; SSE3-NEXT: .Lcfi22:
+; SSE3-NEXT: .Lcfi28:
; SSE3-NEXT: .cfi_offset %r15, -24
-; SSE3-NEXT: .Lcfi23:
+; SSE3-NEXT: .Lcfi29:
; SSE3-NEXT: .cfi_offset %rbp, -16
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pextrw $1, %xmm0, %r10d
@@ -1375,11 +1387,23 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) {
; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
; SSE3-NEXT: popq %rbx
+; SSE3-NEXT: .Lcfi30:
+; SSE3-NEXT: .cfi_def_cfa_offset 48
; SSE3-NEXT: popq %r12
+; SSE3-NEXT: .Lcfi31:
+; SSE3-NEXT: .cfi_def_cfa_offset 40
; SSE3-NEXT: popq %r13
+; SSE3-NEXT: .Lcfi32:
+; SSE3-NEXT: .cfi_def_cfa_offset 32
; SSE3-NEXT: popq %r14
+; SSE3-NEXT: .Lcfi33:
+; SSE3-NEXT: .cfi_def_cfa_offset 24
; SSE3-NEXT: popq %r15
+; SSE3-NEXT: .Lcfi34:
+; SSE3-NEXT: .cfi_def_cfa_offset 16
; SSE3-NEXT: popq %rbp
+; SSE3-NEXT: .Lcfi35:
+; SSE3-NEXT: .cfi_def_cfa_offset 8
; SSE3-NEXT: retq
;
; SSSE3-LABEL: avx2_hadd_w:
diff --git a/llvm/test/CodeGen/X86/hipe-cc64.ll b/llvm/test/CodeGen/X86/hipe-cc64.ll
index 43e2e1409fd..c61ea289a4a 100644
--- a/llvm/test/CodeGen/X86/hipe-cc64.ll
+++ b/llvm/test/CodeGen/X86/hipe-cc64.ll
@@ -91,6 +91,8 @@ define cc 11 { i64, i64, i64 } @tailcaller(i64 %hp, i64 %p) #0 {
; CHECK-NEXT: movl $47, %ecx
; CHECK-NEXT: movl $63, %r8d
; CHECK-NEXT: popq %rax
+ ; CHECK-NEXT: :
+ ; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: jmp tailcallee
%ret = tail call cc11 { i64, i64, i64 } @tailcallee(i64 %hp, i64 %p, i64 15,
i64 31, i64 47, i64 63, i64 79) #1
diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll
index 45a83cc5dfd..e1c5ff1b35c 100644
--- a/llvm/test/CodeGen/X86/imul.ll
+++ b/llvm/test/CodeGen/X86/imul.ll
@@ -309,6 +309,8 @@ define i64 @test5(i64 %a) {
; X86-NEXT: subl %ecx, %edx
; X86-NEXT: subl %esi, %edx
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
entry:
%tmp3 = mul i64 %a, -31
@@ -351,9 +353,9 @@ define i64 @test7(i64 %a) {
; X86-LABEL: test7:
; X86: # BB#0: # %entry
; X86-NEXT: pushl %esi
-; X86-NEXT: .Lcfi2:
-; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi4:
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -366,6 +368,8 @@ define i64 @test7(i64 %a) {
; X86-NEXT: subl %ecx, %edx
; X86-NEXT: subl %esi, %edx
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi5:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
entry:
%tmp3 = mul i64 %a, -33
@@ -382,9 +386,9 @@ define i64 @testOverflow(i64 %a) {
; X86-LABEL: testOverflow:
; X86: # BB#0: # %entry
; X86-NEXT: pushl %esi
-; X86-NEXT: .Lcfi4:
+; X86-NEXT: .Lcfi6:
; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .Lcfi5:
+; X86-NEXT: .Lcfi7:
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $-1, %edx
@@ -396,6 +400,8 @@ define i64 @testOverflow(i64 %a) {
; X86-NEXT: addl %esi, %edx
; X86-NEXT: subl {{[0-9]+}}(%esp), %edx
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi8:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
entry:
%tmp3 = mul i64 %a, 9223372036854775807
diff --git a/llvm/test/CodeGen/X86/legalize-shift-64.ll b/llvm/test/CodeGen/X86/legalize-shift-64.ll
index b3f2116e648..e1e68f37dc6 100644
--- a/llvm/test/CodeGen/X86/legalize-shift-64.ll
+++ b/llvm/test/CodeGen/X86/legalize-shift-64.ll
@@ -125,9 +125,17 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) {
; CHECK-NEXT: movl %esi, 4(%eax)
; CHECK-NEXT: movl %edi, (%eax)
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi8:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: popl %edi
+; CHECK-NEXT: .Lcfi9:
+; CHECK-NEXT: .cfi_def_cfa_offset 12
; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: .Lcfi10:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: .Lcfi11:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl $4
%shl = shl <2 x i64> %A, %B
ret <2 x i64> %shl
@@ -138,12 +146,12 @@ define i32 @test6() {
; CHECK-LABEL: test6:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %ebp
-; CHECK-NEXT: .Lcfi8:
+; CHECK-NEXT: .Lcfi12:
; CHECK-NEXT: .cfi_def_cfa_offset 8
-; CHECK-NEXT: .Lcfi9:
+; CHECK-NEXT: .Lcfi13:
; CHECK-NEXT: .cfi_offset %ebp, -8
; CHECK-NEXT: movl %esp, %ebp
-; CHECK-NEXT: .Lcfi10:
+; CHECK-NEXT: .Lcfi14:
; CHECK-NEXT: .cfi_def_cfa_register %ebp
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: subl $16, %esp
@@ -172,6 +180,8 @@ define i32 @test6() {
; CHECK-NEXT: .LBB5_4: # %if.then
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: .Lcfi15:
+; CHECK-NEXT: .cfi_def_cfa %esp, 4
; CHECK-NEXT: retl
%x = alloca i32, align 4
%t = alloca i64, align 8
diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll
index e737a51cf40..21c132caa78 100644
--- a/llvm/test/CodeGen/X86/load-combine.ll
+++ b/llvm/test/CodeGen/X86/load-combine.ll
@@ -378,6 +378,8 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) {
; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_uses:
@@ -482,9 +484,9 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
; CHECK-LABEL: load_i32_by_i8_bswap_store_in_between:
; CHECK: # BB#0:
; CHECK-NEXT: pushl %esi
-; CHECK-NEXT: .Lcfi2:
-; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .Lcfi4:
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -500,6 +502,8 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) {
; CHECK-NEXT: movzbl 3(%ecx), %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi5:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
;
; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between:
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 77254ba6760..60c2799d265 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -1764,6 +1764,8 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-NEXT: vmovdqa64 %zmm2, %zmm0
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: .Lcfi3:
+; KNL_32-NEXT: .cfi_def_cfa %esp, 4
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16i64:
@@ -1781,12 +1783,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; SKX_32-LABEL: test_gather_16i64:
; SKX_32: # BB#0:
; SKX_32-NEXT: pushl %ebp
-; SKX_32-NEXT: .Lcfi1:
-; SKX_32-NEXT: .cfi_def_cfa_offset 8
; SKX_32-NEXT: .Lcfi2:
+; SKX_32-NEXT: .cfi_def_cfa_offset 8
+; SKX_32-NEXT: .Lcfi3:
; SKX_32-NEXT: .cfi_offset %ebp, -8
; SKX_32-NEXT: movl %esp, %ebp
-; SKX_32-NEXT: .Lcfi3:
+; SKX_32-NEXT: .Lcfi4:
; SKX_32-NEXT: .cfi_def_cfa_register %ebp
; SKX_32-NEXT: andl $-64, %esp
; SKX_32-NEXT: subl $64, %esp
@@ -1801,6 +1803,8 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; SKX_32-NEXT: vmovdqa64 %zmm2, %zmm0
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: .Lcfi5:
+; SKX_32-NEXT: .cfi_def_cfa %esp, 4
; SKX_32-NEXT: retl
%res = call <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32 4, <16 x i1> %mask, <16 x i64> %src0)
ret <16 x i64> %res
@@ -1867,12 +1871,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-LABEL: test_gather_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi3:
-; KNL_32-NEXT: .cfi_def_cfa_offset 8
; KNL_32-NEXT: .Lcfi4:
+; KNL_32-NEXT: .cfi_def_cfa_offset 8
+; KNL_32-NEXT: .Lcfi5:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi5:
+; KNL_32-NEXT: .Lcfi6:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1887,6 +1891,8 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-NEXT: vmovapd %zmm2, %zmm0
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: .Lcfi7:
+; KNL_32-NEXT: .cfi_def_cfa %esp, 4
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_gather_16f64:
@@ -1904,12 +1910,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; SKX_32-LABEL: test_gather_16f64:
; SKX_32: # BB#0:
; SKX_32-NEXT: pushl %ebp
-; SKX_32-NEXT: .Lcfi4:
+; SKX_32-NEXT: .Lcfi6:
; SKX_32-NEXT: .cfi_def_cfa_offset 8
-; SKX_32-NEXT: .Lcfi5:
+; SKX_32-NEXT: .Lcfi7:
; SKX_32-NEXT: .cfi_offset %ebp, -8
; SKX_32-NEXT: movl %esp, %ebp
-; SKX_32-NEXT: .Lcfi6:
+; SKX_32-NEXT: .Lcfi8:
; SKX_32-NEXT: .cfi_def_cfa_register %ebp
; SKX_32-NEXT: andl $-64, %esp
; SKX_32-NEXT: subl $64, %esp
@@ -1924,6 +1930,8 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; SKX_32-NEXT: vmovapd %zmm2, %zmm0
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: .Lcfi9:
+; SKX_32-NEXT: .cfi_def_cfa %esp, 4
; SKX_32-NEXT: retl
%res = call <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32 4, <16 x i1> %mask, <16 x double> %src0)
ret <16 x double> %res
@@ -1989,12 +1997,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-LABEL: test_scatter_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi6:
+; KNL_32-NEXT: .Lcfi8:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi7:
+; KNL_32-NEXT: .Lcfi9:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi8:
+; KNL_32-NEXT: .Lcfi10:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2008,6 +2016,8 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2}
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: .Lcfi11:
+; KNL_32-NEXT: .cfi_def_cfa %esp, 4
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
@@ -2025,12 +2035,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; SKX_32-LABEL: test_scatter_16i64:
; SKX_32: # BB#0:
; SKX_32-NEXT: pushl %ebp
-; SKX_32-NEXT: .Lcfi7:
+; SKX_32-NEXT: .Lcfi10:
; SKX_32-NEXT: .cfi_def_cfa_offset 8
-; SKX_32-NEXT: .Lcfi8:
+; SKX_32-NEXT: .Lcfi11:
; SKX_32-NEXT: .cfi_offset %ebp, -8
; SKX_32-NEXT: movl %esp, %ebp
-; SKX_32-NEXT: .Lcfi9:
+; SKX_32-NEXT: .Lcfi12:
; SKX_32-NEXT: .cfi_def_cfa_register %ebp
; SKX_32-NEXT: andl $-64, %esp
; SKX_32-NEXT: subl $64, %esp
@@ -2044,6 +2054,8 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; SKX_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2}
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: .Lcfi13:
+; SKX_32-NEXT: .cfi_def_cfa %esp, 4
; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32 4, <16 x i1> %mask)
@@ -2111,12 +2123,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-LABEL: test_scatter_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi9:
+; KNL_32-NEXT: .Lcfi12:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi10:
+; KNL_32-NEXT: .Lcfi13:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi11:
+; KNL_32-NEXT: .Lcfi14:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2130,6 +2142,8 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2}
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: .Lcfi15:
+; KNL_32-NEXT: .cfi_def_cfa %esp, 4
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
@@ -2147,12 +2161,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; SKX_32-LABEL: test_scatter_16f64:
; SKX_32: # BB#0:
; SKX_32-NEXT: pushl %ebp
-; SKX_32-NEXT: .Lcfi10:
+; SKX_32-NEXT: .Lcfi14:
; SKX_32-NEXT: .cfi_def_cfa_offset 8
-; SKX_32-NEXT: .Lcfi11:
+; SKX_32-NEXT: .Lcfi15:
; SKX_32-NEXT: .cfi_offset %ebp, -8
; SKX_32-NEXT: movl %esp, %ebp
-; SKX_32-NEXT: .Lcfi12:
+; SKX_32-NEXT: .Lcfi16:
; SKX_32-NEXT: .cfi_def_cfa_register %ebp
; SKX_32-NEXT: andl $-64, %esp
; SKX_32-NEXT: subl $64, %esp
@@ -2166,6 +2180,8 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; SKX_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2}
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: .Lcfi17:
+; SKX_32-NEXT: .cfi_def_cfa %esp, 4
; SKX_32-NEXT: vzeroupper
; SKX_32-NEXT: retl
call void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x double*> %ptrs, i32 4, <16 x i1> %mask)
@@ -2192,12 +2208,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-LABEL: test_pr28312:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi12:
+; KNL_32-NEXT: .Lcfi16:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi13:
+; KNL_32-NEXT: .Lcfi17:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi14:
+; KNL_32-NEXT: .Lcfi18:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
@@ -2215,6 +2231,8 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; KNL_32-NEXT: movl %ebp, %esp
; KNL_32-NEXT: popl %ebp
+; KNL_32-NEXT: .Lcfi19:
+; KNL_32-NEXT: .cfi_def_cfa %esp, 4
; KNL_32-NEXT: retl
;
; SKX-LABEL: test_pr28312:
@@ -2229,12 +2247,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; SKX_32-LABEL: test_pr28312:
; SKX_32: # BB#0:
; SKX_32-NEXT: pushl %ebp
-; SKX_32-NEXT: .Lcfi13:
+; SKX_32-NEXT: .Lcfi18:
; SKX_32-NEXT: .cfi_def_cfa_offset 8
-; SKX_32-NEXT: .Lcfi14:
+; SKX_32-NEXT: .Lcfi19:
; SKX_32-NEXT: .cfi_offset %ebp, -8
; SKX_32-NEXT: movl %esp, %ebp
-; SKX_32-NEXT: .Lcfi15:
+; SKX_32-NEXT: .Lcfi20:
; SKX_32-NEXT: .cfi_def_cfa_register %ebp
; SKX_32-NEXT: andl $-32, %esp
; SKX_32-NEXT: subl $32, %esp
@@ -2245,6 +2263,8 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; SKX_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; SKX_32-NEXT: movl %ebp, %esp
; SKX_32-NEXT: popl %ebp
+; SKX_32-NEXT: .Lcfi21:
+; SKX_32-NEXT: .cfi_def_cfa %esp, 4
; SKX_32-NEXT: retl
%g1 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef)
%g2 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef)
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index 13258fd81de..6dd9f01356e 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -149,6 +149,8 @@ define void @memset_256_nonzero_bytes(i8* %x) {
; SSE-NEXT: movl $256, %edx # imm = 0x100
; SSE-NEXT: callq memset
; SSE-NEXT: popq %rax
+; SSE-NEXT: .Lcfi1:
+; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
;
; SSE2FAST-LABEL: memset_256_nonzero_bytes:
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 1d5829407b7..314243e1dbe 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -76,7 +76,11 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: movl %esi, 4(%eax)
; X32-SSE1-NEXT: movl %edx, (%eax)
; X32-SSE1-NEXT: popl %esi
+; X32-SSE1-NEXT: .Lcfi4:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: popl %edi
+; X32-SSE1-NEXT: .Lcfi5:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_2i64_i64_12:
@@ -377,9 +381,9 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-LABEL: merge_4i32_i32_23u5:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %esi
-; X32-SSE1-NEXT: .Lcfi4:
+; X32-SSE1-NEXT: .Lcfi6:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT: .Lcfi5:
+; X32-SSE1-NEXT: .Lcfi7:
; X32-SSE1-NEXT: .cfi_offset %esi, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -390,6 +394,8 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
; X32-SSE1-NEXT: movl %edx, (%eax)
; X32-SSE1-NEXT: movl %ecx, 12(%eax)
; X32-SSE1-NEXT: popl %esi
+; X32-SSE1-NEXT: .Lcfi8:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_4i32_i32_23u5:
@@ -527,24 +533,24 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-LABEL: merge_8i16_i16_23u567u9:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %ebp
-; X32-SSE1-NEXT: .Lcfi6:
+; X32-SSE1-NEXT: .Lcfi9:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %ebx
-; X32-SSE1-NEXT: .Lcfi7:
+; X32-SSE1-NEXT: .Lcfi10:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 12
; X32-SSE1-NEXT: pushl %edi
-; X32-SSE1-NEXT: .Lcfi8:
+; X32-SSE1-NEXT: .Lcfi11:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 16
; X32-SSE1-NEXT: pushl %esi
-; X32-SSE1-NEXT: .Lcfi9:
+; X32-SSE1-NEXT: .Lcfi12:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 20
-; X32-SSE1-NEXT: .Lcfi10:
+; X32-SSE1-NEXT: .Lcfi13:
; X32-SSE1-NEXT: .cfi_offset %esi, -20
-; X32-SSE1-NEXT: .Lcfi11:
+; X32-SSE1-NEXT: .Lcfi14:
; X32-SSE1-NEXT: .cfi_offset %edi, -16
-; X32-SSE1-NEXT: .Lcfi12:
+; X32-SSE1-NEXT: .Lcfi15:
; X32-SSE1-NEXT: .cfi_offset %ebx, -12
-; X32-SSE1-NEXT: .Lcfi13:
+; X32-SSE1-NEXT: .Lcfi16:
; X32-SSE1-NEXT: .cfi_offset %ebp, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -561,9 +567,17 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movw %dx, (%eax)
; X32-SSE1-NEXT: movw %di, 6(%eax)
; X32-SSE1-NEXT: popl %esi
+; X32-SSE1-NEXT: .Lcfi17:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 16
; X32-SSE1-NEXT: popl %edi
+; X32-SSE1-NEXT: .Lcfi18:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 12
; X32-SSE1-NEXT: popl %ebx
+; X32-SSE1-NEXT: .Lcfi19:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: popl %ebp
+; X32-SSE1-NEXT: .Lcfi20:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_8i16_i16_23u567u9:
@@ -641,9 +655,9 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %esi
-; X32-SSE1-NEXT: .Lcfi14:
+; X32-SSE1-NEXT: .Lcfi21:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE1-NEXT: .Lcfi15:
+; X32-SSE1-NEXT: .Lcfi22:
; X32-SSE1-NEXT: .cfi_offset %esi, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -658,6 +672,8 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movw $0, 10(%eax)
; X32-SSE1-NEXT: movw $0, 8(%eax)
; X32-SSE1-NEXT: popl %esi
+; X32-SSE1-NEXT: .Lcfi23:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
@@ -695,12 +711,12 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
; X32-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %ebx
-; X32-SSE1-NEXT: .Lcfi16:
+; X32-SSE1-NEXT: .Lcfi24:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: subl $12, %esp
-; X32-SSE1-NEXT: .Lcfi17:
+; X32-SSE1-NEXT: .Lcfi25:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 20
-; X32-SSE1-NEXT: .Lcfi18:
+; X32-SSE1-NEXT: .Lcfi26:
; X32-SSE1-NEXT: .cfi_offset %ebx, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -751,7 +767,11 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
; X32-SSE1-NEXT: movb {{[0-9]+}}(%esp), %cl # 1-byte Reload
; X32-SSE1-NEXT: movb %cl, 3(%eax)
; X32-SSE1-NEXT: addl $12, %esp
+; X32-SSE1-NEXT: .Lcfi27:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: popl %ebx
+; X32-SSE1-NEXT: .Lcfi28:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF:
@@ -868,12 +888,12 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %ebx
-; X32-SSE1-NEXT: .Lcfi19:
+; X32-SSE1-NEXT: .Lcfi29:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %eax
-; X32-SSE1-NEXT: .Lcfi20:
+; X32-SSE1-NEXT: .Lcfi30:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT: .Lcfi21:
+; X32-SSE1-NEXT: .Lcfi31:
; X32-SSE1-NEXT: .cfi_offset %ebx, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -895,7 +915,11 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
; X32-SSE1-NEXT: movb $0, 14(%eax)
; X32-SSE1-NEXT: movb $0, 13(%eax)
; X32-SSE1-NEXT: addl $4, %esp
+; X32-SSE1-NEXT: .Lcfi32:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: popl %ebx
+; X32-SSE1-NEXT: .Lcfi33:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
@@ -990,14 +1014,14 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
; X32-SSE1-LABEL: merge_2i64_i64_12_volatile:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %edi
-; X32-SSE1-NEXT: .Lcfi22:
+; X32-SSE1-NEXT: .Lcfi34:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: pushl %esi
-; X32-SSE1-NEXT: .Lcfi23:
+; X32-SSE1-NEXT: .Lcfi35:
; X32-SSE1-NEXT: .cfi_def_cfa_offset 12
-; X32-SSE1-NEXT: .Lcfi24:
+; X32-SSE1-NEXT: .Lcfi36:
; X32-SSE1-NEXT: .cfi_offset %esi, -12
-; X32-SSE1-NEXT: .Lcfi25:
+; X32-SSE1-NEXT: .Lcfi37:
; X32-SSE1-NEXT: .cfi_offset %edi, -8
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -1010,7 +1034,11 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin
; X32-SSE1-NEXT: movl %esi, 4(%eax)
; X32-SSE1-NEXT: movl %edx, (%eax)
; X32-SSE1-NEXT: popl %esi
+; X32-SSE1-NEXT: .Lcfi38:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 8
; X32-SSE1-NEXT: popl %edi
+; X32-SSE1-NEXT: .Lcfi39:
+; X32-SSE1-NEXT: .cfi_def_cfa_offset 4
; X32-SSE1-NEXT: retl $4
;
; X32-SSE41-LABEL: merge_2i64_i64_12_volatile:
diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index d715ccfa8c6..eb5ce65311a 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -376,8 +376,10 @@ entry:
; LINUX: pushl $1
; LINUX: .cfi_adjust_cfa_offset 4
; LINUX: calll good
-; LINUX: addl $28, %esp
+; LINUX: addl $16, %esp
; LINUX: .cfi_adjust_cfa_offset -16
+; LINUX: addl $12, %esp
+; LINUX: .cfi_def_cfa_offset 4
; LINUX-NOT: add
; LINUX: retl
define void @pr27140() optsize {
diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll
index 65d80a699e2..8400ca9b95a 100644
--- a/llvm/test/CodeGen/X86/mul-constant-result.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-result.ll
@@ -33,84 +33,148 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: .LBB0_6:
; X86-NEXT: addl %eax, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_39:
+; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: .LBB0_40:
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi4:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_7:
+; X86-NEXT: .Lcfi5:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi6:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_8:
+; X86-NEXT: .Lcfi7:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: shll $2, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi8:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_9:
+; X86-NEXT: .Lcfi9:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi10:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_10:
+; X86-NEXT: .Lcfi11:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi12:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_11:
+; X86-NEXT: .Lcfi13:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (,%eax,8), %ecx
; X86-NEXT: jmp .LBB0_12
; X86-NEXT: .LBB0_13:
; X86-NEXT: shll $3, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi14:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_14:
+; X86-NEXT: .Lcfi15:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi16:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_15:
+; X86-NEXT: .Lcfi17:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi18:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_16:
+; X86-NEXT: .Lcfi19:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi20:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_17:
+; X86-NEXT: .Lcfi21:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi22:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_18:
+; X86-NEXT: .Lcfi23:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi24:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_19:
+; X86-NEXT: .Lcfi25:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: jmp .LBB0_20
; X86-NEXT: .LBB0_21:
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi26:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_22:
+; X86-NEXT: .Lcfi27:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: shll $4, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi28:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_23:
+; X86-NEXT: .Lcfi29:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $4, %ecx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi30:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_24:
+; X86-NEXT: .Lcfi31:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi32:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_25:
+; X86-NEXT: .Lcfi33:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: shll $2, %ecx
; X86-NEXT: jmp .LBB0_12
@@ -118,20 +182,32 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi34:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_27:
+; X86-NEXT: .Lcfi35:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi36:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_28:
+; X86-NEXT: .Lcfi37:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: .LBB0_20:
; X86-NEXT: leal (%eax,%ecx,4), %ecx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi38:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_29:
+; X86-NEXT: .Lcfi39:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: shll $3, %ecx
; X86-NEXT: jmp .LBB0_12
@@ -139,13 +215,21 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi40:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_31:
+; X86-NEXT: .Lcfi41:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi42:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_32:
+; X86-NEXT: .Lcfi43:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
; X86-NEXT: jmp .LBB0_12
@@ -153,21 +237,33 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi44:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_34:
+; X86-NEXT: .Lcfi45:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi46:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_35:
+; X86-NEXT: .Lcfi47:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
; X86-NEXT: addl %eax, %ecx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi48:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_36:
+; X86-NEXT: .Lcfi49:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll $5, %ecx
; X86-NEXT: subl %eax, %ecx
@@ -179,10 +275,16 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: subl %eax, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi50:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
; X86-NEXT: .LBB0_38:
+; X86-NEXT: .Lcfi51:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: shll $5, %eax
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi52:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-HSW-LABEL: mult:
@@ -525,431 +627,431 @@ define i32 @foo() local_unnamed_addr #0 {
; X86-LABEL: foo:
; X86: # BB#0:
; X86-NEXT: pushl %ebx
-; X86-NEXT: .Lcfi2:
+; X86-NEXT: .Lcfi53:
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %edi
-; X86-NEXT: .Lcfi3:
+; X86-NEXT: .Lcfi54:
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: pushl %esi
-; X86-NEXT: .Lcfi4:
+; X86-NEXT: .Lcfi55:
; X86-NEXT: .cfi_def_cfa_offset 16
-; X86-NEXT: .Lcfi5:
+; X86-NEXT: .Lcfi56:
; X86-NEXT: .cfi_offset %esi, -16
-; X86-NEXT: .Lcfi6:
+; X86-NEXT: .Lcfi57:
; X86-NEXT: .cfi_offset %edi, -12
-; X86-NEXT: .Lcfi7:
+; X86-NEXT: .Lcfi58:
; X86-NEXT: .cfi_offset %ebx, -8
; X86-NEXT: pushl $0
-; X86-NEXT: .Lcfi8:
+; X86-NEXT: .Lcfi59:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $1
-; X86-NEXT: .Lcfi9:
+; X86-NEXT: .Lcfi60:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi10:
+; X86-NEXT: .Lcfi61:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %esi
; X86-NEXT: xorl $1, %esi
; X86-NEXT: pushl $1
-; X86-NEXT: .Lcfi11:
+; X86-NEXT: .Lcfi62:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $2
-; X86-NEXT: .Lcfi12:
+; X86-NEXT: .Lcfi63:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi13:
+; X86-NEXT: .Lcfi64:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $2, %edi
; X86-NEXT: pushl $1
-; X86-NEXT: .Lcfi14:
+; X86-NEXT: .Lcfi65:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $3
-; X86-NEXT: .Lcfi15:
+; X86-NEXT: .Lcfi66:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi16:
+; X86-NEXT: .Lcfi67:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $3, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $2
-; X86-NEXT: .Lcfi17:
+; X86-NEXT: .Lcfi68:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $4
-; X86-NEXT: .Lcfi18:
+; X86-NEXT: .Lcfi69:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi19:
+; X86-NEXT: .Lcfi70:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $4, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $2
-; X86-NEXT: .Lcfi20:
+; X86-NEXT: .Lcfi71:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $5
-; X86-NEXT: .Lcfi21:
+; X86-NEXT: .Lcfi72:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi22:
+; X86-NEXT: .Lcfi73:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $5, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $3
-; X86-NEXT: .Lcfi23:
+; X86-NEXT: .Lcfi74:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $6
-; X86-NEXT: .Lcfi24:
+; X86-NEXT: .Lcfi75:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi25:
+; X86-NEXT: .Lcfi76:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $6, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $3
-; X86-NEXT: .Lcfi26:
+; X86-NEXT: .Lcfi77:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $7
-; X86-NEXT: .Lcfi27:
+; X86-NEXT: .Lcfi78:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi28:
+; X86-NEXT: .Lcfi79:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $7, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $4
-; X86-NEXT: .Lcfi29:
+; X86-NEXT: .Lcfi80:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $8
-; X86-NEXT: .Lcfi30:
+; X86-NEXT: .Lcfi81:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi31:
+; X86-NEXT: .Lcfi82:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $8, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $4
-; X86-NEXT: .Lcfi32:
+; X86-NEXT: .Lcfi83:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $9
-; X86-NEXT: .Lcfi33:
+; X86-NEXT: .Lcfi84:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi34:
+; X86-NEXT: .Lcfi85:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $9, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $5
-; X86-NEXT: .Lcfi35:
+; X86-NEXT: .Lcfi86:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $10
-; X86-NEXT: .Lcfi36:
+; X86-NEXT: .Lcfi87:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi37:
+; X86-NEXT: .Lcfi88:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $10, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $5
-; X86-NEXT: .Lcfi38:
+; X86-NEXT: .Lcfi89:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $11
-; X86-NEXT: .Lcfi39:
+; X86-NEXT: .Lcfi90:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi40:
+; X86-NEXT: .Lcfi91:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $11, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $6
-; X86-NEXT: .Lcfi41:
+; X86-NEXT: .Lcfi92:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $12
-; X86-NEXT: .Lcfi42:
+; X86-NEXT: .Lcfi93:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi43:
+; X86-NEXT: .Lcfi94:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $12, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $6
-; X86-NEXT: .Lcfi44:
+; X86-NEXT: .Lcfi95:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $13
-; X86-NEXT: .Lcfi45:
+; X86-NEXT: .Lcfi96:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi46:
+; X86-NEXT: .Lcfi97:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $13, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $7
-; X86-NEXT: .Lcfi47:
+; X86-NEXT: .Lcfi98:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $14
-; X86-NEXT: .Lcfi48:
+; X86-NEXT: .Lcfi99:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi49:
+; X86-NEXT: .Lcfi100:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $14, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $7
-; X86-NEXT: .Lcfi50:
+; X86-NEXT: .Lcfi101:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $15
-; X86-NEXT: .Lcfi51:
+; X86-NEXT: .Lcfi102:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi52:
+; X86-NEXT: .Lcfi103:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $15, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $8
-; X86-NEXT: .Lcfi53:
+; X86-NEXT: .Lcfi104:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $16
-; X86-NEXT: .Lcfi54:
+; X86-NEXT: .Lcfi105:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi55:
+; X86-NEXT: .Lcfi106:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $16, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $8
-; X86-NEXT: .Lcfi56:
+; X86-NEXT: .Lcfi107:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $17
-; X86-NEXT: .Lcfi57:
+; X86-NEXT: .Lcfi108:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi58:
+; X86-NEXT: .Lcfi109:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $17, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $9
-; X86-NEXT: .Lcfi59:
+; X86-NEXT: .Lcfi110:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $18
-; X86-NEXT: .Lcfi60:
+; X86-NEXT: .Lcfi111:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi61:
+; X86-NEXT: .Lcfi112:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $18, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $9
-; X86-NEXT: .Lcfi62:
+; X86-NEXT: .Lcfi113:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $19
-; X86-NEXT: .Lcfi63:
+; X86-NEXT: .Lcfi114:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi64:
+; X86-NEXT: .Lcfi115:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $19, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $10
-; X86-NEXT: .Lcfi65:
+; X86-NEXT: .Lcfi116:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $20
-; X86-NEXT: .Lcfi66:
+; X86-NEXT: .Lcfi117:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi67:
+; X86-NEXT: .Lcfi118:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $20, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $10
-; X86-NEXT: .Lcfi68:
+; X86-NEXT: .Lcfi119:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $21
-; X86-NEXT: .Lcfi69:
+; X86-NEXT: .Lcfi120:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi70:
+; X86-NEXT: .Lcfi121:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $21, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $11
-; X86-NEXT: .Lcfi71:
+; X86-NEXT: .Lcfi122:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $22
-; X86-NEXT: .Lcfi72:
+; X86-NEXT: .Lcfi123:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi73:
+; X86-NEXT: .Lcfi124:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $22, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $11
-; X86-NEXT: .Lcfi74:
+; X86-NEXT: .Lcfi125:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $23
-; X86-NEXT: .Lcfi75:
+; X86-NEXT: .Lcfi126:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi76:
+; X86-NEXT: .Lcfi127:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $23, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $12
-; X86-NEXT: .Lcfi77:
+; X86-NEXT: .Lcfi128:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $24
-; X86-NEXT: .Lcfi78:
+; X86-NEXT: .Lcfi129:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi79:
+; X86-NEXT: .Lcfi130:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $24, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $12
-; X86-NEXT: .Lcfi80:
+; X86-NEXT: .Lcfi131:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $25
-; X86-NEXT: .Lcfi81:
+; X86-NEXT: .Lcfi132:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi82:
+; X86-NEXT: .Lcfi133:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $25, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $13
-; X86-NEXT: .Lcfi83:
+; X86-NEXT: .Lcfi134:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $26
-; X86-NEXT: .Lcfi84:
+; X86-NEXT: .Lcfi135:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi85:
+; X86-NEXT: .Lcfi136:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $26, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $13
-; X86-NEXT: .Lcfi86:
+; X86-NEXT: .Lcfi137:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $27
-; X86-NEXT: .Lcfi87:
+; X86-NEXT: .Lcfi138:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi88:
+; X86-NEXT: .Lcfi139:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $27, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $14
-; X86-NEXT: .Lcfi89:
+; X86-NEXT: .Lcfi140:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $28
-; X86-NEXT: .Lcfi90:
+; X86-NEXT: .Lcfi141:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi91:
+; X86-NEXT: .Lcfi142:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $28, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $14
-; X86-NEXT: .Lcfi92:
+; X86-NEXT: .Lcfi143:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $29
-; X86-NEXT: .Lcfi93:
+; X86-NEXT: .Lcfi144:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi94:
+; X86-NEXT: .Lcfi145:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $29, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: pushl $15
-; X86-NEXT: .Lcfi95:
+; X86-NEXT: .Lcfi146:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $30
-; X86-NEXT: .Lcfi96:
+; X86-NEXT: .Lcfi147:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi97:
+; X86-NEXT: .Lcfi148:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %edi
; X86-NEXT: xorl $30, %edi
; X86-NEXT: orl %ebx, %edi
; X86-NEXT: pushl $15
-; X86-NEXT: .Lcfi98:
+; X86-NEXT: .Lcfi149:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $31
-; X86-NEXT: .Lcfi99:
+; X86-NEXT: .Lcfi150:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi100:
+; X86-NEXT: .Lcfi151:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: xorl $31, %ebx
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: orl %esi, %ebx
; X86-NEXT: pushl $16
-; X86-NEXT: .Lcfi101:
+; X86-NEXT: .Lcfi152:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: pushl $32
-; X86-NEXT: .Lcfi102:
+; X86-NEXT: .Lcfi153:
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: calll mult
; X86-NEXT: addl $8, %esp
-; X86-NEXT: .Lcfi103:
+; X86-NEXT: .Lcfi154:
; X86-NEXT: .cfi_adjust_cfa_offset -8
; X86-NEXT: xorl $32, %eax
; X86-NEXT: orl %ebx, %eax
@@ -959,8 +1061,14 @@ define i32 @foo() local_unnamed_addr #0 {
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: .LBB1_2:
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi155:
+; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: popl %edi
+; X86-NEXT: .Lcfi156:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %ebx
+; X86-NEXT: .Lcfi157:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-HSW-LABEL: foo:
@@ -1184,10 +1292,20 @@ define i32 @foo() local_unnamed_addr #0 {
; X64-HSW-NEXT: movl $-1, %eax
; X64-HSW-NEXT: cmovel %r12d, %eax
; X64-HSW-NEXT: popq %rbx
+; X64-HSW-NEXT: .Lcfi10:
+; X64-HSW-NEXT: .cfi_def_cfa_offset 40
; X64-HSW-NEXT: popq %r12
+; X64-HSW-NEXT: .Lcfi11:
+; X64-HSW-NEXT: .cfi_def_cfa_offset 32
; X64-HSW-NEXT: popq %r14
+; X64-HSW-NEXT: .Lcfi12:
+; X64-HSW-NEXT: .cfi_def_cfa_offset 24
; X64-HSW-NEXT: popq %r15
+; X64-HSW-NEXT: .Lcfi13:
+; X64-HSW-NEXT: .cfi_def_cfa_offset 16
; X64-HSW-NEXT: popq %rbp
+; X64-HSW-NEXT: .Lcfi14:
+; X64-HSW-NEXT: .cfi_def_cfa_offset 8
; X64-HSW-NEXT: retq
%1 = tail call i32 @mult(i32 1, i32 0)
%2 = icmp ne i32 %1, 1
diff --git a/llvm/test/CodeGen/X86/mul-i256.ll b/llvm/test/CodeGen/X86/mul-i256.ll
index acd86e94989..08a843b63b0 100644
--- a/llvm/test/CodeGen/X86/mul-i256.ll
+++ b/llvm/test/CodeGen/X86/mul-i256.ll
@@ -193,6 +193,8 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: popl %ebp
+; X32-NEXT: .Lcfi6:
+; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
;
; X64-LABEL: test:
@@ -267,8 +269,14 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X64-NEXT: movq %rax, 16(%r9)
; X64-NEXT: movq %rdx, 24(%r9)
; X64-NEXT: popq %rbx
+; X64-NEXT: .Lcfi6:
+; X64-NEXT: .cfi_def_cfa_offset 24
; X64-NEXT: popq %r14
+; X64-NEXT: .Lcfi7:
+; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: popq %r15
+; X64-NEXT: .Lcfi8:
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
%av = load i256, i256* %a
diff --git a/llvm/test/CodeGen/X86/pr21792.ll b/llvm/test/CodeGen/X86/pr21792.ll
index 84b7467e6a1..5e7ae28d161 100644
--- a/llvm/test/CodeGen/X86/pr21792.ll
+++ b/llvm/test/CodeGen/X86/pr21792.ll
@@ -29,6 +29,8 @@ define void @func(<4 x float> %vx) {
; CHECK-NEXT: leaq stuff+8(%r9), %r9
; CHECK-NEXT: callq toto
; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
entry:
%tmp2 = bitcast <4 x float> %vx to <2 x i64>
diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll
index 8c970b3d477..52a8a25d735 100644
--- a/llvm/test/CodeGen/X86/pr29112.ll
+++ b/llvm/test/CodeGen/X86/pr29112.ll
@@ -66,6 +66,8 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vaddps {{[0-9]+}}(%rsp), %xmm1, %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: addq $88, %rsp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> <i32 4, i32 20, i32 1, i32 17>
diff --git a/llvm/test/CodeGen/X86/pr30430.ll b/llvm/test/CodeGen/X86/pr30430.ll
index 14d81f14fc3..9f93294811c 100644
--- a/llvm/test/CodeGen/X86/pr30430.ll
+++ b/llvm/test/CodeGen/X86/pr30430.ll
@@ -111,6 +111,8 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float
; CHECK-NEXT: vmovss %xmm14, (%rsp) # 4-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: retq
entry:
%__A.addr.i = alloca float, align 4
diff --git a/llvm/test/CodeGen/X86/pr32241.ll b/llvm/test/CodeGen/X86/pr32241.ll
index e1f726f0c62..d16c7761dbf 100644
--- a/llvm/test/CodeGen/X86/pr32241.ll
+++ b/llvm/test/CodeGen/X86/pr32241.ll
@@ -54,7 +54,11 @@ define i32 @_Z3foov() {
; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: addl $24, %esp
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi4:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
entry:
%aa = alloca i16, align 2
diff --git a/llvm/test/CodeGen/X86/pr32256.ll b/llvm/test/CodeGen/X86/pr32256.ll
index e29b56236e2..6c7deb991e3 100644
--- a/llvm/test/CodeGen/X86/pr32256.ll
+++ b/llvm/test/CodeGen/X86/pr32256.ll
@@ -28,6 +28,8 @@ define void @_Z1av() {
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp)
; CHECK-NEXT: addl $2, %esp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
entry:
%b = alloca i8, align 1
diff --git a/llvm/test/CodeGen/X86/pr32329.ll b/llvm/test/CodeGen/X86/pr32329.ll
index 7cb38863e89..e6e4ad7a713 100644
--- a/llvm/test/CodeGen/X86/pr32329.ll
+++ b/llvm/test/CodeGen/X86/pr32329.ll
@@ -64,9 +64,17 @@ define void @foo() local_unnamed_addr {
; X86-NEXT: imull %eax, %ebx
; X86-NEXT: movb %bl, var_218
; X86-NEXT: popl %esi
+; X86-NEXT: .Lcfi8:
+; X86-NEXT: .cfi_def_cfa_offset 16
; X86-NEXT: popl %edi
+; X86-NEXT: .Lcfi9:
+; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: popl %ebx
+; X86-NEXT: .Lcfi10:
+; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %ebp
+; X86-NEXT: .Lcfi11:
+; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-LABEL: foo:
diff --git a/llvm/test/CodeGen/X86/pr32345.ll b/llvm/test/CodeGen/X86/pr32345.ll
index e9182698dd9..f2b37a806f5 100644
--- a/llvm/test/CodeGen/X86/pr32345.ll
+++ b/llvm/test/CodeGen/X86/pr32345.ll
@@ -90,6 +90,8 @@ define void @foo() {
; 6860-NEXT: popl %edi
; 6860-NEXT: popl %ebx
; 6860-NEXT: popl %ebp
+; 6860-NEXT: .Lcfi6:
+; 6860-NEXT: .cfi_def_cfa %esp, 4
; 6860-NEXT: retl
;
; X64-LABEL: foo:
@@ -136,6 +138,8 @@ define void @foo() {
; 686-NEXT: movb %dl, (%eax)
; 686-NEXT: movl %ebp, %esp
; 686-NEXT: popl %ebp
+; 686-NEXT: .Lcfi3:
+; 686-NEXT: .cfi_def_cfa %esp, 4
; 686-NEXT: retl
bb:
%tmp = alloca i64, align 8
diff --git a/llvm/test/CodeGen/X86/pr32451.ll b/llvm/test/CodeGen/X86/pr32451.ll
index e4643a863f9..22a045f2939 100644
--- a/llvm/test/CodeGen/X86/pr32451.ll
+++ b/llvm/test/CodeGen/X86/pr32451.ll
@@ -33,7 +33,11 @@ define i8** @japi1_convert_690(i8**, i8***, i32) {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; CHECK-NEXT: movl %eax, (%ecx)
; CHECK-NEXT: addl $16, %esp
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: .Lcfi4:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
top:
%3 = alloca i8***
diff --git a/llvm/test/CodeGen/X86/pr9743.ll b/llvm/test/CodeGen/X86/pr9743.ll
index 6597c235330..6ab33193946 100644
--- a/llvm/test/CodeGen/X86/pr9743.ll
+++ b/llvm/test/CodeGen/X86/pr9743.ll
@@ -14,4 +14,6 @@ define void @f() {
; CHECK-NEXT: :
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/X86/push-cfi-debug.ll b/llvm/test/CodeGen/X86/push-cfi-debug.ll
index 7f438e306e4..01fa12e87d0 100644
--- a/llvm/test/CodeGen/X86/push-cfi-debug.ll
+++ b/llvm/test/CodeGen/X86/push-cfi-debug.ll
@@ -23,8 +23,10 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0
; CHECK: .cfi_adjust_cfa_offset 4
; CHECK: calll stdfoo
; CHECK: .cfi_adjust_cfa_offset -8
-; CHECK: addl $20, %esp
+; CHECK: addl $8, %esp
; CHECK: .cfi_adjust_cfa_offset -8
+; CHECK: addl $12, %esp
+; CHECK: .cfi_def_cfa_offset 4
define void @test1() #0 !dbg !4 {
entry:
tail call void @foo(i32 1, i32 2) #1, !dbg !10
diff --git a/llvm/test/CodeGen/X86/push-cfi-obj.ll b/llvm/test/CodeGen/X86/push-cfi-obj.ll
index 33291ec3318..2c9ec334027 100644
--- a/llvm/test/CodeGen/X86/push-cfi-obj.ll
+++ b/llvm/test/CodeGen/X86/push-cfi-obj.ll
@@ -12,7 +12,7 @@
; LINUX-NEXT: ]
; LINUX-NEXT: Address: 0x0
; LINUX-NEXT: Offset: 0x68
-; LINUX-NEXT: Size: 64
+; LINUX-NEXT: Size: 72
; LINUX-NEXT: Link: 0
; LINUX-NEXT: Info: 0
; LINUX-NEXT: AddressAlignment: 4
@@ -22,8 +22,9 @@
; LINUX-NEXT: SectionData (
; LINUX-NEXT: 0000: 1C000000 00000000 017A504C 5200017C |.........zPLR..||
; LINUX-NEXT: 0010: 08070000 00000000 1B0C0404 88010000 |................|
-; LINUX-NEXT: 0020: 1C000000 24000000 00000000 1D000000 |....$...........|
+; LINUX-NEXT: 0020: 24000000 24000000 00000000 1D000000 |$...$...........|
; LINUX-NEXT: 0030: 04000000 00410E08 8502420D 05432E10 |.....A....B..C..|
+; LINUX-NEXT: 0040: 540C0404 410C0508 |T...A...|
; LINUX-NEXT: )
declare i32 @__gxx_personality_v0(...)
@@ -35,7 +36,7 @@ entry:
to label %continue unwind label %cleanup
continue:
ret void
-cleanup:
+cleanup:
landingpad { i8*, i32 }
cleanup
ret void
diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll
index 5428f12ad1c..4497ec5ebd4 100644
--- a/llvm/test/CodeGen/X86/push-cfi.ll
+++ b/llvm/test/CodeGen/X86/push-cfi.ll
@@ -82,8 +82,9 @@ cleanup:
; LINUX-NEXT: Lcfi{{[0-9]+}}:
; LINUX-NEXT: .cfi_adjust_cfa_offset 4
; LINUX-NEXT: call
-; LINUX-NEXT: addl $28, %esp
+; LINUX-NEXT: addl $16, %esp
; LINUX: .cfi_adjust_cfa_offset -16
+; LINUX: addl $12, %esp
; DARWIN-NOT: .cfi_escape
; DARWIN-NOT: pushl
define void @test2_nofp() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
diff --git a/llvm/test/CodeGen/X86/return-ext.ll b/llvm/test/CodeGen/X86/return-ext.ll
index ef160f43b4a..d2bbebaf8a0 100644
--- a/llvm/test/CodeGen/X86/return-ext.ll
+++ b/llvm/test/CodeGen/X86/return-ext.ll
@@ -106,6 +106,8 @@ entry:
; CHECK: call
; CHECK-NEXT: movzbl
; CHECK-NEXT: {{pop|add}}
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}}
; CHECK-NEXT: ret
}
@@ -120,6 +122,8 @@ entry:
; CHECK: call
; CHECK-NEXT: movzbl
; CHECK-NEXT: {{pop|add}}
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}}
; CHECK-NEXT: ret
}
@@ -134,5 +138,7 @@ entry:
; CHECK: call
; CHECK-NEXT: movzwl
; CHECK-NEXT: {{pop|add}}
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}}
; CHECK-NEXT: ret
}
diff --git a/llvm/test/CodeGen/X86/rtm.ll b/llvm/test/CodeGen/X86/rtm.ll
index a8562677c7b..37825c2dc58 100644
--- a/llvm/test/CodeGen/X86/rtm.ll
+++ b/llvm/test/CodeGen/X86/rtm.ll
@@ -76,6 +76,8 @@ define void @f2(i32 %x) nounwind uwtable {
; X64-NEXT: xabort $1
; X64-NEXT: callq f1
; X64-NEXT: popq %rax
+; X64-NEXT: .Lcfi1:
+; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
entry:
%x.addr = alloca i32, align 4
diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index 2628f824ea4..45f96b7e794 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -92,6 +92,8 @@ define void @pr26232(i64 %a, <16 x i1> %b) {
; KNL-32-NEXT: jne .LBB1_1
; KNL-32-NEXT: # BB#2: # %for_exit600
; KNL-32-NEXT: popl %esi
+; KNL-32-NEXT: .Lcfi2:
+; KNL-32-NEXT: .cfi_def_cfa_offset 4
; KNL-32-NEXT: retl
allocas:
br label %for_test11.preheader
diff --git a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
index bd2dd53b654..3d011693a49 100644
--- a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
@@ -83,6 +83,8 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
; CHECK: callq return_i1
; CHECK-NEXT: .Ltmp5:
; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: .Lcfi11:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
entry:
%safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %a)
diff --git a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
index b88ca03805f..c019e98eb65 100644
--- a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
+++ b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
@@ -69,6 +69,8 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
; CHECK: callq return_i1
; CHECK-NEXT: .Ltmp4:
; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
entry:
%safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 1, i32 0, i32 0, i32 addrspace(1)* %a)
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke.ll b/llvm/test/CodeGen/X86/statepoint-invoke.ll
index 29f8e3ed4f7..fc0bf5b4e3c 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke.ll
+++ b/llvm/test/CodeGen/X86/statepoint-invoke.ll
@@ -142,6 +142,8 @@ normal_return:
; CHECK-LABEL: %normal_return
; CHECK: xorl %eax, %eax
; CHECK-NEXT: popq
+ ; CHECK-NEXT: :
+ ; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%null.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 13, i32 13)
%undef.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 14, i32 14)
@@ -169,6 +171,8 @@ entry:
normal_return:
; CHECK: leaq
; CHECK-NEXT: popq
+ ; CHECK-NEXT: :
+ ; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%aa.rel = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %sp, i32 13, i32 13)
%aa.converted = bitcast i32 addrspace(1)* %aa.rel to i64 addrspace(1)*
@@ -177,6 +181,8 @@ normal_return:
exceptional_return:
; CHECK: movl $15
; CHECK-NEXT: popq
+ ; CHECK-NEXT: :
+ ; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
%landing_pad = landingpad token
cleanup
diff --git a/llvm/test/CodeGen/X86/throws-cfi-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-fp.ll
new file mode 100644
index 00000000000..35e8a68239c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/throws-cfi-fp.ll
@@ -0,0 +1,103 @@
+; RUN: llc %s -o - | FileCheck %s
+
+; ModuleID = 'throws-cfi-fp.cpp'
+source_filename = "throws-cfi-fp.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+$__clang_call_terminate = comdat any
+
+@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1
+@_ZTIi = external constant i8*
+@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00"
+
+; Function Attrs: uwtable
+define void @_Z6throwsv() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+
+; CHECK-LABEL: _Z6throwsv:
+; CHECK: popq %rbp
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa %rbp, 16
+
+entry:
+ %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1
+ br i1 %.b5, label %if.then, label %try.cont
+
+if.then: ; preds = %entry
+ %exception = tail call i8* @__cxa_allocate_exception(i64 4)
+ %0 = bitcast i8* %exception to i32*
+ store i32 1, i32* %0, align 16
+ invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+ to label %unreachable unwind label %lpad
+
+lpad: ; preds = %if.then
+ %1 = landingpad { i8*, i32 }
+ catch i8* null
+ %2 = extractvalue { i8*, i32 } %1, 0
+ %3 = tail call i8* @__cxa_begin_catch(i8* %2)
+ %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0))
+ invoke void @__cxa_rethrow() #4
+ to label %unreachable unwind label %lpad1
+
+lpad1: ; preds = %lpad
+ %4 = landingpad { i8*, i32 }
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+try.cont: ; preds = %entry
+ ret void
+
+eh.resume: ; preds = %lpad1
+ resume { i8*, i32 } %4
+
+terminate.lpad: ; preds = %lpad1
+ %5 = landingpad { i8*, i32 }
+ catch i8* null
+ %6 = extractvalue { i8*, i32 } %5, 0
+ tail call void @__clang_call_terminate(i8* %6) #5
+ unreachable
+
+unreachable: ; preds = %lpad, %if.then
+ unreachable
+}
+
+declare i8* @__cxa_allocate_exception(i64)
+
+declare void @__cxa_throw(i8*, i8*, i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_rethrow()
+
+declare void @__cxa_end_catch()
+
+; Function Attrs: noinline noreturn nounwind
+declare void @__clang_call_terminate(i8*)
+
+declare void @_ZSt9terminatev()
+
+; Function Attrs: nounwind
+declare i32 @puts(i8* nocapture readonly) #3
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8, !9}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
+!1 = !DIFile(filename: "throws-cfi-fp.cpp", directory: "epilogue-dwarf/test")
+!2 = !{}
+!3 = !{!4}
+!4 = !DIGlobalVariableExpression(var: !5)
+!5 = distinct !DIGlobalVariable(name: "ShouldThrow", linkageName: "_ZL11ShouldThrow", scope: !0, file: !1, line: 2, type: !6, isLocal: true, isDefinition: true)
+!6 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
diff --git a/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll
new file mode 100644
index 00000000000..aac2a464b31
--- /dev/null
+++ b/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll
@@ -0,0 +1,102 @@
+; RUN: llc %s -o - | FileCheck %s
+
+; ModuleID = 'throws-cfi-no-fp.cpp'
+source_filename = "throws-cfi-no-fp.cpp"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+$__clang_call_terminate = comdat any
+
+@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1
+@_ZTIi = external constant i8*
+@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00"
+
+; Function Attrs: uwtable
+define void @_Z6throwsv() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+
+; CHECK-LABEL: _Z6throwsv:
+; CHECK: popq %rbx
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+
+entry:
+ %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1
+ br i1 %.b5, label %if.then, label %try.cont
+
+if.then: ; preds = %entry
+ %exception = tail call i8* @__cxa_allocate_exception(i64 4)
+ %0 = bitcast i8* %exception to i32*
+ store i32 1, i32* %0, align 16
+ invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+ to label %unreachable unwind label %lpad
+
+lpad: ; preds = %if.then
+ %1 = landingpad { i8*, i32 }
+ catch i8* null
+ %2 = extractvalue { i8*, i32 } %1, 0
+ %3 = tail call i8* @__cxa_begin_catch(i8* %2)
+ %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0))
+ invoke void @__cxa_rethrow() #4
+ to label %unreachable unwind label %lpad1
+
+lpad1: ; preds = %lpad
+ %4 = landingpad { i8*, i32 }
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+try.cont: ; preds = %entry
+ ret void
+
+eh.resume: ; preds = %lpad1
+ resume { i8*, i32 } %4
+
+terminate.lpad: ; preds = %lpad1
+ %5 = landingpad { i8*, i32 }
+ catch i8* null
+ %6 = extractvalue { i8*, i32 } %5, 0
+ tail call void @__clang_call_terminate(i8* %6)
+ unreachable
+
+unreachable: ; preds = %lpad, %if.then
+ unreachable
+}
+
+declare i8* @__cxa_allocate_exception(i64)
+
+declare void @__cxa_throw(i8*, i8*, i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_rethrow()
+
+declare void @__cxa_end_catch()
+
+; Function Attrs: noinline noreturn nounwind
+declare void @__clang_call_terminate(i8*)
+
+declare void @_ZSt9terminatev()
+
+
+; Function Attrs: nounwind
+declare i32 @puts(i8* nocapture readonly)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7, !8, !9}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
+!1 = !DIFile(filename: "throws-cfi-no-fp.cpp", directory: "epilogue-dwarf/test")
+!2 = !{}
+!3 = !{!4}
+!4 = !DIGlobalVariableExpression(var: !5)
+!5 = distinct !DIGlobalVariable(name: "ShouldThrow", linkageName: "_ZL11ShouldThrow", scope: !0, file: !1, line: 2, type: !6, isLocal: true, isDefinition: true)
+!6 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
+!7 = !{i32 2, !"Dwarf Version", i32 4}
+!8 = !{i32 2, !"Debug Info Version", i32 3}
+!9 = !{i32 1, !"wchar_size", i32 4}
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 392c0de95f2..3ffd27bbf19 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -3345,11 +3345,23 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: .Lcfi12:
+; AVX1-NEXT: .cfi_def_cfa_offset 48
; AVX1-NEXT: popq %r12
+; AVX1-NEXT: .Lcfi13:
+; AVX1-NEXT: .cfi_def_cfa_offset 40
; AVX1-NEXT: popq %r13
+; AVX1-NEXT: .Lcfi14:
+; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: popq %r14
+; AVX1-NEXT: .Lcfi15:
+; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: popq %r15
+; AVX1-NEXT: .Lcfi16:
+; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: .Lcfi17:
+; AVX1-NEXT: .cfi_def_cfa_offset 8
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_16i1_to_16i16:
@@ -3448,11 +3460,23 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: .Lcfi12:
+; AVX2-NEXT: .cfi_def_cfa_offset 48
; AVX2-NEXT: popq %r12
+; AVX2-NEXT: .Lcfi13:
+; AVX2-NEXT: .cfi_def_cfa_offset 40
; AVX2-NEXT: popq %r13
+; AVX2-NEXT: .Lcfi14:
+; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: popq %r14
+; AVX2-NEXT: .Lcfi15:
+; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: popq %r15
+; AVX2-NEXT: .Lcfi16:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: .Lcfi17:
+; AVX2-NEXT: .cfi_def_cfa_offset 8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_sext_16i1_to_16i16:
@@ -4849,6 +4873,8 @@ define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; X32-SSE41-NEXT: movd %xmm0, %eax
; X32-SSE41-NEXT: popl %ecx
+; X32-SSE41-NEXT: .Lcfi1:
+; X32-SSE41-NEXT: .cfi_def_cfa_offset 4
; X32-SSE41-NEXT: retl
entry:
%Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index 706edd27a3f..fc414df3555 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -622,6 +622,8 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
; KNL32-NEXT: vpblendvb %ymm3, 8(%ebp), %ymm1, %ymm1
; KNL32-NEXT: movl %ebp, %esp
; KNL32-NEXT: popl %ebp
+; KNL32-NEXT: .Lcfi3:
+; KNL32-NEXT: .cfi_def_cfa %esp, 4
; KNL32-NEXT: retl
entry:
%0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32> <i32 64, i32 1, i32 66, i32 3, i32 68, i32 5, i32 70, i32 7, i32 72, i32 9, i32 74, i32 11, i32 76, i32 13, i32 78, i32 15, i32 80, i32 17, i32 82, i32 19, i32 84, i32 21, i32 86, i32 23, i32 88, i32 25, i32 90, i32 27, i32 92, i32 29, i32 94, i32 31, i32 96, i32 33, i32 98, i32 35, i32 100, i32 37, i32 102, i32 39, i32 104, i32 41, i32 106, i32 43, i32 108, i32 45, i32 110, i32 47, i32 112, i32 49, i32 114, i32 51, i32 116, i32 53, i32 118, i32 55, i32 120, i32 57, i32 122, i32 59, i32 124, i32 61, i32 126, i32 63>
@@ -652,12 +654,12 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
; KNL32-LABEL: test_mm512_mask_blend_epi16:
; KNL32: # BB#0: # %entry
; KNL32-NEXT: pushl %ebp
-; KNL32-NEXT: .Lcfi3:
-; KNL32-NEXT: .cfi_def_cfa_offset 8
; KNL32-NEXT: .Lcfi4:
+; KNL32-NEXT: .cfi_def_cfa_offset 8
+; KNL32-NEXT: .Lcfi5:
; KNL32-NEXT: .cfi_offset %ebp, -8
; KNL32-NEXT: movl %esp, %ebp
-; KNL32-NEXT: .Lcfi5:
+; KNL32-NEXT: .Lcfi6:
; KNL32-NEXT: .cfi_def_cfa_register %ebp
; KNL32-NEXT: andl $-32, %esp
; KNL32-NEXT: subl $32, %esp
@@ -665,6 +667,8 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
; KNL32-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1],mem[2],ymm1[3],mem[4],ymm1[5],mem[6],ymm1[7],mem[8],ymm1[9],mem[10],ymm1[11],mem[12],ymm1[13],mem[14],ymm1[15]
; KNL32-NEXT: movl %ebp, %esp
; KNL32-NEXT: popl %ebp
+; KNL32-NEXT: .Lcfi7:
+; KNL32-NEXT: .cfi_def_cfa %esp, 4
; KNL32-NEXT: retl
entry:
%0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
index 4bcf18cc727..cbf6feaf637 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -445,6 +445,8 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-NEXT: orq %rcx, %rax
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: .Lcfi3:
+; AVX512F-NEXT: .cfi_def_cfa %rsp, 8
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/wide-integer-cmp.ll b/llvm/test/CodeGen/X86/wide-integer-cmp.ll
index b5c7f86567a..578fcb28aa7 100644
--- a/llvm/test/CodeGen/X86/wide-integer-cmp.ll
+++ b/llvm/test/CodeGen/X86/wide-integer-cmp.ll
@@ -107,10 +107,16 @@ define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-NEXT: # BB#1: # %bb1
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB4_2: # %bb2
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: movl $2, %eax
; CHECK-NEXT: popl %esi
+; CHECK-NEXT: .Lcfi4:
+; CHECK-NEXT: .cfi_def_cfa_offset 4
; CHECK-NEXT: retl
entry:
%cmp = icmp slt i128 %a, %b
diff --git a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
index f1590abcae8..4552071ec16 100644
--- a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
+++ b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
@@ -6,6 +6,8 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: pushq
; CHECK: ud2
; CHECK-NEXT: popq
+; CHECK-NEXT: :
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
define void @bar() {
entry:
diff --git a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
index 3052a0f615e..20b12de155e 100644
--- a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
+++ b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
@@ -23,6 +23,8 @@ define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 {
; CHECK-NEXT: movl $4, %eax
; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: popq %rdx
+; CHECK-NEXT: .Lcfi3:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
call void asm sideeffect "", "~{rax},~{rdx},~{xmm1},~{rdi},~{rsi},~{xmm0}"()
ret i32 4
OpenPOWER on IntegriCloud