author    Daniel Jasper <djasper@google.com>    2017-06-29 13:58:24 +0000
committer Daniel Jasper <djasper@google.com>    2017-06-29 13:58:24 +0000
commit    559aa75382e941a1c0c2fc5724d5274c20f2adf8 (patch)
tree      c8f215d8add480f68484fcc2ef8043b94f967598 /llvm/test
parent    8a44b7be76f35f26d56327952bd133a48c07f301 (diff)
Revert "r306529 - [X86] Correct dwarf unwind information in function epilogue"
I am 99% sure that this breaks the PPC ASAN build bot:
http://lab.llvm.org:8011/builders/sanitizer-ppc64be-linux/builds/3112/steps/64-bit%20check-asan/logs/stdio
If it doesn't go back to green, we can recommit (and fix the original
commit message at the same time :) ).
llvm-svn: 306676
Diffstat (limited to 'llvm/test')
55 files changed, 243 insertions, 1058 deletions
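For context on the diff below: the reverted change made the X86 backend emit a CFA-adjustment directive (`.cfi_def_cfa_offset` or `.cfi_def_cfa`) after each stack-pointer restore in a function epilogue, and these test updates simply drop the corresponding extra CHECK lines. A minimal hand-written sketch of that pattern follows; it is illustrative only, not output from this commit, and the function name, body, and register choice are made up:

foo:                                  # hypothetical function
        .cfi_startproc
        pushq   %rbx                  # prologue: save a callee-saved register
        .cfi_def_cfa_offset 16        # CFA is now 16 bytes above %rsp
        .cfi_offset %rbx, -16         # %rbx is saved at CFA-16
        movl    %edi, %ebx            # placeholder function body
        movl    %ebx, %eax
        popq    %rbx                  # epilogue: restore the register
        .cfi_def_cfa_offset 8         # epilogue directive the reverted pass inserted
        retq
        .cfi_endproc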
diff --git a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll index 4f0d7348da0..6814ed1d894 100644 --- a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll +++ b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll @@ -23,8 +23,6 @@ lpad: ; preds = %cont, %entry } ; CHECK: lpad -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: Ltmp declare i32 @__gxx_personality_v0(...) diff --git a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll index 3dda56bdc65..ba5de8eb5fc 100644 --- a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll +++ b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll @@ -88,8 +88,6 @@ define void @full_test() { ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X32-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) ; X32-NEXT: addl $60, %esp -; X32-NEXT: .Lcfi1: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: full_test: diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll index 88ea96c43ba..a5dc7906363 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll @@ -23,8 +23,6 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) { ; X32-NEXT: addl 8(%ebp), %eax ; X32-NEXT: adcl 12(%ebp), %edx ; X32-NEXT: popl %ebp -; X32-NEXT: .Lcfi3: -; X32-NEXT: .cfi_def_cfa %esp, 4 ; X32-NEXT: retl %ret = add i64 %arg1, %arg2 ret i64 %ret diff --git a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll index 09ff60ed591..a9ec94defea 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll @@ -19,8 +19,6 @@ define i32* @allocai32() { ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movl %esp, %eax ; X32-NEXT: popl %ecx -; X32-NEXT: .Lcfi1: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X32ABI-LABEL: allocai32: diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll index bd742c2c70a..5e375cc42e0 100644 --- a/llvm/test/CodeGen/X86/O0-pipeline.ll +++ b/llvm/test/CodeGen/X86/O0-pipeline.ll @@ -46,9 +46,7 @@ ; CHECK-NEXT: Post-RA pseudo instruction expansion pass ; CHECK-NEXT: X86 pseudo instruction expansion pass ; CHECK-NEXT: Analyze Machine Code For Garbage Collection -; CHECK-NEXT: Verify that corresponding in/out CFI info matches ; CHECK-NEXT: X86 vzeroupper inserter -; CHECK-NEXT: CFI Instruction Inserter ; CHECK-NEXT: Contiguously Lay Out Funclets ; CHECK-NEXT: StackMap Liveness Analysis ; CHECK-NEXT: Live DEBUG_VALUE analysis diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll index 8ba0d0e275e..e5f7cc5c6dd 100644 --- a/llvm/test/CodeGen/X86/avg.ll +++ b/llvm/test/CodeGen/X86/avg.ll @@ -591,8 +591,6 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) { ; AVX1-NEXT: vmovups %ymm0, (%rax) ; AVX1-NEXT: vmovups %ymm1, (%rax) ; AVX1-NEXT: addq $24, %rsp -; AVX1-NEXT: .Lcfi1: -; AVX1-NEXT: .cfi_def_cfa_offset 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll index 576a3afa6bb..350c0d7873e 100644 --- a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll @@ -414,8 +414,6 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; ALL-NEXT: callq func_f32 ; ALL-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp -; ALL-NEXT: .Lcfi1: -; ALL-NEXT: 
.cfi_def_cfa_offset 8 ; ALL-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -429,15 +427,13 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; ALL-LABEL: broadcast_sd_spill: ; ALL: # BB#0: ; ALL-NEXT: subq $24, %rsp -; ALL-NEXT: .Lcfi2: +; ALL-NEXT: .Lcfi1: ; ALL-NEXT: .cfi_def_cfa_offset 32 ; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0 ; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill ; ALL-NEXT: callq func_f64 ; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp -; ALL-NEXT: .Lcfi3: -; ALL-NEXT: .cfi_def_cfa_offset 8 ; ALL-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll index c7db4ded181..2b89373ceb0 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -289,8 +289,6 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi1: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) ret i64 %res @@ -307,7 +305,7 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-LABEL: test_mask_pcmpeq_b: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $12, %esp -; AVX512F-32-NEXT: .Lcfi2: +; AVX512F-32-NEXT: .Lcfi1: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 16 ; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1 ; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1} @@ -315,8 +313,6 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi3: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) ret i64 %res @@ -370,15 +366,13 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-LABEL: test_pcmpgt_b: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $12, %esp -; AVX512F-32-NEXT: .Lcfi4: +; AVX512F-32-NEXT: .Lcfi2: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 16 ; AVX512F-32-NEXT: vpcmpgtb %zmm1, %zmm0, %k0 ; AVX512F-32-NEXT: kmovq %k0, (%esp) ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi5: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) ret i64 %res @@ -395,7 +389,7 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-LABEL: test_mask_pcmpgt_b: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $12, %esp -; AVX512F-32-NEXT: .Lcfi6: +; AVX512F-32-NEXT: .Lcfi3: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 16 ; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k1 ; AVX512F-32-NEXT: vpcmpgtb %zmm1, %zmm0, %k0 {%k1} @@ -403,8 +397,6 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi7: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) ret 
i64 %res @@ -1601,7 +1593,7 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-LABEL: test_cmp_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $60, %esp -; AVX512F-32-NEXT: .Lcfi8: +; AVX512F-32-NEXT: .Lcfi4: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 64 ; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 ; AVX512F-32-NEXT: kmovq %k0, {{[0-9]+}}(%esp) @@ -1632,8 +1624,6 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .Lcfi9: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1) @@ -1683,17 +1673,17 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) { ; AVX512F-32-LABEL: test_mask_cmp_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: pushl %ebx -; AVX512F-32-NEXT: .Lcfi10: +; AVX512F-32-NEXT: .Lcfi5: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: pushl %esi -; AVX512F-32-NEXT: .Lcfi11: +; AVX512F-32-NEXT: .Lcfi6: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: subl $60, %esp -; AVX512F-32-NEXT: .Lcfi12: +; AVX512F-32-NEXT: .Lcfi7: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 72 -; AVX512F-32-NEXT: .Lcfi13: +; AVX512F-32-NEXT: .Lcfi8: ; AVX512F-32-NEXT: .cfi_offset %esi, -12 -; AVX512F-32-NEXT: .Lcfi14: +; AVX512F-32-NEXT: .Lcfi9: ; AVX512F-32-NEXT: .cfi_offset %ebx, -8 ; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm6 ; AVX512F-32-NEXT: vmovdqa64 %zmm0, %zmm5 @@ -2438,14 +2428,8 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) { ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcxl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .Lcfi15: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi -; AVX512F-32-NEXT: .Lcfi16: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx -; AVX512F-32-NEXT: .Lcfi17: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) %res1 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask) @@ -2495,7 +2479,7 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-LABEL: test_ucmp_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $60, %esp -; AVX512F-32-NEXT: .Lcfi18: +; AVX512F-32-NEXT: .Lcfi10: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 64 ; AVX512F-32-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 ; AVX512F-32-NEXT: kmovq %k0, {{[0-9]+}}(%esp) @@ -2526,8 +2510,6 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .Lcfi19: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 -1) @@ -2577,17 +2559,17 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m ; AVX512F-32-LABEL: test_mask_x86_avx512_ucmp_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: pushl %ebx -; AVX512F-32-NEXT: .Lcfi20: +; AVX512F-32-NEXT: .Lcfi11: ; AVX512F-32-NEXT: 
.cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: pushl %esi -; AVX512F-32-NEXT: .Lcfi21: +; AVX512F-32-NEXT: .Lcfi12: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: subl $60, %esp -; AVX512F-32-NEXT: .Lcfi22: +; AVX512F-32-NEXT: .Lcfi13: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 72 -; AVX512F-32-NEXT: .Lcfi23: +; AVX512F-32-NEXT: .Lcfi14: ; AVX512F-32-NEXT: .cfi_offset %esi, -12 -; AVX512F-32-NEXT: .Lcfi24: +; AVX512F-32-NEXT: .Lcfi15: ; AVX512F-32-NEXT: .cfi_offset %ebx, -8 ; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm6 ; AVX512F-32-NEXT: vmovdqa64 %zmm0, %zmm5 @@ -3332,14 +3314,8 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcxl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .Lcfi25: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi -; AVX512F-32-NEXT: .Lcfi26: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx -; AVX512F-32-NEXT: .Lcfi27: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) %res1 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 1, i64 %mask) diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll index 2281c01a84f..5472f057ef2 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -1600,8 +1600,6 @@ define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi1: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.kunpck.dq(i64 %x0, i64 %x1) ret i64 %res @@ -1619,15 +1617,13 @@ define i64@test_int_x86_avx512_cvtb2mask_512(<64 x i8> %x0) { ; AVX512F-32-LABEL: test_int_x86_avx512_cvtb2mask_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $12, %esp -; AVX512F-32-NEXT: .Lcfi2: +; AVX512F-32-NEXT: .Lcfi1: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 16 ; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0 ; AVX512F-32-NEXT: kmovq %k0, (%esp) ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .Lcfi3: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8> %x0) ret i64 %res @@ -1805,7 +1801,7 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x ; AVX512F-32-LABEL: test_int_x86_avx512_ptestm_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $20, %esp -; AVX512F-32-NEXT: .Lcfi4: +; AVX512F-32-NEXT: .Lcfi2: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 24 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 @@ -1819,8 +1815,6 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp -; AVX512F-32-NEXT: .Lcfi5: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1) @@ -1872,7 +1866,7 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 % ; 
AVX512F-32-LABEL: test_int_x86_avx512_ptestnm_b_512: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: subl $20, %esp -; AVX512F-32-NEXT: .Lcfi6: +; AVX512F-32-NEXT: .Lcfi3: ; AVX512F-32-NEXT: .cfi_def_cfa_offset 24 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1 @@ -1886,8 +1880,6 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 % ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp -; AVX512F-32-NEXT: .Lcfi7: -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1) diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll index 8e25f2fd3e7..530e2c544cf 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -33,8 +33,6 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi1: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastd_epi32: @@ -59,7 +57,7 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) { ; X32-LABEL: test_mm_maskz_broadcastd_epi32: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi2: +; X32-NEXT: .Lcfi1: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -68,8 +66,6 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi3: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastd_epi32: @@ -166,7 +162,7 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-LABEL: test_mm_mask_broadcastq_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi4: +; X32-NEXT: .Lcfi2: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -175,8 +171,6 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi5: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastq_epi64: @@ -198,7 +192,7 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-LABEL: test_mm_maskz_broadcastq_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi6: +; X32-NEXT: .Lcfi3: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -207,8 +201,6 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi7: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastq_epi64: @@ -244,7 +236,7 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i ; X32-LABEL: test_mm256_mask_broadcastq_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi8: +; X32-NEXT: .Lcfi4: ; X32-NEXT: .cfi_def_cfa_offset 8 ; 
X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -253,8 +245,6 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi9: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastq_epi64: @@ -276,7 +266,7 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-LABEL: test_mm256_maskz_broadcastq_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi10: +; X32-NEXT: .Lcfi5: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -285,8 +275,6 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi11: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_broadcastq_epi64: @@ -322,7 +310,7 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x ; X32-LABEL: test_mm_mask_broadcastsd_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi12: +; X32-NEXT: .Lcfi6: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -331,8 +319,6 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi13: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastsd_pd: @@ -354,7 +340,7 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-LABEL: test_mm_maskz_broadcastsd_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi14: +; X32-NEXT: .Lcfi7: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -363,8 +349,6 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi15: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastsd_pd: @@ -400,7 +384,7 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 ; X32-LABEL: test_mm256_mask_broadcastsd_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi16: +; X32-NEXT: .Lcfi8: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -409,8 +393,6 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi17: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastsd_pd: @@ -432,7 +414,7 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-LABEL: test_mm256_maskz_broadcastsd_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi18: +; X32-NEXT: .Lcfi9: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -441,8 +423,6 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi19: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; 
; X64-LABEL: test_mm256_maskz_broadcastsd_pd: @@ -478,7 +458,7 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl ; X32-LABEL: test_mm_mask_broadcastss_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi20: +; X32-NEXT: .Lcfi10: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -487,8 +467,6 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi21: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastss_ps: @@ -510,7 +488,7 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) { ; X32-LABEL: test_mm_maskz_broadcastss_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi22: +; X32-NEXT: .Lcfi11: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -519,8 +497,6 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi23: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastss_ps: @@ -608,7 +584,7 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-LABEL: test_mm_mask_movddup_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi24: +; X32-NEXT: .Lcfi12: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -617,8 +593,6 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi25: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movddup_pd: @@ -640,7 +614,7 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) { ; X32-LABEL: test_mm_maskz_movddup_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi26: +; X32-NEXT: .Lcfi13: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -649,8 +623,6 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi27: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movddup_pd: @@ -686,7 +658,7 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-LABEL: test_mm256_mask_movddup_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi28: +; X32-NEXT: .Lcfi14: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -695,8 +667,6 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi29: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_movddup_pd: @@ -718,7 +688,7 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) { ; X32-LABEL: test_mm256_maskz_movddup_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi30: +; X32-NEXT: .Lcfi15: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ 
-727,8 +697,6 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi31: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_movddup_pd: @@ -764,7 +732,7 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-LABEL: test_mm_mask_movehdup_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi32: +; X32-NEXT: .Lcfi16: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -773,8 +741,6 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = xmm1[1,1,3,3] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi33: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movehdup_ps: @@ -796,7 +762,7 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) { ; X32-LABEL: test_mm_maskz_movehdup_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi34: +; X32-NEXT: .Lcfi17: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -805,8 +771,6 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi35: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movehdup_ps: @@ -894,7 +858,7 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-LABEL: test_mm_mask_moveldup_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi36: +; X32-NEXT: .Lcfi18: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -903,8 +867,6 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = xmm1[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi37: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_moveldup_ps: @@ -926,7 +888,7 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) { ; X32-LABEL: test_mm_maskz_moveldup_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi38: +; X32-NEXT: .Lcfi19: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -935,8 +897,6 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi39: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_moveldup_ps: @@ -1024,7 +984,7 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64 ; X32-LABEL: test_mm256_mask_permutex_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi40: +; X32-NEXT: .Lcfi20: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1033,8 +993,6 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi41: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_epi64: @@ -1056,7 
+1014,7 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) { ; X32-LABEL: test_mm256_maskz_permutex_epi64: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi42: +; X32-NEXT: .Lcfi21: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1065,8 +1023,6 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi43: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_epi64: @@ -1102,7 +1058,7 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x ; X32-LABEL: test_mm256_mask_permutex_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi44: +; X32-NEXT: .Lcfi22: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1111,8 +1067,6 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi45: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_pd: @@ -1134,7 +1088,7 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) { ; X32-LABEL: test_mm256_maskz_permutex_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi46: +; X32-NEXT: .Lcfi23: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1143,8 +1097,6 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi47: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_pd: @@ -1180,7 +1132,7 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-LABEL: test_mm_mask_shuffle_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi48: +; X32-NEXT: .Lcfi24: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -1189,8 +1141,6 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} = xmm1[1],xmm2[1] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi49: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_pd: @@ -1212,7 +1162,7 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou ; X32-LABEL: test_mm_maskz_shuffle_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi50: +; X32-NEXT: .Lcfi25: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $3, %al @@ -1221,8 +1171,6 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi51: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_pd: @@ -1258,7 +1206,7 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-LABEL: test_mm256_mask_shuffle_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi52: +; X32-NEXT: .Lcfi26: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; 
X32-NEXT: andb $15, %al @@ -1267,8 +1215,6 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} = ymm1[1],ymm2[1],ymm1[2],ymm2[2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi53: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_shuffle_pd: @@ -1290,7 +1236,7 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x ; X32-LABEL: test_mm256_maskz_shuffle_pd: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi54: +; X32-NEXT: .Lcfi27: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1299,8 +1245,6 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[2],ymm1[2] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi55: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_shuffle_pd: @@ -1336,7 +1280,7 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float> ; X32-LABEL: test_mm_mask_shuffle_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi56: +; X32-NEXT: .Lcfi28: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1345,8 +1289,6 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[0,1],xmm2[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi57: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_ps: @@ -1368,7 +1310,7 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float ; X32-LABEL: test_mm_maskz_shuffle_ps: ; X32: # BB#0: ; X32-NEXT: pushl %eax -; X32-NEXT: .Lcfi58: +; X32-NEXT: .Lcfi29: ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al ; X32-NEXT: andb $15, %al @@ -1377,8 +1319,6 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1],xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .Lcfi59: -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_ps: diff --git a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll index 98265fdc1cc..38a461ff0be 100644 --- a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll @@ -13,8 +13,6 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) { ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .Lcfi1: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -27,15 +25,13 @@ define <4 x float> @_128_broadcast_ss_spill(float %x) { ; CHECK-LABEL: _128_broadcast_ss_spill: ; CHECK: # BB#0: ; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .Lcfi2: +; CHECK-NEXT: .Lcfi1: ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .Lcfi3: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -49,15 +45,13 @@ define <4 x 
double> @_256_broadcast_sd_spill(double %x) { ; CHECK-LABEL: _256_broadcast_sd_spill: ; CHECK: # BB#0: ; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .Lcfi4: +; CHECK-NEXT: .Lcfi2: ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: vaddsd %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq func_f64 ; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .Lcfi5: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/llvm/test/CodeGen/X86/emutls-pie.ll b/llvm/test/CodeGen/X86/emutls-pie.ll index 2ff66e8b917..5db8c888a4e 100644 --- a/llvm/test/CodeGen/X86/emutls-pie.ll +++ b/llvm/test/CodeGen/X86/emutls-pie.ll @@ -18,19 +18,13 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi ; X64-NEXT: callq my_emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: : -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -50,19 +44,13 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: leaq __emutls_v.i(%rip), %rdi ; X64-NEXT: callq __emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: : -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll index eaff77734a6..9266fe962df 100644 --- a/llvm/test/CodeGen/X86/emutls.ll +++ b/llvm/test/CodeGen/X86/emutls.ll @@ -16,16 +16,12 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movl $my_emutls_v_xyz, %edi ; X64-NEXT: callq my_emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: : -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -49,16 +45,12 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: : -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -71,15 +63,11 @@ define i32* @f2() { ; X32: movl $__emutls_v.i1, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f2: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: popq %rcx -; X64-NEXT: : -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -104,8 +92,6 @@ define i32* @f4() { ; X32: movl $__emutls_v.i2, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -130,8 +116,6 @@ define i32* @f6() { ; X32: movl $__emutls_v.i3, (%esp) ; X32-NEXT: calll 
__emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -144,8 +128,6 @@ define i32 @f7() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -158,8 +140,6 @@ define i32* @f8() { ; X32: movl $__emutls_v.i4, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -172,8 +152,6 @@ define i32 @f9() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -186,8 +164,6 @@ define i32* @f10() { ; X32: movl $__emutls_v.i5, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -200,8 +176,6 @@ define i16 @f11() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movzwl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -215,8 +189,6 @@ define i32 @f12() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movswl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -231,8 +203,6 @@ define i8 @f13() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movb (%eax), %al ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -246,8 +216,6 @@ define i32 @f14() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movsbl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: : -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll deleted file mode 100644 index d964576d31c..00000000000 --- a/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll +++ /dev/null @@ -1,44 +0,0 @@ -; RUN: llc -O0 %s -o - | FileCheck %s - -; ModuleID = 'epilogue-cfi-fp.c' -source_filename = "epilogue-cfi-fp.c" -target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" -target triple = "i686-pc-linux" - -; Function Attrs: noinline nounwind -define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) #0 { - -; CHECK-LABEL: foo: -; CHECK: popl %ebp -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa %esp, 4 -; CHECK-NEXT: retl - -entry: - %i.addr = alloca i32, align 4 - %j.addr = alloca i32, align 4 - %k.addr = alloca i32, align 4 - %l.addr = alloca i32, align 4 - %m.addr = alloca i32, align 4 - store i32 %i, i32* %i.addr, align 4 - store i32 %j, i32* %j.addr, align 4 - store i32 %k, i32* %k.addr, align 4 - store i32 %l, i32* %l.addr, align 4 - store i32 %m, i32* %m.addr, align 4 - ret i32 0 -} - -attributes #0 = { "no-frame-pointer-elim"="true" } - -!llvm.dbg.cu = !{!0} -!llvm.module.flags = !{!3, !4, !5, !6, !7} - -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) -!1 = !DIFile(filename: "epilogue-cfi-fp.c", directory: "epilogue-dwarf/test") -!2 = !{} -!3 = !{i32 1, !"NumRegisterParameters", i32 0} -!4 = !{i32 2, !"Dwarf Version", i32 4} -!5 = !{i32 2, !"Debug Info Version", i32 3} -!6 = !{i32 1, !"wchar_size", i32 4} 
-!7 = !{i32 7, !"PIC Level", i32 2} - diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll deleted file mode 100644 index 275a9c8fc1a..00000000000 --- a/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll +++ /dev/null @@ -1,50 +0,0 @@ -; RUN: llc -O0 < %s | FileCheck %s - -; ModuleID = 'epilogue-cfi-no-fp.c' -source_filename = "epilogue-cfi-no-fp.c" -target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" -target triple = "i686-pc-linux" - -; Function Attrs: noinline nounwind -define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) { -; CHECK-LABEL: foo: -; CHECK: addl $20, %esp -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: popl %esi -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 12 -; CHECK-NEXT: popl %edi -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: popl %ebx -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 4 -; CHECK-NEXT: retl -entry: - %i.addr = alloca i32, align 4 - %j.addr = alloca i32, align 4 - %k.addr = alloca i32, align 4 - %l.addr = alloca i32, align 4 - %m.addr = alloca i32, align 4 - store i32 %i, i32* %i.addr, align 4 - store i32 %j, i32* %j.addr, align 4 - store i32 %k, i32* %k.addr, align 4 - store i32 %l, i32* %l.addr, align 4 - store i32 %m, i32* %m.addr, align 4 - ret i32 0 -} - -!llvm.dbg.cu = !{!0} -!llvm.module.flags = !{!3, !4, !5, !6, !7} - -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) -!1 = !DIFile(filename: "epilogue-cfi-no-fp.c", directory: "epilogue-dwarf/test") -!2 = !{} -!3 = !{i32 1, !"NumRegisterParameters", i32 0} -!4 = !{i32 2, !"Dwarf Version", i32 4} -!5 = !{i32 2, !"Debug Info Version", i32 3} -!6 = !{i32 1, !"wchar_size", i32 4} -!7 = !{i32 7, !"PIC Level", i32 2} - - diff --git a/llvm/test/CodeGen/X86/fast-isel-store.ll b/llvm/test/CodeGen/X86/fast-isel-store.ll index 65fb1115e68..528682bf70b 100644 --- a/llvm/test/CodeGen/X86/fast-isel-store.ll +++ b/llvm/test/CodeGen/X86/fast-isel-store.ll @@ -376,8 +376,6 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double ; SSE64-NEXT: movupd %xmm0, (%eax) ; SSE64-NEXT: movupd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi1: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64: @@ -409,7 +407,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 ; SSE64-LABEL: test_store_4xf64_aligned: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi2: +; SSE64-NEXT: .Lcfi1: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax ; SSE64-NEXT: addpd {{[0-9]+}}(%esp), %xmm1 @@ -417,8 +415,6 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 ; SSE64-NEXT: movapd %xmm0, (%eax) ; SSE64-NEXT: movapd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi3: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64_aligned: @@ -450,7 +446,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va ; SSE64-LABEL: test_store_16xi32: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi4: +; SSE64-NEXT: .Lcfi2: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3 ; 
SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -459,8 +455,6 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi5: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32: @@ -502,7 +496,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x ; SSE64-LABEL: test_store_16xi32_aligned: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi6: +; SSE64-NEXT: .Lcfi3: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -511,8 +505,6 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x ; SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi7: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32_aligned: @@ -554,7 +546,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa ; SSE64-LABEL: test_store_16xf32: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi8: +; SSE64-NEXT: .Lcfi4: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -563,8 +555,6 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi9: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xf32: @@ -606,7 +596,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1 ; SSE64-LABEL: test_store_16xf32_aligned: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi10: +; SSE64-NEXT: .Lcfi5: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -615,8 +605,6 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1 ; SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi11: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xf32_aligned: @@ -662,7 +650,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; SSE64-LABEL: test_store_8xf64: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi12: +; SSE64-NEXT: .Lcfi6: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -675,8 +663,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; SSE64-NEXT: movupd %xmm2, 32(%eax) ; SSE64-NEXT: movupd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi13: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64: @@ -706,8 +692,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; AVXONLY64-NEXT: vmovupd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp -; AVXONLY64-NEXT: .Lcfi3: -; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64: @@ -743,7 +727,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; SSE64-LABEL: 
test_store_8xf64_aligned: ; SSE64: # BB#0: ; SSE64-NEXT: subl $12, %esp -; SSE64-NEXT: .Lcfi14: +; SSE64-NEXT: .Lcfi7: ; SSE64-NEXT: .cfi_def_cfa_offset 16 ; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3 ; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -756,8 +740,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; SSE64-NEXT: movapd %xmm2, 32(%eax) ; SSE64-NEXT: movapd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .Lcfi15: -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64_aligned: @@ -771,12 +753,12 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; AVXONLY64-LABEL: test_store_8xf64_aligned: ; AVXONLY64: # BB#0: ; AVXONLY64-NEXT: pushl %ebp -; AVXONLY64-NEXT: .Lcfi4: +; AVXONLY64-NEXT: .Lcfi3: ; AVXONLY64-NEXT: .cfi_def_cfa_offset 8 -; AVXONLY64-NEXT: .Lcfi5: +; AVXONLY64-NEXT: .Lcfi4: ; AVXONLY64-NEXT: .cfi_offset %ebp, -8 ; AVXONLY64-NEXT: movl %esp, %ebp -; AVXONLY64-NEXT: .Lcfi6: +; AVXONLY64-NEXT: .Lcfi5: ; AVXONLY64-NEXT: .cfi_def_cfa_register %ebp ; AVXONLY64-NEXT: andl $-32, %esp ; AVXONLY64-NEXT: subl $32, %esp @@ -787,8 +769,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; AVXONLY64-NEXT: vmovapd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp -; AVXONLY64-NEXT: .Lcfi7: -; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64_aligned: diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll index ef86f998fc0..ab797e04b40 100644 --- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll +++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll @@ -18,14 +18,9 @@ entry: } ; CHECK-LABEL: noDebug -; CHECK: addq $16, %rsp -; CHECK: addq $8, %rsp +; CHECK: addq $24, %rsp ; CHECK: popq %rbx -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq @@ -46,14 +41,9 @@ entry: ; CHECK-LABEL: withDebug ; CHECK: #DEBUG_VALUE: test:j <- %RBX -; CHECK-NEXT: addq $16, %rsp -; CHECK: addq $8, %rsp +; CHECK-NEXT: addq $24, %rsp ; CHECK: popq %rbx -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll index 593fb449082..f9ecf707810 100644 --- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll +++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll @@ -9,8 +9,6 @@ define i64 @fn1NoDebug(i64 %a) { ; CHECK-LABEL: fn1NoDebug ; CHECK: popq %rcx -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret define i64 @fn1WithDebug(i64 %a) !dbg !4 { @@ -21,8 +19,6 @@ define i64 @fn1WithDebug(i64 %a) !dbg !4 { ; CHECK-LABEL: fn1WithDebug ; CHECK: popq %rcx -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret %struct.Buffer = type { i8, [63 x i8] } @@ -37,8 +33,6 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { @@ -52,8 +46,6 @@ define void @fn2WithDebug(%struct.Buffer* byval align 
64 %p1) !dbg !8 { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret declare i64 @fn(i64, i64) diff --git a/llvm/test/CodeGen/X86/haddsub-2.ll b/llvm/test/CodeGen/X86/haddsub-2.ll index 428b4110219..fd023d01803 100644 --- a/llvm/test/CodeGen/X86/haddsub-2.ll +++ b/llvm/test/CodeGen/X86/haddsub-2.ll @@ -736,23 +736,11 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx -; SSE3-NEXT: .Lcfi12: -; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 -; SSE3-NEXT: .Lcfi13: -; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 -; SSE3-NEXT: .Lcfi14: -; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 -; SSE3-NEXT: .Lcfi15: -; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 -; SSE3-NEXT: .Lcfi16: -; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp -; SSE3-NEXT: .Lcfi17: -; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_vphadd_w_test: @@ -1275,34 +1263,34 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) { ; SSE3-LABEL: avx2_hadd_w: ; SSE3: # BB#0: ; SSE3-NEXT: pushq %rbp -; SSE3-NEXT: .Lcfi18: +; SSE3-NEXT: .Lcfi12: ; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: pushq %r15 -; SSE3-NEXT: .Lcfi19: +; SSE3-NEXT: .Lcfi13: ; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: pushq %r14 -; SSE3-NEXT: .Lcfi20: +; SSE3-NEXT: .Lcfi14: ; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: pushq %r13 -; SSE3-NEXT: .Lcfi21: +; SSE3-NEXT: .Lcfi15: ; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: pushq %r12 -; SSE3-NEXT: .Lcfi22: +; SSE3-NEXT: .Lcfi16: ; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: pushq %rbx -; SSE3-NEXT: .Lcfi23: +; SSE3-NEXT: .Lcfi17: ; SSE3-NEXT: .cfi_def_cfa_offset 56 -; SSE3-NEXT: .Lcfi24: +; SSE3-NEXT: .Lcfi18: ; SSE3-NEXT: .cfi_offset %rbx, -56 -; SSE3-NEXT: .Lcfi25: +; SSE3-NEXT: .Lcfi19: ; SSE3-NEXT: .cfi_offset %r12, -48 -; SSE3-NEXT: .Lcfi26: +; SSE3-NEXT: .Lcfi20: ; SSE3-NEXT: .cfi_offset %r13, -40 -; SSE3-NEXT: .Lcfi27: +; SSE3-NEXT: .Lcfi21: ; SSE3-NEXT: .cfi_offset %r14, -32 -; SSE3-NEXT: .Lcfi28: +; SSE3-NEXT: .Lcfi22: ; SSE3-NEXT: .cfi_offset %r15, -24 -; SSE3-NEXT: .Lcfi29: +; SSE3-NEXT: .Lcfi23: ; SSE3-NEXT: .cfi_offset %rbp, -16 ; SSE3-NEXT: movd %xmm0, %eax ; SSE3-NEXT: pextrw $1, %xmm0, %r10d @@ -1387,23 +1375,11 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx -; SSE3-NEXT: .Lcfi30: -; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 -; SSE3-NEXT: .Lcfi31: -; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 -; SSE3-NEXT: .Lcfi32: -; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 -; SSE3-NEXT: .Lcfi33: -; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 -; SSE3-NEXT: .Lcfi34: -; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp -; SSE3-NEXT: .Lcfi35: -; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_hadd_w: diff --git a/llvm/test/CodeGen/X86/hipe-cc64.ll b/llvm/test/CodeGen/X86/hipe-cc64.ll index c61ea289a4a..43e2e1409fd 100644 --- a/llvm/test/CodeGen/X86/hipe-cc64.ll +++ b/llvm/test/CodeGen/X86/hipe-cc64.ll @@ -91,8 +91,6 @@ define cc 11 { i64, i64, i64 } @tailcaller(i64 %hp, i64 %p) #0 { ; CHECK-NEXT: movl $47, %ecx ; CHECK-NEXT: movl 
$63, %r8d ; CHECK-NEXT: popq %rax - ; CHECK-NEXT: : - ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: jmp tailcallee %ret = tail call cc11 { i64, i64, i64 } @tailcallee(i64 %hp, i64 %p, i64 15, i64 31, i64 47, i64 63, i64 79) #1 diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll index e1c5ff1b35c..45a83cc5dfd 100644 --- a/llvm/test/CodeGen/X86/imul.ll +++ b/llvm/test/CodeGen/X86/imul.ll @@ -309,8 +309,6 @@ define i64 @test5(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi2: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -31 @@ -353,9 +351,9 @@ define i64 @test7(i64 %a) { ; X86-LABEL: test7: ; X86: # BB#0: # %entry ; X86-NEXT: pushl %esi -; X86-NEXT: .Lcfi3: +; X86-NEXT: .Lcfi2: ; X86-NEXT: .cfi_def_cfa_offset 8 -; X86-NEXT: .Lcfi4: +; X86-NEXT: .Lcfi3: ; X86-NEXT: .cfi_offset %esi, -8 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax @@ -368,8 +366,6 @@ define i64 @test7(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi5: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -33 @@ -386,9 +382,9 @@ define i64 @testOverflow(i64 %a) { ; X86-LABEL: testOverflow: ; X86: # BB#0: # %entry ; X86-NEXT: pushl %esi -; X86-NEXT: .Lcfi6: +; X86-NEXT: .Lcfi4: ; X86-NEXT: .cfi_def_cfa_offset 8 -; X86-NEXT: .Lcfi7: +; X86-NEXT: .Lcfi5: ; X86-NEXT: .cfi_offset %esi, -8 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl $-1, %edx @@ -400,8 +396,6 @@ define i64 @testOverflow(i64 %a) { ; X86-NEXT: addl %esi, %edx ; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi8: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, 9223372036854775807 diff --git a/llvm/test/CodeGen/X86/legalize-shift-64.ll b/llvm/test/CodeGen/X86/legalize-shift-64.ll index e1e68f37dc6..b3f2116e648 100644 --- a/llvm/test/CodeGen/X86/legalize-shift-64.ll +++ b/llvm/test/CodeGen/X86/legalize-shift-64.ll @@ -125,17 +125,9 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) { ; CHECK-NEXT: movl %esi, 4(%eax) ; CHECK-NEXT: movl %edi, (%eax) ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .Lcfi8: -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popl %edi -; CHECK-NEXT: .Lcfi9: -; CHECK-NEXT: .cfi_def_cfa_offset 12 ; CHECK-NEXT: popl %ebx -; CHECK-NEXT: .Lcfi10: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebp -; CHECK-NEXT: .Lcfi11: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl $4 %shl = shl <2 x i64> %A, %B ret <2 x i64> %shl @@ -146,12 +138,12 @@ define i32 @test6() { ; CHECK-LABEL: test6: ; CHECK: # BB#0: ; CHECK-NEXT: pushl %ebp -; CHECK-NEXT: .Lcfi12: +; CHECK-NEXT: .Lcfi8: ; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi13: +; CHECK-NEXT: .Lcfi9: ; CHECK-NEXT: .cfi_offset %ebp, -8 ; CHECK-NEXT: movl %esp, %ebp -; CHECK-NEXT: .Lcfi14: +; CHECK-NEXT: .Lcfi10: ; CHECK-NEXT: .cfi_def_cfa_register %ebp ; CHECK-NEXT: andl $-8, %esp ; CHECK-NEXT: subl $16, %esp @@ -180,8 +172,6 @@ define i32 @test6() { ; CHECK-NEXT: .LBB5_4: # %if.then ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp -; CHECK-NEXT: .Lcfi15: -; CHECK-NEXT: .cfi_def_cfa %esp, 4 ; CHECK-NEXT: retl %x = alloca i32, align 4 %t = alloca i64, align 8 diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll index 21c132caa78..e737a51cf40 100644 --- a/llvm/test/CodeGen/X86/load-combine.ll +++ b/llvm/test/CodeGen/X86/load-combine.ll @@ 
-378,8 +378,6 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) { ; CHECK-NEXT: orl %ecx, %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .Lcfi2: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_uses: @@ -484,9 +482,9 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) { ; CHECK-LABEL: load_i32_by_i8_bswap_store_in_between: ; CHECK: # BB#0: ; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi3: +; CHECK-NEXT: .Lcfi2: ; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi4: +; CHECK-NEXT: .Lcfi3: ; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -502,8 +500,6 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) { ; CHECK-NEXT: movzbl 3(%ecx), %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .Lcfi5: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between: diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 60c2799d265..77254ba6760 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -1764,8 +1764,6 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; KNL_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .Lcfi3: -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16i64: @@ -1783,12 +1781,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; SKX_32-LABEL: test_gather_16i64: ; SKX_32: # BB#0: ; SKX_32-NEXT: pushl %ebp -; SKX_32-NEXT: .Lcfi2: +; SKX_32-NEXT: .Lcfi1: ; SKX_32-NEXT: .cfi_def_cfa_offset 8 -; SKX_32-NEXT: .Lcfi3: +; SKX_32-NEXT: .Lcfi2: ; SKX_32-NEXT: .cfi_offset %ebp, -8 ; SKX_32-NEXT: movl %esp, %ebp -; SKX_32-NEXT: .Lcfi4: +; SKX_32-NEXT: .Lcfi3: ; SKX_32-NEXT: .cfi_def_cfa_register %ebp ; SKX_32-NEXT: andl $-64, %esp ; SKX_32-NEXT: subl $64, %esp @@ -1803,8 +1801,6 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; SKX_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .Lcfi5: -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32 4, <16 x i1> %mask, <16 x i64> %src0) ret <16 x i64> %res @@ -1871,12 +1867,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; KNL_32-LABEL: test_gather_16f64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi4: +; KNL_32-NEXT: .Lcfi3: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi5: +; KNL_32-NEXT: .Lcfi4: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi6: +; KNL_32-NEXT: .Lcfi5: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -1891,8 +1887,6 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; KNL_32-NEXT: vmovapd %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .Lcfi7: -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16f64: @@ -1910,12 +1904,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; SKX_32-LABEL: test_gather_16f64: ; SKX_32: # BB#0: 
; SKX_32-NEXT: pushl %ebp -; SKX_32-NEXT: .Lcfi6: +; SKX_32-NEXT: .Lcfi4: ; SKX_32-NEXT: .cfi_def_cfa_offset 8 -; SKX_32-NEXT: .Lcfi7: +; SKX_32-NEXT: .Lcfi5: ; SKX_32-NEXT: .cfi_offset %ebp, -8 ; SKX_32-NEXT: movl %esp, %ebp -; SKX_32-NEXT: .Lcfi8: +; SKX_32-NEXT: .Lcfi6: ; SKX_32-NEXT: .cfi_def_cfa_register %ebp ; SKX_32-NEXT: andl $-64, %esp ; SKX_32-NEXT: subl $64, %esp @@ -1930,8 +1924,6 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; SKX_32-NEXT: vmovapd %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .Lcfi9: -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32 4, <16 x i1> %mask, <16 x double> %src0) ret <16 x double> %res @@ -1997,12 +1989,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; KNL_32-LABEL: test_scatter_16i64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi8: +; KNL_32-NEXT: .Lcfi6: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi9: +; KNL_32-NEXT: .Lcfi7: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi10: +; KNL_32-NEXT: .Lcfi8: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -2016,8 +2008,6 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; KNL_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .Lcfi11: -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -2035,12 +2025,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; SKX_32-LABEL: test_scatter_16i64: ; SKX_32: # BB#0: ; SKX_32-NEXT: pushl %ebp -; SKX_32-NEXT: .Lcfi10: +; SKX_32-NEXT: .Lcfi7: ; SKX_32-NEXT: .cfi_def_cfa_offset 8 -; SKX_32-NEXT: .Lcfi11: +; SKX_32-NEXT: .Lcfi8: ; SKX_32-NEXT: .cfi_offset %ebp, -8 ; SKX_32-NEXT: movl %esp, %ebp -; SKX_32-NEXT: .Lcfi12: +; SKX_32-NEXT: .Lcfi9: ; SKX_32-NEXT: .cfi_def_cfa_register %ebp ; SKX_32-NEXT: andl $-64, %esp ; SKX_32-NEXT: subl $64, %esp @@ -2054,8 +2044,6 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; SKX_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .Lcfi13: -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32 4, <16 x i1> %mask) @@ -2123,12 +2111,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; KNL_32-LABEL: test_scatter_16f64: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi12: +; KNL_32-NEXT: .Lcfi9: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi13: +; KNL_32-NEXT: .Lcfi10: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi14: +; KNL_32-NEXT: .Lcfi11: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-64, %esp ; KNL_32-NEXT: subl $64, %esp @@ -2142,8 +2130,6 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; KNL_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .Lcfi15: -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -2161,12 +2147,12 @@ define void @test_scatter_16f64(<16 x double*> 
%ptrs, <16 x i1> %mask, <16 x dou ; SKX_32-LABEL: test_scatter_16f64: ; SKX_32: # BB#0: ; SKX_32-NEXT: pushl %ebp -; SKX_32-NEXT: .Lcfi14: +; SKX_32-NEXT: .Lcfi10: ; SKX_32-NEXT: .cfi_def_cfa_offset 8 -; SKX_32-NEXT: .Lcfi15: +; SKX_32-NEXT: .Lcfi11: ; SKX_32-NEXT: .cfi_offset %ebp, -8 ; SKX_32-NEXT: movl %esp, %ebp -; SKX_32-NEXT: .Lcfi16: +; SKX_32-NEXT: .Lcfi12: ; SKX_32-NEXT: .cfi_def_cfa_register %ebp ; SKX_32-NEXT: andl $-64, %esp ; SKX_32-NEXT: subl $64, %esp @@ -2180,8 +2166,6 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; SKX_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .Lcfi17: -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x double*> %ptrs, i32 4, <16 x i1> %mask) @@ -2208,12 +2192,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; KNL_32-LABEL: test_pr28312: ; KNL_32: # BB#0: ; KNL_32-NEXT: pushl %ebp -; KNL_32-NEXT: .Lcfi16: +; KNL_32-NEXT: .Lcfi12: ; KNL_32-NEXT: .cfi_def_cfa_offset 8 -; KNL_32-NEXT: .Lcfi17: +; KNL_32-NEXT: .Lcfi13: ; KNL_32-NEXT: .cfi_offset %ebp, -8 ; KNL_32-NEXT: movl %esp, %ebp -; KNL_32-NEXT: .Lcfi18: +; KNL_32-NEXT: .Lcfi14: ; KNL_32-NEXT: .cfi_def_cfa_register %ebp ; KNL_32-NEXT: andl $-32, %esp ; KNL_32-NEXT: subl $32, %esp @@ -2231,8 +2215,6 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; KNL_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .Lcfi19: -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_pr28312: @@ -2247,12 +2229,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; SKX_32-LABEL: test_pr28312: ; SKX_32: # BB#0: ; SKX_32-NEXT: pushl %ebp -; SKX_32-NEXT: .Lcfi18: +; SKX_32-NEXT: .Lcfi13: ; SKX_32-NEXT: .cfi_def_cfa_offset 8 -; SKX_32-NEXT: .Lcfi19: +; SKX_32-NEXT: .Lcfi14: ; SKX_32-NEXT: .cfi_offset %ebp, -8 ; SKX_32-NEXT: movl %esp, %ebp -; SKX_32-NEXT: .Lcfi20: +; SKX_32-NEXT: .Lcfi15: ; SKX_32-NEXT: .cfi_def_cfa_register %ebp ; SKX_32-NEXT: andl $-32, %esp ; SKX_32-NEXT: subl $32, %esp @@ -2263,8 +2245,6 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; SKX_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .Lcfi21: -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %g1 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) %g2 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll index 6dd9f01356e..13258fd81de 100644 --- a/llvm/test/CodeGen/X86/memset-nonzero.ll +++ b/llvm/test/CodeGen/X86/memset-nonzero.ll @@ -149,8 +149,6 @@ define void @memset_256_nonzero_bytes(i8* %x) { ; SSE-NEXT: movl $256, %edx # imm = 0x100 ; SSE-NEXT: callq memset ; SSE-NEXT: popq %rax -; SSE-NEXT: .Lcfi1: -; SSE-NEXT: .cfi_def_cfa_offset 8 ; SSE-NEXT: retq ; ; SSE2FAST-LABEL: memset_256_nonzero_bytes: diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll index 314243e1dbe..1d5829407b7 100644 --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll +++ 
b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -76,11 +76,7 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .Lcfi4: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .Lcfi5: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12: @@ -381,9 +377,9 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-LABEL: merge_4i32_i32_23u5: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %esi -; X32-SSE1-NEXT: .Lcfi6: +; X32-SSE1-NEXT: .Lcfi4: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 -; X32-SSE1-NEXT: .Lcfi7: +; X32-SSE1-NEXT: .Lcfi5: ; X32-SSE1-NEXT: .cfi_offset %esi, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -394,8 +390,6 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .Lcfi8: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5: @@ -533,24 +527,24 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s ; X32-SSE1-LABEL: merge_8i16_i16_23u567u9: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %ebp -; X32-SSE1-NEXT: .Lcfi9: +; X32-SSE1-NEXT: .Lcfi6: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: pushl %ebx -; X32-SSE1-NEXT: .Lcfi10: +; X32-SSE1-NEXT: .Lcfi7: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 ; X32-SSE1-NEXT: pushl %edi -; X32-SSE1-NEXT: .Lcfi11: +; X32-SSE1-NEXT: .Lcfi8: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 16 ; X32-SSE1-NEXT: pushl %esi -; X32-SSE1-NEXT: .Lcfi12: +; X32-SSE1-NEXT: .Lcfi9: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 20 -; X32-SSE1-NEXT: .Lcfi13: +; X32-SSE1-NEXT: .Lcfi10: ; X32-SSE1-NEXT: .cfi_offset %esi, -20 -; X32-SSE1-NEXT: .Lcfi14: +; X32-SSE1-NEXT: .Lcfi11: ; X32-SSE1-NEXT: .cfi_offset %edi, -16 -; X32-SSE1-NEXT: .Lcfi15: +; X32-SSE1-NEXT: .Lcfi12: ; X32-SSE1-NEXT: .cfi_offset %ebx, -12 -; X32-SSE1-NEXT: .Lcfi16: +; X32-SSE1-NEXT: .Lcfi13: ; X32-SSE1-NEXT: .cfi_offset %ebp, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -567,17 +561,9 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s ; X32-SSE1-NEXT: movw %dx, (%eax) ; X32-SSE1-NEXT: movw %di, 6(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .Lcfi17: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 16 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .Lcfi18: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 ; X32-SSE1-NEXT: popl %ebx -; X32-SSE1-NEXT: .Lcfi19: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %ebp -; X32-SSE1-NEXT: .Lcfi20: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_8i16_i16_23u567u9: @@ -655,9 +641,9 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s ; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %esi -; X32-SSE1-NEXT: .Lcfi21: +; X32-SSE1-NEXT: .Lcfi14: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 -; X32-SSE1-NEXT: .Lcfi22: +; X32-SSE1-NEXT: .Lcfi15: ; X32-SSE1-NEXT: .cfi_offset %esi, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -672,8 +658,6 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable 
noinline s ; X32-SSE1-NEXT: movw $0, 10(%eax) ; X32-SSE1-NEXT: movw $0, 8(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .Lcfi23: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz: @@ -711,12 +695,12 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin ; X32-SSE1-LABEL: merge_16i8_i8_01u3456789ABCDuF: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %ebx -; X32-SSE1-NEXT: .Lcfi24: +; X32-SSE1-NEXT: .Lcfi16: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: subl $12, %esp -; X32-SSE1-NEXT: .Lcfi25: +; X32-SSE1-NEXT: .Lcfi17: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 20 -; X32-SSE1-NEXT: .Lcfi26: +; X32-SSE1-NEXT: .Lcfi18: ; X32-SSE1-NEXT: .cfi_offset %ebx, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -767,11 +751,7 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin ; X32-SSE1-NEXT: movb {{[0-9]+}}(%esp), %cl # 1-byte Reload ; X32-SSE1-NEXT: movb %cl, 3(%eax) ; X32-SSE1-NEXT: addl $12, %esp -; X32-SSE1-NEXT: .Lcfi27: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %ebx -; X32-SSE1-NEXT: .Lcfi28: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF: @@ -888,12 +868,12 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin ; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %ebx -; X32-SSE1-NEXT: .Lcfi29: +; X32-SSE1-NEXT: .Lcfi19: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: pushl %eax -; X32-SSE1-NEXT: .Lcfi30: +; X32-SSE1-NEXT: .Lcfi20: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 -; X32-SSE1-NEXT: .Lcfi31: +; X32-SSE1-NEXT: .Lcfi21: ; X32-SSE1-NEXT: .cfi_offset %ebx, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -915,11 +895,7 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin ; X32-SSE1-NEXT: movb $0, 14(%eax) ; X32-SSE1-NEXT: movb $0, 13(%eax) ; X32-SSE1-NEXT: addl $4, %esp -; X32-SSE1-NEXT: .Lcfi32: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %ebx -; X32-SSE1-NEXT: .Lcfi33: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz: @@ -1014,14 +990,14 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin ; X32-SSE1-LABEL: merge_2i64_i64_12_volatile: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: pushl %edi -; X32-SSE1-NEXT: .Lcfi34: +; X32-SSE1-NEXT: .Lcfi22: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: pushl %esi -; X32-SSE1-NEXT: .Lcfi35: +; X32-SSE1-NEXT: .Lcfi23: ; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 -; X32-SSE1-NEXT: .Lcfi36: +; X32-SSE1-NEXT: .Lcfi24: ; X32-SSE1-NEXT: .cfi_offset %esi, -12 -; X32-SSE1-NEXT: .Lcfi37: +; X32-SSE1-NEXT: .Lcfi25: ; X32-SSE1-NEXT: .cfi_offset %edi, -8 ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx @@ -1034,11 +1010,7 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .Lcfi38: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .Lcfi39: -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12_volatile: diff --git a/llvm/test/CodeGen/X86/movtopush.ll 
b/llvm/test/CodeGen/X86/movtopush.ll index eb5ce65311a..d715ccfa8c6 100644 --- a/llvm/test/CodeGen/X86/movtopush.ll +++ b/llvm/test/CodeGen/X86/movtopush.ll @@ -376,10 +376,8 @@ entry: ; LINUX: pushl $1 ; LINUX: .cfi_adjust_cfa_offset 4 ; LINUX: calll good -; LINUX: addl $16, %esp +; LINUX: addl $28, %esp ; LINUX: .cfi_adjust_cfa_offset -16 -; LINUX: addl $12, %esp -; LINUX: .cfi_def_cfa_offset 4 ; LINUX-NOT: add ; LINUX: retl define void @pr27140() optsize { diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll index 8400ca9b95a..65d80a699e2 100644 --- a/llvm/test/CodeGen/X86/mul-constant-result.ll +++ b/llvm/test/CodeGen/X86/mul-constant-result.ll @@ -33,148 +33,84 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: .LBB0_6: ; X86-NEXT: addl %eax, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi2: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_39: -; X86-NEXT: .Lcfi3: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: .LBB0_40: ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi4: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_7: -; X86-NEXT: .Lcfi5: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi6: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_8: -; X86-NEXT: .Lcfi7: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi8: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_9: -; X86-NEXT: .Lcfi9: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi10: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_10: -; X86-NEXT: .Lcfi11: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi12: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_11: -; X86-NEXT: .Lcfi13: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (,%eax,8), %ecx ; X86-NEXT: jmp .LBB0_12 ; X86-NEXT: .LBB0_13: ; X86-NEXT: shll $3, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi14: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_14: -; X86-NEXT: .Lcfi15: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi16: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_15: -; X86-NEXT: .Lcfi17: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi18: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_16: -; X86-NEXT: .Lcfi19: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi20: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_17: -; X86-NEXT: .Lcfi21: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi22: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_18: -; X86-NEXT: .Lcfi23: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi24: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_19: -; X86-NEXT: .Lcfi25: -; X86-NEXT: .cfi_def_cfa_offset 8 ; 
X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: jmp .LBB0_20 ; X86-NEXT: .LBB0_21: ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi26: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_22: -; X86-NEXT: .Lcfi27: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $4, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi28: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_23: -; X86-NEXT: .Lcfi29: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $4, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi30: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_24: -; X86-NEXT: .Lcfi31: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi32: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_25: -; X86-NEXT: .Lcfi33: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: shll $2, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -182,32 +118,20 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi34: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_27: -; X86-NEXT: .Lcfi35: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi36: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_28: -; X86-NEXT: .Lcfi37: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: .LBB0_20: ; X86-NEXT: leal (%eax,%ecx,4), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi38: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_29: -; X86-NEXT: .Lcfi39: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: shll $3, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -215,21 +139,13 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $3, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi40: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_31: -; X86-NEXT: .Lcfi41: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi42: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_32: -; X86-NEXT: .Lcfi43: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: jmp .LBB0_12 @@ -237,33 +153,21 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi44: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_34: -; X86-NEXT: .Lcfi45: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi46: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_35: -; X86-NEXT: .Lcfi47: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %eax, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi48: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; 
X86-NEXT: .LBB0_36: -; X86-NEXT: .Lcfi49: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $5, %ecx ; X86-NEXT: subl %eax, %ecx @@ -275,16 +179,10 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: subl %eax, %ecx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi50: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_38: -; X86-NEXT: .Lcfi51: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $5, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi52: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: mult: @@ -627,431 +525,431 @@ define i32 @foo() local_unnamed_addr #0 { ; X86-LABEL: foo: ; X86: # BB#0: ; X86-NEXT: pushl %ebx -; X86-NEXT: .Lcfi53: +; X86-NEXT: .Lcfi2: ; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: pushl %edi -; X86-NEXT: .Lcfi54: +; X86-NEXT: .Lcfi3: ; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: pushl %esi -; X86-NEXT: .Lcfi55: +; X86-NEXT: .Lcfi4: ; X86-NEXT: .cfi_def_cfa_offset 16 -; X86-NEXT: .Lcfi56: +; X86-NEXT: .Lcfi5: ; X86-NEXT: .cfi_offset %esi, -16 -; X86-NEXT: .Lcfi57: +; X86-NEXT: .Lcfi6: ; X86-NEXT: .cfi_offset %edi, -12 -; X86-NEXT: .Lcfi58: +; X86-NEXT: .Lcfi7: ; X86-NEXT: .cfi_offset %ebx, -8 ; X86-NEXT: pushl $0 -; X86-NEXT: .Lcfi59: +; X86-NEXT: .Lcfi8: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $1 -; X86-NEXT: .Lcfi60: +; X86-NEXT: .Lcfi9: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi61: +; X86-NEXT: .Lcfi10: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %esi ; X86-NEXT: xorl $1, %esi ; X86-NEXT: pushl $1 -; X86-NEXT: .Lcfi62: +; X86-NEXT: .Lcfi11: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $2 -; X86-NEXT: .Lcfi63: +; X86-NEXT: .Lcfi12: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi64: +; X86-NEXT: .Lcfi13: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $2, %edi ; X86-NEXT: pushl $1 -; X86-NEXT: .Lcfi65: +; X86-NEXT: .Lcfi14: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $3 -; X86-NEXT: .Lcfi66: +; X86-NEXT: .Lcfi15: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi67: +; X86-NEXT: .Lcfi16: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $3, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $2 -; X86-NEXT: .Lcfi68: +; X86-NEXT: .Lcfi17: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $4 -; X86-NEXT: .Lcfi69: +; X86-NEXT: .Lcfi18: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi70: +; X86-NEXT: .Lcfi19: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $4, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $2 -; X86-NEXT: .Lcfi71: +; X86-NEXT: .Lcfi20: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $5 -; X86-NEXT: .Lcfi72: +; X86-NEXT: .Lcfi21: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi73: +; X86-NEXT: .Lcfi22: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $5, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $3 -; X86-NEXT: .Lcfi74: +; X86-NEXT: .Lcfi23: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $6 -; X86-NEXT: .Lcfi75: +; X86-NEXT: .Lcfi24: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi76: +; X86-NEXT: 
.Lcfi25: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $6, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $3 -; X86-NEXT: .Lcfi77: +; X86-NEXT: .Lcfi26: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $7 -; X86-NEXT: .Lcfi78: +; X86-NEXT: .Lcfi27: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi79: +; X86-NEXT: .Lcfi28: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $7, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $4 -; X86-NEXT: .Lcfi80: +; X86-NEXT: .Lcfi29: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $8 -; X86-NEXT: .Lcfi81: +; X86-NEXT: .Lcfi30: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi82: +; X86-NEXT: .Lcfi31: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $8, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $4 -; X86-NEXT: .Lcfi83: +; X86-NEXT: .Lcfi32: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $9 -; X86-NEXT: .Lcfi84: +; X86-NEXT: .Lcfi33: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi85: +; X86-NEXT: .Lcfi34: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $9, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $5 -; X86-NEXT: .Lcfi86: +; X86-NEXT: .Lcfi35: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $10 -; X86-NEXT: .Lcfi87: +; X86-NEXT: .Lcfi36: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi88: +; X86-NEXT: .Lcfi37: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $10, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $5 -; X86-NEXT: .Lcfi89: +; X86-NEXT: .Lcfi38: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $11 -; X86-NEXT: .Lcfi90: +; X86-NEXT: .Lcfi39: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi91: +; X86-NEXT: .Lcfi40: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $11, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $6 -; X86-NEXT: .Lcfi92: +; X86-NEXT: .Lcfi41: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $12 -; X86-NEXT: .Lcfi93: +; X86-NEXT: .Lcfi42: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi94: +; X86-NEXT: .Lcfi43: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $12, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $6 -; X86-NEXT: .Lcfi95: +; X86-NEXT: .Lcfi44: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $13 -; X86-NEXT: .Lcfi96: +; X86-NEXT: .Lcfi45: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi97: +; X86-NEXT: .Lcfi46: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $13, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $7 -; X86-NEXT: .Lcfi98: +; X86-NEXT: .Lcfi47: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $14 -; X86-NEXT: .Lcfi99: +; X86-NEXT: .Lcfi48: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi100: +; X86-NEXT: .Lcfi49: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $14, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $7 -; X86-NEXT: .Lcfi101: +; X86-NEXT: .Lcfi50: ; X86-NEXT: 
.cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $15 -; X86-NEXT: .Lcfi102: +; X86-NEXT: .Lcfi51: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi103: +; X86-NEXT: .Lcfi52: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $15, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $8 -; X86-NEXT: .Lcfi104: +; X86-NEXT: .Lcfi53: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $16 -; X86-NEXT: .Lcfi105: +; X86-NEXT: .Lcfi54: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi106: +; X86-NEXT: .Lcfi55: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $16, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $8 -; X86-NEXT: .Lcfi107: +; X86-NEXT: .Lcfi56: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $17 -; X86-NEXT: .Lcfi108: +; X86-NEXT: .Lcfi57: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi109: +; X86-NEXT: .Lcfi58: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $17, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $9 -; X86-NEXT: .Lcfi110: +; X86-NEXT: .Lcfi59: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $18 -; X86-NEXT: .Lcfi111: +; X86-NEXT: .Lcfi60: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi112: +; X86-NEXT: .Lcfi61: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $18, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $9 -; X86-NEXT: .Lcfi113: +; X86-NEXT: .Lcfi62: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $19 -; X86-NEXT: .Lcfi114: +; X86-NEXT: .Lcfi63: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi115: +; X86-NEXT: .Lcfi64: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $19, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $10 -; X86-NEXT: .Lcfi116: +; X86-NEXT: .Lcfi65: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $20 -; X86-NEXT: .Lcfi117: +; X86-NEXT: .Lcfi66: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi118: +; X86-NEXT: .Lcfi67: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $20, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $10 -; X86-NEXT: .Lcfi119: +; X86-NEXT: .Lcfi68: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $21 -; X86-NEXT: .Lcfi120: +; X86-NEXT: .Lcfi69: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi121: +; X86-NEXT: .Lcfi70: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $21, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $11 -; X86-NEXT: .Lcfi122: +; X86-NEXT: .Lcfi71: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $22 -; X86-NEXT: .Lcfi123: +; X86-NEXT: .Lcfi72: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi124: +; X86-NEXT: .Lcfi73: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $22, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $11 -; X86-NEXT: .Lcfi125: +; X86-NEXT: .Lcfi74: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $23 -; X86-NEXT: .Lcfi126: +; X86-NEXT: .Lcfi75: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; 
X86-NEXT: .Lcfi127: +; X86-NEXT: .Lcfi76: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $23, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $12 -; X86-NEXT: .Lcfi128: +; X86-NEXT: .Lcfi77: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $24 -; X86-NEXT: .Lcfi129: +; X86-NEXT: .Lcfi78: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi130: +; X86-NEXT: .Lcfi79: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $24, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $12 -; X86-NEXT: .Lcfi131: +; X86-NEXT: .Lcfi80: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $25 -; X86-NEXT: .Lcfi132: +; X86-NEXT: .Lcfi81: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi133: +; X86-NEXT: .Lcfi82: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $25, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $13 -; X86-NEXT: .Lcfi134: +; X86-NEXT: .Lcfi83: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $26 -; X86-NEXT: .Lcfi135: +; X86-NEXT: .Lcfi84: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi136: +; X86-NEXT: .Lcfi85: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $26, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $13 -; X86-NEXT: .Lcfi137: +; X86-NEXT: .Lcfi86: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $27 -; X86-NEXT: .Lcfi138: +; X86-NEXT: .Lcfi87: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi139: +; X86-NEXT: .Lcfi88: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $27, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $14 -; X86-NEXT: .Lcfi140: +; X86-NEXT: .Lcfi89: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $28 -; X86-NEXT: .Lcfi141: +; X86-NEXT: .Lcfi90: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi142: +; X86-NEXT: .Lcfi91: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $28, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $14 -; X86-NEXT: .Lcfi143: +; X86-NEXT: .Lcfi92: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $29 -; X86-NEXT: .Lcfi144: +; X86-NEXT: .Lcfi93: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi145: +; X86-NEXT: .Lcfi94: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $29, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: pushl $15 -; X86-NEXT: .Lcfi146: +; X86-NEXT: .Lcfi95: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $30 -; X86-NEXT: .Lcfi147: +; X86-NEXT: .Lcfi96: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi148: +; X86-NEXT: .Lcfi97: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %edi ; X86-NEXT: xorl $30, %edi ; X86-NEXT: orl %ebx, %edi ; X86-NEXT: pushl $15 -; X86-NEXT: .Lcfi149: +; X86-NEXT: .Lcfi98: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $31 -; X86-NEXT: .Lcfi150: +; X86-NEXT: .Lcfi99: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi151: +; X86-NEXT: .Lcfi100: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: movl %eax, %ebx ; X86-NEXT: xorl $31, %ebx ; X86-NEXT: orl %edi, %ebx ; X86-NEXT: orl 
%esi, %ebx ; X86-NEXT: pushl $16 -; X86-NEXT: .Lcfi152: +; X86-NEXT: .Lcfi101: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: pushl $32 -; X86-NEXT: .Lcfi153: +; X86-NEXT: .Lcfi102: ; X86-NEXT: .cfi_adjust_cfa_offset 4 ; X86-NEXT: calll mult ; X86-NEXT: addl $8, %esp -; X86-NEXT: .Lcfi154: +; X86-NEXT: .Lcfi103: ; X86-NEXT: .cfi_adjust_cfa_offset -8 ; X86-NEXT: xorl $32, %eax ; X86-NEXT: orl %ebx, %eax @@ -1061,14 +959,8 @@ define i32 @foo() local_unnamed_addr #0 { ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: .LBB1_2: ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi155: -; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %edi -; X86-NEXT: .Lcfi156: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebx -; X86-NEXT: .Lcfi157: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: foo: @@ -1292,20 +1184,10 @@ define i32 @foo() local_unnamed_addr #0 { ; X64-HSW-NEXT: movl $-1, %eax ; X64-HSW-NEXT: cmovel %r12d, %eax ; X64-HSW-NEXT: popq %rbx -; X64-HSW-NEXT: .Lcfi10: -; X64-HSW-NEXT: .cfi_def_cfa_offset 40 ; X64-HSW-NEXT: popq %r12 -; X64-HSW-NEXT: .Lcfi11: -; X64-HSW-NEXT: .cfi_def_cfa_offset 32 ; X64-HSW-NEXT: popq %r14 -; X64-HSW-NEXT: .Lcfi12: -; X64-HSW-NEXT: .cfi_def_cfa_offset 24 ; X64-HSW-NEXT: popq %r15 -; X64-HSW-NEXT: .Lcfi13: -; X64-HSW-NEXT: .cfi_def_cfa_offset 16 ; X64-HSW-NEXT: popq %rbp -; X64-HSW-NEXT: .Lcfi14: -; X64-HSW-NEXT: .cfi_def_cfa_offset 8 ; X64-HSW-NEXT: retq %1 = tail call i32 @mult(i32 1, i32 0) %2 = icmp ne i32 %1, 1 diff --git a/llvm/test/CodeGen/X86/mul-i256.ll b/llvm/test/CodeGen/X86/mul-i256.ll index 08a843b63b0..acd86e94989 100644 --- a/llvm/test/CodeGen/X86/mul-i256.ll +++ b/llvm/test/CodeGen/X86/mul-i256.ll @@ -193,8 +193,6 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X32-NEXT: popl %edi ; X32-NEXT: popl %ebx ; X32-NEXT: popl %ebp -; X32-NEXT: .Lcfi6: -; X32-NEXT: .cfi_def_cfa %esp, 4 ; X32-NEXT: retl ; ; X64-LABEL: test: @@ -269,14 +267,8 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X64-NEXT: movq %rax, 16(%r9) ; X64-NEXT: movq %rdx, 24(%r9) ; X64-NEXT: popq %rbx -; X64-NEXT: .Lcfi6: -; X64-NEXT: .cfi_def_cfa_offset 24 ; X64-NEXT: popq %r14 -; X64-NEXT: .Lcfi7: -; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: popq %r15 -; X64-NEXT: .Lcfi8: -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %av = load i256, i256* %a diff --git a/llvm/test/CodeGen/X86/pr21792.ll b/llvm/test/CodeGen/X86/pr21792.ll index 5e7ae28d161..84b7467e6a1 100644 --- a/llvm/test/CodeGen/X86/pr21792.ll +++ b/llvm/test/CodeGen/X86/pr21792.ll @@ -29,8 +29,6 @@ define void @func(<4 x float> %vx) { ; CHECK-NEXT: leaq stuff+8(%r9), %r9 ; CHECK-NEXT: callq toto ; CHECK-NEXT: popq %rax -; CHECK-NEXT: .Lcfi1: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %tmp2 = bitcast <4 x float> %vx to <2 x i64> diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll index 52a8a25d735..8c970b3d477 100644 --- a/llvm/test/CodeGen/X86/pr29112.ll +++ b/llvm/test/CodeGen/X86/pr29112.ll @@ -66,8 +66,6 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vaddps {{[0-9]+}}(%rsp), %xmm1, %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: addq $88, %rsp -; CHECK-NEXT: .Lcfi1: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> <i32 4, i32 20, i32 1, i32 17> diff --git a/llvm/test/CodeGen/X86/pr30430.ll b/llvm/test/CodeGen/X86/pr30430.ll index 9f93294811c..14d81f14fc3 100644 --- 
a/llvm/test/CodeGen/X86/pr30430.ll +++ b/llvm/test/CodeGen/X86/pr30430.ll @@ -111,8 +111,6 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float ; CHECK-NEXT: vmovss %xmm14, (%rsp) # 4-byte Spill ; CHECK-NEXT: movq %rbp, %rsp ; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .Lcfi3: -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: retq entry: %__A.addr.i = alloca float, align 4 diff --git a/llvm/test/CodeGen/X86/pr32241.ll b/llvm/test/CodeGen/X86/pr32241.ll index d16c7761dbf..e1f726f0c62 100644 --- a/llvm/test/CodeGen/X86/pr32241.ll +++ b/llvm/test/CodeGen/X86/pr32241.ll @@ -54,11 +54,7 @@ define i32 @_Z3foov() { ; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: addl $24, %esp -; CHECK-NEXT: .Lcfi3: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .Lcfi4: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %aa = alloca i16, align 2 diff --git a/llvm/test/CodeGen/X86/pr32256.ll b/llvm/test/CodeGen/X86/pr32256.ll index 6c7deb991e3..e29b56236e2 100644 --- a/llvm/test/CodeGen/X86/pr32256.ll +++ b/llvm/test/CodeGen/X86/pr32256.ll @@ -28,8 +28,6 @@ define void @_Z1av() { ; CHECK-NEXT: andb $1, %al ; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) ; CHECK-NEXT: addl $2, %esp -; CHECK-NEXT: .Lcfi1: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %b = alloca i8, align 1 diff --git a/llvm/test/CodeGen/X86/pr32329.ll b/llvm/test/CodeGen/X86/pr32329.ll index 1e2c48f6fc7..f2b79b67877 100644 --- a/llvm/test/CodeGen/X86/pr32329.ll +++ b/llvm/test/CodeGen/X86/pr32329.ll @@ -64,17 +64,9 @@ define void @foo() local_unnamed_addr { ; X86-NEXT: imull %eax, %ebx ; X86-NEXT: movb %bl, var_218 ; X86-NEXT: popl %esi -; X86-NEXT: .Lcfi8: -; X86-NEXT: .cfi_def_cfa_offset 16 ; X86-NEXT: popl %edi -; X86-NEXT: .Lcfi9: -; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %ebx -; X86-NEXT: .Lcfi10: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebp -; X86-NEXT: .Lcfi11: -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: foo: diff --git a/llvm/test/CodeGen/X86/pr32345.ll b/llvm/test/CodeGen/X86/pr32345.ll index f2b37a806f5..e9182698dd9 100644 --- a/llvm/test/CodeGen/X86/pr32345.ll +++ b/llvm/test/CodeGen/X86/pr32345.ll @@ -90,8 +90,6 @@ define void @foo() { ; 6860-NEXT: popl %edi ; 6860-NEXT: popl %ebx ; 6860-NEXT: popl %ebp -; 6860-NEXT: .Lcfi6: -; 6860-NEXT: .cfi_def_cfa %esp, 4 ; 6860-NEXT: retl ; ; X64-LABEL: foo: @@ -138,8 +136,6 @@ define void @foo() { ; 686-NEXT: movb %dl, (%eax) ; 686-NEXT: movl %ebp, %esp ; 686-NEXT: popl %ebp -; 686-NEXT: .Lcfi3: -; 686-NEXT: .cfi_def_cfa %esp, 4 ; 686-NEXT: retl bb: %tmp = alloca i64, align 8 diff --git a/llvm/test/CodeGen/X86/pr32451.ll b/llvm/test/CodeGen/X86/pr32451.ll index 22a045f2939..e4643a863f9 100644 --- a/llvm/test/CodeGen/X86/pr32451.ll +++ b/llvm/test/CodeGen/X86/pr32451.ll @@ -33,11 +33,7 @@ define i8** @japi1_convert_690(i8**, i8***, i32) { ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; CHECK-NEXT: movl %eax, (%ecx) ; CHECK-NEXT: addl $16, %esp -; CHECK-NEXT: .Lcfi3: -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebx -; CHECK-NEXT: .Lcfi4: -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl top: %3 = alloca i8*** diff --git a/llvm/test/CodeGen/X86/pr9743.ll b/llvm/test/CodeGen/X86/pr9743.ll index 6ab33193946..6597c235330 100644 --- a/llvm/test/CodeGen/X86/pr9743.ll +++ b/llvm/test/CodeGen/X86/pr9743.ll @@ -14,6 +14,4 @@ define void @f() { ; CHECK-NEXT: : ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; 
CHECK-NEXT: popq %rbp -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/X86/push-cfi-debug.ll b/llvm/test/CodeGen/X86/push-cfi-debug.ll index 01fa12e87d0..7f438e306e4 100644 --- a/llvm/test/CodeGen/X86/push-cfi-debug.ll +++ b/llvm/test/CodeGen/X86/push-cfi-debug.ll @@ -23,10 +23,8 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0 ; CHECK: .cfi_adjust_cfa_offset 4 ; CHECK: calll stdfoo ; CHECK: .cfi_adjust_cfa_offset -8 -; CHECK: addl $8, %esp +; CHECK: addl $20, %esp ; CHECK: .cfi_adjust_cfa_offset -8 -; CHECK: addl $12, %esp -; CHECK: .cfi_def_cfa_offset 4 define void @test1() #0 !dbg !4 { entry: tail call void @foo(i32 1, i32 2) #1, !dbg !10 diff --git a/llvm/test/CodeGen/X86/push-cfi-obj.ll b/llvm/test/CodeGen/X86/push-cfi-obj.ll index 2c9ec334027..33291ec3318 100644 --- a/llvm/test/CodeGen/X86/push-cfi-obj.ll +++ b/llvm/test/CodeGen/X86/push-cfi-obj.ll @@ -12,7 +12,7 @@ ; LINUX-NEXT: ] ; LINUX-NEXT: Address: 0x0 ; LINUX-NEXT: Offset: 0x68 -; LINUX-NEXT: Size: 72 +; LINUX-NEXT: Size: 64 ; LINUX-NEXT: Link: 0 ; LINUX-NEXT: Info: 0 ; LINUX-NEXT: AddressAlignment: 4 @@ -22,9 +22,8 @@ ; LINUX-NEXT: SectionData ( ; LINUX-NEXT: 0000: 1C000000 00000000 017A504C 5200017C |.........zPLR..|| ; LINUX-NEXT: 0010: 08070000 00000000 1B0C0404 88010000 |................| -; LINUX-NEXT: 0020: 24000000 24000000 00000000 1D000000 |$...$...........| +; LINUX-NEXT: 0020: 1C000000 24000000 00000000 1D000000 |....$...........| ; LINUX-NEXT: 0030: 04000000 00410E08 8502420D 05432E10 |.....A....B..C..| -; LINUX-NEXT: 0040: 540C0404 410C0508 |T...A...| ; LINUX-NEXT: ) declare i32 @__gxx_personality_v0(...) @@ -36,7 +35,7 @@ entry: to label %continue unwind label %cleanup continue: ret void -cleanup: +cleanup: landingpad { i8*, i32 } cleanup ret void diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll index 4497ec5ebd4..5428f12ad1c 100644 --- a/llvm/test/CodeGen/X86/push-cfi.ll +++ b/llvm/test/CodeGen/X86/push-cfi.ll @@ -82,9 +82,8 @@ cleanup: ; LINUX-NEXT: Lcfi{{[0-9]+}}: ; LINUX-NEXT: .cfi_adjust_cfa_offset 4 ; LINUX-NEXT: call -; LINUX-NEXT: addl $16, %esp +; LINUX-NEXT: addl $28, %esp ; LINUX: .cfi_adjust_cfa_offset -16 -; LINUX: addl $12, %esp ; DARWIN-NOT: .cfi_escape ; DARWIN-NOT: pushl define void @test2_nofp() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { diff --git a/llvm/test/CodeGen/X86/return-ext.ll b/llvm/test/CodeGen/X86/return-ext.ll index d2bbebaf8a0..ef160f43b4a 100644 --- a/llvm/test/CodeGen/X86/return-ext.ll +++ b/llvm/test/CodeGen/X86/return-ext.ll @@ -106,8 +106,6 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -122,8 +120,6 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -138,7 +134,5 @@ entry: ; CHECK: call ; CHECK-NEXT: movzwl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: : -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } diff --git a/llvm/test/CodeGen/X86/rtm.ll b/llvm/test/CodeGen/X86/rtm.ll index 37825c2dc58..a8562677c7b 100644 --- a/llvm/test/CodeGen/X86/rtm.ll +++ b/llvm/test/CodeGen/X86/rtm.ll @@ -76,8 +76,6 @@ define void @f2(i32 %x) nounwind uwtable { ; X64-NEXT: xabort $1 ; X64-NEXT: callq f1 ; X64-NEXT: popq %rax -; X64-NEXT: .Lcfi1: -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %x.addr = alloca i32, align 4 diff --git 
a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index 45f96b7e794..2628f824ea4 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -92,8 +92,6 @@ define void @pr26232(i64 %a, <16 x i1> %b) {
 ; KNL-32-NEXT: jne .LBB1_1
 ; KNL-32-NEXT: # BB#2: # %for_exit600
 ; KNL-32-NEXT: popl %esi
-; KNL-32-NEXT: .Lcfi2:
-; KNL-32-NEXT: .cfi_def_cfa_offset 4
 ; KNL-32-NEXT: retl
 allocas:
 br label %for_test11.preheader
diff --git a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
index 3d011693a49..bd2dd53b654 100644
--- a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll
@@ -83,8 +83,6 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
 ; CHECK: callq return_i1
 ; CHECK-NEXT: .Ltmp5:
 ; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: .Lcfi11:
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 entry:
 %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %a)
diff --git a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
index c019e98eb65..b88ca03805f 100644
--- a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
+++ b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll
@@ -69,8 +69,6 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
 ; CHECK: callq return_i1
 ; CHECK-NEXT: .Ltmp4:
 ; CHECK-NEXT: popq %rcx
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 entry:
 %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 1, i32 0, i32 0, i32 addrspace(1)* %a)
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke.ll b/llvm/test/CodeGen/X86/statepoint-invoke.ll
index fc0bf5b4e3c..29f8e3ed4f7 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke.ll
+++ b/llvm/test/CodeGen/X86/statepoint-invoke.ll
@@ -142,8 +142,6 @@ normal_return:
 ; CHECK-LABEL: %normal_return
 ; CHECK: xorl %eax, %eax
 ; CHECK-NEXT: popq
- ; CHECK-NEXT: :
- ; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 %null.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 13, i32 13)
 %undef.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 14, i32 14)
@@ -171,8 +169,6 @@ entry:
 normal_return:
 ; CHECK: leaq
 ; CHECK-NEXT: popq
- ; CHECK-NEXT: :
- ; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 %aa.rel = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %sp, i32 13, i32 13)
 %aa.converted = bitcast i32 addrspace(1)* %aa.rel to i64 addrspace(1)*
@@ -181,8 +177,6 @@ normal_return:
 exceptional_return:
 ; CHECK: movl $15
 ; CHECK-NEXT: popq
- ; CHECK-NEXT: :
- ; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 %landing_pad = landingpad token
 cleanup
diff --git a/llvm/test/CodeGen/X86/throws-cfi-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-fp.ll
deleted file mode 100644
index 35e8a68239c..00000000000
--- a/llvm/test/CodeGen/X86/throws-cfi-fp.ll
+++ /dev/null
@@ -1,103 +0,0 @@
-; RUN: llc %s -o - | FileCheck %s
-
-; ModuleID = 'throws-cfi-fp.cpp'
-source_filename = "throws-cfi-fp.cpp"
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-$__clang_call_terminate = comdat any
-
-@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1
-@_ZTIi = external constant i8*
-@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00"
-
-; Function Attrs: uwtable
-define void @_Z6throwsv() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
-
-; CHECK-LABEL: _Z6throwsv:
-; CHECK: popq %rbp
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa %rsp, 8
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB0_1:
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa %rbp, 16
-
-entry:
- %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1
- br i1 %.b5, label %if.then, label %try.cont
-
-if.then: ; preds = %entry
- %exception = tail call i8* @__cxa_allocate_exception(i64 4)
- %0 = bitcast i8* %exception to i32*
- store i32 1, i32* %0, align 16
- invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
- to label %unreachable unwind label %lpad
-
-lpad: ; preds = %if.then
- %1 = landingpad { i8*, i32 }
- catch i8* null
- %2 = extractvalue { i8*, i32 } %1, 0
- %3 = tail call i8* @__cxa_begin_catch(i8* %2)
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0))
- invoke void @__cxa_rethrow() #4
- to label %unreachable unwind label %lpad1
-
-lpad1: ; preds = %lpad
- %4 = landingpad { i8*, i32 }
- cleanup
- invoke void @__cxa_end_catch()
- to label %eh.resume unwind label %terminate.lpad
-
-try.cont: ; preds = %entry
- ret void
-
-eh.resume: ; preds = %lpad1
- resume { i8*, i32 } %4
-
-terminate.lpad: ; preds = %lpad1
- %5 = landingpad { i8*, i32 }
- catch i8* null
- %6 = extractvalue { i8*, i32 } %5, 0
- tail call void @__clang_call_terminate(i8* %6) #5
- unreachable
-
-unreachable: ; preds = %lpad, %if.then
- unreachable
-}
-
-declare i8* @__cxa_allocate_exception(i64)
-
-declare void @__cxa_throw(i8*, i8*, i8*)
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @__cxa_rethrow()
-
-declare void @__cxa_end_catch()
-
-; Function Attrs: noinline noreturn nounwind
-declare void @__clang_call_terminate(i8*)
-
-declare void @_ZSt9terminatev()
-
-; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly) #3
-
-attributes #0 = { "no-frame-pointer-elim"="true" }
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!7, !8, !9}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
-!1 = !DIFile(filename: "throws-cfi-fp.cpp", directory: "epilogue-dwarf/test")
-!2 = !{}
-!3 = !{!4}
-!4 = !DIGlobalVariableExpression(var: !5)
-!5 = distinct !DIGlobalVariable(name: "ShouldThrow", linkageName: "_ZL11ShouldThrow", scope: !0, file: !1, line: 2, type: !6, isLocal: true, isDefinition: true)
-!6 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
-!7 = !{i32 2, !"Dwarf Version", i32 4}
-!8 = !{i32 2, !"Debug Info Version", i32 3}
-!9 = !{i32 1, !"wchar_size", i32 4}
diff --git a/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll
deleted file mode 100644
index aac2a464b31..00000000000
--- a/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll
+++ /dev/null
@@ -1,102 +0,0 @@
-; RUN: llc %s -o - | FileCheck %s
-
-; ModuleID = 'throws-cfi-no-fp.cpp'
-source_filename = "throws-cfi-no-fp.cpp"
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-$__clang_call_terminate = comdat any
-
-@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1
-@_ZTIi = external constant i8*
-@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00"
-
-; Function Attrs: uwtable
-define void @_Z6throwsv() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
-
-; CHECK-LABEL: _Z6throwsv:
-; CHECK: popq %rbx
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa_offset 8
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB0_1:
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-
-entry:
- %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1
- br i1 %.b5, label %if.then, label %try.cont
-
-if.then: ; preds = %entry
- %exception = tail call i8* @__cxa_allocate_exception(i64 4)
- %0 = bitcast i8* %exception to i32*
- store i32 1, i32* %0, align 16
- invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null)
- to label %unreachable unwind label %lpad
-
-lpad: ; preds = %if.then
- %1 = landingpad { i8*, i32 }
- catch i8* null
- %2 = extractvalue { i8*, i32 } %1, 0
- %3 = tail call i8* @__cxa_begin_catch(i8* %2)
- %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0))
- invoke void @__cxa_rethrow() #4
- to label %unreachable unwind label %lpad1
-
-lpad1: ; preds = %lpad
- %4 = landingpad { i8*, i32 }
- cleanup
- invoke void @__cxa_end_catch()
- to label %eh.resume unwind label %terminate.lpad
-
-try.cont: ; preds = %entry
- ret void
-
-eh.resume: ; preds = %lpad1
- resume { i8*, i32 } %4
-
-terminate.lpad: ; preds = %lpad1
- %5 = landingpad { i8*, i32 }
- catch i8* null
- %6 = extractvalue { i8*, i32 } %5, 0
- tail call void @__clang_call_terminate(i8* %6)
- unreachable
-
-unreachable: ; preds = %lpad, %if.then
- unreachable
-}
-
-declare i8* @__cxa_allocate_exception(i64)
-
-declare void @__cxa_throw(i8*, i8*, i8*)
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @__cxa_rethrow()
-
-declare void @__cxa_end_catch()
-
-; Function Attrs: noinline noreturn nounwind
-declare void @__clang_call_terminate(i8*)
-
-declare void @_ZSt9terminatev()
-
-
-; Function Attrs: nounwind
-declare i32 @puts(i8* nocapture readonly)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!7, !8, !9}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, globals: !3)
-!1 = !DIFile(filename: "throws-cfi-no-fp.cpp", directory: "epilogue-dwarf/test")
-!2 = !{}
-!3 = !{!4}
-!4 = !DIGlobalVariableExpression(var: !5)
-!5 = distinct !DIGlobalVariable(name: "ShouldThrow", linkageName: "_ZL11ShouldThrow", scope: !0, file: !1, line: 2, type: !6, isLocal: true, isDefinition: true)
-!6 = !DIBasicType(name: "bool", size: 8, encoding: DW_ATE_boolean)
-!7 = !{i32 2, !"Dwarf Version", i32 4}
-!8 = !{i32 2, !"Debug Info Version", i32 3}
-!9 = !{i32 1, !"wchar_size", i32 4}
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 3ffd27bbf19..392c0de95f2 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -3345,23 +3345,11 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: .Lcfi12:
-; AVX1-NEXT: .cfi_def_cfa_offset 48
 ; AVX1-NEXT: popq %r12
-; AVX1-NEXT: .Lcfi13:
-; AVX1-NEXT: .cfi_def_cfa_offset 40
 ; AVX1-NEXT: popq %r13
-; AVX1-NEXT: .Lcfi14:
-; AVX1-NEXT: .cfi_def_cfa_offset 32
 ; AVX1-NEXT: popq %r14
-; AVX1-NEXT: .Lcfi15:
-; AVX1-NEXT: .cfi_def_cfa_offset 24
 ; AVX1-NEXT: popq %r15
-; AVX1-NEXT: .Lcfi16:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
 ; AVX1-NEXT: popq %rbp
-; AVX1-NEXT: .Lcfi17:
-; AVX1-NEXT: .cfi_def_cfa_offset 8
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: load_sext_16i1_to_16i16:
@@ -3460,23 +3448,11 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: .Lcfi12:
-; AVX2-NEXT: .cfi_def_cfa_offset 48
 ; AVX2-NEXT: popq %r12
-; AVX2-NEXT: .Lcfi13:
-; AVX2-NEXT: .cfi_def_cfa_offset 40
 ; AVX2-NEXT: popq %r13
-; AVX2-NEXT: .Lcfi14:
-; AVX2-NEXT: .cfi_def_cfa_offset 32
 ; AVX2-NEXT: popq %r14
-; AVX2-NEXT: .Lcfi15:
-; AVX2-NEXT: .cfi_def_cfa_offset 24
 ; AVX2-NEXT: popq %r15
-; AVX2-NEXT: .Lcfi16:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
 ; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: .Lcfi17:
-; AVX2-NEXT: .cfi_def_cfa_offset 8
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: load_sext_16i1_to_16i16:
@@ -4873,8 +4849,6 @@ define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp {
 ; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
 ; X32-SSE41-NEXT: movd %xmm0, %eax
 ; X32-SSE41-NEXT: popl %ecx
-; X32-SSE41-NEXT: .Lcfi1:
-; X32-SSE41-NEXT: .cfi_def_cfa_offset 4
 ; X32-SSE41-NEXT: retl
 entry:
 %Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index fc414df3555..706edd27a3f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -622,8 +622,6 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){
 ; KNL32-NEXT: vpblendvb %ymm3, 8(%ebp), %ymm1, %ymm1
 ; KNL32-NEXT: movl %ebp, %esp
 ; KNL32-NEXT: popl %ebp
-; KNL32-NEXT: .Lcfi3:
-; KNL32-NEXT: .cfi_def_cfa %esp, 4
 ; KNL32-NEXT: retl
 entry:
 %0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32> <i32 64, i32 1, i32 66, i32 3, i32 68, i32 5, i32 70, i32 7, i32 72, i32 9, i32 74, i32 11, i32 76, i32 13, i32 78, i32 15, i32 80, i32 17, i32 82, i32 19, i32 84, i32 21, i32 86, i32 23, i32 88, i32 25, i32 90, i32 27, i32 92, i32 29, i32 94, i32 31, i32 96, i32 33, i32 98, i32 35, i32 100, i32 37, i32 102, i32 39, i32 104, i32 41, i32 106, i32 43, i32 108, i32 45, i32 110, i32 47, i32 112, i32 49, i32 114, i32 51, i32 116, i32 53, i32 118, i32 55, i32 120, i32 57, i32 122, i32 59, i32 124, i32 61, i32 126, i32 63>
@@ -654,12 +652,12 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
 ; KNL32-LABEL: test_mm512_mask_blend_epi16:
 ; KNL32: # BB#0: # %entry
 ; KNL32-NEXT: pushl %ebp
-; KNL32-NEXT: .Lcfi4:
+; KNL32-NEXT: .Lcfi3:
 ; KNL32-NEXT: .cfi_def_cfa_offset 8
-; KNL32-NEXT: .Lcfi5:
+; KNL32-NEXT: .Lcfi4:
 ; KNL32-NEXT: .cfi_offset %ebp, -8
 ; KNL32-NEXT: movl %esp, %ebp
-; KNL32-NEXT: .Lcfi6:
+; KNL32-NEXT: .Lcfi5:
 ; KNL32-NEXT: .cfi_def_cfa_register %ebp
 ; KNL32-NEXT: andl $-32, %esp
 ; KNL32-NEXT: subl $32, %esp
@@ -667,8 +665,6 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
 ; KNL32-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1],mem[2],ymm1[3],mem[4],ymm1[5],mem[6],ymm1[7],mem[8],ymm1[9],mem[10],ymm1[11],mem[12],ymm1[13],mem[14],ymm1[15]
 ; KNL32-NEXT: movl %ebp, %esp
 ; KNL32-NEXT: popl %ebp
-; KNL32-NEXT: .Lcfi7:
-; KNL32-NEXT: .cfi_def_cfa %esp, 4
 ; KNL32-NEXT: retl
 entry:
 %0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
index cbf6feaf637..4bcf18cc727 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -445,8 +445,6 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512F-NEXT: orq %rcx, %rax
 ; AVX512F-NEXT: movq %rbp, %rsp
 ; AVX512F-NEXT: popq %rbp
-; AVX512F-NEXT: .Lcfi3:
-; AVX512F-NEXT: .cfi_def_cfa %rsp, 8
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/wide-integer-cmp.ll b/llvm/test/CodeGen/X86/wide-integer-cmp.ll
index 578fcb28aa7..b5c7f86567a 100644
--- a/llvm/test/CodeGen/X86/wide-integer-cmp.ll
+++ b/llvm/test/CodeGen/X86/wide-integer-cmp.ll
@@ -107,16 +107,10 @@ define i32 @test_wide(i128 %a, i128 %b) {
 ; CHECK-NEXT: # BB#1: # %bb1
 ; CHECK-NEXT: movl $1, %eax
 ; CHECK-NEXT: popl %esi
-; CHECK-NEXT: .Lcfi2:
-; CHECK-NEXT: .cfi_def_cfa_offset 4
 ; CHECK-NEXT: retl
 ; CHECK-NEXT: .LBB4_2: # %bb2
-; CHECK-NEXT: .Lcfi3:
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: movl $2, %eax
 ; CHECK-NEXT: popl %esi
-; CHECK-NEXT: .Lcfi4:
-; CHECK-NEXT: .cfi_def_cfa_offset 4
 ; CHECK-NEXT: retl
 entry:
 %cmp = icmp slt i128 %a, %b
diff --git a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
index 4552071ec16..f1590abcae8 100644
--- a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
+++ b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll
@@ -6,8 +6,6 @@ target triple = "x86_64-unknown-linux-gnu"
 ; CHECK: pushq
 ; CHECK: ud2
 ; CHECK-NEXT: popq
-; CHECK-NEXT: :
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 define void @bar() {
 entry:
diff --git a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
index 20b12de155e..3052a0f615e 100644
--- a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
+++ b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll
@@ -23,8 +23,6 @@ define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 {
 ; CHECK-NEXT: movl $4, %eax
 ; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
 ; CHECK-NEXT: popq %rdx
-; CHECK-NEXT: .Lcfi3:
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 call void asm sideeffect "", "~{rax},~{rdx},~{xmm1},~{rdi},~{rsi},~{xmm0}"()
 ret i32 4