Diffstat (limited to 'llvm/test/CodeGen/X86/fast-isel-store.ll')
 -rw-r--r-- llvm/test/CodeGen/X86/fast-isel-store.ll | 40
 1 file changed, 10 insertions(+), 30 deletions(-)
diff --git a/llvm/test/CodeGen/X86/fast-isel-store.ll b/llvm/test/CodeGen/X86/fast-isel-store.ll
index 65fb1115e68..528682bf70b 100644
--- a/llvm/test/CodeGen/X86/fast-isel-store.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-store.ll
@@ -376,8 +376,6 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE64-NEXT: movupd %xmm0, (%eax)
; SSE64-NEXT: movupd %xmm1, 16(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi1:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64:
@@ -409,7 +407,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-LABEL: test_store_4xf64_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi2:
+; SSE64-NEXT: .Lcfi1:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd {{[0-9]+}}(%esp), %xmm1
@@ -417,8 +415,6 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-NEXT: movapd %xmm0, (%eax)
; SSE64-NEXT: movapd %xmm1, 16(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi3:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64_aligned:
@@ -450,7 +446,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-LABEL: test_store_16xi32:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi4:
+; SSE64-NEXT: .Lcfi2:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -459,8 +455,6 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-NEXT: movups %xmm2, 32(%eax)
; SSE64-NEXT: movups %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi5:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32:
@@ -502,7 +496,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-LABEL: test_store_16xi32_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi6:
+; SSE64-NEXT: .Lcfi3:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -511,8 +505,6 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-NEXT: movaps %xmm2, 32(%eax)
; SSE64-NEXT: movaps %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi7:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32_aligned:
@@ -554,7 +546,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-LABEL: test_store_16xf32:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi8:
+; SSE64-NEXT: .Lcfi4:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -563,8 +555,6 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-NEXT: movups %xmm2, 32(%eax)
; SSE64-NEXT: movups %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi9:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32:
@@ -606,7 +596,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-LABEL: test_store_16xf32_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi10:
+; SSE64-NEXT: .Lcfi5:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -615,8 +605,6 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-NEXT: movaps %xmm2, 32(%eax)
; SSE64-NEXT: movaps %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi11:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32_aligned:
@@ -662,7 +650,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-LABEL: test_store_8xf64:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi12:
+; SSE64-NEXT: .Lcfi6:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -675,8 +663,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-NEXT: movupd %xmm2, 32(%eax)
; SSE64-NEXT: movupd %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi13:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64:
@@ -706,8 +692,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY64-NEXT: vmovupd %ymm1, 32(%eax)
; AVXONLY64-NEXT: movl %ebp, %esp
; AVXONLY64-NEXT: popl %ebp
-; AVXONLY64-NEXT: .Lcfi3:
-; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64:
@@ -743,7 +727,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-LABEL: test_store_8xf64_aligned:
; SSE64: # BB#0:
; SSE64-NEXT: subl $12, %esp
-; SSE64-NEXT: .Lcfi14:
+; SSE64-NEXT: .Lcfi7:
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -756,8 +740,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-NEXT: movapd %xmm2, 32(%eax)
; SSE64-NEXT: movapd %xmm3, 48(%eax)
; SSE64-NEXT: addl $12, %esp
-; SSE64-NEXT: .Lcfi15:
-; SSE64-NEXT: .cfi_def_cfa_offset 4
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64_aligned:
@@ -771,12 +753,12 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-LABEL: test_store_8xf64_aligned:
; AVXONLY64: # BB#0:
; AVXONLY64-NEXT: pushl %ebp
-; AVXONLY64-NEXT: .Lcfi4:
+; AVXONLY64-NEXT: .Lcfi3:
; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
-; AVXONLY64-NEXT: .Lcfi5:
+; AVXONLY64-NEXT: .Lcfi4:
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
; AVXONLY64-NEXT: movl %esp, %ebp
-; AVXONLY64-NEXT: .Lcfi6:
+; AVXONLY64-NEXT: .Lcfi5:
; AVXONLY64-NEXT: .cfi_def_cfa_register %ebp
; AVXONLY64-NEXT: andl $-32, %esp
; AVXONLY64-NEXT: subl $32, %esp
@@ -787,8 +769,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-NEXT: vmovapd %ymm1, 32(%eax)
; AVXONLY64-NEXT: movl %ebp, %esp
; AVXONLY64-NEXT: popl %ebp
-; AVXONLY64-NEXT: .Lcfi7:
-; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64_aligned:
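
For context, every deletion in this diff is an epilogue CFI pair: a .Lcfi label followed by .cfi_def_cfa_offset 4 emitted right before retl (or, in the AVXONLY64 hunks, the analogous .cfi_def_cfa %esp, 4 after popl %ebp). Once those pairs are gone, the surviving .Lcfi labels are renumbered, which accounts for all the remaining one-line hunks. Below is a minimal sketch of the pattern, not taken from this test and with a hypothetical function name, showing how .cfi_def_cfa_offset tracks the stack pointer across a 32-bit prologue/epilogue:

	example:                        # hypothetical function, for illustration only
		subl	$12, %esp
	.Lcfi0:
		.cfi_def_cfa_offset 16      # CFA = %esp + 16: 12-byte frame plus the 4-byte return address
		# ... function body ...
		addl	$12, %esp
	.Lcfi1:                         # pair the diff deletes; the test update suggests
		.cfi_def_cfa_offset 4       # LLVM stopped emitting epilogue CFI like this
		retl                        # when nothing after the epilogue needs it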