Diffstat (limited to 'llvm/test/CodeGen/X86/fast-isel-store.ll')
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-store.ll  184
1 file changed, 92 insertions(+), 92 deletions(-)
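
Every hunk below makes the same mechanical substitution: the AsmPrinter's basic-block comment format changed from "# BB#N:" to "# %bb.N:", so the autogenerated FileCheck lines are updated to match (tests like this are typically regenerated with llvm/utils/update_llc_test_checks.py). A minimal Python sketch of the rewrite, assuming only the textual pattern visible in the hunks themselves, not the actual tool:

    import re

    def update_bb_label(line: str) -> str:
        # Rewrite old-style MBB comments "# BB#N:" to the new "# %bb.N:".
        return re.sub(r"# BB#(\d+):", r"# %bb.\1:", line)

    # Example taken from the first hunk of this diff:
    old = "; ALL32:       # BB#0: # %entry"
    assert update_bb_label(old) == "; ALL32:       # %bb.0: # %entry"
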
diff --git a/llvm/test/CodeGen/X86/fast-isel-store.ll b/llvm/test/CodeGen/X86/fast-isel-store.ll
index e359e620563..6468186d4ca 100644
--- a/llvm/test/CodeGen/X86/fast-isel-store.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-store.ll
@@ -10,13 +10,13 @@
define i32 @test_store_32(i32* nocapture %addr, i32 %value) {
; ALL32-LABEL: test_store_32:
-; ALL32: # BB#0: # %entry
+; ALL32: # %bb.0: # %entry
; ALL32-NEXT: movl %esi, (%rdi)
; ALL32-NEXT: movl %esi, %eax
; ALL32-NEXT: retq
;
; ALL64-LABEL: test_store_32:
-; ALL64: # BB#0: # %entry
+; ALL64: # %bb.0: # %entry
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %eax
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL64-NEXT: movl %eax, (%ecx)
@@ -28,13 +28,13 @@ entry:
define i16 @test_store_16(i16* nocapture %addr, i16 %value) {
; ALL32-LABEL: test_store_16:
-; ALL32: # BB#0: # %entry
+; ALL32: # %bb.0: # %entry
; ALL32-NEXT: movw %si, (%rdi)
; ALL32-NEXT: movl %esi, %eax
; ALL32-NEXT: retq
;
; ALL64-LABEL: test_store_16:
-; ALL64: # BB#0: # %entry
+; ALL64: # %bb.0: # %entry
; ALL64-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; ALL64-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ALL64-NEXT: movw %ax, (%ecx)
@@ -46,39 +46,39 @@ entry:
define <4 x i32> @test_store_4xi32(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
; SSE32-LABEL: test_store_4xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: paddd %xmm1, %xmm0
; SSE32-NEXT: movdqu %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: paddd %xmm1, %xmm0
; SSE64-NEXT: movdqu %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_4xi32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXONLY32-NEXT: vmovdqu %xmm0, (%rdi)
; AVXONLY32-NEXT: retq
;
; AVX64-LABEL: test_store_4xi32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovdqu %xmm0, (%eax)
; AVX64-NEXT: retl
;
; KNL32-LABEL: test_store_4xi32:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; KNL32-NEXT: vmovdqu %xmm0, (%rdi)
; KNL32-NEXT: retq
;
; SKX32-LABEL: test_store_4xi32:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; SKX32-NEXT: vmovdqu %xmm0, (%rdi)
; SKX32-NEXT: retq
@@ -89,39 +89,39 @@ define <4 x i32> @test_store_4xi32(<4 x i32>* nocapture %addr, <4 x i32> %value,
define <4 x i32> @test_store_4xi32_aligned(<4 x i32>* nocapture %addr, <4 x i32> %value, <4 x i32> %value2) {
; SSE32-LABEL: test_store_4xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: paddd %xmm1, %xmm0
; SSE32-NEXT: movdqa %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: paddd %xmm1, %xmm0
; SSE64-NEXT: movdqa %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_4xi32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVXONLY32-NEXT: vmovdqa %xmm0, (%rdi)
; AVXONLY32-NEXT: retq
;
; AVX64-LABEL: test_store_4xi32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovdqa %xmm0, (%eax)
; AVX64-NEXT: retl
;
; KNL32-LABEL: test_store_4xi32_aligned:
-; KNL32: # BB#0:
+; KNL32: # %bb.0:
; KNL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; KNL32-NEXT: vmovdqa %xmm0, (%rdi)
; KNL32-NEXT: retq
;
; SKX32-LABEL: test_store_4xi32_aligned:
-; SKX32: # BB#0:
+; SKX32: # %bb.0:
; SKX32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; SKX32-NEXT: vmovdqa %xmm0, (%rdi)
; SKX32-NEXT: retq
@@ -132,23 +132,23 @@ define <4 x i32> @test_store_4xi32_aligned(<4 x i32>* nocapture %addr, <4 x i32>
define <4 x float> @test_store_4xf32(<4 x float>* nocapture %addr, <4 x float> %value) {
; SSE32-LABEL: test_store_4xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %xmm0, (%eax)
; AVX64-NEXT: retl
@@ -158,23 +158,23 @@ define <4 x float> @test_store_4xf32(<4 x float>* nocapture %addr, <4 x float> %
define <4 x float> @test_store_4xf32_aligned(<4 x float>* nocapture %addr, <4 x float> %value) {
; SSE32-LABEL: test_store_4xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %xmm0, (%eax)
; AVX64-NEXT: retl
@@ -184,26 +184,26 @@ define <4 x float> @test_store_4xf32_aligned(<4 x float>* nocapture %addr, <4 x
define <2 x double> @test_store_2xf64(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
; SSE32-LABEL: test_store_2xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm1, %xmm0
; SSE32-NEXT: movupd %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_2xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd %xmm1, %xmm0
; SSE64-NEXT: movupd %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_2xf64:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX32-NEXT: vmovupd %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_2xf64:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovupd %xmm0, (%eax)
@@ -215,26 +215,26 @@ define <2 x double> @test_store_2xf64(<2 x double>* nocapture %addr, <2 x double
define <2 x double> @test_store_2xf64_aligned(<2 x double>* nocapture %addr, <2 x double> %value, <2 x double> %value2) {
; SSE32-LABEL: test_store_2xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm1, %xmm0
; SSE32-NEXT: movapd %xmm0, (%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_2xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: addpd %xmm1, %xmm0
; SSE64-NEXT: movapd %xmm0, (%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_2xf64_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX32-NEXT: vmovapd %xmm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_2xf64_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX64-NEXT: vmovapd %xmm0, (%eax)
@@ -246,25 +246,25 @@ define <2 x double> @test_store_2xf64_aligned(<2 x double>* nocapture %addr, <2
define <8 x i32> @test_store_8xi32(<8 x i32>* nocapture %addr, <8 x i32> %value) {
; SSE32-LABEL: test_store_8xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: movups %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xi32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xi32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -274,25 +274,25 @@ define <8 x i32> @test_store_8xi32(<8 x i32>* nocapture %addr, <8 x i32> %value)
define <8 x i32> @test_store_8xi32_aligned(<8 x i32>* nocapture %addr, <8 x i32> %value) {
; SSE32-LABEL: test_store_8xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: movaps %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xi32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xi32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -302,25 +302,25 @@ define <8 x i32> @test_store_8xi32_aligned(<8 x i32>* nocapture %addr, <8 x i32>
define <8 x float> @test_store_8xf32(<8 x float>* nocapture %addr, <8 x float> %value) {
; SSE32-LABEL: test_store_8xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movups %xmm0, (%eax)
; SSE64-NEXT: movups %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xf32:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovups %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xf32:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovups %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -330,25 +330,25 @@ define <8 x float> @test_store_8xf32(<8 x float>* nocapture %addr, <8 x float> %
define <8 x float> @test_store_8xf32_aligned(<8 x float>* nocapture %addr, <8 x float> %value) {
; SSE32-LABEL: test_store_8xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE64-NEXT: movaps %xmm0, (%eax)
; SSE64-NEXT: movaps %xmm1, 16(%eax)
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_8xf32_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vmovaps %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_8xf32_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vmovaps %ymm0, (%eax)
; AVX64-NEXT: retl
@@ -358,7 +358,7 @@ define <8 x float> @test_store_8xf32_aligned(<8 x float>* nocapture %addr, <8 x
define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
; SSE32-LABEL: test_store_4xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm3, %xmm1
; SSE32-NEXT: addpd %xmm2, %xmm0
; SSE32-NEXT: movupd %xmm0, (%rdi)
@@ -366,7 +366,7 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -378,13 +378,13 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX32-NEXT: vmovupd %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf64:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX64-NEXT: vmovupd %ymm0, (%eax)
@@ -396,7 +396,7 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double
define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 x double> %value, <4 x double> %value2) {
; SSE32-LABEL: test_store_4xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm3, %xmm1
; SSE32-NEXT: addpd %xmm2, %xmm0
; SSE32-NEXT: movapd %xmm0, (%rdi)
@@ -404,7 +404,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_4xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -416,13 +416,13 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
; SSE64-NEXT: retl
;
; AVX32-LABEL: test_store_4xf64_aligned:
-; AVX32: # BB#0:
+; AVX32: # %bb.0:
; AVX32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX32-NEXT: vmovapd %ymm0, (%rdi)
; AVX32-NEXT: retq
;
; AVX64-LABEL: test_store_4xf64_aligned:
-; AVX64: # BB#0:
+; AVX64: # %bb.0:
; AVX64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX64-NEXT: vmovapd %ymm0, (%eax)
@@ -434,7 +434,7 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4
define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %value) {
; SSE32-LABEL: test_store_16xi32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: movups %xmm2, 32(%rdi)
@@ -442,7 +442,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xi32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -455,25 +455,25 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovups %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovups %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xi32:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovups %ymm0, (%eax)
; AVXONLY64-NEXT: vmovups %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xi32:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovups %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xi32:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovups %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -483,7 +483,7 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va
define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x i32> %value) {
; SSE32-LABEL: test_store_16xi32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: movaps %xmm2, 32(%rdi)
@@ -491,7 +491,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xi32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -504,25 +504,25 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xi32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovaps %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovaps %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xi32_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovaps %ymm0, (%eax)
; AVXONLY64-NEXT: vmovaps %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xi32_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovaps %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xi32_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovaps %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -532,7 +532,7 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x
define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x float> %value) {
; SSE32-LABEL: test_store_16xf32:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movups %xmm0, (%rdi)
; SSE32-NEXT: movups %xmm1, 16(%rdi)
; SSE32-NEXT: movups %xmm2, 32(%rdi)
@@ -540,7 +540,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xf32:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -553,25 +553,25 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovups %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovups %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xf32:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovups %ymm0, (%eax)
; AVXONLY64-NEXT: vmovups %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xf32:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovups %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xf32:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovups %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -581,7 +581,7 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa
define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <16 x float> %value) {
; SSE32-LABEL: test_store_16xf32_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: movaps %xmm0, (%rdi)
; SSE32-NEXT: movaps %xmm1, 16(%rdi)
; SSE32-NEXT: movaps %xmm2, 32(%rdi)
@@ -589,7 +589,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_16xf32_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movaps {{[0-9]+}}(%esp), %xmm3
@@ -602,25 +602,25 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_16xf32_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vmovaps %ymm0, (%rdi)
; AVXONLY32-NEXT: vmovaps %ymm1, 32(%rdi)
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_16xf32_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVXONLY64-NEXT: vmovaps %ymm0, (%eax)
; AVXONLY64-NEXT: vmovaps %ymm1, 32(%eax)
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_16xf32_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vmovaps %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_16xf32_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vmovaps %zmm0, (%eax)
; AVX51264-NEXT: retl
@@ -630,7 +630,7 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1
define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
; SSE32-LABEL: test_store_8xf64:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm7, %xmm3
; SSE32-NEXT: addpd %xmm6, %xmm2
; SSE32-NEXT: addpd %xmm5, %xmm1
@@ -642,7 +642,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf64:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
@@ -659,7 +659,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVXONLY32-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVXONLY32-NEXT: vmovupd %ymm0, (%rdi)
@@ -667,7 +667,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_8xf64:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: pushl %ebp
; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
@@ -685,13 +685,13 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51232-NEXT: vmovupd %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_8xf64:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51264-NEXT: vmovupd %zmm0, (%eax)
@@ -703,7 +703,7 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double
define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 x double> %value, <8 x double> %value2) {
; SSE32-LABEL: test_store_8xf64_aligned:
-; SSE32: # BB#0:
+; SSE32: # %bb.0:
; SSE32-NEXT: addpd %xmm7, %xmm3
; SSE32-NEXT: addpd %xmm6, %xmm2
; SSE32-NEXT: addpd %xmm5, %xmm1
@@ -715,7 +715,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE32-NEXT: retq
;
; SSE64-LABEL: test_store_8xf64_aligned:
-; SSE64: # BB#0:
+; SSE64: # %bb.0:
; SSE64-NEXT: subl $12, %esp
; SSE64-NEXT: .cfi_def_cfa_offset 16
; SSE64-NEXT: movapd {{[0-9]+}}(%esp), %xmm3
@@ -732,7 +732,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; SSE64-NEXT: retl
;
; AVXONLY32-LABEL: test_store_8xf64_aligned:
-; AVXONLY32: # BB#0:
+; AVXONLY32: # %bb.0:
; AVXONLY32-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVXONLY32-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVXONLY32-NEXT: vmovapd %ymm0, (%rdi)
@@ -740,7 +740,7 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY32-NEXT: retq
;
; AVXONLY64-LABEL: test_store_8xf64_aligned:
-; AVXONLY64: # BB#0:
+; AVXONLY64: # %bb.0:
; AVXONLY64-NEXT: pushl %ebp
; AVXONLY64-NEXT: .cfi_def_cfa_offset 8
; AVXONLY64-NEXT: .cfi_offset %ebp, -8
@@ -758,13 +758,13 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8
; AVXONLY64-NEXT: retl
;
; AVX51232-LABEL: test_store_8xf64_aligned:
-; AVX51232: # BB#0:
+; AVX51232: # %bb.0:
; AVX51232-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51232-NEXT: vmovapd %zmm0, (%rdi)
; AVX51232-NEXT: retq
;
; AVX51264-LABEL: test_store_8xf64_aligned:
-; AVX51264: # BB#0:
+; AVX51264: # %bb.0:
; AVX51264-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX51264-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX51264-NEXT: vmovapd %zmm0, (%eax)