Diffstat (limited to 'llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll')
-rw-r--r--   llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll | 60
1 file changed, 30 insertions(+), 30 deletions(-)
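
Every hunk below makes the same mechanical substitution: LLVM's AsmPrinter changed its basic-block comments from the old "# BB#0:" spelling to the IR-style "# %bb.0:", so each FileCheck line matching the entry block is updated to the new form. Check lines of this shape are normally regenerated with LLVM's in-tree update_llc_test_checks.py helper rather than edited by hand; a minimal sketch, assuming a built llc binary (the script and its --llc-binary option exist in the LLVM tree, but the build/bin path is illustrative):

    $ llvm/utils/update_llc_test_checks.py \
          --llc-binary build/bin/llc \
          llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll

Re-running the script after the AsmPrinter change rewrites every affected check line in one pass, which is why the 30 insertions and 30 deletions in the diffstat pair up exactly.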
diff --git a/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll b/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
index 391f7a38a37..c7825400910 100644
--- a/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
+++ b/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
@@ -7,18 +7,18 @@
define <8 x float> @load32bytes(<8 x float>* %Ap) {
; AVXSLOW-LABEL: load32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps (%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: load32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups (%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: load32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups (%rdi), %ymm0
; AVX2-NEXT: retq
%A = load <8 x float>, <8 x float>* %Ap, align 16
@@ -29,20 +29,20 @@ define <8 x float> @load32bytes(<8 x float>* %Ap) {
define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
; AVXSLOW-LABEL: store32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, 16(%rdi)
; AVXSLOW-NEXT: vmovaps %xmm0, (%rdi)
; AVXSLOW-NEXT: vzeroupper
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: store32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups %ymm0, (%rdi)
; AVXFAST-NEXT: vzeroupper
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: store32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -54,18 +54,18 @@ define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 48(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -80,17 +80,17 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_aligned:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps 48(%rdi), %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_aligned:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovaps 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_aligned:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -105,18 +105,18 @@ define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) {
define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 64(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 80(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 64(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 64(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
@@ -133,7 +133,7 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i64:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -141,7 +141,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i64:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq 80(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5
@@ -163,7 +163,7 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i32:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -171,7 +171,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i32:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -179,7 +179,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd 96(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6
@@ -193,7 +193,7 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i16:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -201,7 +201,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i16:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -209,7 +209,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw 112(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7
@@ -223,7 +223,7 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i8:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i8:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -239,7 +239,7 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb 128(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8
@@ -253,19 +253,19 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_double:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 144(%rdi), %xmm1
; AVXSLOW-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1
; AVXSLOW-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_double:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_double:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9