author    Benjamin Kramer <benny.kra@googlemail.com>  2016-07-22 11:03:10 +0000
committer Benjamin Kramer <benny.kra@googlemail.com>  2016-07-22 11:03:10 +0000
commit 5ba0e203153ce2cc76d3afc4c5acaae4ed83a3c2 (patch)
tree   f01fff4c947bc6a17a47ce055218df01601115ff /llvm/test/CodeGen
parent 60a3da3f4c69a5d5082dbafcecf3f0ddec8bafe5 (diff)
Revert "[X86][AVX] Added support for lowering to VBROADCASTF128/VBROADCASTI128"
It caused PR28657. This reverts commit r276281.

llvm-svn: 276405
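For reference, a minimal IR sketch of the affected pattern, adapted from the tests updated below (the function name here is illustrative): a 128-bit vector is loaded and splatted into both halves of a 256-bit vector. r276281 lowered this to a single vbroadcastf128/vbroadcasti128; with this revert it goes back to a separate 128-bit load plus vinsertf128.

    ; Adapted from test_broadcast_2f64_4f64 in avx-vbroadcastf128.ll below.
    define <4 x double> @splat_2f64_to_4f64(<2 x double>* %p) nounwind {
      %v = load <2 x double>, <2 x double>* %p
      ; Mask [0,1,0,1] repeats the loaded 128-bit lane into both halves.
      %splat = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
      ret <4 x double> %splat
    }
    ; With r276281 (x86-64):  vbroadcastf128 (%rdi), %ymm0
    ; After this revert:      vmovaps (%rdi), %xmm0
    ;                         vinsertf128 $1, %xmm0, %ymm0, %ymm0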
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll |  26
-rw-r--r--  llvm/test/CodeGen/X86/avx-intrinsics-x86.ll         |  40
-rw-r--r--  llvm/test/CodeGen/X86/avx-vbroadcastf128.ll         |  36
-rw-r--r--  llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll  |   6
-rw-r--r--  llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll        |  36
-rw-r--r--  llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll      | 128
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll      |  18
7 files changed, 195 insertions, 95 deletions
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 8ca0997e48e..0630fd8a93c 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -95,30 +95,6 @@ define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
}
-define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
-; CHECK-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; CHECK-NEXT: retl
- %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
- ret <4 x double> %res
-}
-declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
-
-
-define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
-; CHECK-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; CHECK: ## BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; CHECK-NEXT: retl
- %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
- ret <8 x float> %res
-}
-declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
-
-
define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
; CHECK-LABEL: test_x86_avx_blend_pd_256:
; CHECK: ## BB#0:
@@ -388,7 +364,7 @@ define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
; CHECK-LABEL: test_x86_sse2_storeu_dq:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddb LCPI32_0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu %xmm0, (%eax)
; CHECK-NEXT: retl
%a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
index cf906c83fe1..c5d60da8f90 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -3970,6 +3970,42 @@ define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
+define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
+; AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; AVX: ## BB#0:
+; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT: retl
+;
+; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX512VL-NEXT: retl
+ %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
+
+
+define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
+; AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; AVX: ## BB#0:
+; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX-NEXT: retl
+;
+; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX512VL-NEXT: retl
+ %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
+
+
define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
; AVX-LABEL: test_x86_avx_vperm2f128_pd_256:
; AVX: ## BB#0:
@@ -4549,7 +4585,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX-LABEL: movnt_dq:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
+; AVX-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
; AVX-NEXT: vmovntdq %ymm0, (%eax)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retl
@@ -4557,7 +4593,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
; AVX512VL-LABEL: movnt_dq:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512VL-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
; AVX512VL-NEXT: vmovntdq %ymm0, (%eax)
; AVX512VL-NEXT: retl
%a2 = add <2 x i64> %a1, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
index c2646e5502c..4e7fa84d33b 100644
--- a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
+++ b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
@@ -6,12 +6,14 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
%2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -22,12 +24,14 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -38,12 +42,14 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -54,12 +60,14 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -70,12 +78,14 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -86,12 +96,14 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index bd4773a1a3c..430628c3f80 100644
--- a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -505,12 +505,14 @@ define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
; X32-LABEL: test_mm256_broadcastsi128_si256_mem:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
; X64: # BB#0:
-; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%a0 = load <2 x i64>, <2 x i64>* %p0
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
index 308c626b4aa..ccfb263587c 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
@@ -6,13 +6,15 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovapd (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vaddpd LCPI0_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovapd (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -25,13 +27,15 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovdqa (%eax), %xmm0
+; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
; X64: ## BB#0:
-; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -44,13 +48,15 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vaddps LCPI2_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
; X64: ## BB#0:
-; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -63,13 +69,15 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovdqa (%eax), %xmm0
+; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddd LCPI3_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
; X64: ## BB#0:
-; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -82,13 +90,15 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovdqa (%eax), %xmm0
+; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddw LCPI4_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
; X64: ## BB#0:
-; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
@@ -101,13 +111,15 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
; X32: ## BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X32-NEXT: vmovdqa (%eax), %xmm0
+; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: vpaddb LCPI5_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
; X64: ## BB#0:
-; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-NEXT: vmovdqa (%rdi), %xmm0
+; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
index 15777664d0c..e2460d251d7 100644
--- a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
@@ -10,19 +10,22 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_4f64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x double>, <2 x double> *%p
@@ -34,19 +37,22 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_4i64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: retq
%1 = load <2 x i64>, <2 x i64> *%p
@@ -58,7 +64,8 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX512-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x float>, <4 x float> *%p
@@ -70,7 +77,8 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovdqa32 (%rdi), %xmm0
+; X64-AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX512-NEXT: retq
%1 = load <4 x i32>, <4 x i32> *%p
@@ -80,11 +88,26 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
}
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
-; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
-; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512-NEXT: retq
+; X64-AVX512VL-LABEL: test_broadcast_8i16_16i16:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512VL-NEXT: retq
+;
+; X64-AVX512BWVL-LABEL: test_broadcast_8i16_16i16:
+; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: retq
+;
+; X64-AVX512DQVL-LABEL: test_broadcast_8i16_16i16:
+; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: retq
%1 = load <8 x i16>, <8 x i16> *%p
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%3 = add <16 x i16> %2, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>
@@ -92,11 +115,26 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
}
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
-; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
-; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512-NEXT: retq
+; X64-AVX512VL-LABEL: test_broadcast_16i8_32i8:
+; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512VL-NEXT: retq
+;
+; X64-AVX512BWVL-LABEL: test_broadcast_16i8_32i8:
+; X64-AVX512BWVL: ## BB#0:
+; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: retq
+;
+; X64-AVX512DQVL-LABEL: test_broadcast_16i8_32i8:
+; X64-AVX512DQVL: ## BB#0:
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: retq
%1 = load <16 x i8>, <16 x i8> *%p
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%3 = add <32 x i8> %2, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32>
@@ -110,21 +148,24 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -137,21 +178,24 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -164,21 +208,24 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -191,21 +238,24 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512VL-NEXT: vmovdqa32 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovdqa32 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512DQVL-NEXT: vmovdqa32 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512DQVL-NEXT: retq
@@ -218,21 +268,24 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
@@ -245,21 +298,24 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512VL-NEXT: retq
;
; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
+; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
; X64-AVX512BWVL-NEXT: retq
;
; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
+; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
+; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
; X64-AVX512DQVL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index d853655dea7..181b2e42020 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1352,17 +1352,20 @@ define <4 x double> @splat_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
; AVX1-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX1: # BB#0:
-; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX1-NEXT: vmovaps (%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT: vmovaps (%rdi), %xmm0
+; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
; AVX512VL-NEXT: retq
%v = load <2 x i64>, <2 x i64>* %ptr
%shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1372,17 +1375,20 @@ define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
; AVX1-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX1: # BB#0:
-; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX1-NEXT: vmovaps (%rdi), %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX2-NEXT: vmovaps (%rdi), %xmm0
+; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovapd (%rdi), %xmm0
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
; AVX512VL-NEXT: retq
%v = load <2 x double>, <2 x double>* %ptr
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>