author     Simon Pilgrim <llvm-dev@redking.me.uk>    2016-07-22 13:58:44 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2016-07-22 13:58:44 +0000
commit     ea0d4f9962fbc1741a730ec74b655940ea15424b (patch)
tree       47e47fd7351dc3615c9c33add8a694638cec173d /llvm/test
parent     22c9e931470fea2e25bef1f52128e54ec96da403 (diff)
[X86][AVX] Added support for lowering to VBROADCASTF128/VBROADCASTI128 (reapplied)
As reported on PR26235, we don't currently make use of the VBROADCASTF128/VBROADCASTI128 instructions (or the AVX512 equivalents) to load+splat a 128-bit vector to both lanes of a 256-bit vector.
This patch enables lowering from subvector insertion/concatenation patterns and auto-upgrades the llvm.x86.avx.vbroadcastf128.pd.256 / llvm.x86.avx.vbroadcastf128.ps.256 intrinsics to match.
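For illustration, this is the kind of IR the new lowering targets — a minimal sketch (the function name is invented here, but the load + self-concatenating shufflevector pattern is exactly what the updated tests below exercise). With this patch it selects to a single vbroadcastf128 instead of a vmovaps + vinsertf128 pair:

    ; Splat a 128-bit vector to both 128-bit lanes of a 256-bit vector.
    define <4 x double> @splat_2f64_to_4f64(<2 x double>* %p) nounwind {
      %ld = load <2 x double>, <2 x double>* %p
      ; Concatenation shuffle of the loaded subvector with itself; now lowers to:
      ;   vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
      %res = shufflevector <2 x double> %ld, <2 x double> %ld, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
      ret <4 x double> %res
    }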
We could also investigate using VBROADCASTF128/VBROADCASTI128 to load repeated constants (similar to what we already do for scalar broadcasts).
Reapplied with a fix for PR28657 - removed the intrinsic definitions (clang companion patch to be submitted shortly).
Differential Revision: https://reviews.llvm.org/D22460
llvm-svn: 276416
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll    |  10
-rw-r--r--  llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll  |  26
-rw-r--r--  llvm/test/CodeGen/X86/avx-intrinsics-x86.ll          |  40
-rw-r--r--  llvm/test/CodeGen/X86/avx-vbroadcastf128.ll          |  36
-rw-r--r--  llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll   |   6
-rw-r--r--  llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll         |  36
-rw-r--r--  llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll       | 128
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll       |  18
8 files changed, 99 insertions, 201 deletions
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index f886e1ff814..9f21bf2368c 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -207,11 +207,10 @@ define <4 x double> @test_mm256_broadcast_pd(<2 x double>* %a0) nounwind {
 ; X64: # BB#0:
 ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
-  %arg0 = bitcast <2 x double>* %a0 to i8*
-  %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %arg0)
+  %ld = load <2 x double>, <2 x double>* %a0
+  %res = shufflevector <2 x double> %ld, <2 x double> %ld, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
   ret <4 x double> %res
 }
-declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
 
 define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_ps:
@@ -224,11 +223,10 @@ define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
 ; X64: # BB#0:
 ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
-  %arg0 = bitcast <4 x float>* %a0 to i8*
-  %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %arg0)
+  %ld = load <4 x float>, <4 x float>* %a0
+  %res = shufflevector <4 x float> %ld, <4 x float> %ld, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   ret <8 x float> %res
 }
-declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
 
 define <4 x double> @test_mm256_broadcast_sd(double* %a0) nounwind {
 ; X32-LABEL: test_mm256_broadcast_sd:
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 0630fd8a93c..8ca0997e48e 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -95,6 +95,30 @@ define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
 }
 
+define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
+; CHECK-LABEL: test_x86_avx_vbroadcastf128_pd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; CHECK-NEXT: retl
+  %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
+  ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
+
+
+define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
+; CHECK-LABEL: test_x86_avx_vbroadcastf128_ps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; CHECK-NEXT: retl
+  %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
+  ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
+
+
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx_blend_pd_256:
 ; CHECK: ## BB#0:
@@ -364,7 +388,7 @@ define void @test_x86_sse2_storeu_dq(i8* %a0, <16 x i8> %a1) {
 ; CHECK-LABEL: test_x86_sse2_storeu_dq:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vpaddb LCPI32_0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0
 ; CHECK-NEXT: vmovdqu %xmm0, (%eax)
 ; CHECK-NEXT: retl
   %a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
index c5d60da8f90..cf906c83fe1 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -3970,42 +3970,6 @@ define <8 x float> @test_x86_avx_sqrt_ps_256(<8 x float> %a0) {
 declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
 
-define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
-; AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX-NEXT: retl
-;
-; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512VL-NEXT: retl
-  %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
-  ret <4 x double> %res
-}
-declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
-
-
-define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
-; AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; AVX: ## BB#0:
-; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX-NEXT: retl
-;
-; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
-; AVX512VL: ## BB#0:
-; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX512VL-NEXT: retl
-  %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
-  ret <8 x float> %res
-}
-declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
-
-
 define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
 ; AVX-LABEL: test_x86_avx_vperm2f128_pd_256:
 ; AVX: ## BB#0:
@@ -4585,7 +4549,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
 ; AVX-LABEL: movnt_dq:
 ; AVX: ## BB#0:
 ; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
+; AVX-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
 ; AVX-NEXT: vmovntdq %ymm0, (%eax)
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retl
@@ -4593,7 +4557,7 @@ define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind {
 ; AVX512VL-LABEL: movnt_dq:
 ; AVX512VL: ## BB#0:
 ; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
-; AVX512VL-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
 ; AVX512VL-NEXT: vmovntdq %ymm0, (%eax)
 ; AVX512VL-NEXT: retl
   %a2 = add <2 x i64> %a1, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
index 4e7fa84d33b..c2646e5502c 100644
--- a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
+++ b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll
@@ -6,14 +6,12 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2f64_4f64:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <2 x double>, <2 x double> *%p
   %2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -24,14 +22,12 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2i64_4i64:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <2 x i64>, <2 x i64> *%p
   %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -42,14 +38,12 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4f32_8f32:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <4 x float>, <4 x float> *%p
   %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -60,14 +54,12 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4i32_8i32:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <4 x i32>, <4 x i32> *%p
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -78,14 +70,12 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
 ; X32-LABEL: test_broadcast_8i16_16i16:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <8 x i16>, <8 x i16> *%p
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -96,14 +86,12 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
 ; X32-LABEL: test_broadcast_16i8_32i8:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %1 = load <16 x i8>, <16 x i8> *%p
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index 430628c3f80..bd4773a1a3c 100644
--- a/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -505,14 +505,12 @@ define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
 ; X32-LABEL: test_mm256_broadcastsi128_si256_mem:
 ; X32: # BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
 ; X64: # BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: retq
   %a0 = load <2 x i64>, <2 x i64>* %p0
   %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
index ccfb263587c..308c626b4aa 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
@@ -6,15 +6,13 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2f64_4f64:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovapd (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vaddpd LCPI0_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64:
 ; X64: ## BB#0:
-; X64-NEXT: vmovapd (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <2 x double>, <2 x double> *%p
@@ -27,15 +25,13 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
 ; X32-LABEL: test_broadcast_2i64_4i64:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovdqa (%eax), %xmm0
-; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64:
 ; X64: ## BB#0:
-; X64-NEXT: vmovdqa (%rdi), %xmm0
-; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <2 x i64>, <2 x i64> *%p
@@ -48,15 +44,13 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4f32_8f32:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vaddps LCPI2_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32:
 ; X64: ## BB#0:
-; X64-NEXT: vmovaps (%rdi), %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <4 x float>, <4 x float> *%p
@@ -69,15 +63,13 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
 ; X32-LABEL: test_broadcast_4i32_8i32:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovdqa (%eax), %xmm0
-; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vpaddd LCPI3_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32:
 ; X64: ## BB#0:
-; X64-NEXT: vmovdqa (%rdi), %xmm0
-; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <4 x i32>, <4 x i32> *%p
@@ -90,15 +82,13 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
 ; X32-LABEL: test_broadcast_8i16_16i16:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovdqa (%eax), %xmm0
-; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vpaddw LCPI4_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16:
 ; X64: ## BB#0:
-; X64-NEXT: vmovdqa (%rdi), %xmm0
-; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <8 x i16>, <8 x i16> *%p
@@ -111,15 +101,13 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
 ; X32-LABEL: test_broadcast_16i8_32i8:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovdqa (%eax), %xmm0
-; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X32-NEXT: vpaddb LCPI5_0, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8:
 ; X64: ## BB#0:
-; X64-NEXT: vmovdqa (%rdi), %xmm0
-; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = load <16 x i8>, <16 x i8> *%p
diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
index e2460d251d7..15777664d0c 100644
--- a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll
@@ -10,22 +10,19 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_2f64_4f64:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512DQVL-NEXT: retq
   %1 = load <2 x double>, <2 x double> *%p
@@ -37,22 +34,19 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
 define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_2i64_4i64:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512DQVL-NEXT: retq
   %1 = load <2 x i64>, <2 x i64> *%p
@@ -64,8 +58,7 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
 define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
 ; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
 ; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0
-; X64-AVX512-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512-NEXT: retq
   %1 = load <4 x float>, <4 x float> *%p
@@ -77,8 +70,7 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
 define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
 ; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
 ; X64-AVX512: ## BB#0:
-; X64-AVX512-NEXT: vmovdqa32 (%rdi), %xmm0
-; X64-AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX512-NEXT: retq
   %1 = load <4 x i32>, <4 x i32> *%p
@@ -88,26 +80,11 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
 }
 
 define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
-; X64-AVX512VL-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512VL-NEXT: retq
-;
-; X64-AVX512BWVL-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512BWVL-NEXT: retq
-;
-; X64-AVX512DQVL-LABEL: test_broadcast_8i16_16i16:
-; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512DQVL-NEXT: retq
+; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
+; X64-AVX512: ## BB#0:
+; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512-NEXT: retq
   %1 = load <8 x i16>, <8 x i16> *%p
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %3 = add <16 x i16> %2, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>
@@ -115,26 +92,11 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
 }
 
 define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
-; X64-AVX512VL-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512VL-NEXT: retq
-;
-; X64-AVX512BWVL-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512BWVL-NEXT: retq
-;
-; X64-AVX512DQVL-LABEL: test_broadcast_16i8_32i8:
-; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
-; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX512DQVL-NEXT: retq
+; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
+; X64-AVX512: ## BB#0:
+; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
+; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512-NEXT: retq
   %1 = load <16 x i8>, <16 x i8> *%p
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %3 = add <32 x i8> %2, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32>
@@ -148,24 +110,21 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
 define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: retq
@@ -178,24 +137,21 @@ define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
 define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: retq
@@ -208,24 +164,21 @@ define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
 define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: retq
@@ -238,24 +191,21 @@ define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
 define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa32 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqa32 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa32 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512DQVL-NEXT: retq
@@ -268,24 +218,21 @@ define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
 define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
+; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
 ; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
 ; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
 ; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
 ; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
 ; X64-AVX512DQVL-NEXT: retq
@@ -298,24 +245,21 @@ define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
 define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
 ; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
 ; X64-AVX512VL: ## BB#0:
-; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
+; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
 ; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
 ; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
 ; X64-AVX512VL-NEXT: retq
 ;
 ; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
 ; X64-AVX512BWVL: ## BB#0:
-; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
-; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
 ; X64-AVX512BWVL-NEXT: retq
 ;
 ; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
 ; X64-AVX512DQVL: ## BB#0:
-; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
-; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
+; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
 ; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
 ; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
 ; X64-AVX512DQVL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 181b2e42020..d853655dea7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1352,20 +1352,17 @@ define <4 x double> @splat_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
 define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
 ; AVX1-LABEL: splat128_mem_v4i64_from_v2i64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: splat128_mem_v4i64_from_v2i64:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX2-NEXT: retq
 ;
 ; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
 ; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
-; AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
 ; AVX512VL-NEXT: retq
   %v = load <2 x i64>, <2 x i64>* %ptr
   %shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1375,20 +1372,17 @@ define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
 define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
 ; AVX1-LABEL: splat128_mem_v4f64_from_v2f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vmovaps (%rdi), %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: splat128_mem_v4f64_from_v2f64:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vmovaps (%rdi), %xmm0
-; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
 ; AVX2-NEXT: retq
 ;
 ; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
 ; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovapd (%rdi), %xmm0
-; AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
 ; AVX512VL-NEXT: retq
   %v = load <2 x double>, <2 x double>* %ptr
   %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
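For a quick local check of the new codegen, here is a minimal standalone test sketch in the style of the files above (the file contents and function name are illustrative; the RUN line mirrors the existing tests, and the CHECK matches the AVX512VL output shown in vector-shuffle-256-v4.ll):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s
    define <4 x i64> @splat_2i64_to_4i64(<2 x i64>* %p) nounwind {
    ; CHECK: vbroadcasti32x4 (%rdi), %ymm0
      %ld = load <2 x i64>, <2 x i64>* %p
      %splat = shufflevector <2 x i64> %ld, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
      ret <4 x i64> %splat
    }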