Diffstat (limited to 'llvm/test/CodeGen/X86/x86-interleaved-access.ll')
 llvm/test/CodeGen/X86/x86-interleaved-access.ll | 92 ++++++++++----------------------
 1 file changed, 32 insertions(+), 60 deletions(-)
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index ec8bce1b43c..daf150fc687 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -129,26 +129,18 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
 define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
 ; AVX-LABEL: store_factorf64_4:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm2[0],xmm3[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm0[0],xmm1[0]
-; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm2[1],xmm3[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm1[1]
-; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
-; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX-NEXT:    vmovupd %ymm0, 96(%rdi)
-; AVX-NEXT:    vmovupd %ymm6, 64(%rdi)
-; AVX-NEXT:    vmovupd %ymm5, 32(%rdi)
-; AVX-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX-NEXT:    vmovupd %ymm3, 64(%rdi)
+; AVX-NEXT:    vmovupd %ymm4, 32(%rdi)
+; AVX-NEXT:    vmovupd %ymm2, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
   %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -161,55 +153,35 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
 define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
 ; AVX1-LABEL: store_factori64_4:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm2[0],xmm3[0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm2[1],xmm3[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm6 = xmm0[1],xmm1[1]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm7
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX1-NEXT:    vmovupd %ymm0, 96(%rdi)
-; AVX1-NEXT:    vmovupd %ymm6, 64(%rdi)
-; AVX1-NEXT:    vmovupd %ymm5, 32(%rdi)
-; AVX1-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX1-NEXT:    vmovupd %ymm3, 64(%rdi)
+; AVX1-NEXT:    vmovupd %ymm4, 32(%rdi)
+; AVX1-NEXT:    vmovupd %ymm2, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_factori64_4:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX2-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[3,1,2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm6
-; AVX2-NEXT:    vpbroadcastq %xmm3, %ymm7
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm0[0],xmm1[0]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm6, (%rdi)
-; AVX2-NEXT:    vmovdqu %ymm5, 96(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm4, 64(%rdi)
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm4
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vmovdqu %ymm0, 96(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm3, 64(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm4, 32(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm2, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
   %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
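Note: both hunks are regenerated FileCheck lines for the same kind of input, a factor-4 interleaved store, which the new lowering builds from two 128-bit concatenations (vinsertf128/vinserti128, vperm2f128/vperm2i128) plus four unpacks, replacing the longer xmm-unpack-and-blend sequence. As a sketch of the IR shape such a test exercises: the function signature and the %s0 line below are taken from the visible diff context, while %s1 and the 16-element interleave mask are assumptions about the rest of the test body, not copied from the file.

; Sketch (assumed shape): concatenate the inputs pairwise, then apply a
; factor-4 interleave mask and emit one wide store.
define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
  %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ; assumed: the second concatenation mirrors %s0
  %s1 = shufflevector <4 x double> %v2, <4 x double> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ; assumed: factor-4 interleave, i.e. v0[i], v1[i], v2[i], v3[i] for i = 0..3
  %interleaved = shufflevector <8 x double> %s0, <8 x double> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
  store <16 x double> %interleaved, <16 x double>* %ptr, align 16
  ret void
}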

