author    Evgeny Stupachenko <evstupac@gmail.com>  2017-06-06 21:08:00 +0000
committer Evgeny Stupachenko <evstupac@gmail.com>  2017-06-06 21:08:00 +0000
commit    ff9d80cc7a602001b5528b57fdc5632b6f251032 (patch)
tree      c27f4d3cfc49a6b4430305709cf2220f02341836 /llvm/test/CodeGen
parent    c662b501508200076e581beb9345a7631173a1d8 (diff)
Added tests for X86InterleavedStore.
Reviewers: RKSimon, DavidKreitzer

Differential Revision: https://reviews.llvm.org/D33684

Patch by: Aleen Farhana <Farhana.aleen@gmail.com>

llvm-svn: 304834
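
For context, the new tests exercise the canonical interleaved-store IR shape that X86InterleavedStore matches: the four narrow vectors are concatenated pairwise with shufflevector, interleaved into array-of-structs order by a strided mask, and written with a single wide store. Below is a minimal sketch of the same factor-4 pattern at half width; the function name and RUN line are illustrative only and are not part of the committed file.

; Sketch of the factor-4 interleaved-store pattern (illustrative, not in this commit).
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s
define void @store_factor4_sketch(<8 x double>* %ptr, <2 x double> %v0, <2 x double> %v1, <2 x double> %v2, <2 x double> %v3) {
  ; Concatenate the four 2-wide inputs into two 4-wide vectors.
  %s0 = shufflevector <2 x double> %v0, <2 x double> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %s1 = shufflevector <2 x double> %v2, <2 x double> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; Interleave into array-of-structs order:
  ; memory = v0[0], v1[0], v2[0], v3[0], v0[1], v1[1], v2[1], v3[1]
  %interleaved.vec = shufflevector <4 x double> %s0, <4 x double> %s1, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
  store <8 x double> %interleaved.vec, <8 x double>* %ptr, align 16
  ret void
}

The assembly CHECK lines in the diff below have the shape produced by llvm/utils/update_llc_test_checks.py, which is the usual way such checks are generated and regenerated rather than being written by hand.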
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/x86-interleaved-access.ll | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 4181a374c61..74214aa1b8b 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -135,3 +135,96 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
%add3 = add <4 x i64> %add2, %strided.v3
ret <4 x i64> %add3
}
+
+define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
+; AVX-LABEL: store_factorf64_4:
+; AVX: # BB#0:
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm2[0],xmm3[0]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm5 = xmm0[0],xmm1[0]
+; AVX-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm2[1],xmm3[1]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm1[1]
+; AVX-NEXT: vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; AVX-NEXT: vmovupd %ymm0, 96(%rdi)
+; AVX-NEXT: vmovupd %ymm6, 64(%rdi)
+; AVX-NEXT: vmovupd %ymm5, 32(%rdi)
+; AVX-NEXT: vmovupd %ymm4, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+ %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %s1 = shufflevector <4 x double> %v2, <4 x double> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x double> %s0, <8 x double> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x double> %interleaved.vec, <16 x double>* %ptr, align 16
+ ret void
+}
+
+define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
+; AVX1-LABEL: store_factori64_4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm2[0],xmm3[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX1-NEXT: vpunpckhqdq {{.*#+}} xmm5 = xmm2[1],xmm3[1]
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT: vpunpckhqdq {{.*#+}} xmm6 = xmm0[1],xmm1[1]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; AVX1-NEXT: vmovupd %ymm0, 96(%rdi)
+; AVX1-NEXT: vmovupd %ymm6, 64(%rdi)
+; AVX1-NEXT: vmovupd %ymm5, 32(%rdi)
+; AVX1-NEXT: vmovupd %ymm4, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: store_factori64_4:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm5 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm0[3,1,2,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6
+; AVX2-NEXT: vpbroadcastq %xmm3, %ymm7
+; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm7 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm0, 32(%rdi)
+; AVX2-NEXT: vmovdqu %ymm6, (%rdi)
+; AVX2-NEXT: vmovdqu %ymm5, 96(%rdi)
+; AVX2-NEXT: vmovdqu %ymm4, 64(%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %s1 = shufflevector <4 x i64> %v2, <4 x i64> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x i64> %s0, <8 x i64> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x i64> %interleaved.vec, <16 x i64>* %ptr, align 16
+ ret void
+}
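+
A note on the check prefixes: @store_factorf64_4 uses a single AVX prefix while @store_factori64_4 splits into AVX1 and AVX2, indicating the double-precision sequence lowers identically under both feature sets while the i64 version does not. The RUN lines live outside this hunk; assuming the usual two-invocation setup for these prefixes, they would look roughly like the following (the exact triple and flags in the file may differ):

; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2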