author    Chandler Carruth <chandlerc@gmail.com>  2014-09-26 17:11:02 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2014-09-26 17:11:02 +0000
commit    5afd4c26039fbc299f40608786df80ddb543598e (patch)
tree      55f0003a4771c4009a26be2b530702fee2e4ab94 /llvm/test/CodeGen
parent    36c626e33f85a8146e6840c65a03f0798a33b2a8 (diff)
[x86] Fix a large collection of bugs that crept in as I fleshed out the
AVX support.

New test cases included. Note that none of the existing test cases
covered these buggy code paths. =/ Also, it is clear from this that
SHUFPS and SHUFPD are the most bug-prone shuffle instructions in x86. =[

These were all detected by fuzz-testing. (I <3 fuzz testing.)

llvm-svn: 218522
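
The fuzz-testing the message refers to amounts to throwing randomly
generated shuffle masks at the backend and watching for failures. As a
rough illustration only -- this is a hypothetical sketch, not the
harness that actually found these bugs -- the Python script below
generates random <4 x i64> shufflevector functions in the shape of the
tests added in this commit and pipes them through llc with AVX enabled,
reporting any crash. The llc flags shown match the target configuration
these tests exercise; note that the bugs fixed here were miscompiles
rather than crashes, so a complete harness would additionally execute
the compiled shuffle and compare the result against the mask's defined
semantics.

#!/usr/bin/env python3
# Hypothetical sketch of a shufflevector fuzzer; not the harness that
# found these bugs. Generates random masks, lowers them with llc, and
# reports any compiler crash or assertion failure.
import random
import subprocess

def random_mask(width=4, num_inputs=2):
    # Each lane selects a lane of the two concatenated inputs, or undef.
    choices = list(range(num_inputs * width)) + ["undef"]
    return [random.choice(choices) for _ in range(width)]

def emit_ir(mask):
    lanes = ", ".join("i32 undef" if m == "undef" else "i32 %d" % m
                      for m in mask)
    return ("define <4 x i64> @fuzz(<4 x i64> %a, <4 x i64> %b) {\n"
            "  %r = shufflevector <4 x i64> %a, <4 x i64> %b, "
            "<4 x i32> <" + lanes + ">\n"
            "  ret <4 x i64> %r\n"
            "}\n")

for _ in range(1000):
    mask = random_mask()
    # llc exits non-zero on a crash or assertion failure in lowering.
    proc = subprocess.run(
        ["llc", "-mtriple=x86_64-unknown-unknown", "-mattr=+avx",
         "-o", "/dev/null"],
        input=emit_ir(mask).encode(), stderr=subprocess.PIPE)
    if proc.returncode != 0:
        print("llc failed on mask", mask)
        print(proc.stderr.decode())
        break

Masks such as <2, undef, 3, 5> and <12, 3, 4, 8, 12, 13, 10, 0> in the
new tests are typical of what such a generator emits: legal but
irregular lane selections that fall outside the canonical patterns the
existing hand-written tests covered.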
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll | 40
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll | 46
2 files changed, 86 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 423400c00a6..b7cfa0b1dab 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -566,6 +566,46 @@ define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
ret <4 x i64> %shuffle
}
+define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_2u35
+; AVX1: # BB#0:
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpermilpd {{.*}} # xmm2 = xmm0[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0,1,2],ymm1[3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: @shuffle_v4i64_2u35
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermq {{.*}} # ymm1 = ymm1[0,1,2,1]
+; AVX2-NEXT: vpermq {{.*}} # ymm0 = ymm0[2,1,3,3]
+; AVX2-NEXT: vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 undef, i32 3, i32 5>
+ ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_1251
+; AVX1: # BB#0:
+; AVX1-NEXT: vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT: vshufpd {{.*}} # ymm0 = ymm0[1],ymm2[0],ymm0[2],ymm2[3]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: @shuffle_v4i64_1251
+; AVX2: # BB#0:
+; AVX2-NEXT: vpermq {{.*}} # ymm1 = ymm1[0,1,1,3]
+; AVX2-NEXT: vpermq {{.*}} # ymm0 = ymm0[1,2,2,1]
+; AVX2-NEXT: vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 5, i32 1>
+ ret <4 x i64> %shuffle
+}
+
define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @stress_test1
; AVX1: # BB#0:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 7e69ac5c537..fb5c993250e 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -646,6 +646,29 @@ define <8 x float> @shuffle_v8f32_uuu3uu66(<8 x float> %a, <8 x float> %b) {
ret <8 x float> %shuffle
}
+define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
+; AVX1-LABEL: @shuffle_v8f32_c348cda0
+; AVX1: # BB#0:
+; AVX1-NEXT: vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT: vshufps {{.*}} # ymm0 = ymm0[0,3],ymm2[0,0],ymm0[4,7],ymm2[4,4]
+; AVX1-NEXT: vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT: vpermilps {{.*}} # ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX1-NEXT: vblendps {{.*}} # ymm1 = ymm2[0],ymm1[1,2,3,4,5],ymm2[6],ymm1[7]
+; AVX1-NEXT: vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: @shuffle_v8f32_c348cda0
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*}} # ymm2 = <u,3,4,u,u,u,u,0>
+; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vmovaps {{.*}} # ymm2 = <4,u,u,0,4,5,2,u>
+; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 12, i32 3, i32 4, i32 8, i32 12, i32 13, i32 10, i32 0>
+ ret <8 x float> %shuffle
+}
+
define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
; AVX1-LABEL: @shuffle_v8i32_00000000
; AVX1: # BB#0:
@@ -1514,3 +1537,26 @@ define <8 x i32> @shuffle_v8i32_uuu3uu66(<8 x i32> %a, <8 x i32> %b) {
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
ret <8 x i32> %shuffle
}
+
+define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: @shuffle_v8i32_6caa87e5
+; AVX1: # BB#0:
+; AVX1-NEXT: vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT: vshufps {{.*}} # ymm1 = ymm2[0,0],ymm1[2,2],ymm2[4,4],ymm1[6,6]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*}} # xmm2 = xmm0[2,1,2,3]
+; AVX1-NEXT: vpermilps {{.*}} # xmm0 = xmm0[0,3,2,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: @shuffle_v8i32_6caa87e5
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovdqa {{.*}} # ymm2 = <u,4,2,2,0,u,6,u>
+; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*}} # ymm0 = ymm0[3,1,3,2]
+; AVX2-NEXT: vpblendd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX2-NEXT: retq
+ %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 6, i32 12, i32 10, i32 10, i32 8, i32 7, i32 14, i32 5>
+ ret <8 x i32> %shuffle
+}