| author | Chandler Carruth <chandlerc@gmail.com> | 2014-09-15 10:35:41 +0000 |
|---|---|---|
| committer | Chandler Carruth <chandlerc@gmail.com> | 2014-09-15 10:35:41 +0000 |
| commit | 35e3b545d6837ec088956e11b106c068fa4fe6cf | |
| tree | 0ca8be33041f08b525580d71c39be4330ace23bc /llvm/test | |
| parent | 7a8330e7bfaf2d48621fa0730ec3b1360c61e14a | |
[x86] Undo a flawed transform I added to form UNPCK instructions when
AVX is available, and generally tidy up things surrounding UNPCK
formation.
Originally, I thought the only advantage of PSHUFD over the UNPCK
instruction variants was its free copy, and that we should otherwise use
the UNPCK instructions for their shorter encoding. That isn't right,
though: PSHUFD has the larger advantage of being able to fold a load
into its operand. For UNPCK, the operand *must* be in a register so that
it can be the second input.
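To make the trade-off concrete, here is a minimal sketch in the style
of the tests touched below; the function name and the expected assembly
line are illustrative assumptions, not taken from this patch:

    ; PSHUFD can fold the load into its source operand, so splatting the
    ; low qword of an in-memory value is a single instruction:
    ;   pshufd $68, (%rdi), %xmm0    # xmm0 = mem[0,1,0,1]
    ; Forming UNPCKLQDQ here would first need the value moved into a
    ; register so it could serve as the second input.
    define <4 x i32> @splat_lo_qword_from_mem(<4 x i32>* %p) {
      %v = load <4 x i32>* %p
      %s = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
      ret <4 x i32> %s
    }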
This removes the UNPCK formation from the target-specific DAG combine
for v4i32 shuffles. It also lifts the v8 and v16 cases out of the
AVX-specific check, because there an UNPCK potentially replaces multiple
instructions with a single one and so should always be valuable (see the
example just below). The floating-point checks are simplified
accordingly.
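As an example of the multi-instruction savings (a sketch; the test name
is mine, but the mask-to-PUNPCKLWD correspondence is standard SSE2):

    ; Interleaving the low words of two v8i16 inputs is exactly one
    ; punpcklwd:
    ;   punpcklwd %xmm1, %xmm0    # xmm0 = a0,b0,a1,b1,a2,b2,a3,b3
    define <8 x i16> @interleave_lo_words(<8 x i16> %a, <8 x i16> %b) {
      %s = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
      ret <8 x i16> %s
    }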
This also adjusts PSHUFD formation to try to select a shuffle mask that
would equally fit an UNPCK instruction variant. That was originally
motivated by letting the combiner then match these as UNPCK
instructions, which clearly no longer happens.
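For reference, these are the self-unpack mask equivalences the
canonicalization targets (illustrative AT&T syntax; each pair is
interchangeable once the input is in a register):

    pshufd $0x44, %xmm0, %xmm0   # dwords [0,1,0,1] == punpcklqdq %xmm0, %xmm0 (qwords [0,0])
    pshufd $0xee, %xmm0, %xmm0   # dwords [2,3,2,3] == punpckhqdq %xmm0, %xmm0 (qwords [1,1])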
Eventually, we should add a MachineCombiner pass that can form UNPCK
instructions post-RA when the operand is known to be in a register and
thus there is no loss.
llvm-svn: 217755
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/avx-basic.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx-sext.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx-splat.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/X86/exedepsfix-broadcast.ll | 4 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-128-v8.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll | 29 |
6 files changed, 22 insertions, 23 deletions
diff --git a/llvm/test/CodeGen/X86/avx-basic.ll b/llvm/test/CodeGen/X86/avx-basic.ll
index ca540226ee7..a8dae82a8be 100644
--- a/llvm/test/CodeGen/X86/avx-basic.ll
+++ b/llvm/test/CodeGen/X86/avx-basic.ll
@@ -72,9 +72,9 @@ entry:
   ret <4 x i64> %shuffle
 }
-; CHECK: vpunpcklqdq
+; CHECK: vmovlhps
 ; CHECK-NEXT: vextractf128 $1
-; CHECK-NEXT: vpunpcklqdq
+; CHECK-NEXT: vmovlhps
 ; CHECK-NEXT: vinsertf128 $1
 define <4 x i64> @C(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
 entry:
@@ -83,7 +83,7 @@ entry:
 }
 ; CHECK: vpshufd $-96
-; CHECK: vpunpckhdq
+; CHECK: vpshufd $-6
 ; CHECK: vinsertf128 $1
 define <8 x i32> @D(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
 entry:
diff --git a/llvm/test/CodeGen/X86/avx-sext.ll b/llvm/test/CodeGen/X86/avx-sext.ll
index 9bcf06f7b32..fb2287f5289 100644
--- a/llvm/test/CodeGen/X86/avx-sext.ll
+++ b/llvm/test/CodeGen/X86/avx-sext.ll
@@ -156,7 +156,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
 ; AVX-LABEL: sext_16i8_to_16i16
 ; AVX: vpmovsxbw
-; AVX: vpunpckhqdq
+; AVX: vmovhlps
 ; AVX: vpmovsxbw
 ; AVX: ret
 define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
diff --git a/llvm/test/CodeGen/X86/avx-splat.ll b/llvm/test/CodeGen/X86/avx-splat.ll
index a2537ce5c04..058db314d28 100644
--- a/llvm/test/CodeGen/X86/avx-splat.ll
+++ b/llvm/test/CodeGen/X86/avx-splat.ll
@@ -19,7 +19,7 @@ entry:
 }
 ; CHECK: vmovq
-; CHECK-NEXT: vpunpcklqdq %xmm
+; CHECK-NEXT: vmovlhps %xmm
 ; CHECK-NEXT: vinsertf128 $1
 define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
 entry:
diff --git a/llvm/test/CodeGen/X86/exedepsfix-broadcast.ll b/llvm/test/CodeGen/X86/exedepsfix-broadcast.ll
index f4539c8969c..ab794959550 100644
--- a/llvm/test/CodeGen/X86/exedepsfix-broadcast.ll
+++ b/llvm/test/CodeGen/X86/exedepsfix-broadcast.ll
@@ -95,8 +95,8 @@ define <4 x double> @ExeDepsFix_broadcastsd256(<4 x double> %arg, <4 x double> %
 ; CHECK-LABEL: ExeDepsFix_broadcastsd_inreg
 ; ExeDepsFix works top down, thus it coalesces vpunpcklqdq domain with
 ; vpand and there is nothing more you can do to match vmaxpd.
-; CHECK: vpunpcklqdq
-; CHECK: vpand
+; CHECK: vmovlhps
+; CHECK: vandps
 ; CHECK: vmaxpd
 ; CHECK: ret
 define <2 x double> @ExeDepsFix_broadcastsd_inreg(<2 x double> %arg, <2 x double> %arg2, i64 %broadcastvalue) {
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v8.ll
index 8faa3f032fe..f959fea472f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -639,7 +639,7 @@ define <8 x i16> @shuffle_v8i16_032dXXXX(<8 x i16> %a, <8 x i16> %b) {
 define <8 x i16> @shuffle_v8i16_XXXdXXXX(<8 x i16> %a, <8 x i16> %b) {
 ; ALL-LABEL: @shuffle_v8i16_XXXdXXXX
 ; ALL: # BB#0:
-; ALL-NEXT: pshufd {{.*}} # xmm0 = xmm1[0,2,2,3]
+; ALL-NEXT: pshufd {{.*}} # xmm0 = xmm1[2,2,3,3]
 ; ALL-NEXT: retq
   %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x i16> %shuffle
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index cd79a38ca4a..affa933768b 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-unknown"
 define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @shuffle_v4i64_0001
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
@@ -18,7 +18,7 @@ define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
@@ -41,7 +41,7 @@ define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vblendpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
@@ -52,7 +52,7 @@ define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @shuffle_v4i64_1000
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
@@ -63,8 +63,8 @@ define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @shuffle_v4i64_2200
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
@@ -76,7 +76,7 @@ define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
-; AVX1-NEXT: vpunpckhqdq {{.*}} # xmm1 = xmm1[1,1]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,2,3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
@@ -281,7 +281,7 @@ define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @shuffle_v4i64_0124
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm1 = xmm2[0],xmm1[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -292,7 +292,7 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @shuffle_v4i64_0142
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm2 = xmm2[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm2[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -304,7 +304,7 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -316,7 +316,7 @@ define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -336,7 +336,7 @@ define <4 x i64> @shuffle_v4i64_0451(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm1[2,3,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm2 = xmm2[0],xmm0[1]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -356,7 +356,7 @@ define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm0[2,3,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
-; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -368,8 +368,7 @@ define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @stress_test1
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpunpckhqdq {{.*}} # xmm0 = xmm0[1,1]
-; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,2,3]
 ; AVX1-NEXT: vblendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0

