| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-10-19 08:57:37 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-10-19 08:57:37 +0000 |
| commit | b2ca2505ccc9877657239e54ba274b4811a02e0f (patch) | |
| tree | fe323514d78021a391c8e6001135e74843d3f2f8 /llvm/test/CodeGen/X86 | |
| parent | 99eeab7ff3aca9a2e0827349b5ce2e7c71a69373 (diff) | |
| download | bcm5719-llvm-b2ca2505ccc9877657239e54ba274b4811a02e0f.tar.gz, bcm5719-llvm-b2ca2505ccc9877657239e54ba274b4811a02e0f.zip | |
[DAGCombine] Generalize distributeTruncateThroughAnd to work with any non-opaque constant or constant vector
llvm-svn: 284574
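The combine distributes a truncate through a mask: `trunc(and(x, c))` is rewritten as `and(trunc(x), trunc(c))`, which is always bitwise-equivalent since truncation only drops high bits, and the truncated constant folds at compile time. With this patch the rewrite applies to any non-opaque constant or constant-vector mask rather than the narrower class of constants it handled before. Below is a minimal LLVM IR sketch of the kind of pattern affected; the function name and mask values are illustrative, not copied from the committed tests:

```llvm
; Illustrative sketch -- function name and mask values are hypothetical,
; not taken from the tests changed below. The shift amount %y is masked
; in the wide <4 x i64> type and then truncated; the generalized combine
; lets the AND happen after the truncate, on <4 x i32> instead:
;
;   trunc(and(y, C)) -> and(trunc(y), trunc(C))
define <4 x i32> @shl_trunc_and_sketch(<4 x i32> %x, <4 x i64> %y) {
  %masked = and <4 x i64> %y, <i64 15, i64 31, i64 7, i64 3>
  %amt = trunc <4 x i64> %masked to <4 x i32>
  %res = shl <4 x i32> %x, %amt
  ret <4 x i32> %res
}
```

Since the mask here is a non-splat constant vector, the old combine would have left the AND in the wide type; the generalized one moves it to the narrow type, as the test diffs below show.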
Diffstat (limited to 'llvm/test/CodeGen/X86')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-shl.ll | 5 |
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-sra.ll | 5 |
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-srl.ll | 5 |
3 files changed, 6 insertions, 9 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll
index 7d9706075dc..ff5d4f013a4 100644
--- a/llvm/test/CodeGen/X86/combine-shl.ll
+++ b/llvm/test/CodeGen/X86/combine-shl.ll
@@ -98,11 +98,10 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
 define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_shl_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -111,9 +110,9 @@ define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_shl_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index 570eb7ef56e..bfa5b9e98b1 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -161,11 +161,10 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_ashr_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: movdqa %xmm1, %xmm2
 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT: movdqa %xmm0, %xmm3
@@ -187,9 +186,9 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/combine-srl.ll b/llvm/test/CodeGen/X86/combine-srl.ll
index 8dfdb911d76..2bbe779e38f 100644
--- a/llvm/test/CodeGen/X86/combine-srl.ll
+++ b/llvm/test/CodeGen/X86/combine-srl.ll
@@ -507,11 +507,10 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
 define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_lshr_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: movdqa %xmm1, %xmm2
 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT: movdqa %xmm0, %xmm3
@@ -533,9 +532,9 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
```
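All three tests change the same way: the two 64-bit-element `pand`s on the wide halves (SSE), or the single 256-bit `vpand` (AVX), that masked the shift amount before the truncating shuffle sequence are replaced by one 128-bit `pand`/`vpand` applied after it.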

