| author | Craig Topper <craig.topper@intel.com> | 2017-09-04 01:13:36 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2017-09-04 01:13:36 +0000 |
| commit | 76f44015e7043682977e3795d7bbb20cafa326a3 | |
| tree | 054e9c8769fb60b433d2292be597247a05f360af | /llvm/test |
| parent | 959fc08f3a22cc5c42fcbda40f52c9e8e93a193d | |
[X86] Add a combine to recognize when we have two insert subvectors that together write the whole vector, but the starting vector isn't undef.
In this case we should replace the starting vector with undef.
llvm-svn: 312462
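
For illustration, here is a minimal sketch of the combine in SelectionDAG terms. It assumes the usual `ISD::INSERT_SUBVECTOR` operand order (base vector, inserted subvector, constant start index); the helper name and the simplified legality checks are illustrative assumptions, not the code from this commit, which handles more operand shapes.

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
#include <cassert>
using namespace llvm;

// Sketch: insert_subvector(insert_subvector(Base, SubLo, 0), SubHi, LoElts)
// writes every element of the result, so Base is dead and can be replaced
// with undef.
static SDValue replaceDeadInsertBase(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "expected insert");
  SDValue Inner = N->getOperand(0);  // candidate inner insert_subvector
  SDValue SubHi = N->getOperand(1);  // subvector written by the outer insert
  auto *IdxHi = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!IdxHi || Inner.getOpcode() != ISD::INSERT_SUBVECTOR ||
      !Inner.hasOneUse())
    return SDValue();

  SDValue Base = Inner.getOperand(0); // the starting vector
  SDValue SubLo = Inner.getOperand(1);
  auto *IdxLo = dyn_cast<ConstantSDNode>(Inner.getOperand(2));
  if (!IdxLo || Base.isUndef())       // already undef: nothing to do
    return SDValue();

  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoElts = SubLo.getValueType().getVectorNumElements();
  unsigned HiElts = SubHi.getValueType().getVectorNumElements();

  // The two inserts must tile the whole vector: [0, LoElts) and
  // [LoElts, NumElts). Only then is every element of Base overwritten.
  if (IdxLo->getZExtValue() != 0 || IdxHi->getZExtValue() != LoElts ||
      LoElts + HiElts != NumElts)
    return SDValue();

  // Rebuild the chain with an undef starting vector.
  SDLoc DL(N);
  SDValue NewInner = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                 DAG.getUNDEF(VT), SubLo, Inner.getOperand(2));
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewInner, SubHi,
                     N->getOperand(2));
}
```

With an undef base, instruction selection no longer has to preserve any lanes of the original register, which is what lets the blend instructions in the test diffs below disappear.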
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll | 1 |
| -rw-r--r-- | llvm/test/CodeGen/X86/insertelement-zero.ll | 2 |
2 files changed, 0 insertions, 3 deletions
diff --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index f2b41f7c29e..a687d30d873 100644
--- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -758,7 +758,6 @@ define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
 ; AVX: # BB#0:
 ; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
diff --git a/llvm/test/CodeGen/X86/insertelement-zero.ll b/llvm/test/CodeGen/X86/insertelement-zero.ll
index 8179758c4c0..dff7a69dc50 100644
--- a/llvm/test/CodeGen/X86/insertelement-zero.ll
+++ b/llvm/test/CodeGen/X86/insertelement-zero.ll
@@ -473,7 +473,6 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; AVX1-NEXT: xorl %eax, %eax
 ; AVX1-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
@@ -485,7 +484,6 @@
 ; AVX2-NEXT: xorl %eax, %eax
 ; AVX2-NEXT: vpinsrb $0, %eax, %xmm0, %xmm1
 ; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
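
In each deleted line, the `vblendps`/`vpblendd` was copying the untouched upper half of the original ymm register into the result; once the combine rewrites the starting vector to undef, no lanes of the original value need to be preserved and the blend becomes dead.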

