diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-18 22:21:22 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-18 22:21:22 +0000 |
| commit | e40900dddd84ac77bd626564e0aec0d195c365b1 (patch) | |
| tree | 490dd81cc8007e30d5026fd78d0e3f051d22b891 /llvm/test/CodeGen/X86/known-bits-vector.ll | |
| parent | 9ac82603d560295112247de8f5e1de09a34822e8 (diff) | |
| download | bcm5719-llvm-e40900dddd84ac77bd626564e0aec0d195c365b1.tar.gz bcm5719-llvm-e40900dddd84ac77bd626564e0aec0d195c365b1.zip | |
[SelectionDAG] Add knownbits support for CONCAT_VECTORS opcode
llvm-svn: 287387
Diffstat (limited to 'llvm/test/CodeGen/X86/known-bits-vector.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/known-bits-vector.ll | 14 |
1 file changed, 0 insertions, 14 deletions
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll index a25f64443e6..983b571ad2d 100644 --- a/llvm/test/CodeGen/X86/known-bits-vector.ll +++ b/llvm/test/CodeGen/X86/known-bits-vector.ll @@ -331,15 +331,8 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n ; X32-NEXT: vpand {{\.LCPI.*}}, %xmm1, %xmm1 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,3] -; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2 -; X32-NEXT: vandps {{\.LCPI.*}}, %ymm2, %ymm2 -; X32-NEXT: vcvtdq2ps %ymm2, %ymm2 -; X32-NEXT: vpsrld $16, %xmm0, %xmm0 -; X32-NEXT: vpsrld $16, %xmm1, %xmm1 ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X32-NEXT: vcvtdq2ps %ymm0, %ymm0 -; X32-NEXT: vmulps {{\.LCPI.*}}, %ymm0, %ymm0 -; X32-NEXT: vaddps %ymm2, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: knownbits_mask_concat_uitofp: @@ -348,15 +341,8 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) n ; X64-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,3] -; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2 -; X64-NEXT: vandps {{.*}}(%rip), %ymm2, %ymm2 -; X64-NEXT: vcvtdq2ps %ymm2, %ymm2 -; X64-NEXT: vpsrld $16, %xmm0, %xmm0 -; X64-NEXT: vpsrld $16, %xmm1, %xmm1 ; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; X64-NEXT: vcvtdq2ps %ymm0, %ymm0 -; X64-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 -; X64-NEXT: vaddps %ymm2, %ymm0, %ymm0 ; X64-NEXT: retq %1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 131071, i32 -1> %2 = and <4 x i32> %a1, <i32 -1, i32 131071, i32 -1, i32 131071> |

