| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-12-09 17:53:11 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-12-09 17:53:11 +0000 |
| commit | 017b7a71d82b5c154055ac6aa7997db5d2bf38ba (patch) | |
| tree | e9eeea58e2b645eb663f75172e251b19a9678cc6 /llvm/test/CodeGen/X86/known-bits-vector.ll | |
| parent | 38d8ed2b75122ed9b222012b0aec8ceb378599d8 (diff) | |
[SelectionDAG] Add knownbits support for EXTRACT_VECTOR_ELT opcodes (REAPPLIED)
Reapplied with a fix for PR31323: X86 SSE2 vXi16 multiplies for illegal types were creating CONCAT_VECTORS nodes whose vector inputs' element counts did not necessarily add up to the number of elements in the result type.
llvm-svn: 289232
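The patch itself changes SelectionDAG's knownbits computation, which is outside this diffstat-limited view. As a rough, self-contained illustration of the rule the title describes (plain C++, not the LLVM API; `extractKnownZero` and friends are hypothetical names), the sketch below models how known-zero bits could propagate through an element extraction: with a constant index the result inherits that lane's known bits, and with an unknown index only bits known zero in every lane stay known zero.

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Minimal model (not LLVM code) of a knownbits rule for EXTRACT_VECTOR_ELT.
struct KnownZero {
  uint32_t Mask; // bit set => that bit is known to be zero
};

using Vec4Known = std::array<KnownZero, 4>;

// Constant extraction index: the result inherits that lane's known bits.
KnownZero extractKnownZero(const Vec4Known &V, unsigned Idx) {
  return V[Idx];
}

// Unknown (variable) index: only bits known zero in *every* lane survive.
KnownZero extractKnownZeroAnyLane(const Vec4Known &V) {
  uint32_t Common = ~0u;
  for (const KnownZero &K : V)
    Common &= K.Mask;
  return {Common};
}

int main() {
  // Lanes 0 and 2 come from zext i16 -> i32 (top 16 bits known zero);
  // lanes 1 and 3 are fully unknown.
  Vec4Known V = {{{0xFFFF0000u}, {0}, {0xFFFF0000u}, {0}}};
  std::printf("lane 0 known-zero mask:   0x%08X\n",
              (unsigned)extractKnownZero(V, 0).Mask);
  std::printf("any-lane known-zero mask: 0x%08X\n",
              (unsigned)extractKnownZeroAnyLane(V).Mask);
}
```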
Diffstat (limited to 'llvm/test/CodeGen/X86/known-bits-vector.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/known-bits-vector.ll | 12 |
1 file changed, 2 insertions, 10 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 944b3709b1a..717c7c63c38 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -58,11 +58,7 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
 ; X32-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
 ; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_insert_uitofp:
@@ -72,11 +68,7 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
 ; X64-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
 ; X64-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
 ; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = zext i16 %a1 to i32
   %2 = zext i16 %a2 to i32
```
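What the new output relies on: the value being converted is built from zero-extended i16 elements (note the vpshufd that broadcasts lanes 0 and 2, the two inserted zext'd values), so every i32 lane fits in 31 bits and unsigned-to-float conversion coincides with signed-to-float. That is what lets the blend/shift/add uitofp expansion collapse to a single vcvtdq2ps. A minimal standalone check of that equivalence (plain C++, not LLVM code):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // For 32-bit values whose top bits are known zero (e.g. zero-extended i16),
  // uitofp and sitofp produce the same float, which justifies lowering the
  // conversion with a plain cvtdq2ps.
  const uint32_t vals[] = {0u, 1u, 0x7FFFu, 0xFFFFu};
  for (uint32_t v : vals) {
    float asUnsigned = static_cast<float>(v);                      // uitofp
    float asSigned = static_cast<float>(static_cast<int32_t>(v));  // sitofp
    assert(asUnsigned == asSigned);
  }
  return 0;
}
```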