diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-05-13 22:10:58 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-05-13 22:10:58 +0000 |
commit | 754c1618ec40a14a572102c493a32968a86600d8 (patch) | |
tree | 4fd2124a1b78149cda5504e5e9690c3c52698617 /llvm/test/CodeGen/X86/known-signbits-vector.ll | |
parent | d891d89ce8cb7c12dad4dec4b3a125d2c6e6da89 (diff) | |
download | bcm5719-llvm-754c1618ec40a14a572102c493a32968a86600d8.tar.gz bcm5719-llvm-754c1618ec40a14a572102c493a32968a86600d8.zip |
[SelectionDAG] Added support for EXTRACT_SUBVECTOR/CONCAT_VECTORS demandedelts in ComputeNumSignBits
llvm-svn: 302997
Diffstat (limited to 'llvm/test/CodeGen/X86/known-signbits-vector.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/known-signbits-vector.ll | 27 |
1 file changed, 4 insertions, 23 deletions
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll index c8359097f65..d526b5dd786 100644 --- a/llvm/test/CodeGen/X86/known-signbits-vector.ll +++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll @@ -173,10 +173,6 @@ define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind { ; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp: ; X32: # BB#0: -; X32-NEXT: pushl %ebp -; X32-NEXT: movl %esp, %ebp -; X32-NEXT: andl $-8, %esp -; X32-NEXT: subl $32, %esp ; X32-NEXT: vpsrad $16, %xmm0, %xmm1 ; X32-NEXT: vpsrlq $16, %xmm0, %xmm0 ; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] @@ -187,18 +183,8 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 ; X32-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 ; X32-NEXT: vpxor %xmm2, %xmm0, %xmm0 ; X32-NEXT: vpsubq %xmm2, %xmm0, %xmm0 -; X32-NEXT: vmovq {{.*#+}} xmm1 = xmm0[0],zero -; X32-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp) -; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp) -; X32-NEXT: fildll {{[0-9]+}}(%esp) -; X32-NEXT: fstpl {{[0-9]+}}(%esp) -; X32-NEXT: fildll {{[0-9]+}}(%esp) -; X32-NEXT: fstpl (%esp) -; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; X32-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0] -; X32-NEXT: movl %ebp, %esp -; X32-NEXT: popl %ebp +; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; X32-NEXT: vcvtdq2pd %xmm0, %xmm0 ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; @@ -207,14 +193,9 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 ; X64-NEXT: vpsrad $16, %xmm0, %xmm1 ; X64-NEXT: vpsrlq $16, %xmm0, %xmm0 ; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] -; X64-NEXT: vpsrad $16, %xmm0, %xmm1 ; X64-NEXT: vpsrlq $16, %xmm0, %xmm0 -; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] -; X64-NEXT: vpextrq $1, %xmm0, %rax -; X64-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm1 -; X64-NEXT: vmovq %xmm0, %rax -; X64-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm0 -; X64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; X64-NEXT: vcvtdq2pd %xmm0, %xmm0 ; X64-NEXT: retq %1 = ashr <2 x i64> %a0, <i64 16, i64 16> %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef> |