| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-29 22:03:37 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-10-29 22:03:37 +0000 |
| commit | 601ae238b79cd62bf70703306cff5c606df329b3 (patch) | |
| tree | d92df1e18bce041dd286ff964dd131f46491f3d9 /llvm/test/CodeGen/X86/known-signbits-vector.ll | |
| parent | ccbb496b56dd3a19a76bac43dc9008f31c8823d6 (diff) | |
[SelectionDAG] Add SEXT/AND/XOR/OR demanded elts support to ComputeNumSignBits
llvm-svn: 316875
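
For context, here is a minimal IR sketch of the kind of pattern the updated tests below exercise (the function name and exact operand mix are illustrative, not the verbatim test bodies, which are truncated in this diff; assume an `llc` run for x86 with AVX). With demanded-elements awareness, `ComputeNumSignBits` can recurse through the `sext`/`and`/`xor`/`or` nodes while only reasoning about the lane that feeds the `extractelement`, and the per-lane sign-bit arithmetic shows the extracted value fits in 32 bits:

```llvm
; Illustrative sketch (hypothetical function name), modeled on the tests in this diff.
define float @bitops_extract_sitofp_sketch(<2 x i64> %a0, <2 x i32> %a1) nounwind {
  ; Each lane of %ashr has at least 61 sign bits (shift amounts 61 and 60).
  %ashr = ashr <2 x i64> %a0, <i64 61, i64 60>
  ; Each lane of %sext has at least 33 sign bits (sign-extended from i32).
  %sext = sext <2 x i32> %a1 to <2 x i64>
  ; AND/OR/XOR keep at least the minimum of their operands' sign bits, so
  ; every lane of %xor still has at least 33 sign bits.
  %and = and <2 x i64> %ashr, %sext
  %or  = or  <2 x i64> %and, %sext
  %xor = xor <2 x i64> %or, %ashr
  ; Only lane 0 is demanded here; it is provably within i32 range, so the
  ; sitofp can be lowered with the 32-bit cvtsi2ssl form instead of the
  ; 64-bit cvtsi2ssq form.
  %elt = extractelement <2 x i64> %xor, i32 0
  %cvt = sitofp i64 %elt to float
  ret float %cvt
}
```

The same knowledge is what simplifies the 32-bit codegen in the checks below: the X32 path no longer needs the x87 `fildll`/`fstps` round-trip or the realigned frame, only a `vcvtsi2ssl` and a 4-byte stack slot.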
Diffstat (limited to 'llvm/test/CodeGen/X86/known-signbits-vector.ll')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/known-signbits-vector.ll | 38 |
1 file changed, 15 insertions, 23 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index f88637904a7..6ad45d1a99e 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -279,10 +279,7 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4
 define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
 ; X32: # BB#0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
+; X32-NEXT: pushl %eax
 ; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT: vpsrlq $60, %xmm2, %xmm3
 ; X32-NEXT: vpsrlq $61, %xmm2, %xmm2
@@ -292,7 +289,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
 ; X32-NEXT: vpxor %xmm2, %xmm0, %xmm0
 ; X32-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
 ; X32-NEXT: sarl $31, %eax
 ; X32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
@@ -301,12 +298,11 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X32-NEXT: vpsrlq $20, %xmm1, %xmm1
 ; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; X32-NEXT: vpand %xmm1, %xmm0, %xmm0
-; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: fildll {{[0-9]+}}(%esp)
-; X32-NEXT: fstps {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{[0-9]+}}(%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm4, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
@@ -325,7 +321,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; X64-NEXT: vmovq %xmm0, %rax
-; X64-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; X64-NEXT: vcvtsi2ssl %eax, %xmm3, %xmm0
 ; X64-NEXT: retq
 %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
 %2 = sext i32 %a2 to i64
@@ -341,10 +337,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
 ; X32: # BB#0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
+; X32-NEXT: pushl %eax
 ; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT: vpsrlq $60, %xmm2, %xmm3
 ; X32-NEXT: vpsrlq $61, %xmm2, %xmm2
@@ -358,12 +351,11 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
 ; X32-NEXT: vpand %xmm1, %xmm0, %xmm2
 ; X32-NEXT: vpor %xmm1, %xmm2, %xmm1
 ; X32-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: fildll {{[0-9]+}}(%esp)
-; X32-NEXT: fstps {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{[0-9]+}}(%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm4, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
@@ -379,7 +371,7 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
 ; X64-NEXT: vpor %xmm1, %xmm2, %xmm1
 ; X64-NEXT: vpxor %xmm0, %xmm1, %xmm0
 ; X64-NEXT: vmovq %xmm0, %rax
-; X64-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
+; X64-NEXT: vcvtsi2ssl %eax, %xmm3, %xmm0
 ; X64-NEXT: retq
 %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
 %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
```

