diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-05-13 21:50:18 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-05-13 21:50:18 +0000 |
commit | 78b0ce03e9ee069ffec981f46c69436363a3b473 (patch) | |
tree | d29838a3492e7b015c0e14497d375edb565b84c7 | |
parent | 7666afd042768850d470fbad11746362e78c4547 (diff) | |
download | bcm5719-llvm-78b0ce03e9ee069ffec981f46c69436363a3b473.tar.gz bcm5719-llvm-78b0ce03e9ee069ffec981f46c69436363a3b473.zip |
[X86][SSE] Test showing missing EXTRACT_SUBVECTOR/CONCAT_VECTORS demandedelts support in ComputeNumSignBits
llvm-svn: 302994
-rw-r--r-- | llvm/test/CodeGen/X86/known-signbits-vector.ll | 55 |
1 files changed, 55 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll index 60c466166b4..c8359097f65 100644 --- a/llvm/test/CodeGen/X86/known-signbits-vector.ll +++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll @@ -169,3 +169,58 @@ define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) %3 = sitofp <4 x i64> %2 to <4 x double> ret <4 x double> %3 } + +define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind { +; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp: +; X32: # BB#0: +; X32-NEXT: pushl %ebp +; X32-NEXT: movl %esp, %ebp +; X32-NEXT: andl $-8, %esp +; X32-NEXT: subl $32, %esp +; X32-NEXT: vpsrad $16, %xmm0, %xmm1 +; X32-NEXT: vpsrlq $16, %xmm0, %xmm0 +; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; X32-NEXT: vmovaps {{.*#+}} ymm1 = [16,0,16,0,16,0,16,0] +; X32-NEXT: vextractf128 $1, %ymm1, %xmm1 +; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] +; X32-NEXT: vpsrlq %xmm1, %xmm2, %xmm2 +; X32-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 +; X32-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; X32-NEXT: vpsubq %xmm2, %xmm0, %xmm0 +; X32-NEXT: vmovq {{.*#+}} xmm1 = xmm0[0],zero +; X32-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp) +; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp) +; X32-NEXT: fildll {{[0-9]+}}(%esp) +; X32-NEXT: fstpl {{[0-9]+}}(%esp) +; X32-NEXT: fildll {{[0-9]+}}(%esp) +; X32-NEXT: fstpl (%esp) +; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; X32-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0] +; X32-NEXT: movl %ebp, %esp +; X32-NEXT: popl %ebp +; X32-NEXT: vzeroupper +; X32-NEXT: retl +; +; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp: +; X64: # BB#0: +; X64-NEXT: vpsrad $16, %xmm0, %xmm1 +; X64-NEXT: vpsrlq $16, %xmm0, %xmm0 +; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; X64-NEXT: vpsrad $16, %xmm0, %xmm1 +; X64-NEXT: vpsrlq $16, %xmm0, %xmm0 +; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; X64-NEXT: vpextrq $1, %xmm0, %rax +; X64-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm1 +; X64-NEXT: vmovq %xmm0, %rax +; X64-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm0 +; X64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; X64-NEXT: retq + %1 = ashr <2 x i64> %a0, <i64 16, i64 16> + %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef> + %3 = shufflevector <4 x i64> %a1, <4 x i64> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + %4 = ashr <4 x i64> %3, <i64 16, i64 16, i64 16, i64 16> + %5 = shufflevector <4 x i64> %4, <4 x i64> undef, <2 x i32> <i32 2, i32 3> + %6 = sitofp <2 x i64> %5 to <2 x double> + ret <2 x double> %6 +}