| field | value | date |
|---|---|---|
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
| commit | d633e290c8b5c831d09b3ff3ae72e4ae79a96171 (patch) | |
| tree | 0545cc2c63f4434ca937a0ebd922782906992720 /llvm/test/CodeGen/X86/known-signbits-vector.ll | |
| parent | ae34ae12ef6bff8d750ed3aca922687227c2198e (diff) | |
| download | bcm5719-llvm-d633e290c8b5c831d09b3ff3ae72e4ae79a96171.tar.gz bcm5719-llvm-d633e290c8b5c831d09b3ff3ae72e4ae79a96171.zip | |
[X86] getTargetConstantBitsFromNode - add support for rearranging constant bits via shuffles
This exposed an issue: recursive calls to getTargetConstantBitsFromNode don't yet handle changes to EltSizeInBits.
llvm-svn: 343384
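To illustrate the idea named in the title, recovering a target constant's per-element bits even when the constant reaches the node through a shuffle, here is a minimal standalone C++ sketch. It is not the LLVM implementation: getTargetConstantBitsFromNode works on SDNodes and APInts inside the X86 backend, whereas the types and the shuffleConstantBits helper below are hypothetical simplifications, and the sketch deliberately ignores the EltSizeInBits mismatch the commit message flags as still unhandled in recursive calls.

```cpp
// Standalone sketch (NOT the LLVM API): rearrange known per-element constant
// bits through a shuffle mask. All names here are illustrative only.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

// Per-element constant value of a vector; empty optional = unknown/undef lane.
using ConstantBits = std::vector<std::optional<uint64_t>>;

// Build the constant bits of a shuffle result: each mask entry selects a
// source element, negative entries model undef lanes.
static ConstantBits shuffleConstantBits(const ConstantBits &SrcBits,
                                        const std::vector<int> &Mask) {
  ConstantBits Result;
  Result.reserve(Mask.size());
  for (int M : Mask) {
    if (M < 0 || static_cast<std::size_t>(M) >= SrcBits.size())
      Result.push_back(std::nullopt); // undef/out-of-range lane stays unknown
    else
      Result.push_back(SrcBits[M]);   // lane copies the selected source element
  }
  return Result;
}

int main() {
  // Source constant lanes <33, 0, 63, 0>, as in the test below; the mask
  // [2,3,0,1] loosely mirrors the vpshufd xmm3[2,3,0,1] in the old codegen.
  ConstantBits Src = {33, 0, 63, 0};
  std::vector<int> Mask = {2, 3, 0, 1};
  ConstantBits Shuffled = shuffleConstantBits(Src, Mask);
  for (const auto &Lane : Shuffled) {
    if (Lane)
      std::printf("%llu ", static_cast<unsigned long long>(*Lane));
    else
      std::printf("undef ");
  }
  std::printf("\n"); // prints: 63 0 33 0
  return 0;
}
```

Because every lane of the shuffled constant is still known, a consumer such as a per-lane shift can treat the shift amounts as immediates rather than as an opaque variable vector, which is the effect visible in the test diff below.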
Diffstat (limited to 'llvm/test/CodeGen/X86/known-signbits-vector.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/known-signbits-vector.ll | 34 |
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 1e48f8683c3..679e068b965 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -381,26 +381,24 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
 ; X32-NEXT: movl %esp, %ebp
 ; X32-NEXT: andl $-16, %esp
 ; X32-NEXT: subl $16, %esp
-; X32-NEXT: vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
-; X32-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X32-NEXT: vpsrlq %xmm3, %xmm4, %xmm5
-; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
-; X32-NEXT: vpsrlq %xmm6, %xmm4, %xmm4
-; X32-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X32-NEXT: vextractf128 $1, %ymm2, %xmm5
-; X32-NEXT: vpsrlq %xmm6, %xmm5, %xmm7
-; X32-NEXT: vpsrlq %xmm3, %xmm5, %xmm5
-; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
-; X32-NEXT: vpsrlq %xmm6, %xmm2, %xmm6
-; X32-NEXT: vpsrlq %xmm3, %xmm2, %xmm2
-; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
 ; X32-NEXT: vpmovsxdq 16(%ebp), %xmm3
-; X32-NEXT: vpxor %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpsubq %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; X32-NEXT: vpsubq %xmm4, %xmm2, %xmm2
 ; X32-NEXT: vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; X32-NEXT: vmovdqa {{.*#+}} xmm5 = [0,2147483648,0,2147483648]
+; X32-NEXT: vpsrlq $63, %xmm5, %xmm6
+; X32-NEXT: vpsrlq $33, %xmm5, %xmm5
+; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; X32-NEXT: vextractf128 $1, %ymm2, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm6, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm6, %xmm6
+; X32-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsubq %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm2, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm2, %xmm2
+; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; X32-NEXT: vpsubq %xmm5, %xmm2, %xmm2
+; X32-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
 ; X32-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; X32-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; X32-NEXT: vextractf128 $1, %ymm0, %xmm5
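As the diff shows, the X32 codegen for signbits_ashr_sext_select_shuffle_sitofp no longer materializes the shift-amount constant [33,0,63,0] and splits it with vpshufd to feed variable vpsrlq shifts; once the constant bits are recovered through the shuffle, the shifts fold to the immediate forms vpsrlq $33 and vpsrlq $63, a net saving of two instructions (16 insertions against 18 deletions).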

