| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
| commit | d633e290c8b5c831d09b3ff3ae72e4ae79a96171 (patch) | |
| tree | 0545cc2c63f4434ca937a0ebd922782906992720 /llvm/test/CodeGen/X86/packss.ll | |
| parent | ae34ae12ef6bff8d750ed3aca922687227c2198e (diff) | |
[X86] getTargetConstantBitsFromNode - add support for rearranging constant bits via shuffles
This exposed an issue: recursive calls to getTargetConstantBitsFromNode don't yet handle changes to EltSizeInBits.
llvm-svn: 343384
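
The fold itself is simple to picture: when a shuffle's input is a vector of known constants, the result's per-element constant bits are just the input bits permuted by the shuffle mask, with -1 (undef) or out-of-range mask entries producing undef lanes. Below is a minimal standalone sketch of that idea; the helper name `shuffleConstantBits` and the sample values are illustrative only, not the actual getTargetConstantBitsFromNode code.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical illustration: given the per-element constant bits of a
// shuffle's source vector and the shuffle mask, produce the constant bits of
// the shuffle result. A mask entry of -1 models an undef lane.
static std::vector<std::optional<uint64_t>>
shuffleConstantBits(const std::vector<std::optional<uint64_t>> &SrcBits,
                    const std::vector<int> &Mask) {
  std::vector<std::optional<uint64_t>> Result(Mask.size());
  for (size_t I = 0; I != Mask.size(); ++I) {
    int M = Mask[I];
    if (M < 0 || static_cast<size_t>(M) >= SrcBits.size())
      Result[I] = std::nullopt;   // undef lane stays undef
    else
      Result[I] = SrcBits[M];     // lane takes its source lane's constant
  }
  return Result;
}

int main() {
  // A shuffle of a constant vector is still a constant vector, so a later
  // combine can treat the shift amount as the plain immediate 63.
  std::vector<std::optional<uint64_t>> Src = {63, 0, 63, 0};
  std::vector<int> Mask = {0, 1};
  auto Out = shuffleConstantBits(Src, Mask);
  return Out[0].value_or(0) == 63 ? 0 : 1;
}
```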
Diffstat (limited to 'llvm/test/CodeGen/X86/packss.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/packss.ll | 40 |
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 88257b0ac29..76dd87151e8 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -180,31 +180,21 @@ define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
 ;
 ; X86-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    movl $63, %eax
-; X86-AVX1-NEXT:    vmovd %eax, %xmm1
-; X86-AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; X86-AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; X86-AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X86-AVX1-NEXT:    vpsrlq %xmm1, %xmm4, %xmm5
-; X86-AVX1-NEXT:    vpsrlq %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpsrlq %xmm3, %xmm2, %xmm3
-; X86-AVX1-NEXT:    vpsrlq %xmm1, %xmm2, %xmm1
-; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; X86-AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vpsllq $63, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X86-AVX1-NEXT:    vpsllq $63, %xmm2, %xmm3
+; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT:    vpsrlq $63, %xmm3, %xmm3
+; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,0,0,0,0,0,32768]
+; X86-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
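
What the new sequence is doing: with the shift amount now visible as the constant 63 (and 0 in the other lane), the variable-shift expansion collapses to immediate vpsllq/vpsrlq, and the xmm3 constant [1,0,0,0,0,0,0,32768] (i.e. {1, 0x8000000000000000} read as two i64 lanes) is the sign mask for the usual "arithmetic shift from logical shift" identity, ashr(x, n) == (lshr(x, n) ^ m) - m with m = 1 << (63 - n). The snippet below is a quick scalar sanity check of that identity; it is illustrative only, not code from the patch.

```cpp
#include <cassert>
#include <cstdint>

// Scalar check of the sign-extension identity used by the lowering:
//   ashr(x, n) == (lshr(x, n) ^ m) - m,  where m = 1 << (63 - n)
// Assumes two's-complement wrapping on the uint64_t -> int64_t cast and an
// arithmetic signed >> in the reference expression (true on mainstream
// compilers).
static int64_t ashrViaLshr(uint64_t X, unsigned N) {
  uint64_t M = 1ULL << (63 - N);
  return static_cast<int64_t>(((X >> N) ^ M) - M);
}

int main() {
  const uint64_t Vals[] = {0u, 42u, 0x7FFFFFFFFFFFFFFFULL,
                           0x8000000000000000ULL, 0xFFFFFFFFFFFFFFFFULL};
  for (uint64_t V : Vals)
    for (unsigned N : {0u, 1u, 31u, 63u})
      assert(ashrViaLshr(V, N) == static_cast<int64_t>(V) >> N);
  return 0;
}
```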

