| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-09-29 17:01:55 +0000 |
| commit | d633e290c8b5c831d09b3ff3ae72e4ae79a96171 | |
| tree | 0545cc2c63f4434ca937a0ebd922782906992720 /llvm/test/CodeGen/X86/vector-shift-lshr-256.ll | |
| parent | ae34ae12ef6bff8d750ed3aca922687227c2198e | |
[X86] getTargetConstantBitsFromNode - add support for rearranging constant bits via shuffles
This exposed an issue: recursive calls to getTargetConstantBitsFromNode don't yet handle changes to EltSizeInBits.
llvm-svn: 343384
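
The combine described above lets constant data be tracked through shuffle nodes: once the per-element constant bits of a shuffle's source are known, producing the shuffled constant is just a permutation of those elements by the shuffle mask. The snippet below is a minimal, self-contained C++ sketch of that idea; rearrangeConstantBits is a hypothetical helper (not LLVM's getTargetConstantBitsFromNode), it handles only a single-operand shuffle, elements are modelled as bare 64-bit values, and -1 marks an undef mask lane.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical sketch of rearranging known constant bits via a shuffle
// mask: given the source elements' constant values and a shuffle mask,
// return the constant values of the shuffled result, or nullopt if the
// mask is malformed. Illustrative only, not LLVM's API.
static std::optional<std::vector<uint64_t>>
rearrangeConstantBits(const std::vector<uint64_t> &SrcElts,
                      const std::vector<int> &Mask) {
  std::vector<uint64_t> Result;
  Result.reserve(Mask.size());
  for (int M : Mask) {
    if (M < 0) {           // undef lane: any value works, use 0
      Result.push_back(0);
      continue;
    }
    if (static_cast<size_t>(M) >= SrcElts.size())
      return std::nullopt; // out-of-range index: bail out
    Result.push_back(SrcElts[static_cast<size_t>(M)]);
  }
  return Result;
}

int main() {
  // The old X32-AVX1 codegen below loads the v4i32 constant [31,0,62,0]
  // and vpshufd-swaps it with mask <2,3,0,1>; permuting the known
  // constant instead yields [62,0,31,0] without any runtime shuffle.
  std::vector<uint64_t> Src = {31, 0, 62, 0};
  std::vector<int> Mask = {2, 3, 0, 1};
  auto Res = rearrangeConstantBits(Src, Mask);
  return (Res && (*Res)[0] == 62 && (*Res)[2] == 31) ? 0 : 1;
}
```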
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shift-lshr-256.ll')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-lshr-256.ll | 16 |

1 file changed, 6 insertions, 10 deletions
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 9f54d4e5a88..3212c78e5a7 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -867,17 +867,13 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1:       # %bb.0:
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT:    vpsrlq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT:    vpsrlq $31, %xmm1, %xmm1
 ; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
-; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT:    vpsrlq $7, %xmm0, %xmm2
+; X32-AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
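
For the X32-AVX1 output, the payoff is that the shift amounts become immediates. On 32-bit x86 the <4 x i64> shift amounts <1, 7, 31, 62> sit in the constant pool as the v4i32 vectors [1,0,7,0] and [31,0,62,0]; reading adjacent i32 pairs as little-endian i64 elements recovers 1, 7, 31 and 62, which appears to be what lets the variable vpsrlq-by-register sequence fold into vpsrlq $1/$7/$31/$62 above. The snippet below only illustrates that re-interpretation arithmetic; joinHalves is a made-up helper, not LLVM code.

```cpp
#include <cassert>
#include <cstdint>

// Combine two little-endian i32 halves into the i64 value they encode.
static uint64_t joinHalves(uint32_t Lo, uint32_t Hi) {
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}

int main() {
  // v4i32 constant-pool data for the two 128-bit halves of the shift.
  uint32_t HiLane[4] = {31, 0, 62, 0};
  uint32_t LoLane[4] = {1, 0, 7, 0};
  assert(joinHalves(HiLane[0], HiLane[1]) == 31);
  assert(joinHalves(HiLane[2], HiLane[3]) == 62);
  assert(joinHalves(LoLane[0], LoLane[1]) == 1);
  assert(joinHalves(LoLane[2], LoLane[3]) == 7);
  return 0;
}
```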

