| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-10 22:41:49 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-10 22:41:49 +0000 |
| commit | 38f0045cb03431fcf3fe59a5942ad74cc6b0ee27 (patch) | |
| tree | a8bea1b1b9afe092dd6173e04933ed26eccab323 | /llvm/test/CodeGen/X86/known-bits-vector.ll |
| parent | ea27ef69699ef54034a300230a0a038de04853df (diff) | |
[SelectionDAG] Add support for vector demandedelts in ADD/SUB opcodes
llvm-svn: 286516
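The test updates below show the effect: once demanded elements are tracked through the vector add/sub, the known-zero high bits of the shuffled lanes are enough to fold the trailing logical shift to zero. As an illustration only (this scalar function is not part of the commit), the same known-bits argument for one demanded lane of the first test looks like this:

```llvm
; Both operands of the add are masked to 15 bits in the demanded lanes,
; so the sum fits in 16 bits and a logical shift right by 17 is always zero.
define i32 @knownbits_add_lshr_lane(i32 %x, i32 %y) {
  %mx = and i32 %x, 32767   ; at most 0x7FFF
  %my = and i32 %y, 32767   ; at most 0x7FFF
  %s  = add i32 %mx, %my    ; at most 0xFFFE, bits 16..31 known zero
  %r  = lshr i32 %s, 17     ; every possibly-set bit is shifted out
  ret i32 %r                ; folds to ret i32 0
}
```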
Diffstat (limited to 'llvm/test/CodeGen/X86/known-bits-vector.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/known-bits-vector.ll | 26 |
1 files changed, 4 insertions, 22 deletions
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 967447cebc8..09953294461 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -207,22 +207,12 @@ define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
 define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_add_shuffle_lshr:
 ; X32: # BB#0:
-; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
-; X32-NEXT: vpand %xmm2, %xmm0, %xmm0
-; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
-; X32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT: vpsrld $17, %xmm0, %xmm0
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: knownbits_mask_add_shuffle_lshr:
 ; X64: # BB#0:
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
-; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
-; X64-NEXT: vpand %xmm2, %xmm1, %xmm1
-; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT: vpsrld $17, %xmm0, %xmm0
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT: retq
 %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
 %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
@@ -235,20 +225,12 @@ define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1)
 define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_sub_shuffle_lshr:
 ; X32: # BB#0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
-; X32-NEXT: vpsubd %xmm0, %xmm1, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT: vpsrld $22, %xmm0, %xmm0
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: knownbits_mask_sub_shuffle_lshr:
 ; X64: # BB#0:
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
-; X64-NEXT: vpsubd %xmm0, %xmm1, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT: vpsrld $22, %xmm0, %xmm0
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT: retq
 %1 = and <4 x i32> %a0, <i32 15, i32 -1, i32 -1, i32 15>
 %2 = sub <4 x i32> <i32 255, i32 255, i32 255, i32 255>, %1
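The second test folds for the same reason: in the demanded lanes the operand is masked to 4 bits, so 255 minus that value lies in [240, 255] and a logical shift right by 22 clears every remaining bit. A scalar sketch of one such lane (again illustrative, not taken from the patch):

```llvm
; 255 - (x & 15) fits in 8 bits, so bits 8..31 are known zero and
; shifting right by 22 always yields zero.
define i32 @knownbits_sub_lshr_lane(i32 %x) {
  %m = and i32 %x, 15    ; value in [0, 15]
  %s = sub i32 255, %m   ; value in [240, 255]
  %r = lshr i32 %s, 22   ; no set bits survive the shift
  ret i32 %r             ; folds to ret i32 0
}
```

In the updated assembly checks this fold shows up as a single vxorps, the usual idiom for materializing an all-zero XMM register.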