author     Simon Pilgrim <llvm-dev@redking.me.uk>   2016-11-11 10:47:24 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2016-11-11 10:47:24 +0000
commit     065222781483360735e99858312558bd230d9834 (patch)
tree       999aa586a6ad80540e0147a4b9d8cd7d0e3ce20e /llvm
parent     da1a43e8616ca68bfe1cbeff15e95a094227c535 (diff)
[SelectionDAG] Add support for vector demandedelts in UDIV opcodes
llvm-svn: 286576
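
The patch threads the existing DemandedElts mask through the UDIV case of computeKnownBits, so only the vector lanes a caller actually demands feed the known-bits analysis of both operands. The per-lane leading-zero reasoning the patched hunk below performs can be sketched as a standalone program; the Known struct and udivLeadingZeros helper here are illustrative stand-ins, not LLVM's KnownZero/KnownOne APInt interface.

#include <algorithm>
#include <bit>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for one lane's known-zero/known-one bit masks.
struct Known {
  uint32_t Zero; // bits known to be 0
  uint32_t One;  // bits known to be 1
};

// Mirrors the patched UDIV hunk: treat the divide as a logical right shift
// by the power of two the divisor is known to be at least.
unsigned udivLeadingZeros(Known Num, Known Den) {
  const unsigned BitWidth = 32;
  unsigned LeadZ = std::countl_one(Num.Zero);                 // known leading zeros of the numerator
  unsigned RHSUnknownLeadingOnes = std::countl_zero(Den.One); // bits above the divisor's highest known-one bit
  if (RHSUnknownLeadingOnes != BitWidth)
    LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
  return LeadZ;
}

int main() {
  Known Num{~0x7FFFu, 0}; // numerator masked to 15 bits: top 17 bits known zero
  Known Den{0, 1u << 4};  // divisor has bit 4 known set, so it is at least 16
  std::cout << udivLeadingZeros(Num, Den) << " known leading zero bits\n"; // prints 21
  return 0;
}
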
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp    6
-rw-r--r--  llvm/test/CodeGen/X86/known-bits-vector.ll       52
2 files changed, 6 insertions, 52 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 93ca120e3cc..48aa1784a9f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2192,10 +2192,12 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
- computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
+ Depth + 1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
- computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
+ computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
+ Depth + 1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
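
With demanded elements now reaching the UDIV operands, the test diff below collapses completely: the preceding and leaves lanes 0 and 3 with at most 15 significant bits, the shuffle that follows demands only those lanes, an unsigned divide never yields a quotient larger than its numerator, and the trailing logical shift right by 22 therefore clears every demanded lane, so both the X32 and X64 checks reduce to a single vxorps. A tiny standalone check of that lane arithmetic (a hypothetical illustration, not part of the test file):

#include <cassert>
#include <cstdint>

int main() {
  // Every value a demanded lane can hold after the and with 32767.
  for (uint32_t lane = 0; lane <= 0x7FFF; ++lane) {
    // A udiv by any nonzero divisor cannot make the lane larger, so
    // shifting right by 22 bits always leaves zero.
    assert((lane >> 22) == 0);
  }
  return 0;
}
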
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 0b3758310fa..a1efb9b176e 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -242,60 +242,12 @@ define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_udiv_shuffle_lshr:
; X32: # BB#0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpextrd $1, %xmm1, %ecx
-; X32-NEXT: vpextrd $1, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: vmovd %xmm1, %esi
-; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %esi
-; X32-NEXT: vmovd %eax, %xmm2
-; X32-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; X32-NEXT: vpextrd $2, %xmm1, %ecx
-; X32-NEXT: vpextrd $2, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; X32-NEXT: vpextrd $3, %xmm1, %ecx
-; X32-NEXT: vpextrd $3, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT: vpsrld $22, %xmm0, %xmm0
-; X32-NEXT: popl %esi
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: knownbits_mask_udiv_shuffle_lshr:
; X64: # BB#0:
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpextrd $1, %xmm1, %ecx
-; X64-NEXT: vpextrd $1, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: vmovd %xmm1, %esi
-; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %esi
-; X64-NEXT: vmovd %eax, %xmm2
-; X64-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; X64-NEXT: vpextrd $2, %xmm1, %ecx
-; X64-NEXT: vpextrd $2, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; X64-NEXT: vpextrd $3, %xmm1, %ecx
-; X64-NEXT: vpextrd $3, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT: vpsrld $22, %xmm0, %xmm0
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
%2 = udiv <4 x i32> %1, %a1