summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author:    Craig Topper <craig.topper@intel.com>  2018-11-14 18:16:21 +0000
committer: Craig Topper <craig.topper@intel.com>  2018-11-14 18:16:21 +0000
commit6c94264b1f3342376ec3d46e14634d39cf158a49 (patch)
treeb6b976865d907bf4a439c8a8a35e1264608e90f8
parentf0b757e2313c037e69df97d8c32c3bd0fdad3311 (diff)
downloadbcm5719-llvm-6c94264b1f3342376ec3d46e14634d39cf158a49.tar.gz
bcm5719-llvm-6c94264b1f3342376ec3d46e14634d39cf158a49.zip
[X86] Allow pmulh to be formed from narrow vXi16 vectors under -x86-experimental-vector-widening-legalization
Narrower vectors will be widened to 128 bits without changing the element size, and generic type legalization can already handle widening mulhu/mulhs.

Differential Revision: https://reviews.llvm.org/D54513

llvm-svn: 346879
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp |  6
-rw-r--r--  llvm/test/CodeGen/X86/pmulh.ll          | 22
2 files changed, 6 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d1421373517..9f8359322bd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37552,9 +37552,11 @@ static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
if (!Subtarget.hasSSE2())
return SDValue();
- // Only handle vXi16 types that are at least 128-bits.
+ // Only handle vXi16 types that are at least 128-bits unless they will be
+ // widened.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
- VT.getVectorNumElements() < 8)
+ (!ExperimentalVectorWideningLegalization &&
+ VT.getVectorNumElements() < 8))
return SDValue();
// Input type should be vXi32.
diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll
index f86b71ff8cb..7068650110e 100644
--- a/llvm/test/CodeGen/X86/pmulh.ll
+++ b/llvm/test/CodeGen/X86/pmulh.ll
@@ -24,11 +24,6 @@ define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-WIDEN-LABEL: mulhuw_v4i16:
; SSE2-WIDEN: # %bb.0:
; SSE2-WIDEN-NEXT: pmulhuw %xmm1, %xmm0
-; SSE2-WIDEN-NEXT: pxor %xmm1, %xmm1
-; SSE2-WIDEN-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-WIDEN-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-WIDEN-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-WIDEN-NEXT: retq
;
; SSE41-PROMOTE-LABEL: mulhuw_v4i16:
@@ -42,11 +37,7 @@ define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
;
; SSE41-WIDEN-LABEL: mulhuw_v4i16:
; SSE41-WIDEN: # %bb.0:
-; SSE41-WIDEN-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-WIDEN-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-WIDEN-NEXT: pmulld %xmm2, %xmm0
-; SSE41-WIDEN-NEXT: psrld $16, %xmm0
-; SSE41-WIDEN-NEXT: packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT: pmulhuw %xmm1, %xmm0
; SSE41-WIDEN-NEXT: retq
;
; AVX-LABEL: mulhuw_v4i16:
@@ -82,11 +73,6 @@ define <4 x i16> @mulhw_v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-WIDEN-LABEL: mulhw_v4i16:
; SSE2-WIDEN: # %bb.0:
; SSE2-WIDEN-NEXT: pmulhw %xmm1, %xmm0
-; SSE2-WIDEN-NEXT: pxor %xmm1, %xmm1
-; SSE2-WIDEN-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-WIDEN-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-WIDEN-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-WIDEN-NEXT: retq
;
; SSE41-PROMOTE-LABEL: mulhw_v4i16:
@@ -101,11 +87,7 @@ define <4 x i16> @mulhw_v4i16(<4 x i16> %a, <4 x i16> %b) {
;
; SSE41-WIDEN-LABEL: mulhw_v4i16:
; SSE41-WIDEN: # %bb.0:
-; SSE41-WIDEN-NEXT: pmovsxwd %xmm0, %xmm2
-; SSE41-WIDEN-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE41-WIDEN-NEXT: pmulld %xmm2, %xmm0
-; SSE41-WIDEN-NEXT: psrld $16, %xmm0
-; SSE41-WIDEN-NEXT: packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT: pmulhw %xmm1, %xmm0
; SSE41-WIDEN-NEXT: retq
;
; AVX-LABEL: mulhw_v4i16:
OpenPOWER on IntegriCloud