-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp   29
-rw-r--r--   llvm/test/CodeGen/X86/vector-mul.ll        34
2 files changed, 45 insertions, 18 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 483cba3d314..846c6612239 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38534,6 +38534,33 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
+// Simplify PMULDQ and PMULUDQ operations.
+static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
+                             TargetLowering::DAGCombinerInfo &DCI) {
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                        !DCI.isBeforeLegalizeOps());
+  APInt DemandedMask(APInt::getLowBitsSet(64, 32));
+
+  // PMULQDQ/PMULUDQ only uses lower 32 bits from each vector element.
+  KnownBits LHSKnown;
+  if (TLI.SimplifyDemandedBits(LHS, DemandedMask, LHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return SDValue(N, 0);
+  }
+
+  KnownBits RHSKnown;
+  if (TLI.SimplifyDemandedBits(RHS, DemandedMask, RHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return SDValue(N, 0);
+  }
+
+  return SDValue();
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -38655,6 +38682,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
+  case X86ISD::PMULDQ:
+  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI);
   }
 
   return SDValue();
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 15861353d16..80d18a39384 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -692,17 +692,16 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_15_63:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrlq $32, %xmm1
-; X86-NEXT:    movdqa {{.*#+}} xmm2 = [4294967281,4294967295,4294967233,4294967295]
-; X86-NEXT:    pmuludq %xmm2, %xmm1
-; X86-NEXT:    movdqa %xmm2, %xmm3
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
 ; X86-NEXT:    psrlq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm0, %xmm3
-; X86-NEXT:    paddq %xmm1, %xmm3
-; X86-NEXT:    psllq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_15_63:
@@ -737,17 +736,16 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ; X86-LABEL: mul_v2i64_neg_17_65:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrlq $32, %xmm1
-; X86-NEXT:    movdqa {{.*#+}} xmm2 = [4294967279,4294967295,4294967231,4294967295]
-; X86-NEXT:    pmuludq %xmm2, %xmm1
-; X86-NEXT:    movdqa %xmm2, %xmm3
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    movdqa %xmm0, %xmm3
 ; X86-NEXT:    psrlq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm0, %xmm3
-; X86-NEXT:    paddq %xmm1, %xmm3
-; X86-NEXT:    psllq $32, %xmm3
-; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    pmuludq {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v2i64_neg_17_65:
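
A minimal LLVM IR sketch of the kind of pattern the new combine helps with (the function @pmuludq_low_bits and this exact pattern are illustrative assumptions, not part of this patch or of vector-mul.ll): both multiplicands are masked to their low 32 bits, so the i64 multiply lowers to PMULUDQ, and since PMULUDQ only reads the low 32 bits of each element, SimplifyDemandedBits can treat the explicit masking as redundant when simplifying the operands.

; Illustrative IR, not taken from the patch: the ANDs zero the upper
; 32 bits of each i64 element, so the multiply can be lowered to
; PMULUDQ; the demanded-bits combine above may then simplify the
; operands because only their low 32 bits are actually read.
define <2 x i64> @pmuludq_low_bits(<2 x i64> %a, <2 x i64> %b) nounwind {
  %la = and <2 x i64> %a, <i64 4294967295, i64 4294967295>
  %lb = and <2 x i64> %b, <i64 4294967295, i64 4294967295>
  %m  = mul <2 x i64> %la, %lb
  ret <2 x i64> %m
}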