| author | Craig Topper <craig.topper@intel.com> | 2018-04-13 06:07:18 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-04-13 06:07:18 +0000 |
| commit | 254ed028a4bd4fa81d0049d90e6ab23d704dd366 | |
| tree | d271a92f4eeb695afa59afb66b153125a49b7d36 /llvm/lib/Target | |
| parent | 7fc737a2472996609a855c2b8951b4a327d13d69 | |
[X86] Remove the pmuldq/pmuludq intrinsics and replace with native IR.
This completes the work started in r329604 and r329605 when we changed clang to no longer use the intrinsics.
We lost some InstCombine SimplifyDemandedBits optimizations through this change, as we aren't able to fold through the 'and', bitcast, and shuffle sequences very well.
llvm-svn: 329990
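For context, the unsigned pmuludq intrinsics are now expressed with native IR along these lines (a minimal sketch; the function name is hypothetical, and clang actually reaches this form via bitcasts from `<4 x i32>` operands). The `and` zeroes the upper 32 bits of each 64-bit lane so the backend can select PMULUDQ from a plain `mul`; it is exactly this `and`/bitcast/shuffle chain that the lost InstCombine folds used to see through:

```llvm
; Even-lane unsigned 32x32->64 multiply expressed as native IR.
; 4294967295 == 0xffffffff, i.e. keep only the low 32 bits of each lane.
define <2 x i64> @mul_even_u32(<2 x i64> %a, <2 x i64> %b) {
  %a.lo = and <2 x i64> %a, <i64 4294967295, i64 4294967295>
  %b.lo = and <2 x i64> %b, <i64 4294967295, i64 4294967295>
  %prod = mul <2 x i64> %a.lo, %b.lo
  ret <2 x i64> %prod
}
```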
Diffstat (limited to 'llvm/lib/Target')
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 18 |
1 file changed, 0 insertions, 18 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1870a668c98..08e4f6387b6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -20855,24 +20855,6 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   switch (IntNo) {
   default: return SDValue();    // Don't custom lower most intrinsics.
 
-  case Intrinsic::x86_sse41_pmuldq:
-  case Intrinsic::x86_avx2_pmul_dq:
-  case Intrinsic::x86_avx512_pmul_dq_512: {
-    MVT OpVT = Op.getSimpleValueType();
-    return DAG.getNode(X86ISD::PMULDQ, dl, OpVT,
-                       DAG.getBitcast(OpVT, Op.getOperand(1)),
-                       DAG.getBitcast(OpVT, Op.getOperand(2)));
-  }
-
-  case Intrinsic::x86_sse2_pmulu_dq:
-  case Intrinsic::x86_avx2_pmulu_dq:
-  case Intrinsic::x86_avx512_pmulu_dq_512: {
-    MVT OpVT = Op.getSimpleValueType();
-    return DAG.getNode(X86ISD::PMULUDQ, dl, OpVT,
-                       DAG.getBitcast(OpVT, Op.getOperand(1)),
-                       DAG.getBitcast(OpVT, Op.getOperand(2)));
-  }
-
   case Intrinsic::x86_avx2_permd:
   case Intrinsic::x86_avx2_permps:
     // Operands intentionally swapped. Mask is last operand to intrinsic,
```
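The signed pmuldq variants removed above follow the same shape, with the sign extension spelled as a shift pair instead of a mask (again a sketch with a hypothetical function name). The `shl`/`ashr` by 32 sign-extends the low 32 bits of each lane, and the backend matches the resulting `mul` to PMULDQ:

```llvm
; Even-lane signed 32x32->64 multiply expressed as native IR.
; shl then ashr by 32 sign-extends the low 32 bits of each 64-bit lane.
define <2 x i64> @mul_even_s32(<2 x i64> %a, <2 x i64> %b) {
  %a.hi = shl <2 x i64> %a, <i64 32, i64 32>
  %a.se = ashr <2 x i64> %a.hi, <i64 32, i64 32>
  %b.hi = shl <2 x i64> %b, <i64 32, i64 32>
  %b.se = ashr <2 x i64> %b.hi, <i64 32, i64 32>
  %prod = mul <2 x i64> %a.se, %b.se
  ret <2 x i64> %prod
}
```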

