summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2017-12-15 20:57:18 +0000
committerCraig Topper <craig.topper@intel.com>2017-12-15 20:57:18 +0000
commitf08ab74ae3a2e64b475d67ec26413b3712f438ac (patch)
tree7cc1667d0968209e380b0eee1c2ecc6bdc5e3eb1
parentcf29eb8c22f66f03d460c9a6713d15b626a1bc81 (diff)
downloadbcm5719-llvm-f08ab74ae3a2e64b475d67ec26413b3712f438ac.tar.gz
bcm5719-llvm-f08ab74ae3a2e64b475d67ec26413b3712f438ac.zip
[X86] Remove unnecessary TODO.
When I wrote it I thought we were missing a potential optimization for KNL. But investigating further shows that for KNL we still do the optimal thing by widening to v4f32 and then using special isel patterns to widen again to a zmm register. llvm-svn: 320862
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp1
1 files changed, 0 insertions, 1 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8468e4ed013..0429f65e73d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1566,7 +1566,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
if (Subtarget.hasDQI()) {
- // TODO: these shouldn't require VLX. We can widen to 512-bit with AVX512F.
// Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
// v2f32 UINT_TO_FP is already custom under SSE2.
setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
OpenPOWER on IntegriCloud