author     Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>   2014-05-06 17:09:03 +0000
committer  Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>   2014-05-06 17:09:03 +0000
commit     c14ccc9184fc1bf02212fb85fdce36e9be8558d5 (patch)
tree       04df0d8eeefdac20a3303a8df8c3e4943b575f99 /llvm/lib
parent     29020cc571d564976555ec0eef26b53fc0590a6b (diff)
[X86] Improve the lowering of BITCAST dag nodes from type f64 to type v2i32 (and vice versa).
Before this patch, the backend always emitted a store+load sequence to bitconvert the input operand from f64 to i64 whenever an ISD::BITCAST dag node performed a bitconvert from type MVT::f64 to type MVT::v2i32. The resulting i64 node was then used to build a v2i32 vector.

With this patch, the backend now produces a cheaper SCALAR_TO_VECTOR from MVT::f64 to MVT::v2f64. That SCALAR_TO_VECTOR is then followed by a "free" bitcast to type MVT::v4i32. The elements of the resulting v4i32 are then extracted to build a v2i32 vector (which is illegal and therefore promoted to MVT::v2i64). This is in general cheaper than emitting a stack store+load sequence to bitconvert the operand from type f64 to type i64.

llvm-svn: 208107
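For reference, the value-level effect of the f64 <-> v2i32 bitconvert discussed above can be sketched in plain C++. This is not part of the patch; the helper names are illustrative and it assumes the little-endian layout of an x86 target. The 64-bit pattern of the double is simply reinterpreted as two 32-bit lanes, which is why a SCALAR_TO_VECTOR plus a "free" vector bitcast can stand in for the old stack store+load round trip.

// Illustrative only (not from the patch): what BITCAST f64 <-> v2i32 means at
// the value level on a little-endian x86 target.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical helper: split the bit pattern of an f64 into two i32 lanes
// (lane 0 = low 32 bits, lane 1 = high 32 bits on little-endian x86).
static void f64ToV2I32(double D, uint32_t Lanes[2]) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits));          // bit-preserving reinterpretation
  Lanes[0] = static_cast<uint32_t>(Bits);
  Lanes[1] = static_cast<uint32_t>(Bits >> 32);
}

// Hypothetical helper: the reverse direction, v2i32 -> f64.
static double v2I32ToF64(const uint32_t Lanes[2]) {
  uint64_t Bits = (static_cast<uint64_t>(Lanes[1]) << 32) | Lanes[0];
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D;
}

int main() {
  uint32_t Lanes[2];
  f64ToV2I32(1.0, Lanes);
  // 1.0 is 0x3FF0000000000000, so this prints 0x00000000 0x3ff00000.
  std::printf("lanes: 0x%08x 0x%08x\n", Lanes[0], Lanes[1]);
  std::printf("round trip: %g\n", v2I32ToF64(Lanes));  // prints 1
  return 0;
}

The patch expresses the same reinterpretation directly in the DAG (SCALAR_TO_VECTOR to v2f64, a bitcast to v4i32, then element extracts), so the value need not bounce through a stack slot.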
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  |  42
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2f14d790742..380017feff6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1038,6 +1038,8 @@ void X86TargetLowering::resetOperationActions() {
     setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
     setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
+
+    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
   }

   if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14091,6 +14093,25 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
   MVT SrcVT = Op.getOperand(0).getSimpleValueType();
   MVT DstVT = Op.getSimpleValueType();
+
+  if (SrcVT == MVT::v2i32) {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    if (DstVT != MVT::f64)
+      // This conversion needs to be expanded.
+      return SDValue();
+
+    SDLoc dl(Op);
+    SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(0));
+    SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(1));
+    SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
+    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
+                       DAG.getIntPtrConstant(0));
+  }
+
   assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && "Unexpected custom BITCAST");
   assert((DstVT == MVT::i64 ||
@@ -14546,8 +14567,27 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
     return;
   }
-  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
+    return;
+  }
+  case ISD::BITCAST: {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    EVT DstVT = N->getValueType(0);
+    EVT SrcVT = N->getOperand(0)->getValueType(0);
+
+    if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
+      SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                                     MVT::v2f64, N->getOperand(0));
+      SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
+      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(0));
+      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(1));
+      SDValue Elts[] = {Elt0, Elt1};
+      Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
+    }
+  }
  }
}