summary refs log tree commit diff stats
path: root/llvm/lib/Target/PowerPC
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Target/PowerPC')
-rw-r--r-- llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 51
1 file changed, 36 insertions, 15 deletions
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2198f330bea..1df4b28b563 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2428,24 +2428,45 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
-static bool usePartialVectorLoads(SDNode *N) {
- if (!N->hasOneUse())
- return false;
-
+static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
+
// If there are any other uses other than scalar to vector, then we should
// keep it as a scalar load -> direct move pattern to prevent multiple
- // loads. Currently, only check for i64 since we have lxsd/lfd to do this
- // efficiently, but no update equivalent.
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- EVT MemVT = LD->getMemoryVT();
- if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) {
- SDNode *User = *(LD->use_begin());
- if (User->getOpcode() == ISD::SCALAR_TO_VECTOR)
- return true;
- }
+ // loads.
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+ if (!LD)
+ return false;
+
+ EVT MemVT = LD->getMemoryVT();
+ if (!MemVT.isSimple())
+ return false;
+ switch(MemVT.getSimpleVT().SimpleTy) {
+ case MVT::i64:
+ break;
+ case MVT::i32:
+ if (!ST.hasP8Vector())
+ return false;
+ break;
+ case MVT::i16:
+ case MVT::i8:
+ if (!ST.hasP9Vector())
+ return false;
+ break;
+ default:
+ return false;
}
- return false;
+ SDValue LoadedVal(N, 0);
+ if (!LoadedVal.hasOneUse())
+ return false;
+
+ for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
+ UI != UE; ++UI)
+ if (UI.getUse().get().getResNo() == 0 &&
+ UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
+ return false;
+
+ return true;
}
/// getPreIndexedAddressParts - returns true by value, base pointer and
@@ -2476,7 +2497,7 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
// Do not generate pre-inc forms for specific loads that feed scalar_to_vector
// instructions because we can fold these into a more efficient instruction
// instead, (such as LXSD).
- if (isLoad && usePartialVectorLoads(N)) {
+ if (isLoad && usePartialVectorLoads(N, Subtarget)) {
return false;
}
OpenPOWER on IntegriCloud