Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp  10
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp     12
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp        17
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ISelLowering.cpp           4
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp              7
5 files changed, 25 insertions, 25 deletions
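
The change below makes TargetLowering::scalarizeVectorLoad return the scalarized value and the new chain as a std::pair<SDValue, SDValue> instead of packing them into an ISD::MERGE_VALUES node, so callers no longer probe the returned node's opcode to pull the two results apart. A minimal stand-alone sketch of the new caller pattern, using hypothetical stand-in types rather than LLVM's real classes:

#include <tuple>
#include <utility>

struct Value {};  // hypothetical stand-in for the loaded value
struct Chain {};  // hypothetical stand-in for the token chain

// New shape: both results come back directly as a pair.
std::pair<Value, Chain> scalarizeLoad() { return {}; }

int main() {
  // Previously a caller had to check whether the single returned node was a
  // MERGE_VALUES wrapper before extracting value and chain; now one std::tie
  // recovers both.
  Value V;
  Chain C;
  std::tie(V, C) = scalarizeLoad();
  (void)V;
  (void)C;
  return 0;
}
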
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index d9f95df57be..dcfa1e78d86 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -752,15 +752,7 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals);
} else {
- SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
- // Skip past MERGE_VALUE node if known.
- if (Scalarized->getOpcode() == ISD::MERGE_VALUES) {
- NewChain = Scalarized.getOperand(1);
- Value = Scalarized.getOperand(0);
- } else {
- NewChain = Scalarized.getValue(1);
- Value = Scalarized.getValue(0);
- }
+ std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
}
AddLegalizedOperand(Op.getValue(0), Value);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index ee2e40b3c59..0bfd1b62db2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6497,8 +6497,9 @@ bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
return true;
}
-SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
- SelectionDAG &DAG) const {
+std::pair<SDValue, SDValue>
+TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
+ SelectionDAG &DAG) const {
SDLoc SL(LD);
SDValue Chain = LD->getChain();
SDValue BasePTR = LD->getBasePtr();
@@ -6532,7 +6533,7 @@ SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
- return DAG.getMergeValues({Value, NewChain}, SL);
+ return std::make_pair(Value, NewChain);
}
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
@@ -6626,10 +6627,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
LoadedVT.isVector()) {
// Scalarize the load and let the individual components be handled.
- SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
- if (Scalarized->getOpcode() == ISD::MERGE_VALUES)
- return std::make_pair(Scalarized.getOperand(0), Scalarized.getOperand(1));
- return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
+ return scalarizeVectorLoad(LD, DAG);
}
// Expand to a (misaligned) integer load of the same size,
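
Since expandUnalignedLoad already returns std::pair<SDValue, SDValue>, the hunk above can forward the scalarized result with a plain return. A self-contained sketch of why the shared return shape eliminates the unpacking branch (stand-in types and names, not LLVM's):

#include <utility>

struct Val {};                          // hypothetical stand-in for SDValue
using ValueChain = std::pair<Val, Val>; // {loaded value, new chain}

ValueChain scalarizeLoadStub() { return {}; }

// Mirrors the new expandUnalignedLoad shape: both functions return the same
// pair type, so the scalarized result is forwarded as-is instead of being
// unpacked from a MERGE_VALUES-style wrapper node.
ValueChain expandLoadStub(bool mustScalarize) {
  if (mustScalarize)
    return scalarizeLoadStub();
  return {};
}
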
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 5d8a2e26ab1..4cfbbe3f12f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1396,16 +1396,19 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
SelectionDAG &DAG) const {
LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT VT = Op.getValueType();
+ SDLoc SL(Op);
// If this is a 2 element vector, we really want to scalarize and not create
// weird 1 element vectors.
- if (VT.getVectorNumElements() == 2)
- return scalarizeVectorLoad(Load, DAG);
+ if (VT.getVectorNumElements() == 2) {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, SL);
+ }
SDValue BasePtr = Load->getBasePtr();
EVT MemVT = Load->getMemoryVT();
- SDLoc SL(Op);
const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
@@ -2869,11 +2872,13 @@ SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
// the bytes again are not eliminated in the case of an unaligned copy.
if (!allowsMisalignedMemoryAccesses(
VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
+ SDValue Ops[2];
+
if (VT.isVector())
- return scalarizeVectorLoad(LN, DAG);
+ std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
+ else
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
- SDValue Ops[2];
- std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
return DAG.getMergeValues(Ops, SDLoc(N));
}
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index dbc9afaa33c..1b1f5f9a404 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1456,7 +1456,9 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
VT.isVector()) {
- return scalarizeVectorLoad(LoadNode, DAG);
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LoadNode, DAG);
+ return DAG.getMergeValues(Ops, DL);
}
// This is still used for explicit load from addrspace(8)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e9f2a675de9..00e25a1713a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7489,8 +7489,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// resource descriptor, we can only make private accesses up to a certain
// size.
switch (Subtarget->getMaxPrivateElementSize()) {
- case 4:
- return scalarizeVectorLoad(Load, DAG);
+ case 4: {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, DL);
+ }
case 8:
if (NumElements > 2)
return SplitVectorLoad(Op, DAG);
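
The AMDGPU, R600, and SI hunks all apply the same pattern: these lowering paths must still produce a single SDValue carrying both results, so they re-wrap the pair with DAG.getMergeValues. A stand-alone sketch of that re-wrap, where mergeValues is a hypothetical analogue of DAG.getMergeValues and the types are stand-ins:

#include <tuple>
#include <utility>

struct Val {};                     // hypothetical stand-in for SDValue
struct Node { Val value, chain; }; // stand-in for a two-result node

std::pair<Val, Val> scalarizeLoadStub() { return {}; }

// Analogue of DAG.getMergeValues: packs value and chain back into one node,
// which is the shape the lowering hooks are required to return.
Node mergeValues(Val value, Val chain) { return {value, chain}; }

Node lowerLoadStub() {
  Val ops[2];
  std::tie(ops[0], ops[1]) = scalarizeLoadStub();
  return mergeValues(ops[0], ops[1]);
}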