author    | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-30 21:15:10 +0000
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-30 21:15:10 +0000
commit    | a4b1b6ea05645c8baacd5770540cf8c57549632b (patch)
tree      | 7745c7dfa588a62ace9eb7556e5fd1003c046ea3 /llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
parent    | 5d518386b658f51e18e77d31e2f823c171572b71 (diff)
LegalizeDAG: Don't replace vector load with integer unless legal
On AMDGPU we want to be able to promote i64/f64 loads to v2i32. If the
access is unaligned, the load expansion would conclude that, since i64 is
legal, the load should be converted back to i64, and legalization would
loop endlessly.
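The promotion referred to here is the usual Promote/AddPromotedToType pairing in a target's lowering setup. The snippet below is a minimal sketch under that assumption; the class name and header path are illustrative, not the verbatim AMDGPU code. Once i64/f64 loads are promoted to v2i32 this way, expanding an unaligned v2i32 load must not hand back an i64 load just because i64 is legal, or the promote/expand cycle never terminates.

```cpp
// Minimal sketch, not the actual AMDGPU code: a target asking the legalizer
// to rewrite i64/f64 loads as v2i32 loads (LLVM of this era; class name and
// header path are assumptions for illustration).
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

namespace {
class ExampleTargetLowering : public TargetLowering {
public:
  explicit ExampleTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
    // Loads of i64/f64 are promoted, i.e. replaced with v2i32 loads.
    setOperationAction(ISD::LOAD, MVT::i64, Promote);
    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

    setOperationAction(ISD::LOAD, MVT::f64, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
  }
};
} // end anonymous namespace
```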
Extract the logic for scalarizing the load into a new TargetLowering
function, where it can also replace the custom function AMDGPU has for
the same purpose.
llvm-svn: 264927
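For reference, the extracted helper is essentially the per-element loop that the diff below removes from VectorLegalizer::ExpandLoad. The following is a sketch reconstructed from that removed code, not the verbatim TargetLowering::scalarizeVectorLoad added by the commit: it splits the vector load into one extending scalar load per element and returns a merged value/chain pair, which the caller unpacks with getValue(0)/getValue(1).

```cpp
// Sketch reconstructed from the removed else-branch shown in the diff below;
// the committed TargetLowering::scalarizeVectorLoad may differ in detail.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
                                            SelectionDAG &DAG) const {
  SDLoc SL(LD);
  EVT SrcVT = LD->getMemoryVT();
  EVT SrcEltVT = SrcVT.getScalarType();
  EVT DstEltVT = LD->getValueType(0).getScalarType();
  unsigned NumElem = SrcVT.getVectorNumElements();
  unsigned Stride = SrcEltVT.getSizeInBits() / 8;

  SDValue Chain = LD->getChain();
  SDValue BasePTR = LD->getBasePtr();

  SmallVector<SDValue, 8> Vals;
  SmallVector<SDValue, 8> LoadChains;

  for (unsigned Idx = 0; Idx < NumElem; ++Idx) {
    // One extending load per element, with pointer info and alignment
    // adjusted for that element's offset from the base pointer.
    SDValue ScalarLoad = DAG.getExtLoad(
        LD->getExtensionType(), SL, DstEltVT, Chain, BasePTR,
        LD->getPointerInfo().getWithOffset(Idx * Stride), SrcEltVT,
        LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(),
        MinAlign(LD->getAlignment(), Idx * Stride), LD->getAAInfo());

    // Advance the pointer to the next element.
    BasePTR = DAG.getNode(ISD::ADD, SL, BasePTR.getValueType(), BasePTR,
                          DAG.getConstant(Stride, SL, BasePTR.getValueType()));

    Vals.push_back(ScalarLoad.getValue(0));
    LoadChains.push_back(ScalarLoad.getValue(1));
  }

  // Tie the per-element chains together and rebuild the vector result.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
  SDValue Value =
      DAG.getNode(ISD::BUILD_VECTOR, SL, LD->getValueType(0), Vals);

  return DAG.getMergeValues({Value, NewChain}, SL);
}
```

With a helper like this in place, the else branch of ExpandLoad in the diff reduces to a single call to TLI.scalarizeVectorLoad(LD, DAG), and the AMDGPU custom lowering for the same pattern can reuse it instead of open-coding the loop.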
Diffstat (limited to 'llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp')
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 49
1 file changed, 21 insertions, 28 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index f8c2f1b1fc6..3b7bc2fd68c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -492,21 +492,26 @@ SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) {
 
 SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
-  SDLoc dl(Op);
   LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
-  SDValue Chain = LD->getChain();
-  SDValue BasePTR = LD->getBasePtr();
-  EVT SrcVT = LD->getMemoryVT();
-  ISD::LoadExtType ExtType = LD->getExtensionType();
 
-  SmallVector<SDValue, 8> Vals;
-  SmallVector<SDValue, 8> LoadChains;
+  EVT SrcVT = LD->getMemoryVT();
+  EVT SrcEltVT = SrcVT.getScalarType();
   unsigned NumElem = SrcVT.getVectorNumElements();
 
-  EVT SrcEltVT = SrcVT.getScalarType();
-  EVT DstEltVT = Op.getNode()->getValueType(0).getScalarType();
+  SDValue NewChain;
+  SDValue Value;
 
   if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) {
+    SDLoc dl(Op);
+
+    SmallVector<SDValue, 8> Vals;
+    SmallVector<SDValue, 8> LoadChains;
+
+    EVT DstEltVT = LD->getValueType(0).getScalarType();
+    SDValue Chain = LD->getChain();
+    SDValue BasePTR = LD->getBasePtr();
+    ISD::LoadExtType ExtType = LD->getExtensionType();
+
     // When elements in a vector is not byte-addressable, we cannot directly
     // load each element by advancing pointer, which could only address bytes.
     // Instead, we load all significant words, mask bits off, and concatenate
@@ -613,29 +618,17 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
       }
 
       Vals.push_back(Lo);
     }
-  } else {
-    unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;
-    for (unsigned Idx=0; Idx<NumElem; Idx++) {
-      SDValue ScalarLoad = DAG.getExtLoad(ExtType, dl,
-                Op.getNode()->getValueType(0).getScalarType(),
-                Chain, BasePTR, LD->getPointerInfo().getWithOffset(Idx * Stride),
-                SrcVT.getScalarType(),
-                LD->isVolatile(), LD->isNonTemporal(), LD->isInvariant(),
-                MinAlign(LD->getAlignment(), Idx * Stride), LD->getAAInfo());
-
-      BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
-                            DAG.getConstant(Stride, dl, BasePTR.getValueType()));
+    NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
+    Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
+                        Op.getNode()->getValueType(0), Vals);
+  } else {
+    SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
 
-      Vals.push_back(ScalarLoad.getValue(0));
-      LoadChains.push_back(ScalarLoad.getValue(1));
-    }
+    NewChain = Scalarized.getValue(1);
+    Value = Scalarized.getValue(0);
   }
 
-  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
-  SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
-                              Op.getNode()->getValueType(0), Vals);
-
   AddLegalizedOperand(Op.getValue(0), Value);
   AddLegalizedOperand(Op.getValue(1), NewChain);