diff options
| author | Tim Renouf <tpr.llvm@botech.co.uk> | 2019-07-04 17:38:24 +0000 | 
|---|---|---|
| committer | Tim Renouf <tpr.llvm@botech.co.uk> | 2019-07-04 17:38:24 +0000 | 
| commit | 5816889c748b37e1c3f48e8799a3478cf5931f60 (patch) | |
| tree | 5c693a172c14089403cf5f3f29d1dcb821f39975 /llvm/lib | |
| parent | 75b5edf6a1df09d466a9c4f5669121add1632067 (diff) | |
| download | bcm5719-llvm-5816889c748b37e1c3f48e8799a3478cf5931f60.tar.gz bcm5719-llvm-5816889c748b37e1c3f48e8799a3478cf5931f60.zip | |
[AMDGPU] Custom lower INSERT_SUBVECTOR v3, v4, v5, v8
Summary:
Since the changes to introduce vec3 and vec5, INSERT_SUBVECTOR for these
sizes has been marked "expand", which made LegalizeDAG lower it to loads
and stores via a stack slot. The code got optimized a bit later, but the
now-unused stack slot was never deleted.
This commit avoids that problem by custom lowering INSERT_SUBVECTOR into
an EXTRACT_VECTOR_ELT and INSERT_VECTOR_ELT for each element in the
subvector to insert.
V2: Addressed review comments re test.
Differential Revision: https://reviews.llvm.org/D63160
Change-Id: I9e3c13e36f68cfa3431bb9814851cc1f673274e1
llvm-svn: 365148
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 44 | ||||
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIISelLowering.h | 1 | 
2 files changed, 37 insertions, 8 deletions
| diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index a0e4c8a587f..2360eaced01 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -335,16 +335,16 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);    // Deal with vec3 vector operations when widened to vec4. -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);    // Deal with vec5 vector operations when widened to vec8. 
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand); -  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom); +  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);    // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,    // and output demarshalling @@ -3956,6 +3956,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {    case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);    case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);    case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); +  case ISD::INSERT_SUBVECTOR: +    return lowerINSERT_SUBVECTOR(Op, DAG);    case ISD::INSERT_VECTOR_ELT:      return lowerINSERT_VECTOR_ELT(Op, DAG);    case ISD::EXTRACT_VECTOR_ELT: @@ -4628,6 +4630,32 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,    return DAG.getUNDEF(ASC->getValueType(0));  } +// This lowers an INSERT_SUBVECTOR by extracting the individual elements from +// the small vector and inserting them into the big vector. That is better than +// the default expansion of doing it via a stack slot. Even though the use of +// the stack slot would be optimized away afterwards, the stack slot itself +// remains. 
+SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, +                                                SelectionDAG &DAG) const { +  SDValue Vec = Op.getOperand(0); +  SDValue Ins = Op.getOperand(1); +  SDValue Idx = Op.getOperand(2); +  EVT VecVT = Vec.getValueType(); +  EVT InsVT = Ins.getValueType(); +  EVT EltVT = VecVT.getVectorElementType(); +  unsigned InsNumElts = InsVT.getVectorNumElements(); +  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); +  SDLoc SL(Op); + +  for (unsigned I = 0; I != InsNumElts; ++I) { +    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, +                              DAG.getConstant(I, SL, MVT::i32)); +    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, +                      DAG.getConstant(IdxVal + I, SL, MVT::i32)); +  } +  return Vec; +} +  SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,                                                   SelectionDAG &DAG) const {    SDValue Vec = Op.getOperand(0); diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h index 909ee8f7987..21a215e16ce 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -121,6 +121,7 @@ private:                               SelectionDAG &DAG) const;    SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const; +  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;    SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;    SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;    SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; | 

