diff options
| author | Dale Johannesen <dalej@apple.com> | 2010-07-29 20:10:08 +0000 | 
|---|---|---|
| committer | Dale Johannesen <dalej@apple.com> | 2010-07-29 20:10:08 +0000 | 
| commit | 2bff50546c6844ed7cc7160b81b1fc844ec1000c (patch) | |
| tree | 455b721a709f6231ca5625b13c14aeb50af5a606 /llvm/lib | |
| parent | 2d5cdf0615f9556f360d79415f571a2ea0237265 (diff) | |
| download | bcm5719-llvm-2bff50546c6844ed7cc7160b81b1fc844ec1000c.tar.gz bcm5719-llvm-2bff50546c6844ed7cc7160b81b1fc844ec1000c.zip | |
Implement vector constants which are splats of
integers with mov + vdup.  8003375.  This is
currently disabled by default because LICM will
not hoist a VDUP, so it pessimizes the code if
the construct occurs inside a loop (8248029).
llvm-svn: 109799
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 70 | 
1 file changed, 62 insertions, 8 deletions
| diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 56d24400f27..69b24d0c7a6 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -57,6 +57,13 @@ EnableARMTailCalls("arm-tail-calls", cl::Hidden,    cl::desc("Generate tail calls (TEMPORARY OPTION)."),    cl::init(true)); +// This option should go away when Machine LICM is smart enough to hoist a  +// reg-to-reg VDUP. +static cl::opt<bool> +EnableARMVDUPsplat("arm-vdup-splat", cl::Hidden, +  cl::desc("Generate VDUP for integer constant splats (TEMPORARY OPTION)."), +  cl::init(false)); +  static cl::opt<bool>  EnableARMLongCalls("arm-long-calls", cl::Hidden,    cl::desc("Generate calls via indirect call instructions"), @@ -3257,9 +3264,30 @@ static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,    return true;  } +// If N is an integer constant that can be moved into a register in one +// instruction, return an SDValue of such a constant (will become a MOV +// instruction).  Otherwise return null. +static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, +                                     const ARMSubtarget *ST, DebugLoc dl) { +  uint64_t Val; +  if (!isa<ConstantSDNode>(N)) +    return SDValue(); +  Val = cast<ConstantSDNode>(N)->getZExtValue(); + +  if (ST->isThumb1Only()) { +    if (Val <= 255 || ~Val <= 255) +      return DAG.getConstant(Val, MVT::i32); +  } else { +    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) +      return DAG.getConstant(Val, MVT::i32); +  } +  return SDValue(); +} +  // If this is a case we can't handle, return null and let the default  // expansion code take care of it. 
-static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { +static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,  +                                 const ARMSubtarget *ST) {    BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());    DebugLoc dl = Op.getDebugLoc();    EVT VT = Op.getValueType(); @@ -3319,15 +3347,41 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {    if (isOnlyLowElement)      return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); -  // If all elements are constants, fall back to the default expansion, which -  // will generate a load from the constant pool. +  unsigned EltSize = VT.getVectorElementType().getSizeInBits(); + +  if (EnableARMVDUPsplat) { +    // Use VDUP for non-constant splats.  For f32 constant splats, reduce to +    // i32 and try again. +    if (usesOnlyOneValue && EltSize <= 32) { +      if (!isConstant) +        return DAG.getNode(ARMISD::VDUP, dl, VT, Value); +      if (VT.getVectorElementType().isFloatingPoint()) { +        SmallVector<SDValue, 8> Ops; +        for (unsigned i = 0; i < NumElts; ++i) +          Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,  +                                    Op.getOperand(i))); +        SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &Ops[0], +                                  NumElts); +        return DAG.getNode(ISD::BIT_CONVERT, dl, VT,  +                           LowerBUILD_VECTOR(Val, DAG, ST)); +      } +      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); +      if (Val.getNode()) +        return DAG.getNode(ARMISD::VDUP, dl, VT, Val); +    } +  } + +  // If all elements are constants and the case above didn't get hit, fall back +  // to the default expansion, which will generate a load from the constant +  // pool.    if (isConstant)      return SDValue(); -  // Use VDUP for non-constant splats. 
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits(); -  if (usesOnlyOneValue && EltSize <= 32) -    return DAG.getNode(ARMISD::VDUP, dl, VT, Value); +  if (!EnableARMVDUPsplat) { +    // Use VDUP for non-constant splats. +    if (usesOnlyOneValue && EltSize <= 32) +      return DAG.getNode(ARMISD::VDUP, dl, VT, Value); +  }    // Vectors with 32- or 64-bit elements can be built by directly assigning    // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands @@ -3647,7 +3701,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {    case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);    case ISD::CTTZ:          return LowerCTTZ(Op.getNode(), DAG, Subtarget);    case ISD::VSETCC:        return LowerVSETCC(Op, DAG); -  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG); +  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);    case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);    case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);    case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); | 

