| author | Sam Tebbs <sam.tebbs@arm.com> | 2019-06-28 15:43:31 +0000 |
|---|---|---|
| committer | Sam Tebbs <sam.tebbs@arm.com> | 2019-06-28 15:43:31 +0000 |
| commit | e39e958da36da52d34e883dd5820262e96a8781a | |
| tree | 8531832a979744c37c7b8efe661600430bc5b2b9 /llvm/lib/Target/ARM/ARMISelLowering.cpp | |
| parent | 176b9f651685c52bce25e700a758bd33e6a5354d | |
[ARM] Add support for the MVE long shift instructions
MVE adds the lsll, lsrl and asrl instructions, which perform a shift on a 64-bit value split across two 32-bit registers.
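For reference, the shifted value lives in a pair of 32-bit general-purpose registers (a low half and a high half) and is shifted as a single 64-bit quantity. A minimal C++ sketch of the equivalent computation is below; the helper name is hypothetical and not part of this patch.

```cpp
#include <cstdint>

// Hypothetical reference model of lsll: the 64-bit operand is held as two
// 32-bit halves (Lo = bits 0-31, Hi = bits 32-63) and shifted as one value.
uint64_t lsll_reference(uint32_t Lo, uint32_t Hi, unsigned Amount) {
  uint64_t Value = (uint64_t(Hi) << 32) | Lo; // reassemble the 64-bit value
  return Value << Amount;                     // shift it as a single quantity
}
```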
The Expand64BitShift function is modified to accept ISD::SHL, ISD::SRL and ISD::SRA and convert them into the appropriate ARMISD opcodes: an SHL becomes an lsll, an SRL becomes an lsrl for an immediate shift amount or a negation followed by an lsll for a register shift amount (there is no register form of lsrl), and an SRA becomes an asrl.
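For illustration only (these functions are hypothetical and not taken from the added test), source-level 64-bit shifts of the following shapes would be expected to exercise the new lowering paths on an MVE target:

```cpp
#include <cstdint>

// Constant shift amounts of 32 or less: expected to use the immediate forms
// (lsll, lsrl, asrl); larger constants fall back to the default expansion.
uint64_t shl_imm(uint64_t X)  { return X << 4; }
uint64_t lshr_imm(uint64_t X) { return X >> 4; }
int64_t  ashr_imm(int64_t X)  { return X >> 4; }

// Variable shift amounts: SHL and SRA keep their register forms, while SRL
// has no register-form lsrl, so the amount is negated and an lsll is used.
uint64_t shl_reg(uint64_t X, unsigned N)  { return X << N; }
uint64_t lshr_reg(uint64_t X, unsigned N) { return X >> N; }
```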
test/CodeGen/ARM/shift_parts.ll is added to test the logic of emitting these instructions.
Differential Revision: https://reviews.llvm.org/D63430
llvm-svn: 364654
Diffstat (limited to 'llvm/lib/Target/ARM/ARMISelLowering.cpp')
-rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 57 |
1 file changed, 55 insertions, 2 deletions
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index ae7e2b6e34d..d2ef680524a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -932,6 +932,11 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SRA, MVT::i64, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
 
+  // MVE lowers 64 bit shifts to lsll and lsrl
+  // assuming that ISD::SRL and SRA of i64 are already marked custom
+  if (Subtarget->hasMVEIntegerOps())
+    setOperationAction(ISD::SHL, MVT::i64, Custom);
+
   // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
   if (Subtarget->isThumb1Only()) {
     setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
@@ -1411,6 +1416,10 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case ARMISD::SSAT:          return "ARMISD::SSAT";
   case ARMISD::USAT:          return "ARMISD::USAT";
 
+  case ARMISD::ASRL:          return "ARMISD::ASRL";
+  case ARMISD::LSRL:          return "ARMISD::LSRL";
+  case ARMISD::LSLL:          return "ARMISD::LSLL";
+
   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
   case ARMISD::RRX:           return "ARMISD::RRX";
@@ -5619,11 +5628,54 @@ static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
   if (VT != MVT::i64)
     return SDValue();
 
-  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
+  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
+          N->getOpcode() == ISD::SHL) &&
          "Unknown shift to lower!");
 
+  unsigned ShOpc = N->getOpcode();
+  if (ST->hasMVEIntegerOps()) {
+    SDValue ShAmt = N->getOperand(1);
+    unsigned ShPartsOpc = ARMISD::LSLL;
+    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);
+
+    // If the shift amount is greater than 32 then do the default optimisation
+    if (Con && Con->getZExtValue() > 32)
+      return SDValue();
+
+    // Extract the lower 32 bits of the shift amount if it's an i64
+    if (ShAmt->getValueType(0) == MVT::i64)
+      ShAmt = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, ShAmt,
+                          DAG.getConstant(0, dl, MVT::i32));
+
+    if (ShOpc == ISD::SRL) {
+      if (!Con)
+        // There is no t2LSRLr instruction so negate and perform an lsll if the
+        // shift amount is in a register, emulating a right shift.
+        ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                            DAG.getConstant(0, dl, MVT::i32), ShAmt);
+      else
+        // Else generate an lsrl on the immediate shift amount
+        ShPartsOpc = ARMISD::LSRL;
+    } else if (ShOpc == ISD::SRA)
+      ShPartsOpc = ARMISD::ASRL;
+
+    // Lower 32 bits of the destination/source
+    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
+                             DAG.getConstant(0, dl, MVT::i32));
+    // Upper 32 bits of the destination/source
+    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
+                             DAG.getConstant(1, dl, MVT::i32));
+
+    // Generate the shift operation as computed above
+    Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
+                     ShAmt);
+    // The upper 32 bits come from the second return value of lsll
+    Hi = SDValue(Lo.getNode(), 1);
+    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
+  }
+
   // We only lower SRA, SRL of 1 here, all others use generic lowering.
-  if (!isOneConstant(N->getOperand(1)))
+  if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
     return SDValue();
 
   // If we are in thumb mode, we don't have RRX.
@@ -8291,6 +8343,7 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
     break;
   case ISD::SRL:
   case ISD::SRA:
+  case ISD::SHL:
     Res = Expand64BitShift(N, DAG, Subtarget);
     break;
   case ISD::SREM: