path: root/llvm/lib/Target/ARM/ARMISelLowering.cpp
author     Artyom Skrobov <Artyom.Skrobov@arm.com>   2017-03-22 23:35:51 +0000
committer  Artyom Skrobov <Artyom.Skrobov@arm.com>   2017-03-22 23:35:51 +0000
commit     92c0653095f5db544ff52cfb72cc1726b2cdf345 (patch)
tree       0ecabedbc90af3633442fca1f2e425630fa85373 /llvm/lib/Target/ARM/ARMISelLowering.cpp
parent     3eb2b44d31ff921041eb5b55333c77d6fb16eb61 (diff)
Reapply r298417 "[ARM] Recommit the glueless lowering of addc/adde in Thumb1"
The UB in the t2_so_imm_neg conversion has been addressed under D31242 / r298512. This reverts commit r298482. llvm-svn: 298562
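
For context, ISD::ADDC/ISD::ADDE model a wide addition split into word-sized steps: the low halves are added first and produce a carry, which the high-half addition then consumes (in the glueless form the carry is threaded between the two nodes as an ordinary value rather than a glue edge). The standalone C++ sketch below is only an illustration, not part of the patch, and the helper name is invented for the example.

#include <cassert>
#include <cstdint>

// Illustration of the ADDC/ADDE split: a 64-bit addition performed as two
// 32-bit additions, where the low step produces a carry (ADDC) and the high
// step consumes it (ADDE).
static uint64_t add64ViaCarryChain(uint64_t A, uint64_t B) {
  uint32_t ALo = uint32_t(A), AHi = uint32_t(A >> 32);
  uint32_t BLo = uint32_t(B), BHi = uint32_t(B >> 32);

  uint32_t Lo = ALo + BLo;           // ADDC: add the low words
  uint32_t Carry = Lo < ALo ? 1 : 0; // carry out of the low addition
  uint32_t Hi = AHi + BHi + Carry;   // ADDE: add the high words plus carry in

  return (uint64_t(Hi) << 32) | Lo;
}

int main() {
  assert(add64ViaCarryChain(0xFFFFFFFFULL, 1) == 0x100000000ULL);
  assert(add64ViaCarryChain(0x1234567890ABCDEFULL, 0x0FEDCBA987654321ULL) ==
         0x1234567890ABCDEFULL + 0x0FEDCBA987654321ULL);
  return 0;
}
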
Diffstat (limited to 'llvm/lib/Target/ARM/ARMISelLowering.cpp')
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 96
1 file changed, 83 insertions, 13 deletions
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e3723788223..0c17211cce3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -822,13 +822,10 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL, MVT::i64, Custom);
setOperationAction(ISD::SRA, MVT::i64, Custom);
- if (!Subtarget->isThumb1Only()) {
- // FIXME: We should do this for Thumb1 as well.
- setOperationAction(ISD::ADDC, MVT::i32, Custom);
- setOperationAction(ISD::ADDE, MVT::i32, Custom);
- setOperationAction(ISD::SUBC, MVT::i32, Custom);
- setOperationAction(ISD::SUBE, MVT::i32, Custom);
- }
+ setOperationAction(ISD::ADDC, MVT::i32, Custom);
+ setOperationAction(ISD::ADDE, MVT::i32, Custom);
+ setOperationAction(ISD::SUBC, MVT::i32, Custom);
+ setOperationAction(ISD::SUBE, MVT::i32, Custom);
if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
@@ -9096,19 +9093,45 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
// Rename pseudo opcodes.
unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
+ unsigned ccOutIdx;
if (NewOpc) {
const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
MCID = &TII->get(NewOpc);
- assert(MCID->getNumOperands() == MI.getDesc().getNumOperands() + 1 &&
- "converted opcode should be the same except for cc_out");
+ assert(MCID->getNumOperands() ==
+ MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
+ && "converted opcode should be the same except for cc_out"
+ " (and, on Thumb1, pred)");
MI.setDesc(*MCID);
// Add the optional cc_out operand
MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
- }
- unsigned ccOutIdx = MCID->getNumOperands() - 1;
+
+ // On Thumb1, move all input operands to the end, then add the predicate
+ if (Subtarget->isThumb1Only()) {
+ for (unsigned c = MCID->getNumOperands() - 4; c--;) {
+ MI.addOperand(MI.getOperand(1));
+ MI.RemoveOperand(1);
+ }
+
+ // Restore the ties
+ for (unsigned i = MI.getNumOperands(); i--;) {
+ const MachineOperand& op = MI.getOperand(i);
+ if (op.isReg() && op.isUse()) {
+ int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
+ if (DefIdx != -1)
+ MI.tieOperands(DefIdx, i);
+ }
+ }
+
+ MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
+ MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
+ ccOutIdx = 1;
+ } else
+ ccOutIdx = MCID->getNumOperands() - 1;
+ } else
+ ccOutIdx = MCID->getNumOperands() - 1;
// Any ARM instruction that sets the 's' bit should specify an optional
// "cc_out" operand in the last operand position.
@@ -9139,7 +9162,9 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
if (deadCPSR) {
assert(!MI.getOperand(ccOutIdx).getReg() &&
"expect uninitialized optional cc_out operand");
- return;
+ // Thumb1 instructions must have the S bit even if the CPSR is dead.
+ if (!Subtarget->isThumb1Only())
+ return;
}
// If this instruction was defined with an optional CPSR def and its dag node
@@ -9759,6 +9784,48 @@ static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue PerformAddcSubcCombine(SDNode *N, SelectionDAG &DAG,
+ const ARMSubtarget *Subtarget) {
+ if (Subtarget->isThumb1Only()) {
+ SDValue RHS = N->getOperand(1);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
+ int32_t imm = C->getSExtValue();
+ if (imm < 0 && imm > INT_MIN) {
+ SDLoc DL(N);
+ RHS = DAG.getConstant(-imm, DL, MVT::i32);
+ unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
+ : ARMISD::ADDC;
+ return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
+ }
+ }
+ }
+ return SDValue();
+}
+
+static SDValue PerformAddeSubeCombine(SDNode *N, SelectionDAG &DAG,
+ const ARMSubtarget *Subtarget) {
+ if (Subtarget->isThumb1Only()) {
+ SDValue RHS = N->getOperand(1);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
+ int64_t imm = C->getSExtValue();
+ if (imm < 0) {
+ SDLoc DL(N);
+
+ // The with-carry-in form matches bitwise not instead of the negation.
+ // Effectively, the inverse interpretation of the carry flag already
+ // accounts for part of the negation.
+ RHS = DAG.getConstant(~imm, DL, MVT::i32);
+
+ unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
+ : ARMISD::ADDE;
+ return DAG.getNode(Opcode, DL, N->getVTList(),
+ N->getOperand(0), RHS, N->getOperand(2));
+ }
+ }
+ }
+ return SDValue();
+}
+
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
@@ -9767,7 +9834,7 @@ static SDValue PerformADDECombine(SDNode *N,
const ARMSubtarget *Subtarget) {
// Only ARM and Thumb2 support UMLAL/SMLAL.
if (Subtarget->isThumb1Only())
- return SDValue();
+ return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
// Only perform the checks after legalize when the pattern is available.
if (DCI.isBeforeLegalize()) return SDValue();
@@ -11867,6 +11934,9 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
+ case ARMISD::ADDC:
+ case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI.DAG, Subtarget);
+ case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI.DAG, Subtarget);
case ARMISD::BFI: return PerformBFICombine(N, DCI);
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
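
The two combines added above rest on plain two's-complement identities. PerformAddcSubcCombine replaces an add of a negative constant with a subtract of its negation; the imm > INT_MIN guard keeps -imm representable, since negating INT_MIN would overflow. PerformAddeSubeCombine uses the bitwise complement instead, because under the inverted-borrow reading of the carry flag the with-carry subtraction already supplies the missing +1: a + imm + c == a - ~imm - (1 - c). The standalone sketch below merely checks those identities numerically; it is an illustration, not code from the patch.

#include <cassert>
#include <cstdint>

// Subtract-with-carry with ARM's convention: the carry flag is the inverted
// borrow, so the result is a - b - (1 - carry).
static uint32_t sbc(uint32_t a, uint32_t b, uint32_t carry) {
  return a - b - (1u - carry);
}

int main() {
  const int32_t imm = -7; // any negative constant greater than INT_MIN
  const uint32_t a = 0x12345678u;

  for (uint32_t carry = 0; carry <= 1; ++carry) {
    // ADDC(a, imm) == SUBC(a, -imm): adding a negative constant is the same
    // as subtracting its negation (-imm is representable since imm > INT_MIN).
    assert(a + uint32_t(imm) == a - uint32_t(-imm));

    // ADDE(a, imm, carry) == SUBE(a, ~imm, carry): the with-carry-in form
    // matches the bitwise not, since the inverted-borrow carry accounts for
    // the remaining +1 of the negation.
    assert(a + uint32_t(imm) + carry == sbc(a, uint32_t(~imm), carry));
  }
  return 0;
}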