Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/BranchFolding.cpp      | 37
-rw-r--r--  llvm/lib/Target/X86/X86ExpandPseudo.cpp | 21
-rw-r--r--  llvm/lib/Target/X86/X86InstrControl.td  | 31
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp    | 90
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h      |  7
-rw-r--r--  llvm/lib/Target/X86/X86MCInstLower.cpp  |  9
6 files changed, 193 insertions, 2 deletions
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index 4a9e15ebd76..b347ea31394 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -49,6 +49,7 @@ STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
 STATISTIC(NumBranchOpts, "Number of branches optimized");
 STATISTIC(NumTailMerge , "Number of block tails merged");
 STATISTIC(NumHoist , "Number of times common instructions are hoisted");
+STATISTIC(NumTailCalls, "Number of tail calls optimized");
 
 static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
                               cl::init(cl::BOU_UNSET), cl::Hidden);
@@ -1415,6 +1416,42 @@ ReoptimizeBlock:
     }
   }
 
+  if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
+      MF.getFunction()->optForSize()) {
+    // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
+    // direction, thereby defeating careful block placement and regressing
+    // performance. Therefore, only consider this for optsize functions.
+    MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
+    if (TII->isUnconditionalTailCall(TailCall)) {
+      MachineBasicBlock *Pred = *MBB->pred_begin();
+      MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
+      SmallVector<MachineOperand, 4> PredCond;
+      bool PredAnalyzable =
+          !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
+
+      if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
+        // The predecessor has a conditional branch to this block which consists
+        // of only a tail call. Try to fold the tail call into the conditional
+        // branch.
+        if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
+          // TODO: It would be nice if analyzeBranch() could provide a pointer
+          // to the branch instruction so replaceBranchWithTailCall() doesn't
+          // have to search for it.
+          TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
+          ++NumTailCalls;
+          Pred->removeSuccessor(MBB);
+          MadeChange = true;
+          return MadeChange;
+        }
+      }
+      // If the predecessor is falling through to this block, we could reverse
+      // the branch condition and fold the tail call into that. However, after
+      // that we might have to re-arrange the CFG to fall through to the other
+      // block and there is a high risk of regressing code size rather than
+      // improving it.
+    }
+  }
+
   // Analyze the branch in the current block.
   MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
   SmallVector<MachineOperand, 4> CurCond;
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 6a2cf1d5314..c4bc29e963e 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -77,9 +77,11 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
   default:
     return false;
   case X86::TCRETURNdi:
+  case X86::TCRETURNdicc:
   case X86::TCRETURNri:
   case X86::TCRETURNmi:
   case X86::TCRETURNdi64:
+  case X86::TCRETURNdi64cc:
   case X86::TCRETURNri64:
   case X86::TCRETURNmi64: {
     bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
@@ -97,6 +99,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
     Offset = StackAdj - MaxTCDelta;
     assert(Offset >= 0 && "Offset should never be negative");
 
+    if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
+      assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");
+    }
+
     if (Offset) {
       // Check for possible merge with preceding ADD instruction.
       Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
@@ -105,12 +111,21 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
 
     // Jump to label or value in register.
     bool IsWin64 = STI->isTargetWin64();
-    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
+    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
+        Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
       unsigned Op;
       switch (Opcode) {
       case X86::TCRETURNdi:
         Op = X86::TAILJMPd;
         break;
+      case X86::TCRETURNdicc:
+        Op = X86::TAILJMPd_CC;
+        break;
+      case X86::TCRETURNdi64cc:
+        assert(!IsWin64 && "Conditional tail calls confuse the Win64 unwinder.");
+        // TODO: We could do it for Win64 "leaf" functions though; PR30337.
+        Op = X86::TAILJMPd64_CC;
+        break;
       default:
         // Note: Win64 uses REX prefixes indirect jumps out of functions, but
         // not direct ones.
@@ -126,6 +141,10 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
         MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                               JumpTarget.getTargetFlags());
       }
+      if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {
+        MIB.addImm(MBBI->getOperand(2).getImm());
+      }
+
     } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
       unsigned Op = (Opcode == X86::TCRETURNmi)
                         ? X86::TAILJMPm
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index 2f260c48df4..4ea223e82be 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -264,6 +264,21 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                    "jmp{l}\t{*}$dst", [], IIC_JMP_MEM>;
 }
 
+// Conditional tail calls are similar to the above, but they are branches
+// rather than barriers, and they use EFLAGS.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
+    isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
+  let Uses = [ESP, EFLAGS] in {
+  def TCRETURNdicc : PseudoI<(outs),
+                     (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
+
+  // This gets substituted to a conditional jump instruction in MC lowering.
+  def TAILJMPd_CC : Ii32PCRel<0x80, RawFrm, (outs),
+                           (ins i32imm_pcrel:$dst, i32imm:$cond),
+                           "",
+                           [], IIC_JMP_REL>;
+}
+
 //===----------------------------------------------------------------------===//
 //  Call Instructions...
 //
@@ -325,3 +340,19 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                          "rex64 jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
   }
 }
+
+// Conditional tail calls are similar to the above, but they are branches
+// rather than barriers, and they use EFLAGS.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
+    isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
+  let Uses = [RSP, EFLAGS] in {
+  def TCRETURNdi64cc : PseudoI<(outs),
+                       (ins i64i32imm_pcrel:$dst, i32imm:$offset,
+                        i32imm:$cond), []>;
+
+  // This gets substituted to a conditional jump instruction in MC lowering.
+  def TAILJMPd64_CC : Ii32PCRel<0x80, RawFrm, (outs),
+                           (ins i64i32imm_pcrel:$dst, i32imm:$cond),
+                           "",
+                           [], IIC_JMP_REL>;
+}
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index d69c9d7a71f..e66ae6d34e6 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5794,6 +5794,96 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
   return !isPredicated(MI);
 }
 
+bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
+  switch (MI.getOpcode()) {
+  case X86::TCRETURNdi:
+  case X86::TCRETURNri:
+  case X86::TCRETURNmi:
+  case X86::TCRETURNdi64:
+  case X86::TCRETURNri64:
+  case X86::TCRETURNmi64:
+    return true;
+  default:
+    return false;
+  }
+}
+
+bool X86InstrInfo::canMakeTailCallConditional(
+    SmallVectorImpl<MachineOperand> &BranchCond,
+    const MachineInstr &TailCall) const {
+  if (TailCall.getOpcode() != X86::TCRETURNdi &&
+      TailCall.getOpcode() != X86::TCRETURNdi64) {
+    // Only direct calls can be done with a conditional branch.
+    return false;
+  }
+
+  if (Subtarget.isTargetWin64()) {
+    // Conditional tail calls confuse the Win64 unwinder.
+    // TODO: Allow them for "leaf" functions; PR30337.
+    return false;
+  }
+
+  assert(BranchCond.size() == 1);
+  if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
+    // Can't make a conditional tail call with this condition.
+    return false;
+  }
+
+  const X86MachineFunctionInfo *X86FI =
+      TailCall.getParent()->getParent()->getInfo<X86MachineFunctionInfo>();
+  if (X86FI->getTCReturnAddrDelta() != 0 ||
+      TailCall.getOperand(1).getImm() != 0) {
+    // A conditional tail call cannot do any stack adjustment.
+    return false;
+  }
+
+  return true;
+}
+
+void X86InstrInfo::replaceBranchWithTailCall(
+    MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
+    const MachineInstr &TailCall) const {
+  assert(canMakeTailCallConditional(BranchCond, TailCall));
+
+  MachineBasicBlock::iterator I = MBB.end();
+  while (I != MBB.begin()) {
+    --I;
+    if (I->isDebugValue())
+      continue;
+    if (!I->isBranch())
+      assert(0 && "Can't find the branch to replace!");
+
+    X86::CondCode CC = getCondFromBranchOpc(I->getOpcode());
+    assert(BranchCond.size() == 1);
+    if (CC != BranchCond[0].getImm())
+      continue;
+
+    break;
+  }
+
+  unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
+                                                         : X86::TCRETURNdi64cc;
+
+  auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
+  MIB->addOperand(TailCall.getOperand(0)); // Destination.
+  MIB.addImm(0); // Stack offset (not used).
+  MIB->addOperand(BranchCond[0]); // Condition.
+  MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
+
+  // Add implicit uses and defs of all live regs potentially clobbered by the
+  // call. This way they still appear live across the call.
+  LivePhysRegs LiveRegs(&getRegisterInfo());
+  LiveRegs.addLiveOuts(MBB);
+  SmallVector<std::pair<unsigned, const MachineOperand *>, 8> Clobbers;
+  LiveRegs.stepForward(*MIB, Clobbers);
+  for (const auto &C : Clobbers) {
+    MIB.addReg(C.first, RegState::Implicit);
+    MIB.addReg(C.first, RegState::Implicit | RegState::Define);
+  }
+
+  I->eraseFromParent();
+}
+
 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
 // fallthrough MBB cannot be identified.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 0d4bc5d0fb6..c2644a35e48 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -316,6 +316,13 @@ public:
 
   // Branch analysis.
   bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
+  bool isUnconditionalTailCall(const MachineInstr &MI) const override;
+  bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
+                                  const MachineInstr &TailCall) const override;
+  void replaceBranchWithTailCall(MachineBasicBlock &MBB,
+                                 SmallVectorImpl<MachineOperand> &Cond,
+                                 const MachineInstr &TailCall) const override;
+
   bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
                      SmallVectorImpl<MachineOperand> &Cond,
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 0887643ed5a..65949531126 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -499,11 +499,16 @@ ReSimplify:
     break;
   }
 
-  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
+  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
   { unsigned Opcode;
   case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
   case X86::TAILJMPd:
   case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
+  case X86::TAILJMPd_CC:
+  case X86::TAILJMPd64_CC:
+    Opcode = X86::GetCondBranchFromCond(
+        static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
+    goto SetTailJmpOpcode;
 
   SetTailJmpOpcode:
     MCOperand Saved = OutMI.getOperand(0);
@@ -1294,9 +1299,11 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case X86::TAILJMPr:
   case X86::TAILJMPm:
   case X86::TAILJMPd:
+  case X86::TAILJMPd_CC:
   case X86::TAILJMPr64:
   case X86::TAILJMPm64:
   case X86::TAILJMPd64:
+  case X86::TAILJMPd64_CC:
   case X86::TAILJMPr64_REX:
   case X86::TAILJMPm64_REX:
     // Lower these as normal, but add some comments.
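In short: with these changes, branch folding can replace a conditional branch whose single-predecessor target block does nothing but tail-call with one conditional tail call, and it only does so for functions optimized for size. Schematically, using an illustrative block label and callee name ("bar") that are not taken from the patch:

    # Before: the conditional branch targets a block whose only instruction
    # is an unconditional direct tail call (TCRETURNdi, lowered via TAILJMPd to jmp).
        jne   .LBB0_1
        ...
    .LBB0_1:
        jmp   bar

    # After the fold: the tail call itself becomes conditional
    # (TCRETURNdicc, expanded to TAILJMPd_CC and emitted as a Jcc).
        jne   bar
        ...

As the new canMakeTailCallConditional() spells out, the fold applies only to direct calls with no stack adjustment, and it is skipped on Win64 because, per the patch comments, conditional tail calls confuse the Win64 unwinder (PR30337).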