author | Hans Wennborg <hans@hanshq.net> | 2016-09-07 17:52:14 +0000
---|---|---
committer | Hans Wennborg <hans@hanshq.net> | 2016-09-07 17:52:14 +0000
commit | 75e25f6812da5e46a2a8f8dbbeae0f0f3df832d9 |
tree | 4aae7345b3f2ee264c337342b41f7d2e3d4c77fc /llvm/lib/Target/X86/X86InstrInfo.cpp |
parent | 5ad1cbeecb07c02d5c9e9f27410815556d488452 |
X86: Fold tail calls into conditional branches where possible (PR26302)
When branching to a block that immediately tail calls, it is possible to fold
the call directly into the branch if the call is direct and there is no stack
adjustment, saving one byte.
Example:
define void @f(i32 %x, i32 %y) {
entry:
  %p = icmp eq i32 %x, %y
  br i1 %p, label %bb1, label %bb2
bb1:
  tail call void @foo()
  ret void
bb2:
  tail call void @bar()
  ret void
}
before:
f:
        movl    4(%esp), %eax
        cmpl    8(%esp), %eax
        jne     .LBB0_2
        jmp     foo
.LBB0_2:
        jmp     bar
after:
f:
        movl    4(%esp), %eax
        cmpl    8(%esp), %eax
        jne     bar
.LBB0_1:
        jmp     foo
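(For reference: output like the listings above can be produced by running the IR through llc with a 32-bit triple, e.g. "llc -mtriple=i686-unknown-linux-gnu f.ll -o -", assuming the IR is saved as f.ll; exact label numbering may differ.)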
I don't expect any significant size savings from this (on a Clang bootstrap I
saw 288 bytes saved), but it does make the code a little tighter.
This patch only handles the 32-bit case; 64-bit would work similarly.
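As a rough illustration of what the 64-bit extension might look like (not part of this patch), assuming a TCRETURNdi64cc pseudo-instruction analogous to TCRETURNdicc were added, the opcode checks below could be widened like this:

    // Hypothetical sketch only: TCRETURNdi64cc is assumed here for
    // illustration and is not added by this patch.
    // In canMakeTailCallConditional, accept 64-bit direct tail calls too:
    if (TailCall.getOpcode() != X86::TCRETURNdi &&
        TailCall.getOpcode() != X86::TCRETURNdi64) {
      // Only direct calls can be done with a conditional branch.
      return false;
    }

    // In replaceBranchWithTailCall, pick the conditional pseudo that
    // matches the width of the original tail call:
    unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi
                       ? X86::TCRETURNdicc
                       : X86::TCRETURNdi64cc;
    auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));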
Differential Revision: https://reviews.llvm.org/D24108
llvm-svn: 280832
Diffstat (limited to 'llvm/lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 69
1 file changed, 69 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 5e1a7528a16..d9a0897c24d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4029,6 +4029,75 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
   return !isPredicated(MI);
 }
 
+bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
+  switch (MI.getOpcode()) {
+  case X86::TCRETURNdi:
+  case X86::TCRETURNri:
+  case X86::TCRETURNmi:
+  case X86::TCRETURNdi64:
+  case X86::TCRETURNri64:
+  case X86::TCRETURNmi64:
+    return true;
+  default:
+    return false;
+  }
+}
+
+bool X86InstrInfo::canMakeTailCallConditional(
+    SmallVectorImpl<MachineOperand> &BranchCond,
+    const MachineInstr &TailCall) const {
+  if (TailCall.getOpcode() != X86::TCRETURNdi) {
+    // Only direct calls can be done with a conditional branch.
+    return false;
+  }
+
+  assert(BranchCond.size() == 1);
+  if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
+    // Can't make a conditional tail call with this condition.
+    return false;
+  }
+
+  const X86MachineFunctionInfo *X86FI =
+      TailCall.getParent()->getParent()->getInfo<X86MachineFunctionInfo>();
+  if (X86FI->getTCReturnAddrDelta() != 0 ||
+      TailCall.getOperand(1).getImm() != 0) {
+    // A conditional tail call cannot do any stack adjustment.
+    return false;
+  }
+
+  return true;
+}
+
+void X86InstrInfo::replaceBranchWithTailCall(
+    MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
+    const MachineInstr &TailCall) const {
+  assert(canMakeTailCallConditional(BranchCond, TailCall));
+
+  MachineBasicBlock::iterator I = MBB.end();
+  while (I != MBB.begin()) {
+    --I;
+    if (I->isDebugValue())
+      continue;
+    if (!I->isBranch())
+      assert(0 && "Can't find the branch to replace!");
+
+    X86::CondCode CC = getCondFromBranchOpc(I->getOpcode());
+    assert(BranchCond.size() == 1);
+    if (CC != BranchCond[0].getImm())
+      continue;
+
+    break;
+  }
+
+  auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(X86::TCRETURNdicc));
+  MIB->addOperand(TailCall.getOperand(0)); // Destination.
+  MIB.addImm(0); // Stack offset (not used).
+  MIB->addOperand(BranchCond[0]); // Condition.
+  MIB->addOperand(TailCall.getOperand(2)); // Regmask.
+
+  I->eraseFromParent();
+}
+
 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
 // fallthrough MBB cannot be identified.
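The hooks above are consumed by the branch-folding layout pass (BranchFolding.cpp, which the full patch also touches but which falls outside this diffstat). The following is a minimal sketch of that caller side, assuming the generic analyzeBranch API and illustrative variable names (PredBB, MBB, TII); it is not the actual BranchFolding.cpp change:

    // Illustrative only. PredBB is a predecessor that may end in a
    // conditional branch into MBB; TII is the target's TargetInstrInfo.
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    SmallVector<MachineOperand, 4> Cond;
    if (!TII->analyzeBranch(*PredBB, TBB, FBB, Cond, /*AllowModify=*/false) &&
        TBB == &MBB && Cond.size() == 1) {
      // PredBB ends in a conditional branch to MBB. If MBB does nothing but
      // tail call, try to fold the call into the branch.
      MachineInstr *TailCall = nullptr;
      for (MachineInstr &MI : MBB)
        if (TII->isUnconditionalTailCall(MI))
          TailCall = &MI;
      if (TailCall && TII->canMakeTailCallConditional(Cond, *TailCall))
        // Rewrite "jCC MBB; ... MBB: jmp foo" into "jCC foo" in PredBB.
        TII->replaceBranchWithTailCall(*PredBB, Cond, *TailCall);
    }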