author     Hans Wennborg <hans@hanshq.net>   2017-02-07 20:37:45 +0000
committer  Hans Wennborg <hans@hanshq.net>   2017-02-07 20:37:45 +0000
commit     819e3e02a9d4e2b0e986c5d2cfe13f138cff17cc (patch)
tree       6b0c1abbe0741e77a9da4d50e84cca4e761f6ab1 /llvm/lib
parent     968d70ffda6da458373202f1219864aa98e3b171 (diff)
[X86] Disable conditional tail calls (PR31257)
They are currently modelled incorrectly (as calls, which clobber registers, confusing e.g. Machine Copy Propagation). Reverting until we figure out the proper solution.

llvm-svn: 294348
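To make the "confusing e.g. Machine Copy Propagation" point concrete, below is a minimal standalone C++ sketch, not LLVM's MachineCopyPropagation; all types, names, and the register numbers are illustrative. It shows how a call-style register mask can make a copy look dead inside a block even though the not-taken (fall-through) path of a conditional tail call still needs it:

#include <cstdio>
#include <set>
#include <vector>

struct Inst {
  enum Kind { Copy, CondTailCall, Use } K;
  int Def;                 // register written by a Copy, else -1
  int Src;                 // register read by a Copy or Use, else -1
  std::set<int> Clobbers;  // register mask (the tail call is modelled as a call)
};

// Naive block-local "erase dead copies" step: a copy whose destination is
// clobbered by a later call's register mask, with no read in between, is
// dropped. Sound for a real call (nothing downstream can use the register);
// unsound when the "call" is really a conditional branch, because the
// register stays live into the fall-through successor on the not-taken path.
std::vector<Inst> eraseDeadCopies(const std::vector<Inst> &Block) {
  std::vector<Inst> Out;
  for (size_t I = 0; I < Block.size(); ++I) {
    bool Dead = false;
    if (Block[I].K == Inst::Copy) {
      for (size_t J = I + 1; J < Block.size(); ++J) {
        if (Block[J].K == Inst::Use && Block[J].Src == Block[I].Def)
          break;                                  // read before the clobber
        if (Block[J].Clobbers.count(Block[I].Def)) {
          Dead = true;                            // "dead" per the call model
          break;
        }
      }
    }
    if (!Dead)
      Out.push_back(Block[I]);
  }
  return Out;
}

int main() {
  // Block ends in a conditional tail call whose mask claims to clobber r3,
  // but r3 (a copy of r5) is still needed in the fall-through successor.
  std::vector<Inst> Block = {{Inst::Copy, /*Def=*/3, /*Src=*/5, {}},
                             {Inst::CondTailCall, -1, -1, /*Clobbers=*/{3}}};
  std::vector<Inst> FallThrough = {{Inst::Use, -1, /*Src=*/3, {}}};
  std::printf("instructions kept in block: %zu of %zu\n",
              eraseDeadCopies(Block).size(), Block.size());
  (void)FallThrough;  // the block-local pass never sees this use of r3
}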
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/BranchFolding.cpp        37
-rw-r--r--  llvm/lib/Target/X86/X86ExpandPseudo.cpp   21
-rw-r--r--  llvm/lib/Target/X86/X86InstrControl.td    31
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp      79
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h         7
-rw-r--r--  llvm/lib/Target/X86/X86MCInstLower.cpp     9
6 files changed, 2 insertions, 182 deletions
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index c632eb7ab7d..b196bf98d27 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -49,7 +49,6 @@ STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumBranchOpts, "Number of branches optimized");
STATISTIC(NumTailMerge , "Number of block tails merged");
STATISTIC(NumHoist , "Number of times common instructions are hoisted");
-STATISTIC(NumTailCalls, "Number of tail calls optimized");
static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
cl::init(cl::BOU_UNSET), cl::Hidden);
@@ -1394,42 +1393,6 @@ ReoptimizeBlock:
}
}
- if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
- MF.getFunction()->optForSize()) {
- // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
- // direction, thereby defeating careful block placement and regressing
- // performance. Therefore, only consider this for optsize functions.
- MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
- if (TII->isUnconditionalTailCall(TailCall)) {
- MachineBasicBlock *Pred = *MBB->pred_begin();
- MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
- SmallVector<MachineOperand, 4> PredCond;
- bool PredAnalyzable =
- !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
-
- if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
- // The predecessor has a conditional branch to this block which consists
- // of only a tail call. Try to fold the tail call into the conditional
- // branch.
- if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
- // TODO: It would be nice if analyzeBranch() could provide a pointer
- // to the branch insturction so replaceBranchWithTailCall() doesn't
- // have to search for it.
- TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
- ++NumTailCalls;
- Pred->removeSuccessor(MBB);
- MadeChange = true;
- return MadeChange;
- }
- }
- // If the predecessor is falling through to this block, we could reverse
- // the branch condition and fold the tail call into that. However, after
- // that we might have to re-arrange the CFG to fall through to the other
- // block and there is a high risk of regressing code size rather than
- // improving it.
- }
- }
-
// Analyze the branch in the current block.
MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
SmallVector<MachineOperand, 4> CurCond;
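For reference, the folding logic removed from BranchFolding.cpp above amounts to a handful of checks. A standalone C++ restatement of that decision (the types here are illustrative, not LLVM's MachineBasicBlock/TargetInstrInfo API):

struct Block;

struct Branch {
  bool Analyzable;    // analyzeBranch-style analysis succeeded
  bool Conditional;   // the branch has a non-empty condition
  Block *TrueTarget;  // block taken when the condition holds
};

struct Block {
  bool Empty;                      // no real (non-debug) instructions
  int NumPreds;                    // number of CFG predecessors
  bool FirstInstIsUncondTailCall;  // block is just "jmp bar" (a tail call)
  Branch PredBranch;               // branch ending the single predecessor
};

// "Jcc foo; foo: jmp bar" may become "Jcc bar", but only under -Os-style
// constraints, since changing the branch direction can defeat careful block
// placement and regress performance.
bool shouldFoldTailCallIntoCondBranch(const Block &BB, bool OptForSize,
                                      bool TargetAllowsCondTailCall) {
  if (BB.Empty || BB.NumPreds != 1 || !OptForSize)
    return false;
  if (!BB.FirstInstIsUncondTailCall)
    return false;
  const Branch &PB = BB.PredBranch;
  if (!PB.Analyzable || !PB.Conditional || PB.TrueTarget != &BB)
    return false;
  return TargetAllowsCondTailCall;  // the canMakeTailCallConditional role
}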
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index c4bc29e963e..6a2cf1d5314 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -77,11 +77,9 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
default:
return false;
case X86::TCRETURNdi:
- case X86::TCRETURNdicc:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
- case X86::TCRETURNdi64cc:
case X86::TCRETURNri64:
case X86::TCRETURNmi64: {
bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
@@ -99,10 +97,6 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Offset = StackAdj - MaxTCDelta;
assert(Offset >= 0 && "Offset should never be negative");
- if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
- assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");
- }
-
if (Offset) {
// Check for possible merge with preceding ADD instruction.
Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
@@ -111,21 +105,12 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Jump to label or value in register.
bool IsWin64 = STI->isTargetWin64();
- if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
- Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
+ if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
unsigned Op;
switch (Opcode) {
case X86::TCRETURNdi:
Op = X86::TAILJMPd;
break;
- case X86::TCRETURNdicc:
- Op = X86::TAILJMPd_CC;
- break;
- case X86::TCRETURNdi64cc:
- assert(!IsWin64 && "Conditional tail calls confuse the Win64 unwinder.");
- // TODO: We could do it for Win64 "leaf" functions though; PR30337.
- Op = X86::TAILJMPd64_CC;
- break;
default:
// Note: Win64 uses REX prefixes indirect jumps out of functions, but
// not direct ones.
@@ -141,10 +126,6 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}
- if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {
- MIB.addImm(MBBI->getOperand(2).getImm());
- }
-
} else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
unsigned Op = (Opcode == X86::TCRETURNmi)
? X86::TAILJMPm
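After this change the direct tail-call expansion keeps only the unconditional mapping. A minimal standalone sketch of what remains (the enum mirrors opcode names from the hunk above; this is not X86ExpandPseudo::ExpandMI itself):

enum Opcode { TCRETURNdi, TCRETURNdi64, TAILJMPd, TAILJMPd64, INVALID };

Opcode expandDirectTailCall(Opcode Pseudo) {
  switch (Pseudo) {
  case TCRETURNdi:
    return TAILJMPd;    // 32-bit direct tail jump
  case TCRETURNdi64:
    return TAILJMPd64;  // 64-bit direct tail jump; per the hunk, only
                        // indirect jumps need a REX prefix on Win64
  default:
    return INVALID;     // indirect/memory forms take a different path
  }
}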
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index 4ea223e82be..2f260c48df4 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -264,21 +264,6 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
"jmp{l}\t{*}$dst", [], IIC_JMP_MEM>;
}
-// Conditional tail calls are similar to the above, but they are branches
-// rather than barriers, and they use EFLAGS.
-let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
- isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
- let Uses = [ESP, EFLAGS] in {
- def TCRETURNdicc : PseudoI<(outs),
- (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
-
- // This gets substituted to a conditional jump instruction in MC lowering.
- def TAILJMPd_CC : Ii32PCRel<0x80, RawFrm, (outs),
- (ins i32imm_pcrel:$dst, i32imm:$cond),
- "",
- [], IIC_JMP_REL>;
-}
-
//===----------------------------------------------------------------------===//
// Call Instructions...
@@ -340,19 +325,3 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
"rex64 jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
}
}
-
-// Conditional tail calls are similar to the above, but they are branches
-// rather than barriers, and they use EFLAGS.
-let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
- isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
- let Uses = [RSP, EFLAGS] in {
- def TCRETURNdi64cc : PseudoI<(outs),
- (ins i64i32imm_pcrel:$dst, i32imm:$offset,
- i32imm:$cond), []>;
-
- // This gets substituted to a conditional jump instruction in MC lowering.
- def TAILJMPd64_CC : Ii32PCRel<0x80, RawFrm, (outs),
- (ins i64i32imm_pcrel:$dst, i32imm:$cond),
- "",
- [], IIC_JMP_REL>;
-}
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 87f4a9946f2..a9b29818505 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5577,85 +5577,6 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
return !isPredicated(MI);
}
-bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
- switch (MI.getOpcode()) {
- case X86::TCRETURNdi:
- case X86::TCRETURNri:
- case X86::TCRETURNmi:
- case X86::TCRETURNdi64:
- case X86::TCRETURNri64:
- case X86::TCRETURNmi64:
- return true;
- default:
- return false;
- }
-}
-
-bool X86InstrInfo::canMakeTailCallConditional(
- SmallVectorImpl<MachineOperand> &BranchCond,
- const MachineInstr &TailCall) const {
- if (TailCall.getOpcode() != X86::TCRETURNdi &&
- TailCall.getOpcode() != X86::TCRETURNdi64) {
- // Only direct calls can be done with a conditional branch.
- return false;
- }
-
- if (Subtarget.isTargetWin64()) {
- // Conditional tail calls confuse the Win64 unwinder.
- // TODO: Allow them for "leaf" functions; PR30337.
- return false;
- }
-
- assert(BranchCond.size() == 1);
- if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
- // Can't make a conditional tail call with this condition.
- return false;
- }
-
- const X86MachineFunctionInfo *X86FI =
- TailCall.getParent()->getParent()->getInfo<X86MachineFunctionInfo>();
- if (X86FI->getTCReturnAddrDelta() != 0 ||
- TailCall.getOperand(1).getImm() != 0) {
- // A conditional tail call cannot do any stack adjustment.
- return false;
- }
-
- return true;
-}
-
-void X86InstrInfo::replaceBranchWithTailCall(
- MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
- const MachineInstr &TailCall) const {
- assert(canMakeTailCallConditional(BranchCond, TailCall));
-
- MachineBasicBlock::iterator I = MBB.end();
- while (I != MBB.begin()) {
- --I;
- if (I->isDebugValue())
- continue;
- if (!I->isBranch())
- assert(0 && "Can't find the branch to replace!");
-
- X86::CondCode CC = getCondFromBranchOpc(I->getOpcode());
- assert(BranchCond.size() == 1);
- if (CC != BranchCond[0].getImm())
- continue;
-
- break;
- }
-
- unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
- : X86::TCRETURNdi64cc;
-
- auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
- MIB->addOperand(TailCall.getOperand(0)); // Destination.
- MIB.addImm(0); // Stack offset (not used).
- MIB->addOperand(BranchCond[0]); // Condition.
- MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
-
- I->eraseFromParent();
-}
-
// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.
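The removed canMakeTailCallConditional() boils down to four legality checks. A standalone C++ restatement (the struct is illustrative, not the MachineInstr/X86MachineFunctionInfo API):

struct TailCallInfo {
  bool IsDirect;           // TCRETURNdi / TCRETURNdi64
  bool TargetIsWin64;      // Win64 unwinder cannot describe a conditional tail call
  int CondCode;            // X86 condition code taken from the branch
  int LastValidCondCode;   // upper bound on real condition codes
  long TCReturnAddrDelta;  // per-function return-address adjustment
  long StackAdjustment;    // immediate operand 1 of the TCRETURN
};

bool canMakeTailCallConditional(const TailCallInfo &TC) {
  if (!TC.IsDirect)
    return false;          // only direct calls can become a Jcc
  if (TC.TargetIsWin64)
    return false;          // see the PR30337 note in the removed code
  if (TC.CondCode > TC.LastValidCondCode)
    return false;          // no Jcc form exists for this condition
  if (TC.TCReturnAddrDelta != 0 || TC.StackAdjustment != 0)
    return false;          // a conditional tail call cannot adjust the stack
  return true;
}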
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index c2644a35e48..0d4bc5d0fb6 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -316,13 +316,6 @@ public:
// Branch analysis.
bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
- bool isUnconditionalTailCall(const MachineInstr &MI) const override;
- bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
- const MachineInstr &TailCall) const override;
- void replaceBranchWithTailCall(MachineBasicBlock &MBB,
- SmallVectorImpl<MachineOperand> &Cond,
- const MachineInstr &TailCall) const override;
-
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 65949531126..0887643ed5a 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -499,16 +499,11 @@ ReSimplify:
break;
}
- // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
+ // TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
{ unsigned Opcode;
case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
case X86::TAILJMPd:
case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
- case X86::TAILJMPd_CC:
- case X86::TAILJMPd64_CC:
- Opcode = X86::GetCondBranchFromCond(
- static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
- goto SetTailJmpOpcode;
SetTailJmpOpcode:
MCOperand Saved = OutMI.getOperand(0);
@@ -1299,11 +1294,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
- case X86::TAILJMPd_CC:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
case X86::TAILJMPd64:
- case X86::TAILJMPd64_CC:
case X86::TAILJMPr64_REX:
case X86::TAILJMPm64_REX:
// Lower these as normal, but add some comments.
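The removed TAILJMPd_CC/TAILJMPd64_CC lowering picked a conditional-jump form from the condition-code immediate in operand 1, keeping operand 0 as the target. A standalone sketch of that kind of condition-to-jump mapping (only a few codes shown; names are illustrative, not X86's CondCode enum or GetCondBranchFromCond):

enum CondCode { COND_E, COND_NE, COND_B, COND_GE };

const char *condTailJumpMnemonic(CondCode CC) {
  switch (CC) {
  case COND_E:  return "je";   // taken if ZF is set
  case COND_NE: return "jne";  // taken if ZF is clear
  case COND_B:  return "jb";   // taken if CF is set (unsigned below)
  case COND_GE: return "jge";  // taken if SF == OF (signed greater-or-equal)
  }
  return "jmp";                // fallback for codes not covered in this sketch
}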