path: root/llvm/lib/Target/X86/X86MCInstLower.cpp
author      Craig Topper <craig.topper@intel.com>    2019-08-27 17:24:23 +0000
committer   Craig Topper <craig.topper@intel.com>    2019-08-27 17:24:23 +0000
commit      fc1f08c2f2a8112c97419c2dead294112ef704b4 (patch)
tree        fdfa09f55be5b62ebb3a96bf8ccba6e9bfc829cd /llvm/lib/Target/X86/X86MCInstLower.cpp
parent      900f9ba217171b090e8c49830b7f744fa6d28f4f (diff)
[X86] Remove encoding information from the TAILJMP instructions that are lowered by MCInstLowering. Fix LowerPATCHABLE_TAIL_CALL to also convert them to regular JMP/JCC instructions
There are 5 instructions here that are converted from TAILJMP opcodes to regular JMP/JCC opcodes during MCInstLowering, so normally their encoding information isn't used. The exception is when XRay wraps them in PATCHABLE_TAIL_CALL.

For the ones that weren't already handled in MCInstLowering, add handling for those and remove their encoding information.

This patch fixes LowerPATCHABLE_TAIL_CALL to do the same opcode conversion as the regular lowering path, then removes the encoding information.

Differential Revision: https://reviews.llvm.org/D66561

llvm-svn: 370079
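For orientation only: the sketch below is not part of the commit; makeEncodableTailJump is a hypothetical helper, and the snippet assumes it is compiled inside the X86 backend (alongside X86MCInstLower.cpp) so that MCInst and the TableGen-generated X86:: opcode enum are available. It shows the shape of the conversion the patch centralizes: the TAILJMP* forms stay pseudo-instructions with no encoding of their own, and any path that must emit real bytes first swaps in the encodable JMP/JCC opcode, as convertTailJumpOpcode() in the diff below does for every TAILJMP variant.

    // Illustrative sketch, not LLVM code: the opcode swap that
    // X86MCInstLower::Lower() and LowerPATCHABLE_TAIL_CALL now share.
    #include "llvm/MC/MCInst.h"
    #include "MCTargetDesc/X86MCTargetDesc.h" // backend-internal X86:: opcode enum
    using namespace llvm;

    static MCInst makeEncodableTailJump(unsigned PseudoOpc, const MCOperand &Target) {
      MCInst Jmp;
      if (PseudoOpc == X86::TAILJMPd || PseudoOpc == X86::TAILJMPd64)
        Jmp.setOpcode(X86::JMP_1);    // direct tail call -> relative JMP
      else if (PseudoOpc == X86::TAILJMPr64)
        Jmp.setOpcode(X86::JMP64r);   // register form -> indirect JMP through the register
      else
        Jmp.setOpcode(PseudoOpc);     // remaining forms omitted in this sketch
      Jmp.addOperand(Target);         // the callee operand is carried over unchanged
      return Jmp;
    }

The caller hands the resulting MCInst to the streamer instead of the pseudo, which is why the pseudos no longer need encoding information of their own.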
Diffstat (limited to 'llvm/lib/Target/X86/X86MCInstLower.cpp')
-rw-r--r--   llvm/lib/Target/X86/X86MCInstLower.cpp   80
1 file changed, 57 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 2106387114e..55f0420a928 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -427,6 +427,41 @@ X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
}
}
+// Replace TAILJMP opcodes with their equivalent opcodes that have encoding
+// information.
+static unsigned convertTailJumpOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case X86::TAILJMPr:
+ Opcode = X86::JMP32r;
+ break;
+ case X86::TAILJMPm:
+ Opcode = X86::JMP32m;
+ break;
+ case X86::TAILJMPr64:
+ Opcode = X86::JMP64r;
+ break;
+ case X86::TAILJMPm64:
+ Opcode = X86::JMP64m;
+ break;
+ case X86::TAILJMPr64_REX:
+ Opcode = X86::JMP64r_REX;
+ break;
+ case X86::TAILJMPm64_REX:
+ Opcode = X86::JMP64m_REX;
+ break;
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64:
+ Opcode = X86::JMP_1;
+ break;
+ case X86::TAILJMPd_CC:
+ case X86::TAILJMPd64_CC:
+ Opcode = X86::JCC_1;
+ break;
+ }
+
+ return Opcode;
+}
+
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
@@ -500,12 +535,10 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
- // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions used to have
+ // CALL64r, CALL64pcrel32 - These instructions used to have
// register inputs modeled as normal uses instead of implicit uses. As such,
// they we used to truncate off all but the first operand (the callee). This
// issue seems to have been fixed at some point. This assert verifies that.
- case X86::TAILJMPr64:
- case X86::TAILJMPr64_REX:
case X86::CALL64r:
case X86::CALL64pcrel32:
assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
@@ -535,30 +568,30 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
- // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
- // instruction.
- {
- unsigned Opcode;
- case X86::TAILJMPr:
- Opcode = X86::JMP32r;
- goto SetTailJmpOpcode;
- case X86::TAILJMPd:
- case X86::TAILJMPd64:
- Opcode = X86::JMP_1;
- goto SetTailJmpOpcode;
-
- SetTailJmpOpcode:
- assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
- OutMI.setOpcode(Opcode);
- break;
- }
+ // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
+ // instruction.
+ case X86::TAILJMPr:
+ case X86::TAILJMPr64:
+ case X86::TAILJMPr64_REX:
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64:
+ assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
+ OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
+ break;
case X86::TAILJMPd_CC:
- case X86::TAILJMPd64_CC: {
+ case X86::TAILJMPd64_CC:
assert(OutMI.getNumOperands() == 2 && "Unexpected number of operands!");
- OutMI.setOpcode(X86::JCC_1);
+ OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
+ break;
+
+ case X86::TAILJMPm:
+ case X86::TAILJMPm64:
+ case X86::TAILJMPm64_REX:
+ assert(OutMI.getNumOperands() == X86::AddrNumOperands &&
+ "Unexpected number of operands!");
+ OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
break;
- }
case X86::DEC16r:
case X86::DEC32r:
@@ -1359,6 +1392,7 @@ void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
recordSled(CurSled, MI, SledKind::TAIL_CALL);
unsigned OpCode = MI.getOperand(0).getImm();
+ OpCode = convertTailJumpOpcode(OpCode);
MCInst TC;
TC.setOpcode(OpCode);
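The hunk above ends with the opcode fix-up itself. As a rough paraphrase of how the converted opcode is then consumed (an assumption based on the surrounding function, not the verbatim LLVM source; MCIL names the X86MCInstLower helper passed into LowerPATCHABLE_TAIL_CALL, which the truncated hunk does not show):

    // Paraphrased continuation (assumption): MI is the PATCHABLE_TAIL_CALL
    // pseudo, TC the MCInst whose opcode was just converted above.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
      if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MI.getOperand(I)))
        TC.addOperand(*MaybeOperand);   // carry over the call target (and CC, if any)
    OutStreamer->EmitInstruction(TC, getSubtargetInfo());

With the conversion applied, the XRay tail-call sled emits a real JMP/JCC rather than a TAILJMP pseudo that carries no encoding information.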