author     Sanjay Patel <spatel@rotateright.com>    2018-12-11 14:05:03 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2018-12-11 14:05:03 +0000
commit     9765ba5f86ebc235fb1fc6e491bfb15026b6e0d9 (patch)
tree       f1289addb6b56be1d73b09bb841490242e8c13ad
parent     8b6434bbb9f9414e88c462c0761b2bd50ef65d92 (diff)
download   bcm5719-llvm-9765ba5f86ebc235fb1fc6e491bfb15026b6e0d9.tar.gz
           bcm5719-llvm-9765ba5f86ebc235fb1fc6e491bfb15026b6e0d9.zip
[x86] remove dead code for 16-bit LEA formation; NFC
As discussed in D55494, this code has been disabled/dead for a long time (the
code references Athlon and Pentium 4), and there's almost no chance that it
will be used given the last decade of uarch evolution. Also, in SDAG we
promote 16-bit ops to 32-bit, so there's almost no way to test this code any
more.

llvm-svn: 348845
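For context, a minimal source-level sketch (a hypothetical example, not part of the patch) of the kind of 16-bit operation involved. Assuming the usual x86-64 pipeline, the arithmetic is performed in 32 bits (integer promotion at the source level, and SelectionDAG type promotion for i16 operations), so the two-address conversion only ever sees 32-bit opcodes and the removed LEA16r-building path is not reached:

```cpp
// Hypothetical example (not from the patch): a 16-bit shift-and-add.
// With typical x86-64 codegen this is widened to 32 bits before
// instruction selection, so only the 32-bit LEA cases apply; the
// result is then truncated back to 16 bits.
unsigned short scale8plus(unsigned short x, unsigned short y) {
  return static_cast<unsigned short>((x << 3) + y);
}
```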
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp  |  70
1 file changed, 13 insertions(+), 57 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7944f89339e..13f9ac71e90 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -937,10 +937,6 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return nullptr;
MachineInstr *NewMI = nullptr;
- // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
- // we have better subtarget support, enable the 16-bit LEA generation here.
- // 16-bit LEA is also slow on Core2.
- bool DisableLEA16 = true;
bool is64Bit = Subtarget.is64Bit();
unsigned MIOpc = MI.getOpcode();
@@ -998,19 +994,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::SHL16ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
- if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
-
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
- : nullptr;
- NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
- .add(Dest)
- .addReg(0)
- .addImm(1ULL << ShAmt)
- .add(Src)
- .addImm(0)
- .addReg(0);
- break;
+ if (!isTruncatedShiftCountForLEA(ShAmt))
+ return nullptr;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
}
case X86::INC64r:
case X86::INC32r: {
@@ -1035,13 +1022,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
case X86::INC16r:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
- : nullptr;
- assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addOffset(
- BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1);
- break;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
case X86::DEC64r:
case X86::DEC32r: {
assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
@@ -1066,13 +1048,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
case X86::DEC16r:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
- : nullptr;
- assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addOffset(
- BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1);
- break;
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
@@ -1111,23 +1088,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
case X86::ADD16rr:
- case X86::ADD16rr_DB: {
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
- : nullptr;
- assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Src2 = MI.getOperand(2).getReg();
- bool isKill2 = MI.getOperand(2).isKill();
- NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
- Src.getReg(), Src.isKill(), Src2, isKill2);
-
- assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
- assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
-
- if (LV && isKill2)
- LV->replaceKillInstruction(Src2, MI, *NewMI);
- break;
- }
+ case X86::ADD16rr_DB:
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
@@ -1164,15 +1127,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
- : nullptr;
- assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(
- BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src),
- MI.getOperand(2));
- break;
-
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
case X86::VMOVDQU8Z128rmk:
case X86::VMOVDQU8Z256rmk:
case X86::VMOVDQU8Zrmk: