Diffstat (limited to 'llvm/lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 23 ++++-------------------
1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 88f2f0fffd6..ae45301f04b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -683,10 +683,8 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
   if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
     // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
     // effects.
-    unsigned NewOpc = X86::MOV32ri;
     int Value;
     switch (Orig.getOpcode()) {
-    case X86::MOV64r0: NewOpc = X86::MOV32ri64; Value = 0; break;
     case X86::MOV32r0: Value = 0; break;
     case X86::MOV32r1: Value = 1; break;
     case X86::MOV32r_1: Value = -1; break;
@@ -695,7 +693,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
     }

     const DebugLoc &DL = Orig.getDebugLoc();
-    BuildMI(MBB, I, DL, get(NewOpc))
+    BuildMI(MBB, I, DL, get(X86::MOV32ri))
         .add(Orig.getOperand(0))
         .addImm(Value);
   } else {
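
The MOV32ri64 selection removed above existed only to re-materialize a MOV64r0 into a 64-bit destination; with that pseudo gone, MOV32ri always suffices. For context on why this path materializes the constant with a mov-immediate at all, here is a standalone sketch (not part of the patch; it assumes an x86-64 host and GCC/Clang extended inline asm) showing that a compare's flags survive a "movl $0, reg", whereas the usual "xorl reg, reg" zero idiom would overwrite them:

#include <cstdio>

// Hypothetical demo, independent of LLVM: materializing zero with a
// mov-immediate between a compare and the instruction that consumes its
// flags is safe, because "movl $0, reg" does not write EFLAGS. The xor
// zero idiom that MOV32r0 normally lowers to would clobber them, which is
// exactly the case reMaterialize works around here.
static bool unsignedLess(unsigned a, unsigned b) {
  bool below;
  unsigned zero;
  asm("cmpl %3, %2\n\t"   // EFLAGS := flags of (a - b)
      "movl $0, %1\n\t"   // materialize 0 without touching EFLAGS
      "setb %b0"          // CF still reflects the compare above
      : "=r"(below), "=r"(zero)
      : "r"(a), "r"(b)
      : "cc");
  (void)zero;
  return below;           // true iff a < b as unsigned values
}

int main() {
  printf("%d %d\n", unsignedLess(1, 2), unsignedLess(2, 1)); // prints "1 0"
  return 0;
}
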
@@ -3752,9 +3750,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
       // MOV32r0 etc. are implemented with xor which clobbers condition code.
       // They are safe to move up, if the definition to EFLAGS is dead and
       // earlier instructions do not read or write EFLAGS.
-      if (!Movr0Inst &&
-          (Instr.getOpcode() == X86::MOV32r0 ||
-           Instr.getOpcode() == X86::MOV64r0) &&
+      if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
           Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
         Movr0Inst = &Instr;
         continue;
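
The rule in this hunk can be paraphrased with a toy model (hypothetical types below, not LLVM's MachineInstr API): while scanning the instructions between the flag-producing def and the compare, a zero idiom whose own EFLAGS definition is dead may be remembered for hoisting, but any other read or write of EFLAGS makes the transformation unsafe.

#include <cstdio>
#include <vector>

// Toy model of the check above. ToyInstr and findHoistableZeroIdiom are
// invented for illustration; the real code walks MachineInstrs and uses
// registerDefIsDead(X86::EFLAGS) for the "dead" test.
struct ToyInstr {
  const char *Name;
  bool IsZeroIdiom;      // stands in for getOpcode() == X86::MOV32r0
  bool EFLAGSDefIsDead;  // its own flag result is unused
  bool TouchesEFLAGS;    // reads or writes EFLAGS
};

static const ToyInstr *findHoistableZeroIdiom(const std::vector<ToyInstr> &Scan) {
  const ToyInstr *Movr0Inst = nullptr;
  for (const ToyInstr &I : Scan) {
    if (!Movr0Inst && I.IsZeroIdiom && I.EFLAGSDefIsDead) {
      Movr0Inst = &I;   // tolerated: its EFLAGS write is dead
      continue;
    }
    if (I.TouchesEFLAGS)
      return nullptr;   // something else depends on or redefines the flags
  }
  return Movr0Inst;
}

int main() {
  std::vector<ToyInstr> Clean   = {{"lea", false, false, false},
                                   {"mov32r0", true, true, true}};
  std::vector<ToyInstr> Blocked = {{"mov32r0", true, true, true},
                                   {"add", false, false, true}};
  printf("%s\n", findHoistableZeroIdiom(Clean) ? "hoistable" : "blocked");   // hoistable
  printf("%s\n", findHoistableZeroIdiom(Blocked) ? "hoistable" : "blocked"); // blocked
  return 0;
}
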
@@ -4159,15 +4155,6 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
   switch (MI.getOpcode()) {
   case X86::MOV32r0:
     return Expand2AddrUndef(MIB, get(X86::XOR32rr));
-  case X86::MOV64r0: {
-    const TargetRegisterInfo *TRI = &getRegisterInfo();
-    unsigned Reg = MIB->getOperand(0).getReg();
-    unsigned Reg32 = TRI->getSubReg(Reg, X86::sub_32bit);
-    MIB->getOperand(0).setReg(Reg32);
-    Expand2AddrUndef(MIB, get(X86::XOR32rr));
-    MIB.addReg(Reg, RegState::ImplicitDefine);
-    return true;
-  }
   case X86::MOV32r1:
     return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
   case X86::MOV32r_1:
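
The deleted MOV64r0 expansion xor'ed the 32-bit sub-register and then tagged the full 64-bit register as implicitly defined, relying on the x86-64 rule that writing a 32-bit register clears bits 63:32. A standalone illustration of that rule (assumes an x86-64 host and GCC/Clang extended inline asm; not part of the patch):

#include <cstdint>
#include <cstdio>

// Demo of the hardware behavior the removed expansion depended on: a
// 32-bit "xorl" is enough to materialize a 64-bit zero, because the write
// to the 32-bit sub-register clears the upper half as well. This is why a
// separate 64-bit zero pseudo is redundant next to MOV32r0.
static uint64_t zeroViaXor32(uint64_t garbage) {
  asm("xorl %k0, %k0"   // %k0 selects the 32-bit name of the 64-bit operand
      : "+r"(garbage)
      :
      : "cc");          // the xor also rewrites EFLAGS
  return garbage;       // upper 32 bits were cleared by the hardware
}

int main() {
  printf("0x%llx\n", (unsigned long long)zeroViaXor32(0xdeadbeefcafebabeULL)); // 0x0
  return 0;
}
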
@@ -4911,10 +4898,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     isTwoAddrFold = true;
   } else {
     if (OpNum == 0) {
-      if (MI.getOpcode() == X86::MOV32r0 || MI.getOpcode() == X86::MOV64r0) {
-        unsigned NewOpc = MI.getOpcode() == X86::MOV64r0 ? X86::MOV64mi32
-                                                         : X86::MOV32mi;
-        NewMI = MakeM0Inst(*this, NewOpc, MOs, InsertPt, MI);
+      if (MI.getOpcode() == X86::MOV32r0) {
+        NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
         if (NewMI)
           return NewMI;
       }
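
The surviving fold turns "materialize zero in a register, then store the register" into a single store of an immediate zero (MOV32mi); the removed branch did the same for MOV64r0 via MOV64mi32, a 64-bit store of a sign-extended 32-bit immediate. A hypothetical source-level sketch of the folded form (x86-64, GCC/Clang inline asm; not from the patch):

#include <cstdint>
#include <cstdio>

// "movl $0, mem" is the assembly form of MOV32mi: the zero goes straight
// to memory, so no scratch register and no separate zero idiom is needed.
static void storeZero32(uint32_t *p) {
  asm volatile("movl $0, %0"   // one store, no register materialization
               : "=m"(*p));
}

int main() {
  uint32_t v = 0x12345678u;
  storeZero32(&v);
  printf("%u\n", v);           // prints 0
  return 0;
}
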