From 4216615f99fb95692a0b847c06a21d2af466e1b2 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Tue, 12 Jan 2010 00:09:37 +0000
Subject: Add TargetInstrInfo::isCoalescableInstr.

It returns true if the specified instruction is copy-like, where the source
and destination registers can overlap. This is to be used by the coalescer
to coalesce the source and destination registers of instructions like
X86::MOVSX64rr32. Apparently some crazy people believe the coalescer is too
simple.

llvm-svn: 93210
---
 llvm/lib/Target/X86/X86InstrInfo.cpp | 53 ++++++++++++++++++++++++++++++++++++
 llvm/lib/Target/X86/X86InstrInfo.h   |  8 ++++++
 2 files changed, 61 insertions(+)
(limited to 'llvm/lib')

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 9600cffa91f..52077cfd79d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -712,6 +712,59 @@ bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
   }
 }
 
+bool
+X86InstrInfo::isCoalescableInstr(const MachineInstr &MI, bool &isCopy,
+                                 unsigned &SrcReg, unsigned &DstReg,
+                                 unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
+  switch (MI.getOpcode()) {
+  default: break;
+  case X86::MOVSX16rr8:
+  case X86::MOVZX16rr8:
+  case X86::MOVSX32rr8:
+  case X86::MOVZX32rr8:
+  case X86::MOVSX64rr8:
+  case X86::MOVZX64rr8:
+  case X86::MOVSX32rr16:
+  case X86::MOVZX32rr16:
+  case X86::MOVSX64rr16:
+  case X86::MOVZX64rr16:
+  case X86::MOVSX64rr32:
+  case X86::MOVZX64rr32: {
+    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
+      // Be conservative.
+      return false;
+    isCopy = false;
+    SrcReg = MI.getOperand(1).getReg();
+    DstReg = MI.getOperand(0).getReg();
+    DstSubIdx = 0;
+    switch (MI.getOpcode()) {
+    default:
+      llvm_unreachable(0);
+      break;
+    case X86::MOVSX16rr8:
+    case X86::MOVZX16rr8:
+    case X86::MOVSX32rr8:
+    case X86::MOVZX32rr8:
+    case X86::MOVSX64rr8:
+    case X86::MOVZX64rr8:
+      SrcSubIdx = 1;
+      break;
+    case X86::MOVSX32rr16:
+    case X86::MOVZX32rr16:
+    case X86::MOVSX64rr16:
+    case X86::MOVZX64rr16:
+      SrcSubIdx = 3;
+      break;
+    case X86::MOVSX64rr32:
+    case X86::MOVZX64rr32:
+      SrcSubIdx = 4;
+      break;
+    }
+  }
+  }
+  return isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
+}
+
 /// isFrameOperand - Return true and the FrameIndex if the specified
 /// operand and follow operands form a reference to the stack frame.
 bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index b83441d89ef..6ae7808e2dd 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -448,6 +448,14 @@ public:
                      unsigned &SrcReg, unsigned &DstReg,
                      unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
 
+  /// isCoalescableInstr - Return true if the instruction is "coalescable". That
+  /// is, it's like a copy where it's legal for the source to overlap the
+  /// destination. e.g. X86::MOVSX64rr32.
+  virtual bool isCoalescableInstr(const MachineInstr &MI, bool &isCopy,
+                                  unsigned &SrcReg, unsigned &DstReg,
+                                  unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
+
+
   unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
   /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
   /// stack locations as well. This uses a heuristic so it isn't
-- 
cgit v1.2.3
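
Note (not part of the patch): the sketch below shows how a coalescer-style pass could consume the new hook. It is illustrative only; the helper name collectCoalescableCopies and the generic MachineFunction walk are assumptions, not code from LLVM's SimpleRegisterCoalescing. It relies on the TargetInstrInfo declaration of isCoalescableInstr added by this commit, which is not visible above because the diff is limited to 'llvm/lib'.

// Hypothetical usage sketch (2010-era LLVM API): walk a MachineFunction and
// record source/destination pairs the target reports as coalescable.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <utility>
#include <vector>

using namespace llvm;

// Returns (SrcReg, DstReg) pairs for instructions the target considers
// copy-like, i.e. candidates whose registers a coalescer could try to merge.
static std::vector<std::pair<unsigned, unsigned> >
collectCoalescableCopies(const MachineFunction &MF, const TargetInstrInfo &TII) {
  std::vector<std::pair<unsigned, unsigned> > Pairs;
  for (MachineFunction::const_iterator MBB = MF.begin(), MBBE = MF.end();
       MBB != MBBE; ++MBB) {
    for (MachineBasicBlock::const_iterator MI = MBB->begin(), MIE = MBB->end();
         MI != MIE; ++MI) {
      bool isCopy = false;
      unsigned SrcReg = 0, DstReg = 0, SrcSubIdx = 0, DstSubIdx = 0;
      // With this patch, the X86 implementation also matches the MOVSX/MOVZX
      // register-to-register forms, not just plain moves.
      if (TII.isCoalescableInstr(*MI, isCopy, SrcReg, DstReg,
                                 SrcSubIdx, DstSubIdx))
        Pairs.push_back(std::make_pair(SrcReg, DstReg));
    }
  }
  return Pairs;
}

In the X86 override above, the extension opcodes report isCopy = false and a nonzero SrcSubIdx (1, 3, or 4), telling the caller which sub-register of the source actually feeds the destination; ordinary register moves fall through to isMoveInstr.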