author | Jonas Paulsson <paulsson@linux.vnet.ibm.com> | 2016-05-10 08:09:37 +0000
---|---|---
committer | Jonas Paulsson <paulsson@linux.vnet.ibm.com> | 2016-05-10 08:09:37 +0000
commit | 8e5b0c65ccb012c850a8d11cc74eafe79dea5825 |
tree | c6b142f74a0918651eded82ebcb579f29831989a |
parent | ebc7135f8e529470c9745e8324d74f48f0fc3749 |
[foldMemoryOperand()] Pass LiveIntervals to enable liveness check.
SystemZ (and probably other targets as well) can fold a memory operand
by changing the opcode into a new instruction that, as a side effect,
also clobbers the CC register.
Before doing so, the liveness of that register must be checked. When
LIS is passed, getRegUnit() can be called on it and the right
LiveRange is computed on demand.
Reviewed by Matthias Braun.
http://reviews.llvm.org/D19861
llvm-svn: 269026
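The sketch below illustrates the kind of liveness query this change enables. It mirrors the SystemZ hunk in the diff, but the helper name isCCSafeToClobber and its CCReg parameter are illustrative additions, not part of the patch; a target such as SystemZ would pass SystemZ::CC.

```cpp
// Minimal sketch, assuming LLVM headers of this era (LiveIntervalAnalysis.h
// was the header name at the time). The helper and its parameters are
// hypothetical; only the query pattern matches the patch.
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

static bool isCCSafeToClobber(const MachineInstr &MI, unsigned CCReg,
                              const TargetRegisterInfo *TRI,
                              LiveIntervals *LIS) {
  // Without liveness information the fold cannot be proven safe.
  if (!LIS)
    return false;

  // A flags register normally has a single register unit; its LiveRange
  // is computed lazily the first time getRegUnit() is called.
  MCRegUnitIterator CCUnit(CCReg, TRI);
  LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);

  // The fold is only legal if CC is not live at MI, so that the new
  // instruction's CC def can be marked dead.
  SlotIndex MISlot =
      LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
  return !CCLiveRange.liveAt(MISlot);
}
```

Passing LIS as an optional pointer (defaulting to nullptr in the target overrides) keeps callers without liveness information working unchanged; a target simply declines the fold in that case.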
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/CodeGen/InlineSpiller.cpp | 4
-rw-r--r-- | llvm/lib/CodeGen/LiveRangeEdit.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/TargetInstrInfo.cpp | 10
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 3
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrInfo.h | 3
-rw-r--r-- | llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 39
-rw-r--r-- | llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 6
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 8
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.h | 6
9 files changed, 52 insertions, 29 deletions
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 33340901816..6d2fcb9c358 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -761,8 +761,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   MachineInstrSpan MIS(MI);
   MachineInstr *FoldMI =
-      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
-             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
+      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS)
+             : TII.foldMemoryOperand(MI, FoldOps, StackSlot, &LIS);
   if (!FoldMI)
     return false;
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 3ed02f46c0e..8b355f00255 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -205,7 +205,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
   if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
     return false;
-  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
+  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
   if (!FoldMI)
     return false;
   DEBUG(dbgs() << " folded: " << *FoldMI);
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 800ad6d1bb4..6d90f9dd819 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -497,7 +497,8 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// stream.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 int FI) const {
+                                                 int FI,
+                                                 LiveIntervals *LIS) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -519,7 +520,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
   }
   if (NewMI) {
@@ -778,7 +779,8 @@ void TargetInstrInfo::genAlternativeCodeSequence(
 /// stack slot.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI) const {
+                                                 MachineInstr *LoadMI,
+                                                 LiveIntervals *LIS) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -800,7 +802,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
   }
   if (!NewMI) return nullptr;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 4f1a72cf97b..e396c335094 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2448,7 +2448,8 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 32d5c194154..37a9d41f845 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -143,7 +143,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 3938db256e7..fa824d01065 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -15,6 +15,7 @@
 #include "SystemZInstrBuilder.h"
 #include "SystemZTargetMachine.h"
 #include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 using namespace llvm;
@@ -846,31 +847,42 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
-// XXX This is an introduction of a CC def and is illegal! Reactivate
-// with a check of liveness of CC reg.
-#if 0
   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
-    if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+    if (LIS != nullptr &&
+        (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
         isInt<8>(MI->getOperand(2).getImm()) &&
         !MI->getOperand(3).getReg()) {
-      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
-      MachineInstr *BuiltMI =
-          BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
-                  get(SystemZ::AGSI))
+
+      // Check CC liveness, since new instruction introduces a dead
+      // def of CC.
+      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
+      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
+      ++CCUnit;
+      assert (!CCUnit.isValid() && "CC only has one reg unit.");
+      SlotIndex MISlot =
+          LIS->getSlotIndexes()->getInstructionIndex(*MI).getRegSlot();
+      if (!CCLiveRange.liveAt(MISlot)) {
+        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+        MachineInstr *BuiltMI =
+            BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                    get(SystemZ::AGSI))
             .addFrameIndex(FrameIndex)
             .addImm(0)
             .addImm(MI->getOperand(2).getImm());
-      BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
-      return BuiltMI;
+        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
+        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
+        return BuiltMI;
+      }
     }
     return nullptr;
   }
-#endif
   // All other cases require a single operand.
   if (Ops.size() != 1)
@@ -992,7 +1004,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   return nullptr;
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index e995ff10ddc..b5e4ff48733 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -202,11 +202,13 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 45f3727a705..f6c11c80855 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6081,7 +6081,8 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // Check switch flag
   if (NoFusing) return nullptr;
@@ -6193,14 +6194,15 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
   int FrameIndex;
   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
     if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
       return nullptr;
-    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
+    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
   }
   // Check switch flag
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index d72589604ae..5a82c161b27 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -370,7 +370,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
@@ -378,7 +379,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is