diff options
| author | Benjamin Kramer <benny.kra@googlemail.com> | 2015-02-28 12:04:00 +0000 |
|---|---|---|
| committer | Benjamin Kramer <benny.kra@googlemail.com> | 2015-02-28 12:04:00 +0000 |
| commit | f1362f61960b36665592f34811540096ddc089f4 (patch) | |
| tree | af5ec6c5269d5b18b96777d59be85d38fba13e87 /llvm/lib/Target/X86/X86InstrInfo.cpp | |
| parent | 4f6ac16292f5435cd00782757e6794f2bf6b64f5 (diff) | |
| download | bcm5719-llvm-f1362f61960b36665592f34811540096ddc089f4.tar.gz bcm5719-llvm-f1362f61960b36665592f34811540096ddc089f4.zip | |
ArrayRefize memory operand folding. NFC.
llvm-svn: 230846
Diffstat (limited to 'llvm/lib/Target/X86/X86InstrInfo.cpp')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 47 |
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index ea4e25188da..36cbe08bc80 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -4573,9 +4573,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI, return nullptr; // Check whether we can fold the def into SrcOperandId. - SmallVector<unsigned, 8> Ops; - Ops.push_back(SrcOperandId); - MachineInstr *FoldMI = foldMemoryOperand(MI, Ops, DefMI); + MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI); if (FoldMI) { FoldAsLoadDefReg = 0; return FoldMI; @@ -4670,7 +4668,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { } static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, - const SmallVectorImpl<MachineOperand> &MOs, + ArrayRef<MachineOperand> MOs, MachineInstr *MI, const TargetInstrInfo &TII) { // Create the base instruction with the memory operand as the first part. @@ -4697,9 +4695,8 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, return MIB; } -static MachineInstr *FuseInst(MachineFunction &MF, - unsigned Opcode, unsigned OpNo, - const SmallVectorImpl<MachineOperand> &MOs, +static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, + unsigned OpNo, ArrayRef<MachineOperand> MOs, MachineInstr *MI, const TargetInstrInfo &TII) { // Omit the implicit operands, something BuildMI can't do. 
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), @@ -4723,7 +4720,7 @@ static MachineInstr *FuseInst(MachineFunction &MF, } static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, - const SmallVectorImpl<MachineOperand> &MOs, + ArrayRef<MachineOperand> MOs, MachineInstr *MI) { MachineFunction &MF = *MI->getParent()->getParent(); MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode)); @@ -4736,12 +4733,12 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, return MIB.addImm(0); } -MachineInstr* -X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, - MachineInstr *MI, unsigned OpNum, - const SmallVectorImpl<MachineOperand> &MOs, - unsigned Size, unsigned Align, - bool AllowCommute) const { +MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr *MI, + unsigned OpNum, + ArrayRef<MachineOperand> MOs, + unsigned Size, unsigned Align, + bool AllowCommute) const { const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr; bool isCallRegIndirect = Subtarget.callRegIndirect(); @@ -5104,10 +5101,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum, MI->addRegisterKilled(Reg, TRI, true); } -MachineInstr* -X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, - const SmallVectorImpl<unsigned> &Ops, - int FrameIndex) const { +MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, + MachineInstr *MI, + ArrayRef<unsigned> Ops, + int FrameIndex) const { // Check switch flag if (NoFusing) return nullptr; @@ -5145,10 +5142,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, } else if (Ops.size() != 1) return nullptr; - SmallVector<MachineOperand,4> MOs; - MOs.push_back(MachineOperand::CreateFI(FrameIndex)); - return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, - Size, Alignment, /*AllowCommute=*/true); + return foldMemoryOperandImpl(MF, MI, Ops[0], + 
MachineOperand::CreateFI(FrameIndex), Size, + Alignment, /*AllowCommute=*/true); } static bool isPartialRegisterLoad(const MachineInstr &LoadMI, @@ -5170,9 +5166,9 @@ static bool isPartialRegisterLoad(const MachineInstr &LoadMI, return false; } -MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, +MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, - const SmallVectorImpl<unsigned> &Ops, + ArrayRef<unsigned> Ops, MachineInstr *LoadMI) const { // If loading from a FrameIndex, fold directly from the FrameIndex. unsigned NumOps = LoadMI->getDesc().getNumOperands(); @@ -5304,9 +5300,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, /*Size=*/0, Alignment, /*AllowCommute=*/true); } - bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, - const SmallVectorImpl<unsigned> &Ops) const { + ArrayRef<unsigned> Ops) const { // Check switch flag if (NoFusing) return 0; |

