Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--   llvm/lib/CodeGen/InlineSpiller.cpp   | 11 +++++++----
-rw-r--r--   llvm/lib/CodeGen/TargetInstrInfo.cpp | 29 ++++++++++++++++++++++++++---
2 files changed, 33 insertions, 7 deletions
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 3ccc18d120f..3e5ae5f5f07 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -739,9 +739,12 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   bool WasCopy = MI->isCopy();
   unsigned ImpReg = 0;
 
-  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
-                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
-                       MI->getOpcode() == TargetOpcode::STACKMAP);
+  // Spill subregs if the target allows it.
+  // We always want to spill subregs for stackmap/patchpoint pseudos.
+  bool SpillSubRegs = TII.isSubregFoldable() ||
+                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
+                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
+                      MI->getOpcode() == TargetOpcode::STACKMAP;
 
   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
   // operands.
@@ -754,7 +757,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
       ImpReg = MO.getReg();
       continue;
     }
-    // FIXME: Teach targets to deal with subregs.
+
     if (!SpillSubRegs && MO.getSubReg())
       return false;
     // We cannot fold a load instruction into a def.
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 265b4bfaff1..01f91b96b58 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -515,6 +515,31 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
   assert(MBB && "foldMemoryOperand needs an inserted instruction");
   MachineFunction &MF = *MBB->getParent();
 
+  // If we're not folding a load into a subreg, the size of the load is the
+  // size of the spill slot. But if we are, we need to figure out what the
+  // actual load size is.
+  int64_t MemSize = 0;
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+  if (Flags & MachineMemOperand::MOStore) {
+    MemSize = MFI.getObjectSize(FI);
+  } else {
+    for (unsigned Idx : Ops) {
+      int64_t OpSize = MFI.getObjectSize(FI);
+
+      if (auto SubReg = MI.getOperand(Idx).getSubReg()) {
+        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
+        if (SubRegSize > 0 && !(SubRegSize % 8))
+          OpSize = SubRegSize / 8;
+      }
+
+      MemSize = std::max(MemSize, OpSize);
+    }
+  }
+
+  assert(MemSize && "Did not expect a zero-sized stack slot");
+
   MachineInstr *NewMI = nullptr;
 
   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
@@ -538,10 +563,9 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
     assert((!(Flags & MachineMemOperand::MOLoad) ||
             NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
-    const MachineFrameInfo &MFI = MF.getFrameInfo();
     assert(MFI.getObjectOffset(FI) != -1);
     MachineMemOperand *MMO = MF.getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
+        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
         MFI.getObjectAlignment(FI));
     NewMI->addMemOperand(MF, MMO);
 
@@ -558,7 +582,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
 
   const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
   MachineBasicBlock::iterator Pos = MI;
-  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
 
   if (Flags == MachineMemOperand::MOStore)
     storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
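
The InlineSpiller change above keys subregister folding on the new TargetInstrInfo::isSubregFoldable() hook instead of hard-coding the statepoint/patchpoint/stackmap pseudos. A minimal sketch of how a backend could opt in, assuming a hypothetical MyTargetInstrInfo (the class name and include path are illustrative, not part of this patch; the hook defaults to false, so targets that do not override it keep the old behavior):

#include "llvm/Target/TargetInstrInfo.h"

namespace llvm {
// Hypothetical backend InstrInfo for illustration only. Returning true
// tells InlineSpiller::foldMemoryOperand() that this target's
// foldMemoryOperandImpl() can fold a spill or reload even when the
// folded operand carries a subregister index, so the
// "if (!SpillSubRegs && MO.getSubReg()) return false;" bail-out above
// no longer fires for this target.
class MyTargetInstrInfo : public TargetInstrInfo {
public:
  bool isSubregFoldable() const override { return true; }
};
} // namespace llvm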
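On the TargetInstrInfo side, the memory operand attached to the folded instruction must describe what is actually accessed: a folded store still writes the whole spill slot, but a reload folded into operands that only read a subregister touches just getSubRegIdxSize(SubReg) bits of it. Below is a standalone sketch of that size computation, with the MachineFrameInfo/TargetRegisterInfo queries replaced by plain parameters (foldedMemSize, SlotSize, and SubRegBits are illustrative names):

#include <algorithm>
#include <cstdint>
#include <vector>

// SlotSize plays the role of MFI.getObjectSize(FI); each SubRegBits
// entry stands in for TRI->getSubRegIdxSize() of one folded operand,
// or 0 when that operand has no subregister index.
int64_t foldedMemSize(bool IsStore, int64_t SlotSize,
                      const std::vector<unsigned> &SubRegBits) {
  if (IsStore)
    return SlotSize; // a folded store always writes the full slot
  int64_t MemSize = 0;
  for (unsigned Bits : SubRegBits) {
    int64_t OpSize = SlotSize;     // no subreg: the whole slot is read
    if (Bits > 0 && Bits % 8 == 0) // only byte-sized subregisters
      OpSize = Bits / 8;           // bits -> bytes
    MemSize = std::max<int64_t>(MemSize, OpSize); // widest use wins
  }
  return MemSize;
}

For example, folding a reload of a 32-bit subregister out of an 8-byte spill slot gives foldedMemSize(false, 8, {32}) == 4, which is the value the patch feeds into the MachineMemOperand in place of the old MFI.getObjectSize(FI).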