diff options
| author | Daniel Sanders <daniel_l_sanders@apple.com> | 2019-11-01 13:18:00 -0700 |
|---|---|---|
| committer | Daniel Sanders <daniel_l_sanders@apple.com> | 2019-11-05 10:31:17 -0800 |
| commit | e74c5b96610dfb03825d31035f50813af58beac5 (patch) | |
| tree | 381ef78b6382be7c73d5ebd38df5f9ce9c78082c /llvm/lib/CodeGen | |
| parent | de56a890725713dffc4ab5bf5fb2f434df27ed4d (diff) | |
| download | bcm5719-llvm-e74c5b96610dfb03825d31035f50813af58beac5.tar.gz bcm5719-llvm-e74c5b96610dfb03825d31035f50813af58beac5.zip | |
[globalisel] Rename G_GEP to G_PTR_ADD
Summary:
G_GEP is rather poorly named. It's a simple pointer+scalar addition and
doesn't support any of the complexities of getelementptr. I therefore
propose that we rename it. There's a G_PTR_MASK so let's follow that
convention and go with G_PTR_ADD.
Reviewers: volkan, aditya_nandakumar, bogner, rovka, arsenm
Subscribers: sdardis, jvesely, wdng, nhaehnle, hiraditya, jrtc27, atanasyan, arphaman, Petar.Avramovic, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69734
Diffstat (limited to 'llvm/lib/CodeGen')
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp | 2 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 22 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp | 4 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 12 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp | 2 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 20 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp | 14 | ||||
| -rw-r--r-- | llvm/lib/CodeGen/MachineVerifier.cpp | 2 |
8 files changed, 40 insertions, 38 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp index 7d9d812d34b..4caabca8328 100644 --- a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp @@ -52,7 +52,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) { case TargetOpcode::G_ANYEXT: case TargetOpcode::G_UNMERGE_VALUES: case TargetOpcode::G_TRUNC: - case TargetOpcode::G_GEP: + case TargetOpcode::G_PTR_ADD: return true; } return false; diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index bb7f751de90..873378a97c4 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -571,7 +571,7 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI); for (auto &Use : MRI.use_instructions(Base)) { - if (Use.getOpcode() != TargetOpcode::G_GEP) + if (Use.getOpcode() != TargetOpcode::G_PTR_ADD) continue; Offset = Use.getOperand(2).getReg(); @@ -597,8 +597,8 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, // forming an indexed one. 
bool MemOpDominatesAddrUses = true; - for (auto &GEPUse : MRI.use_instructions(Use.getOperand(0).getReg())) { - if (!dominates(MI, GEPUse)) { + for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) { + if (!dominates(MI, PtrAddUse)) { MemOpDominatesAddrUses = false; break; } @@ -631,7 +631,7 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, #endif Addr = MI.getOperand(1).getReg(); - MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_GEP, Addr, MRI); + MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI); if (!AddrDef || MRI.hasOneUse(Addr)) return false; @@ -667,8 +667,8 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, } } - // FIXME: check whether all uses of the base pointer are constant GEPs. That - // might allow us to end base's liveness here by adjusting the constant. + // FIXME: check whether all uses of the base pointer are constant PtrAdds. + // That might allow us to end base's liveness here by adjusting the constant. for (auto &UseMI : MRI.use_instructions(Addr)) { if (!dominates(MI, UseMI)) { @@ -1016,7 +1016,7 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val if (DstOff != 0) { auto Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff); - Ptr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0); + Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); } MIB.buildStore(Value, Ptr, *StoreMMO); @@ -1121,13 +1121,13 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst, if (CurrOffset != 0) { Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset) .getReg(0); - LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0); + LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); } auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO); // Create the store. Register StorePtr = - CurrOffset == 0 ? Dst : MIB.buildGEP(PtrTy, Dst, Offset).getReg(0); + CurrOffset == 0 ? 
Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); MIB.buildStore(LdVal, StorePtr, *StoreMMO); CurrOffset += CopyTy.getSizeInBytes(); Size -= CopyTy.getSizeInBytes(); @@ -1218,7 +1218,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst, if (CurrOffset != 0) { auto Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); - LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0); + LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0); } LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0)); CurrOffset += CopyTy.getSizeInBytes(); @@ -1235,7 +1235,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst, if (CurrOffset != 0) { auto Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset); - StorePtr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0); + StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0); } MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO); CurrOffset += CopyTy.getSizeInBytes(); diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp index be8efa8795f..41ef631906e 100644 --- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp +++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp @@ -179,8 +179,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known, Known.Zero = KnownZeroOut; break; } - case TargetOpcode::G_GEP: { - // G_GEP is like G_ADD. FIXME: Is this true for all targets? + case TargetOpcode::G_PTR_ADD: { + // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets? 
LLT Ty = MRI.getType(MI.getOperand(1).getReg()); if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace())) break; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index e712812abd1..2080a381e51 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -885,7 +885,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr; for (unsigned i = 0; i < Regs.size(); ++i) { Register Addr; - MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); + MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); unsigned BaseAlign = getMemOpAlignment(LI); @@ -926,7 +926,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { for (unsigned i = 0; i < Vals.size(); ++i) { Register Addr; - MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); + MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); unsigned BaseAlign = getMemOpAlignment(SI); @@ -1080,8 +1080,8 @@ bool IRTranslator::translateGetElementPtr(const User &U, if (Offset != 0) { LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); - BaseReg = - MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0); + BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) + .getReg(0); Offset = 0; } @@ -1100,14 +1100,14 @@ bool IRTranslator::translateGetElementPtr(const User &U, } else GepOffsetReg = IdxReg; - BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0); + BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); } } if (Offset != 0) { auto OffsetMIB = MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset); - 
MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); + MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); return true; } diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp index 28143b30d4e..b9c90e69ddb 100644 --- a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp +++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp @@ -60,7 +60,7 @@ bool InstructionSelector::isBaseWithConstantOffset( return false; MachineInstr *RootI = MRI.getVRegDef(Root.getReg()); - if (RootI->getOpcode() != TargetOpcode::G_GEP) + if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) return false; MachineOperand &RHS = RootI->getOperand(2); diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 761dd0a5487..a85e04e04a1 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -1748,8 +1748,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { Observer.changedInstr(MI); return Legalized; - case TargetOpcode::G_GEP: - assert(TypeIdx == 1 && "unable to legalize pointer of GEP"); + case TargetOpcode::G_PTR_ADD: + assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD"); Observer.changingInstr(MI); widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); Observer.changedInstr(MI); @@ -2083,8 +2083,9 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { auto OffsetCst = MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8); - Register GEPReg = MRI.createGenericVirtualRegister(PtrTy); - auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0)); + Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy); + auto SmallPtr = + MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0)); auto SmallLoad = MIRBuilder.buildLoad(SmallLdReg, SmallPtr.getReg(0), *SmallMMO); @@ -2151,12 +2152,13 @@ 
LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { auto ShiftAmt = MIRBuilder.buildConstant(ExtendTy, LargeSplitSize); auto SmallVal = MIRBuilder.buildLShr(ExtendTy, ExtVal, ShiftAmt); - // Generate the GEP and truncating stores. + // Generate the PtrAdd and truncating stores. LLT PtrTy = MRI.getType(PtrReg); auto OffsetCst = MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8); - Register GEPReg = MRI.createGenericVirtualRegister(PtrTy); - auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0)); + Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy); + auto SmallPtr = + MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0)); MachineFunction &MF = MIRBuilder.getMF(); MachineMemOperand *LargeMMO = @@ -2908,7 +2910,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, unsigned ByteOffset = Offset / 8; Register NewAddrReg; - MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset); + MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset); MachineMemOperand *NewMMO = MF.getMachineMemOperand(MMO, ByteOffset, ByteSize); @@ -4176,7 +4178,7 @@ LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) { // Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't // have to generate an extra instruction to negate the alloc and then use - // G_GEP to add the negative offset. + // G_PTR_ADD to add the negative offset. 
auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize); if (Align) { APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true); diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 1c5b6fc3ed8..67d9dacda61 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -219,19 +219,19 @@ void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0, assert((Res == Op0) && "type mismatch"); } -MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res, - const SrcOp &Op0, - const SrcOp &Op1) { +MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res, + const SrcOp &Op0, + const SrcOp &Op1) { assert(Res.getLLTTy(*getMRI()).isPointer() && Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type"); - return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1}); + return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); } Optional<MachineInstrBuilder> -MachineIRBuilder::materializeGEP(Register &Res, Register Op0, - const LLT &ValueTy, uint64_t Value) { +MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, + const LLT &ValueTy, uint64_t Value) { assert(Res == 0 && "Res is a result argument"); assert(ValueTy.isScalar() && "invalid offset type"); @@ -242,7 +242,7 @@ MachineIRBuilder::materializeGEP(Register &Res, Register Op0, Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); auto Cst = buildConstant(ValueTy, Value); - return buildGEP(Res, Op0, Cst.getReg(0)); + return buildPtrAdd(Res, Op0, Cst.getReg(0)); } MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res, diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp index b5dea283185..6361a49d962 100644 --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -1100,7 +1100,7 @@ void 
MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) { break; } - case TargetOpcode::G_GEP: { + case TargetOpcode::G_PTR_ADD: { LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg()); |

