Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp               2
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp       22
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp        4
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp         12
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp   2
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp      20
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp     14
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp                  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp       2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp  16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp      6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp   2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp         2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp  10
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h    2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp        6
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp     2
-rw-r--r--  llvm/lib/Target/ARM/ARMCallLowering.cpp               2
-rw-r--r--  llvm/lib/Target/ARM/ARMInstructionSelector.cpp        2
-rw-r--r--  llvm/lib/Target/ARM/ARMLegalizerInfo.cpp              2
-rw-r--r--  llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp           2
-rw-r--r--  llvm/lib/Target/Mips/MipsCallLowering.cpp             2
-rw-r--r--  llvm/lib/Target/Mips/MipsInstructionSelector.cpp      8
-rw-r--r--  llvm/lib/Target/Mips/MipsLegalizerInfo.cpp            2
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp         4
-rw-r--r--  llvm/lib/Target/X86/X86CallLowering.cpp               2
-rw-r--r--  llvm/lib/Target/X86/X86InstructionSelector.cpp        6
-rw-r--r--  llvm/lib/Target/X86/X86LegalizerInfo.cpp              8
28 files changed, 84 insertions, 82 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
index 7d9d812d34b..4caabca8328 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
@@ -52,7 +52,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
case TargetOpcode::G_ANYEXT:
case TargetOpcode::G_UNMERGE_VALUES:
case TargetOpcode::G_TRUNC:
- case TargetOpcode::G_GEP:
+ case TargetOpcode::G_PTR_ADD:
return true;
}
return false;
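
For context, a minimal sketch (not part of this commit; builder setup and names invented) of what listing G_PTR_ADD in shouldCSEOpc enables: a CSE-aware MachineIRBuilder returns the cached instruction instead of emitting a duplicate pointer add.

    // Assumes a CSEMIRBuilder already wired to the function's GISelCSEInfo.
    CSEMIRBuilder B(MF);
    auto Off = B.buildConstant(LLT::scalar(64), 8);
    auto P1 = B.buildPtrAdd(PtrTy, Base, Off);
    auto P2 = B.buildPtrAdd(PtrTy, Base, Off); // CSE hit: reuses P1's vreg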
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index bb7f751de90..873378a97c4 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -571,7 +571,7 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
for (auto &Use : MRI.use_instructions(Base)) {
- if (Use.getOpcode() != TargetOpcode::G_GEP)
+ if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
continue;
Offset = Use.getOperand(2).getReg();
@@ -597,8 +597,8 @@ bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
// forming an indexed one.
bool MemOpDominatesAddrUses = true;
- for (auto &GEPUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
- if (!dominates(MI, GEPUse)) {
+ for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
+ if (!dominates(MI, PtrAddUse)) {
MemOpDominatesAddrUses = false;
break;
}
@@ -631,7 +631,7 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
#endif
Addr = MI.getOperand(1).getReg();
- MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_GEP, Addr, MRI);
+ MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
if (!AddrDef || MRI.hasOneUse(Addr))
return false;
@@ -667,8 +667,8 @@ bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
}
}
- // FIXME: check whether all uses of the base pointer are constant GEPs. That
- // might allow us to end base's liveness here by adjusting the constant.
+ // FIXME: check whether all uses of the base pointer are constant PtrAdds.
+ // That might allow us to end base's liveness here by adjusting the constant.
for (auto &UseMI : MRI.use_instructions(Addr)) {
if (!dominates(MI, UseMI)) {
@@ -1016,7 +1016,7 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
if (DstOff != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
- Ptr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+ Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
}
MIB.buildStore(Value, Ptr, *StoreMMO);
@@ -1121,13 +1121,13 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
.getReg(0);
- LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
+ LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
}
auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
// Create the store.
Register StorePtr =
- CurrOffset == 0 ? Dst : MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+ CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
MIB.buildStore(LdVal, StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();
Size -= CopyTy.getSizeInBytes();
@@ -1218,7 +1218,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
- LoadPtr = MIB.buildGEP(PtrTy, Src, Offset).getReg(0);
+ LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
}
LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
CurrOffset += CopyTy.getSizeInBytes();
@@ -1235,7 +1235,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
if (CurrOffset != 0) {
auto Offset =
MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
- StorePtr = MIB.buildGEP(PtrTy, Dst, Offset).getReg(0);
+ StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
}
MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
CurrOffset += CopyTy.getSizeInBytes();
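
Roughly, each non-zero-offset chunk of the inlined memcpy above lands in MIR of this shape (register names and sizes invented; memory-operand syntax of this era):

    %off:_(s64) = G_CONSTANT i64 16
    %lp:_(p0) = G_PTR_ADD %src, %off(s64)
    %val:_(s128) = G_LOAD %lp(p0) :: (load 16)
    %sp:_(p0) = G_PTR_ADD %dst, %off(s64)
    G_STORE %val(s128), %sp(p0) :: (store 16)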
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index be8efa8795f..41ef631906e 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -179,8 +179,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
Known.Zero = KnownZeroOut;
break;
}
- case TargetOpcode::G_GEP: {
- // G_GEP is like G_ADD. FIXME: Is this true for all targets?
+ case TargetOpcode::G_PTR_ADD: {
+ // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets?
LLT Ty = MRI.getType(MI.getOperand(1).getReg());
if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
break;
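
As a sketch of why the G_ADD treatment is useful here (example invented): if the base pointer is known 16-byte aligned and the offset is a known constant, integer-add reasoning pins down the result's low bits.

    %off:_(s64) = G_CONSTANT i64 4
    %p:_(p0) = G_PTR_ADD %base, %off(s64)
    ; low 4 bits of %base known zero, so %p's low bits are 0b0100:
    ; bits 0, 1 and 3 known zero, bit 2 known one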
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index e712812abd1..2080a381e51 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -885,7 +885,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
for (unsigned i = 0; i < Regs.size(); ++i) {
Register Addr;
- MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
+ MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(LI);
@@ -926,7 +926,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
for (unsigned i = 0; i < Vals.size(); ++i) {
Register Addr;
- MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
+ MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
unsigned BaseAlign = getMemOpAlignment(SI);
@@ -1080,8 +1080,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (Offset != 0) {
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
- BaseReg =
- MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
+ BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
+ .getReg(0);
Offset = 0;
}
@@ -1100,14 +1100,14 @@ bool IRTranslator::translateGetElementPtr(const User &U,
} else
GepOffsetReg = IdxReg;
- BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
+ BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
}
}
if (Offset != 0) {
auto OffsetMIB =
MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
- MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
+ MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
return true;
}
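
For reference, a hedged sketch of what translateGetElementPtr emits for a simple indexed GEP (names invented; the scale is the element size from the DataLayout):

    ; IR:  %q = getelementptr i32, i32* %p, i64 %i
    ; generic MIR, roughly:
    %four:_(s64) = G_CONSTANT i64 4
    %scaled:_(s64) = G_MUL %i, %four
    %q:_(p0) = G_PTR_ADD %p, %scaled(s64)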
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index 28143b30d4e..b9c90e69ddb 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -60,7 +60,7 @@ bool InstructionSelector::isBaseWithConstantOffset(
return false;
MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
- if (RootI->getOpcode() != TargetOpcode::G_GEP)
+ if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
return false;
MachineOperand &RHS = RootI->getOperand(2);
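
The pattern isBaseWithConstantOffset now recognizes is a G_PTR_ADD whose second operand is defined by a G_CONSTANT (sketch, invented names):

    %c:_(s64) = G_CONSTANT i64 42
    %addr:_(p0) = G_PTR_ADD %base, %c(s64)
    ; isBaseWithConstantOffset returns true for the operand holding %addr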
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 761dd0a5487..a85e04e04a1 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1748,8 +1748,8 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
- case TargetOpcode::G_GEP:
- assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
+ case TargetOpcode::G_PTR_ADD:
+ assert(TypeIdx == 1 && "unable to legalize pointer of G_PTR_ADD");
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
Observer.changedInstr(MI);
@@ -2083,8 +2083,9 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto OffsetCst =
MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
- Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
- auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
+ Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
+ auto SmallPtr =
+ MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
auto SmallLoad = MIRBuilder.buildLoad(SmallLdReg, SmallPtr.getReg(0),
*SmallMMO);
@@ -2151,12 +2152,13 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
auto ShiftAmt = MIRBuilder.buildConstant(ExtendTy, LargeSplitSize);
auto SmallVal = MIRBuilder.buildLShr(ExtendTy, ExtVal, ShiftAmt);
- // Generate the GEP and truncating stores.
+ // Generate the PtrAdd and truncating stores.
LLT PtrTy = MRI.getType(PtrReg);
auto OffsetCst =
MIRBuilder.buildConstant(LLT::scalar(64), LargeSplitSize / 8);
- Register GEPReg = MRI.createGenericVirtualRegister(PtrTy);
- auto SmallPtr = MIRBuilder.buildGEP(GEPReg, PtrReg, OffsetCst.getReg(0));
+ Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
+ auto SmallPtr =
+ MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst.getReg(0));
MachineFunction &MF = MIRBuilder.getMF();
MachineMemOperand *LargeMMO =
@@ -2908,7 +2910,7 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
unsigned ByteOffset = Offset / 8;
Register NewAddrReg;
- MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
+ MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
MachineMemOperand *NewMMO =
MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
@@ -4176,7 +4178,7 @@ LegalizerHelper::lowerDynStackAlloc(MachineInstr &MI) {
// Subtract the final alloc from the SP. We use G_PTRTOINT here so we don't
// have to generate an extra instruction to negate the alloc and then use
- // G_GEP to add the negative offset.
+ // G_PTR_ADD to add the negative offset.
auto Alloc = MIRBuilder.buildSub(IntPtrTy, SPTmp, AllocSize);
if (Align) {
APInt AlignMask(IntPtrTy.getSizeInBits(), Align, true);
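
A rough example of the load-splitting path above (invented registers): an underaligned s64 load split into two s32 halves, with buildPtrAdd forming the address of the second half.

    %lo:_(s32) = G_LOAD %ptr(p0) :: (load 4)
    %c4:_(s64) = G_CONSTANT i64 4
    %q:_(p0) = G_PTR_ADD %ptr, %c4(s64)
    %hi:_(s32) = G_LOAD %q(p0) :: (load 4)
    ; %lo and %hi are then extended, shifted and OR'd into the s64 result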
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 1c5b6fc3ed8..67d9dacda61 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -219,19 +219,19 @@ void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
assert((Res == Op0) && "type mismatch");
}
-MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
- const SrcOp &Op0,
- const SrcOp &Op1) {
+MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
+ const SrcOp &Op0,
+ const SrcOp &Op1) {
assert(Res.getLLTTy(*getMRI()).isPointer() &&
Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");
- return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
+ return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
Optional<MachineInstrBuilder>
-MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
- const LLT &ValueTy, uint64_t Value) {
+MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
+ const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@@ -242,7 +242,7 @@ MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
auto Cst = buildConstant(ValueTy, Value);
- return buildGEP(Res, Op0, Cst.getReg(0));
+ return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,
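
Hedged usage sketch of the two renamed helpers (types, registers and insertion point invented):

    LLT S64 = LLT::scalar(64);
    LLT P0 = LLT::pointer(0, 64);
    auto Off = MIRBuilder.buildConstant(S64, 16);
    auto Ptr = MIRBuilder.buildPtrAdd(P0, Base, Off); // always emits G_PTR_ADD
    Register Res;                                     // must be 0 on entry
    MIRBuilder.materializePtrAdd(Res, Base, S64, 16); // emits only for a non-zero
                                                      // offset; for 0, Res = Base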
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index b5dea283185..6361a49d962 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1100,7 +1100,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
- case TargetOpcode::G_GEP: {
+ case TargetOpcode::G_PTR_ADD: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
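
Sketched, the shapes the verifier distinguishes for the renamed opcode (invented names): a pointer base with a scalar offset is well-formed; a pointer offset is not.

    %p:_(p0) = G_PTR_ADD %base(p0), %off(s64)  ; ok
    %q:_(p0) = G_PTR_ADD %base(p0), %o2(p0)    ; rejected: offset must be scalar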
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index ed93d02aa61..03245de7840 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -160,7 +160,7 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
+ MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MF, Offset);
return AddrReg;
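
This handler (like its ARM, Mips and X86 counterparts below) computes an outgoing stack slot address as SP plus a byte offset; in MIR terms, roughly (invented values):

    %off:_(s64) = G_CONSTANT i64 16
    %addr:_(p0) = G_PTR_ADD %sp, %off(s64)
    G_STORE %arg(s64), %addr(p0) :: (store 8)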
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index c9d4654ade0..20432ca9e7a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -462,7 +462,7 @@ static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
}
} else if (OpSize == 64) {
switch (GenericOpc) {
- case TargetOpcode::G_GEP:
+ case TargetOpcode::G_PTR_ADD:
return AArch64::ADDXrr;
case TargetOpcode::G_SHL:
return AArch64::LSLVXr;
@@ -1765,7 +1765,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
auto *PtrMI = MRI.getVRegDef(PtrReg);
// Try to fold a GEP into our unsigned immediate addressing mode.
- if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
+ if (PtrMI->getOpcode() == TargetOpcode::G_PTR_ADD) {
if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
int64_t Imm = *COff;
const unsigned Size = MemSizeInBits / 8;
@@ -1883,7 +1883,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
- case TargetOpcode::G_GEP: {
+ case TargetOpcode::G_PTR_ADD: {
MachineIRBuilder MIRBuilder(I);
emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2),
MIRBuilder);
@@ -4189,15 +4189,15 @@ AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
//
// val = G_CONSTANT LegalShiftVal
// shift = G_SHL off_reg val
- // ptr = G_GEP base_reg shift
+ // ptr = G_PTR_ADD base_reg shift
// x = G_LOAD ptr
//
// And fold it into this addressing mode:
//
// ldr x, [base_reg, off_reg, lsl #LegalShiftVal]
- // Check if we can find the G_GEP.
- MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_GEP, Root.getReg(), MRI);
+ // Check if we can find the G_PTR_ADD.
+ MachineInstr *Gep = getOpcodeDef(TargetOpcode::G_PTR_ADD, Root.getReg(), MRI);
if (!Gep || !isWorthFoldingIntoExtendedReg(*Gep, MRI))
return None;
@@ -4275,7 +4275,7 @@ AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
///
/// Where x2 is the base register, and x3 is an offset register.
///
-/// When possible (or profitable) to fold a G_GEP into the address calculation,
+/// When possible (or profitable) to fold a G_PTR_ADD into the address calculation,
/// this will do so. Otherwise, it will return None.
InstructionSelector::ComplexRendererFns
AArch64InstructionSelector::selectAddrModeRegisterOffset(
@@ -4284,7 +4284,7 @@ AArch64InstructionSelector::selectAddrModeRegisterOffset(
// We need a GEP.
MachineInstr *Gep = MRI.getVRegDef(Root.getReg());
- if (!Gep || Gep->getOpcode() != TargetOpcode::G_GEP)
+ if (!Gep || Gep->getOpcode() != TargetOpcode::G_PTR_ADD)
return None;
// If this is used more than once, let's not bother folding.
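
End to end, the register-offset fold this code aims for looks like the following (invented registers; the fold is still gated by the one-use and profitability checks above):

    %addr:_(p0) = G_PTR_ADD %base, %offset
    %val:_(s64) = G_LOAD %addr(p0) :: (load 8)
    ; selected, when profitable, to:  ldr x0, [x_base, x_offset]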
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 7a1901bd5b1..c7e02eca493 100644
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -104,7 +104,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
.moreElementsToNextPow2(0)
.minScalarSameAs(1, 0);
- getActionDefinitionsBuilder(G_GEP)
+ getActionDefinitionsBuilder(G_PTR_ADD)
.legalFor({{p0, s64}})
.clampScalar(1, s64, s64);
@@ -743,7 +743,7 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
// Realign the list to the actual required alignment.
auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
- auto ListTmp = MIRBuilder.buildGEP(PtrTy, List, AlignMinus1.getReg(0));
+ auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
DstPtr = MRI.createGenericVirtualRegister(PtrTy);
MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
@@ -758,7 +758,7 @@ bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrSize));
- auto NewList = MIRBuilder.buildGEP(PtrTy, DstPtr, Size.getReg(0));
+ auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));
MIRBuilder.buildStore(
NewList, ListPtr,
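
Concretely, the {p0, s64} rule with clampScalar(1, s64, s64) forces narrower offsets to be widened before selection; per the LegalizerHelper hunk earlier, the widening uses G_SEXT. Sketch (invented registers):

    ; before legalization
    %p:_(p0) = G_PTR_ADD %base, %off32(s32)
    ; after, roughly
    %off64:_(s64) = G_SEXT %off32(s32)
    %p:_(p0) = G_PTR_ADD %base, %off64(s64)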
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 8ec73aa3c04..0e50d7f8df6 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -529,7 +529,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Arithmetic ops.
case TargetOpcode::G_ADD:
case TargetOpcode::G_SUB:
- case TargetOpcode::G_GEP:
+ case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_MUL:
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UDIV:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 58c44acde1a..a68ba23e411 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -356,7 +356,7 @@ Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
B.buildConstant(OffsetReg, Offset);
- B.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);
+ B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
return DstReg;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 3cfa9d57ec4..3854a2d178f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -538,7 +538,7 @@ bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
return true;
}
-bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
+bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
return selectG_ADD_SUB(I);
}
@@ -1478,7 +1478,7 @@ void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
assert(PtrMI);
- if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
+ if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
return;
GEPInfo GEPInfo(*PtrMI);
@@ -1710,8 +1710,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
return selectG_MERGE_VALUES(I);
case TargetOpcode::G_UNMERGE_VALUES:
return selectG_UNMERGE_VALUES(I);
- case TargetOpcode::G_GEP:
- return selectG_GEP(I);
+ case TargetOpcode::G_PTR_ADD:
+ return selectG_PTR_ADD(I);
case TargetOpcode::G_IMPLICIT_DEF:
return selectG_IMPLICIT_DEF(I);
case TargetOpcode::G_INSERT:
@@ -1961,7 +1961,7 @@ AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
return Default;
const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
- if (!OpDef || OpDef->getOpcode() != AMDGPU::G_GEP)
+ if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
return Default;
Optional<int64_t> Offset =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index d3c83a6a872..cc9701c99ec 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -87,7 +87,7 @@ private:
bool selectG_EXTRACT(MachineInstr &I) const;
bool selectG_MERGE_VALUES(MachineInstr &I) const;
bool selectG_UNMERGE_VALUES(MachineInstr &I) const;
- bool selectG_GEP(MachineInstr &I) const;
+ bool selectG_PTR_ADD(MachineInstr &I) const;
bool selectG_IMPLICIT_DEF(MachineInstr &I) const;
bool selectG_INSERT(MachineInstr &I) const;
bool selectG_INTRINSIC(MachineInstr &I) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e6c3739d902..136f0351ce7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -478,7 +478,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0);
}
- getActionDefinitionsBuilder(G_GEP)
+ getActionDefinitionsBuilder(G_PTR_ADD)
.legalForCartesianProduct(AddrSpaces64, {S64})
.legalForCartesianProduct(AddrSpaces32, {S32})
.scalarize(0);
@@ -1202,7 +1202,7 @@ Register AMDGPULegalizerInfo::getSegmentAperture(
Register LoadResult = MRI.createGenericVirtualRegister(S32);
Register LoadAddr;
- B.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
+ B.materializePtrAdd(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
B.buildLoad(LoadResult, LoadAddr, *MMO);
return LoadResult;
}
@@ -2130,7 +2130,7 @@ bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
if (!loadInputValue(KernargPtrReg, B, Arg))
return false;
- B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
+ B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index b82475ebd83..8dae8b6c932 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2268,7 +2268,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLVM_FALLTHROUGH;
}
- case AMDGPU::G_GEP:
+ case AMDGPU::G_PTR_ADD:
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
case AMDGPU::G_MUL:
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index d3b595ce832..ce260a9ba14 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -106,7 +106,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
+ MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 8e5e474c0f5..56a487bf14b 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -1061,7 +1061,7 @@ bool ARMInstructionSelector::select(MachineInstr &I) {
case G_SHL: {
return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
}
- case G_GEP:
+ case G_PTR_ADD:
I.setDesc(TII.get(Opcodes.ADDrr));
MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
break;
diff --git a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
index 81414e6d76f..b758046e0f3 100644
--- a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -162,7 +162,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
.legalFor({s32, p0})
.minScalar(0, s32);
- getActionDefinitionsBuilder(G_GEP)
+ getActionDefinitionsBuilder(G_PTR_ADD)
.legalFor({{p0, s32}})
.minScalar(1, s32);
diff --git a/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp b/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
index b100150175f..4dcdb33d308 100644
--- a/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -249,7 +249,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_SEXT:
case G_ZEXT:
case G_ANYEXT:
- case G_GEP:
+ case G_PTR_ADD:
case G_INTTOPTR:
case G_PTRTOINT:
case G_CTLZ:
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 8f1cc82f7cc..6ba15c23286 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -299,7 +299,7 @@ Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
+ MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MachinePointerInfo MPO =
MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 8657b61dfda..6f3e2dca2ab 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -302,7 +302,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
I.eraseFromParent();
return true;
}
- case G_GEP: {
+ case G_PTR_ADD: {
MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
.add(I.getOperand(0))
.add(I.getOperand(1))
@@ -409,15 +409,15 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
MachineOperand BaseAddr = I.getOperand(1);
int64_t SignedOffset = 0;
- // Try to fold load/store + G_GEP + G_CONSTANT
+ // Try to fold load/store + G_PTR_ADD + G_CONSTANT
// %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
- // %Addr:(p0) = G_GEP %BaseAddr, %SignedOffset
+ // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
// %LoadResult/%StoreSrc = load/store %Addr(p0)
// into:
// %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
- if (Addr->getOpcode() == G_GEP) {
+ if (Addr->getOpcode() == G_PTR_ADD) {
MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
if (Offset->getOpcode() == G_CONSTANT) {
APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index ab0543b8cc8..a94f66fc8c1 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -166,7 +166,7 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
.legalFor({s32})
.clampScalar(0, s32, s32);
- getActionDefinitionsBuilder({G_GEP, G_INTTOPTR})
+ getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
.legalFor({{p0, s32}});
getActionDefinitionsBuilder(G_PTRTOINT)
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index 50d144f5234..27419b28d27 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -440,7 +440,7 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case G_UMULH:
case G_ZEXTLOAD:
case G_SEXTLOAD:
- case G_GEP:
+ case G_PTR_ADD:
case G_INTTOPTR:
case G_PTRTOINT:
case G_AND:
@@ -638,7 +638,7 @@ void MipsRegisterBankInfo::setRegBank(MachineInstr &MI,
MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
break;
}
- case TargetOpcode::G_GEP: {
+ case TargetOpcode::G_PTR_ADD: {
assert(MRI.getType(Dest).isPointer() && "Unexpected operand type.");
MRI.setRegBank(Dest, getRegBank(Mips::GPRBRegBankID));
break;
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 7ee637cfd52..57bf799cf89 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -115,7 +115,7 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildConstant(OffsetReg, Offset);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
+ MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 931e8ce7c12..d6174d35aac 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -340,7 +340,7 @@ bool X86InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_STORE:
case TargetOpcode::G_LOAD:
return selectLoadStoreOp(I, MRI, MF);
- case TargetOpcode::G_GEP:
+ case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_FRAME_INDEX:
return selectFrameIndexOrGep(I, MRI, MF);
case TargetOpcode::G_GLOBAL_VALUE:
@@ -476,7 +476,7 @@ static void X86SelectAddress(const MachineInstr &I,
assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
"unsupported type.");
- if (I.getOpcode() == TargetOpcode::G_GEP) {
+ if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
int64_t Imm = *COff;
if (isInt<32>(Imm)) { // Check for displacement overflow.
@@ -560,7 +560,7 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
MachineFunction &MF) const {
unsigned Opc = I.getOpcode();
- assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
+ assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
"unexpected instruction");
const Register DefReg = I.getOperand(0).getReg();
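
On X86 the shared frame-index/pointer-add path typically selects an LEA; a hedged sketch of the in-range constant case (register assignment invented):

    %c:_(s64) = G_CONSTANT i64 8
    %p:_(p0) = G_PTR_ADD %base, %c(s64)
    ; selects to something like:  leaq 8(%rdi), %rax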
diff --git a/llvm/lib/Target/X86/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
index 04121f863c8..da53d642002 100644
--- a/llvm/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
@@ -77,7 +77,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
narrowToSmallerAndWidenToSmallest);
setLegalizeScalarToDifferentSizeStrategy(
- G_GEP, 1, widenToLargerTypesUnsupportedOtherwise);
+ G_PTR_ADD, 1, widenToLargerTypesUnsupportedOtherwise);
setLegalizeScalarToDifferentSizeStrategy(
G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);
@@ -140,8 +140,8 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
setAction({G_FRAME_INDEX, p0}, Legal);
setAction({G_GLOBAL_VALUE, p0}, Legal);
- setAction({G_GEP, p0}, Legal);
- setAction({G_GEP, 1, s32}, Legal);
+ setAction({G_PTR_ADD, p0}, Legal);
+ setAction({G_PTR_ADD, 1, s32}, Legal);
if (!Subtarget.is64Bit()) {
getActionDefinitionsBuilder(G_PTRTOINT)
@@ -223,7 +223,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
setAction({MemOp, s64}, Legal);
// Pointer-handling
- setAction({G_GEP, 1, s64}, Legal);
+ setAction({G_PTR_ADD, 1, s64}, Legal);
getActionDefinitionsBuilder(G_PTRTOINT)
.legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
.maxScalar(0, s64)