Diffstat (limited to 'llvm/lib/Target')
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp       | 13
 llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp | 11
 llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp        |  4
 llvm/lib/Target/ARM/ARMISelLowering.cpp               |  8
 llvm/lib/Target/X86/X86InterleavedAccess.cpp          |  3
 llvm/lib/Target/X86/X86WinEHState.cpp                 |  8
 6 files changed, 25 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1e979f33dbb..361e2eaa7b0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8398,8 +8398,9 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
// If we're generating more than one load, compute the base address of
// subsequent loads as an offset from the previous.
if (LoadCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(
- BaseAddr, VecTy->getVectorNumElements() * Factor);
+ BaseAddr =
+ Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
+ VecTy->getVectorNumElements() * Factor);
CallInst *LdN = Builder.CreateCall(
LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN");
@@ -8561,7 +8562,8 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
// If we generating more than one store, we compute the base address of
// subsequent stores as an offset from the previous.
if (StoreCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
+ BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
+ BaseAddr, LaneLen * Factor);
Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
Builder.CreateCall(StNFunc, Ops);
@@ -11717,8 +11719,9 @@ static Value *UseTlsOffset(IRBuilder<> &IRB, unsigned Offset) {
Function *ThreadPointerFunc =
Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), Offset),
- Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
+ IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
+ Offset),
+ IRB.getInt8PtrTy()->getPointerTo(0));
}
Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
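All three AArch64 hunks (and the matching ARM hunks further down) make the same functional change: CreateConstGEP1_32 now receives the GEP's source element type explicitly instead of recovering it from the pointee type of BaseAddr. A minimal sketch of the new call pattern, with an illustrative helper name and variable names that are not part of the patch:

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Sketch: step the base pointer past one interleaved group of
    // NumElts * Factor elements of ElemTy. The element type is passed to
    // CreateConstGEP1_32 explicitly rather than being derived from the
    // pointee type of BaseAddr.
    static Value *advancePastGroup(IRBuilder<> &Builder, Type *ElemTy,
                                   Value *BaseAddr, unsigned NumElts,
                                   unsigned Factor) {
      return Builder.CreateConstGEP1_32(ElemTy, BaseAddr, NumElts * Factor,
                                        "base.next");
    }

Spelling out the element type at every GEP construction site is what lets the builder stop looking through the pointer's pointee type, presumably as groundwork for the opaque-pointer migration.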
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index b2637418ebb..b1abe1001e9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -140,17 +140,14 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
//
// Additionally widen any sub-dword load to i32 even if suitably aligned,
// so that CSE between different argument loads works easily.
-
ArgPtr = Builder.CreateConstInBoundsGEP1_64(
- KernArgSegment,
- AlignDownOffset,
- Arg.getName() + ".kernarg.offset.align.down");
+ Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
+ Arg.getName() + ".kernarg.offset.align.down");
AdjustedArgTy = Builder.getInt32Ty();
} else {
ArgPtr = Builder.CreateConstInBoundsGEP1_64(
- KernArgSegment,
- EltOffset,
- Arg.getName() + ".kernarg.offset");
+ Builder.getInt8Ty(), KernArgSegment, EltOffset,
+ Arg.getName() + ".kernarg.offset");
AdjustedArgTy = ArgTy;
}
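The kernarg hunks are the same migration with byte-granular indexing: these GEPs step in i8 units, so the patch spells out Builder.getInt8Ty() as the element type. A sketch of the pattern, with a hypothetical helper name:

    #include "llvm/ADT/Twine.h"
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Sketch: compute a byte offset into the kernarg segment. Indexing is in
    // i8 units, and the i8 element type is now an explicit argument.
    static Value *kernArgByteOffset(IRBuilder<> &Builder, Value *KernArgSegment,
                                    uint64_t ByteOffset, const Twine &ArgName) {
      return Builder.CreateConstInBoundsGEP1_64(Builder.getInt8Ty(),
                                                KernArgSegment, ByteOffset,
                                                ArgName + ".kernarg.offset");
    }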
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 5f05ce7d2a2..a2d62c73404 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -244,10 +244,10 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
// We could do a single 64-bit load here, but it's likely that the basic
// 32-bit and extract sequence is already present, and it is probably easier
// to CSE this. The loads should be mergable later anyway.
- Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
+ Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
- Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
+ Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
MDNode *MD = MDNode::get(Mod->getContext(), None);
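Same change again, this time with an i32 element type because CastDispatchPtr already points at i32 within the dispatch packet. A compact sketch of the two-field load, with an illustrative helper name and the unsigned-alignment CreateAlignedLoad overload that the hunk itself uses:

    #include <utility>

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Sketch: load two adjacent i32 fields at indices 1 and 2 relative to a
    // pointer already cast to i32*. Each GEP names its i32 element type
    // explicitly; the loads keep the 4-byte alignment used above.
    static std::pair<Value *, Value *> loadTwoDwords(IRBuilder<> &Builder,
                                                     Value *CastDispatchPtr) {
      Type *I32Ty = Builder.getInt32Ty();
      Value *GEP1 = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
      Value *Lo = Builder.CreateAlignedLoad(I32Ty, GEP1, 4);
      Value *GEP2 = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
      Value *Hi = Builder.CreateAlignedLoad(I32Ty, GEP2, 4);
      return {Lo, Hi};
    }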
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 36434b9a438..68cc502d501 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -14906,8 +14906,9 @@ bool ARMTargetLowering::lowerInterleavedLoad(
// If we're generating more than one load, compute the base address of
// subsequent loads as an offset from the previous.
if (LoadCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(
- BaseAddr, VecTy->getVectorNumElements() * Factor);
+ BaseAddr =
+ Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
+ VecTy->getVectorNumElements() * Factor);
SmallVector<Value *, 2> Ops;
Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
@@ -15046,7 +15047,8 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
// If we generating more than one store, we compute the base address of
// subsequent stores as an offset from the previous.
if (StoreCount > 0)
- BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
+ BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
+ BaseAddr, LaneLen * Factor);
SmallVector<Value *, 6> Ops;
Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
index 717ed2b0631..a95eeffd94d 100644
--- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp
+++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -213,7 +213,8 @@ void X86InterleavedAccessGroup::decompose(
// Generate N loads of T type.
for (unsigned i = 0; i < NumLoads; i++) {
// TODO: Support inbounds GEP.
- Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
+ Value *NewBasePtr =
+ Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
Instruction *NewLoad =
Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
DecomposedVectors.push_back(NewLoad);
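Here the generic CreateGEP overload gains the explicit pointee type of VecBasePtr, the vector type being loaded. A sketch of the decomposition loop's addressing, using illustrative names rather than the pass's own:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Sketch: emit NumLoads consecutive loads of vector type VecTy starting
    // at VecBasePtr. CreateGEP takes VecTy, the pointee type of VecBasePtr,
    // as its first argument instead of inferring it from the pointer.
    static void emitConsecutiveLoads(IRBuilder<> &Builder, Type *VecTy,
                                     Value *VecBasePtr, unsigned NumLoads,
                                     unsigned Alignment,
                                     SmallVectorImpl<Value *> &Loads) {
      for (unsigned i = 0; i < NumLoads; ++i) {
        Value *Ptr = Builder.CreateGEP(VecTy, VecBasePtr, Builder.getInt32(i));
        Loads.push_back(Builder.CreateAlignedLoad(VecTy, Ptr, Alignment));
      }
    }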
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 3e31d99f656..df9a171f582 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -781,8 +781,8 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
IRBuilder<> Builder(CS.getInstruction());
Value *State;
if (InCleanup) {
- Value *StateField =
- Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
+ Value *StateField = Builder.CreateStructGEP(RegNode->getAllocatedType(),
+ RegNode, StateFieldIndex);
State = Builder.CreateLoad(Builder.getInt32Ty(), StateField);
} else {
State = Builder.getInt32(getStateForCallSite(BlockColors, FuncInfo, CS));
@@ -793,7 +793,7 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
void WinEHStatePass::insertStateNumberStore(Instruction *IP, int State) {
IRBuilder<> Builder(IP);
- Value *StateField =
- Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
+ Value *StateField = Builder.CreateStructGEP(RegNode->getAllocatedType(),
+ RegNode, StateFieldIndex);
Builder.CreateStore(Builder.getInt32(State), StateField);
}
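The nullptr form of CreateStructGEP left the builder to infer the struct type from the pointer operand; the patch instead asks the RegNode alloca for its allocated type. A sketch of the resulting pattern, with a hypothetical helper name:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Sketch: address field StateFieldIndex of the EH registration node,
    // which is an alloca of a struct type. The struct type comes from the
    // alloca itself rather than a nullptr placeholder.
    static Value *stateFieldAddr(IRBuilder<> &Builder, AllocaInst *RegNode,
                                 unsigned StateFieldIndex) {
      return Builder.CreateStructGEP(RegNode->getAllocatedType(), RegNode,
                                     StateFieldIndex, "state.field");
    }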