author     James Y Knight <jyknight@google.com>    2019-02-01 20:44:24 +0000
committer  James Y Knight <jyknight@google.com>    2019-02-01 20:44:24 +0000
commit     14359ef1b6a0610ac91df5f5a91c88a0b51c187c (patch)
tree       53b7628ce6ecba998379d0d19f875bc9dad3b69a /llvm/lib/CodeGen
parent     d9e85a0861b7e9320c34547a2ad7f49c504a9381 (diff)
[opaque pointer types] Pass value type to LoadInst creation.
This cleans up all LoadInst creation in LLVM to explicitly pass the
value type rather than deriving it from the pointer's element-type.

Differential Revision: https://reviews.llvm.org/D57172

llvm-svn: 352911
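For illustration (a minimal sketch, not part of the diff below; emitLoad,
Int32Ty, and the surrounding setup are hypothetical stand-ins), the new
overloads take the loaded value's type as an explicit first argument
instead of deriving it from the pointer operand's element type:

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Hypothetical helper contrasting the old and new CreateLoad forms.
  void emitLoad(IRBuilder<> &Builder, Value *Ptr, LLVMContext &Ctx) {
    // Old form (phased out by this change): the result type was
    // inferred from Ptr's pointee type, which an opaque pointer
    // cannot provide.
    //   LoadInst *LI = Builder.CreateLoad(Ptr, "val");

    // New form: the value type is passed explicitly.
    Type *Int32Ty = Type::getInt32Ty(Ctx);
    LoadInst *LI = Builder.CreateLoad(Int32Ty, Ptr, "val");
    (void)LI;
  }

The aligned and raw-instruction forms in this diff follow the same
pattern, e.g. Builder.CreateAlignedLoad(EltTy, Gep, AlignVal) and
new LoadInst(CI->getType(), CI->getArgOperand(1), "", CI).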
Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp           |  9
-rw-r--r--  llvm/lib/CodeGen/GCRootLowering.cpp             |  2
-rw-r--r--  llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp |  2
-rw-r--r--  llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp   |  2
-rw-r--r--  llvm/lib/CodeGen/SafeStack.cpp                  | 14
-rw-r--r--  llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp   | 11
-rw-r--r--  llvm/lib/CodeGen/ShadowStackGCLowering.cpp      |  6
-rw-r--r--  llvm/lib/CodeGen/SjLjEHPrepare.cpp              |  6
-rw-r--r--  llvm/lib/CodeGen/StackProtector.cpp             |  6
-rw-r--r--  llvm/lib/CodeGen/WasmEHPrepare.cpp              |  3
-rw-r--r--  llvm/lib/CodeGen/WinEHPrepare.cpp               |  9
11 files changed, 41 insertions, 29 deletions
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 6e8d68ebfa1..c247a8afcf7 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -381,7 +381,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
Addr->getType()->getPointerAddressSpace());
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
- auto *NewLI = Builder.CreateLoad(NewAddr);
+ auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
@@ -1769,8 +1769,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// from call}
Type *FinalResultTy = I->getType();
Value *V = UndefValue::get(FinalResultTy);
- Value *ExpectedOut =
- Builder.CreateAlignedLoad(AllocaCASExpected, AllocaAlignment);
+ Value *ExpectedOut = Builder.CreateAlignedLoad(
+ CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
V = Builder.CreateInsertValue(V, ExpectedOut, 0);
V = Builder.CreateInsertValue(V, Result, 1);
@@ -1780,7 +1780,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
if (UseSizedLibcall)
V = Builder.CreateBitOrPointerCast(Result, I->getType());
else {
- V = Builder.CreateAlignedLoad(AllocaResult, AllocaAlignment);
+ V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
+ AllocaAlignment);
Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
}
I->replaceAllUsesWith(V);
diff --git a/llvm/lib/CodeGen/GCRootLowering.cpp b/llvm/lib/CodeGen/GCRootLowering.cpp
index c4433d889e5..90571d090bf 100644
--- a/llvm/lib/CodeGen/GCRootLowering.cpp
+++ b/llvm/lib/CodeGen/GCRootLowering.cpp
@@ -213,7 +213,7 @@ bool LowerIntrinsics::DoLowering(Function &F, GCStrategy &S) {
}
case Intrinsic::gcread: {
// Replace a read barrier with a simple load.
- Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
+ Value *Ld = new LoadInst(CI->getType(), CI->getArgOperand(1), "", CI);
Ld->takeName(CI);
CI->replaceAllUsesWith(Ld);
CI->eraseFromParent();
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
index e5e28e6cead..5524924f869 100644
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -1218,7 +1218,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
"interleaved.wide.ptrcast");
// Create the wide load and update the MemorySSA.
- auto LI = Builder.CreateAlignedLoad(CI, InsertionPoint->getAlignment(),
+ auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
"interleaved.wide.load");
auto MSSAU = MemorySSAUpdater(&MSSA);
MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 182c3924a70..2752e186875 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -44,7 +44,7 @@ static bool lowerLoadRelative(Function &F) {
Value *OffsetPtr =
B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
- Value *OffsetI32 = B.CreateAlignedLoad(OffsetPtrI32, 4);
+ Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index 2b80c63e570..b7a6760df02 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -371,7 +371,7 @@ Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
if (!StackGuardVar)
StackGuardVar =
F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy);
- return IRB.CreateLoad(StackGuardVar, "StackGuard");
+ return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
}
void SafeStack::findInsts(Function &F,
@@ -452,7 +452,8 @@ SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
++NumUnsafeStackRestorePoints;
IRB.SetInsertPoint(I->getNextNode());
- Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
+ Value *CurrentTop =
+ DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
IRB.CreateStore(CurrentTop, UnsafeStackPtr);
}
@@ -461,7 +462,7 @@ SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
AllocaInst *StackGuardSlot, Value *StackGuard) {
- Value *V = IRB.CreateLoad(StackGuardSlot);
+ Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
@@ -659,7 +660,8 @@ void SafeStack::moveDynamicAllocasToUnsafeStack(
uint64_t TySize = DL.getTypeAllocSize(Ty);
Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
- Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
+ Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
+ IntPtrTy);
SP = IRB.CreateSub(SP, Size);
// Align the SP value to satisfy the AllocaInst, type and stack alignments.
@@ -697,7 +699,7 @@ void SafeStack::moveDynamicAllocasToUnsafeStack(
if (II->getIntrinsicID() == Intrinsic::stacksave) {
IRBuilder<> IRB(II);
- Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
+ Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
LI->takeName(II);
II->replaceAllUsesWith(LI);
II->eraseFromParent();
@@ -792,7 +794,7 @@ bool SafeStack::run() {
// Load the current stack pointer (we'll also use it as a base pointer).
// FIXME: use a dedicated register for it ?
Instruction *BasePointer =
- IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
+ IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
assert(BasePointer->getType() == StackPtrTy);
AllocaInst *StackGuardSlot = nullptr;
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
index 64b358b5980..103ba7661d9 100644
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -143,7 +143,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
// Short-cut if the mask is all-true.
if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
- Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
+ Value *NewI = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal);
CI->replaceAllUsesWith(NewI);
CI->eraseFromParent();
return;
@@ -166,7 +166,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
continue;
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
- LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
+ LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
VResult =
Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
}
@@ -198,7 +198,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
Value *Gep =
Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
- LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
+ LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
Builder.getInt32(Idx));
@@ -366,6 +366,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Src0 = CI->getArgOperand(3);
VectorType *VecType = cast<VectorType>(CI->getType());
+ Type *EltTy = VecType->getElementType();
IRBuilder<> Builder(CI->getContext());
Instruction *InsertPt = CI;
@@ -387,7 +388,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
"Ptr" + Twine(Idx));
LoadInst *Load =
- Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
+ Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
VResult = Builder.CreateInsertElement(
VResult, Load, Builder.getInt32(Idx), "Res" + Twine(Idx));
}
@@ -418,7 +419,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
"Ptr" + Twine(Idx));
LoadInst *Load =
- Builder.CreateAlignedLoad(Ptr, AlignVal, "Load" + Twine(Idx));
+ Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
Value *NewVResult = Builder.CreateInsertElement(VResult, Load,
Builder.getInt32(Idx),
"Res" + Twine(Idx));
diff --git a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
index f45db6d643c..17a4d76c4c8 100644
--- a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -312,7 +312,8 @@ bool ShadowStackGCLowering::runOnFunction(Function &F) {
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Initialize the map pointer and load the current head of the shadow stack.
- Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
+ Instruction *CurrentHead =
+ AtEntry.CreateLoad(StackEntryTy->getPointerTo(), Head, "gc_currhead");
Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
StackEntry, 0, 1, "gc_frame.map");
AtEntry.CreateStore(FrameMap, EntryMapPtr);
@@ -353,7 +354,8 @@ bool ShadowStackGCLowering::runOnFunction(Function &F) {
Instruction *EntryNextPtr2 =
CreateGEP(Context, *AtExit, ConcreteStackEntryTy, StackEntry, 0, 0,
"gc_frame.next");
- Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
+ Value *SavedHead = AtExit->CreateLoad(StackEntryTy->getPointerTo(),
+ EntryNextPtr2, "gc_savedhead");
AtExit->CreateStore(SavedHead, Head);
}
diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 94a1ac55fd2..23e5ce0acae 100644
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -189,14 +189,16 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 2, "__data");
// The exception values come back in context->__data[0].
+ Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *ExceptionAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 0, "exception_gep");
- Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
+ Value *ExnVal = Builder.CreateLoad(Int32Ty, ExceptionAddr, true, "exn_val");
ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy());
Value *SelectorAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData,
0, 1, "exn_selector_gep");
- Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
+ Value *SelVal =
+ Builder.CreateLoad(Int32Ty, SelectorAddr, true, "exn_selector_val");
substituteLPadValues(LPI, ExnVal, SelVal);
}
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 94f74b05aff..34942bc1b6c 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -322,7 +322,7 @@ static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
IRBuilder<> &B,
bool *SupportsSelectionDAGSP = nullptr) {
if (Value *Guard = TLI->getIRStackGuard(B))
- return B.CreateLoad(Guard, true, "StackGuard");
+ return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");
// Use SelectionDAG SSP handling, since there isn't an IR guard.
//
@@ -417,7 +417,7 @@ bool StackProtector::InsertStackProtectors() {
// Generate the function-based epilogue instrumentation.
// The target provides a guard check function, generate a call to it.
IRBuilder<> B(RI);
- LoadInst *Guard = B.CreateLoad(AI, true, "Guard");
+ LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
CallInst *Call = B.CreateCall(GuardCheck, {Guard});
Call->setAttributes(GuardCheck->getAttributes());
Call->setCallingConv(GuardCheck->getCallingConv());
@@ -472,7 +472,7 @@ bool StackProtector::InsertStackProtectors() {
// Generate the stack protector instructions in the old basic block.
IRBuilder<> B(BB);
Value *Guard = getStackGuard(TLI, M, B);
- LoadInst *LI2 = B.CreateLoad(AI, true);
+ LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
Value *Cmp = B.CreateICmpEQ(Guard, LI2);
auto SuccessProb =
BranchProbabilityInfo::getBranchProbStackProtector(true);
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
index 02516fad387..e9986f566c3 100644
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -344,7 +344,8 @@ void WasmEHPrepare::prepareEHPad(BasicBlock *BB, bool NeedLSDA,
PersCI->setDoesNotThrow();
// Pseudocode: int selector = __wasm.landingpad_context.selector;
- Instruction *Selector = IRB.CreateLoad(SelectorField, "selector");
+ Instruction *Selector =
+ IRB.CreateLoad(IRB.getInt32Ty(), SelectorField, "selector");
// Replace the return value from wasm.get.ehselector() with the selector value
// loaded from __wasm_lpad_context.selector.
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
index c4e78faeeb5..d97d8e1dec5 100644
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -1079,7 +1079,8 @@ AllocaInst *WinEHPrepare::insertPHILoads(PHINode *PN, Function &F) {
SpillSlot = new AllocaInst(PN->getType(), DL->getAllocaAddrSpace(), nullptr,
Twine(PN->getName(), ".wineh.spillslot"),
&F.getEntryBlock().front());
- Value *V = new LoadInst(SpillSlot, Twine(PN->getName(), ".wineh.reload"),
+ Value *V = new LoadInst(PN->getType(), SpillSlot,
+ Twine(PN->getName(), ".wineh.reload"),
&*PHIBlock->getFirstInsertionPt());
PN->replaceAllUsesWith(V);
return SpillSlot;
@@ -1221,13 +1222,15 @@ void WinEHPrepare::replaceUseWithLoad(Value *V, Use &U, AllocaInst *&SpillSlot,
Value *&Load = Loads[IncomingBlock];
// Insert the load into the predecessor block
if (!Load)
- Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
+ Load = new LoadInst(V->getType(), SpillSlot,
+ Twine(V->getName(), ".wineh.reload"),
/*Volatile=*/false, IncomingBlock->getTerminator());
U.set(Load);
} else {
// Reload right before the old use.
- auto *Load = new LoadInst(SpillSlot, Twine(V->getName(), ".wineh.reload"),
+ auto *Load = new LoadInst(V->getType(), SpillSlot,
+ Twine(V->getName(), ".wineh.reload"),
/*Volatile=*/false, UsingInst);
U.set(Load);
}