Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroCleanup.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroEarly.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroSplit.cpp | 5
-rw-r--r--  llvm/lib/Transforms/IPO/ArgumentPromotion.cpp | 6
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp | 28
-rw-r--r--  llvm/lib/Transforms/IPO/LowerTypeTests.cpp | 2
-rw-r--r--  llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp | 2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 15
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 21
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp | 3
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 17
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 23
-rw-r--r--  llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp | 44
-rw-r--r--  llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 74
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpThreading.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerAtomic.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 18
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp | 84
-rw-r--r--  llvm/lib/Transforms/Scalar/Scalarizer.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Utils/CodeExtractor.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Utils/DemoteRegToStack.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp | 14
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp | 34
-rw-r--r--  llvm/lib/Transforms/Utils/VNCoercion.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 4
37 files changed, 288 insertions, 213 deletions
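
Every hunk below applies the same mechanical migration: the load-creation APIs take the loaded type as an explicit argument instead of deriving it from the pointer operand's pointee type. A minimal sketch of the before/after shape, written against the LLVM IR API (emitTypedLoad and its parameters are illustrative names, not code from this commit):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Hypothetical helper showing the API change only.
  static Value *emitTypedLoad(IRBuilder<> &Builder, Type *Ty, Value *Ptr) {
    // Before: Builder.CreateLoad(Ptr) inferred the result type from
    // Ptr->getType()->getPointerElementType().
    // After: the result type is spelled out, so the load no longer
    // depends on the pointer's pointee type (apparently groundwork for
    // the opaque-pointer transition).
    return Builder.CreateLoad(Ty, Ptr);
  }

In each file the explicit type is recovered from whatever the surrounding code already knows: a struct element (FrameTy->getElementType(Index)), a global's value type (GV->getValueType()), an alloca's allocated type (AI->getAllocatedType()), or the type of the load being replaced (LI->getType()).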
diff --git a/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp b/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp
index 315d4be26ac..1fb0a114d0c 100644
--- a/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp
@@ -49,7 +49,7 @@ static void lowerSubFn(IRBuilder<> &Builder, CoroSubFnInst *SubFn) {
Builder.SetInsertPoint(SubFn);
auto *FramePtr = Builder.CreateBitCast(FrameRaw, FramePtrTy);
auto *Gep = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index);
- auto *Load = Builder.CreateLoad(Gep);
+ auto *Load = Builder.CreateLoad(FrameTy->getElementType(Index), Gep);
SubFn->replaceAllUsesWith(Load);
}
diff --git a/llvm/lib/Transforms/Coroutines/CoroEarly.cpp b/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
index 407e17fd67f..e6f501e5d19 100644
--- a/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroEarly.cpp
@@ -97,7 +97,7 @@ void Lowerer::lowerCoroDone(IntrinsicInst *II) {
Builder.SetInsertPoint(II);
auto *BCI = Builder.CreateBitCast(Operand, FramePtrTy);
auto *Gep = Builder.CreateConstInBoundsGEP1_32(FrameTy, BCI, 0);
- auto *Load = Builder.CreateLoad(Gep);
+ auto *Load = Builder.CreateLoad(FrameTy, Gep);
auto *Cond = Builder.CreateICmpEQ(Load, NullPtr);
II->replaceAllUsesWith(Cond);
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 92ac9bd86c0..24d685b7113 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -471,10 +471,10 @@ static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
auto *CB = Shape.CoroBegin;
IRBuilder<> Builder(CB->getNextNode());
- PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
+ StructType *FrameTy = Shape.FrameTy;
+ PointerType *FramePtrTy = FrameTy->getPointerTo();
auto *FramePtr =
cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
- Type *FrameTy = FramePtrTy->getElementType();
Value *CurrentValue = nullptr;
BasicBlock *CurrentBlock = nullptr;
@@ -501,7 +501,7 @@ static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
Twine(".reload.addr"));
return isa<AllocaInst>(CurrentValue)
? G
- : Builder.CreateLoad(G,
+ : Builder.CreateLoad(FrameTy->getElementType(Index), G,
CurrentValue->getName() + Twine(".reload"));
};
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 96b192704c9..f7d3072ccbc 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -93,7 +93,7 @@ static BasicBlock *createResumeEntryBlock(Function &F, coro::Shape &Shape) {
auto *FrameTy = Shape.FrameTy;
auto *GepIndex = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, coro::Shape::IndexField, "index.addr");
- auto *Index = Builder.CreateLoad(GepIndex, "index");
+ auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
auto *Switch =
Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
Shape.ResumeSwitch = Switch;
@@ -229,7 +229,8 @@ static void handleFinalSuspend(IRBuilder<> &Builder, Value *FramePtr,
Builder.SetInsertPoint(OldSwitchBB->getTerminator());
auto *GepIndex = Builder.CreateConstInBoundsGEP2_32(Shape.FrameTy, FramePtr,
0, 0, "ResumeFn.addr");
- auto *Load = Builder.CreateLoad(GepIndex);
+ auto *Load = Builder.CreateLoad(
+ Shape.FrameTy->getElementType(coro::Shape::ResumeField), GepIndex);
auto *NullPtr =
ConstantPointerNull::get(cast<PointerType>(Load->getType()));
auto *Cond = Builder.CreateICmpEQ(Load, NullPtr);
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 8e9520c84a9..13df8280f34 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -263,7 +263,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
Value *Idx = GetElementPtrInst::Create(
STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call);
// TODO: Tell AA about the new values?
- Args.push_back(new LoadInst(Idx, Idx->getName() + ".val", Call));
+ Args.push_back(new LoadInst(STy->getElementType(i), Idx,
+ Idx->getName() + ".val", Call));
ArgAttrVec.push_back(AttributeSet());
}
} else if (!I->use_empty()) {
@@ -299,7 +300,8 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
}
// Since we're replacing a load make sure we take the alignment
// of the previous load.
- LoadInst *newLoad = new LoadInst(V, V->getName() + ".val", Call);
+ LoadInst *newLoad =
+ new LoadInst(OrigLoad->getType(), V, V->getName() + ".val", Call);
newLoad->setAlignment(OrigLoad->getAlignment());
// Transfer the AA info too.
AAMDNodes AAInfo;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 0e23a00e8db..5080e02b135 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -905,9 +905,10 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
- Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
+ Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
+ InitBool->getName() + ".val", false, 0,
LI->getOrdering(), LI->getSyncScopeID(),
- LI->isUnordered() ? (Instruction*)ICI : LI);
+ LI->isUnordered() ? (Instruction *)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
default: llvm_unreachable("Unknown ICmp Predicate!");
@@ -1040,7 +1041,8 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
}
// Insert a load from the global, and use it instead of the malloc.
- Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
+ Value *NL =
+ new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
U->replaceUsesOfWith(Alloc, NL);
}
}
@@ -1163,10 +1165,10 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
// This is a scalarized version of the load from the global. Just create
// a new Load of the scalarized global.
- Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
- InsertedScalarizedValues,
- PHIsToRewrite),
- LI->getName()+".f"+Twine(FieldNo), LI);
+ Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
+ InsertedScalarizedValues, PHIsToRewrite);
+ Result = new LoadInst(V->getType()->getPointerElementType(), V,
+ LI->getName() + ".f" + Twine(FieldNo), LI);
} else {
PHINode *PN = cast<PHINode>(V);
// PN's type is pointer to struct. Make a new PHI of pointer to struct
@@ -1356,7 +1358,9 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// Within the NullPtrBlock, we need to emit a comparison and branch for each
// pointer, because some may be null while others are not.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
- Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
+ Value *GVVal =
+ new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
+ FieldGlobals[i], "tmp", NullPtrBlock);
Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
Constant::getNullValue(GVVal->getType()));
BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
@@ -1700,7 +1704,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
- StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
+ StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
+ LI->getName() + ".b", false, 0,
LI->getOrdering(), LI->getSyncScopeID(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
@@ -1716,8 +1721,9 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
- LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
- LI->getOrdering(), LI->getSyncScopeID(), LI);
+ LoadInst *NLI =
+ new LoadInst(NewGV->getValueType(), NewGV, LI->getName() + ".b",
+ false, 0, LI->getOrdering(), LI->getSyncScopeID(), LI);
Instruction *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);
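
The GlobalOpt hunks above show the pattern for globals: the loaded type comes from GlobalVariable::getValueType() rather than from the pointer type. A small sketch under assumed names (loadGlobal is illustrative, not from the patch):

  #include "llvm/IR/GlobalVariable.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Load a global's current value; the type is queried from the global
  // itself instead of going through the pointer's pointee type.
  static LoadInst *loadGlobal(GlobalVariable *GV, Instruction *InsertPt) {
    return new LoadInst(GV->getValueType(), GV, GV->getName() + ".val",
                        InsertPt);
  }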
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 5359445a700..2f8a96be875 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -618,7 +618,7 @@ Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
}
Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
- Value *Byte = B.CreateLoad(ByteAddr);
+ Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
Value *ByteAndMask =
B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index cb884d189d8..ab6f0eb660f 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -1183,7 +1183,7 @@ void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
Value *Addr =
B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte);
if (RetType->getBitWidth() == 1) {
- Value *Bits = B.CreateLoad(Addr);
+ Value *Bits = B.CreateLoad(Int8Ty, Addr);
Value *BitsAndBit = B.CreateAnd(Bits, Bit);
auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 6a0bd29069a..80eb51be84e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -171,7 +171,7 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
- LoadInst *L = Builder.CreateLoad(Src);
+ LoadInst *L = Builder.CreateLoad(IntType, Src);
// Alignment from the mem intrinsic will be better, so use it.
L->setAlignment(CopySrcAlign);
if (CopyMD)
@@ -1182,7 +1182,8 @@ static Value *simplifyMaskedLoad(const IntrinsicInst &II,
if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
Value *LoadPtr = II.getArgOperand(0);
unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
- return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
+ return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
+ "unmaskedload");
}
return nullptr;
@@ -1499,7 +1500,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,
auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
PointerType::get(II.getType(), 0));
- return Builder.CreateAlignedLoad(BCastInst, Alignment);
+ return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
}
// Returns true iff the 2 intrinsics have the same operands, limiting the
@@ -2300,7 +2301,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 16) {
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
- return new LoadInst(Ptr);
+ return new LoadInst(II->getType(), Ptr);
}
break;
case Intrinsic::ppc_vsx_lxvw4x:
@@ -2308,7 +2309,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC VSX loads into normal loads.
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
- return new LoadInst(Ptr, Twine(""), false, 1);
+ return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
}
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
@@ -2336,7 +2337,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
II->getType()->getVectorNumElements());
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(VTy));
- Value *Load = Builder.CreateLoad(Ptr);
+ Value *Load = Builder.CreateLoad(VTy, Ptr);
return new FPExtInst(Load, II->getType());
}
break;
@@ -2346,7 +2347,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 32) {
Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
- return new LoadInst(Ptr);
+ return new LoadInst(II->getType(), Ptr);
}
break;
case Intrinsic::ppc_qpx_qvstfs:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index a7d86080451..a7750695fef 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -298,7 +298,7 @@ void PointerReplacer::replace(Instruction *I) {
if (auto *LT = dyn_cast<LoadInst>(I)) {
auto *V = getReplacement(LT->getPointerOperand());
assert(V && "Operand not replaced");
- auto *NewI = new LoadInst(V);
+ auto *NewI = new LoadInst(I->getType(), V);
NewI->takeName(LT);
IC.InsertNewInstWith(NewI, *LT);
IC.replaceInstUsesWith(*LT, NewI);
@@ -465,7 +465,7 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));
LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
- NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
+ NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
@@ -724,7 +724,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
- auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
+ auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
+ EltAlign, Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
@@ -774,8 +775,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
};
auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
Name + ".elt");
- auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
- Name + ".unpack");
+ auto *L = IC.Builder.CreateAlignedLoad(
+ AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
AAMDNodes AAMD;
LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD);
@@ -1065,10 +1066,12 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
unsigned Align = LI.getAlignment();
if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
- LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
- SI->getOperand(1)->getName()+".val");
- LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
- SI->getOperand(2)->getName()+".val");
+ LoadInst *V1 =
+ Builder.CreateLoad(LI.getType(), SI->getOperand(1),
+ SI->getOperand(1)->getName() + ".val");
+ LoadInst *V2 =
+ Builder.CreateLoad(LI.getType(), SI->getOperand(2),
+ SI->getOperand(2)->getName() + ".val");
assert(LI.isUnordered() && "implied by above");
V1->setAlignment(Align);
V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
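
For aggregate unpacking (the unpackLoadToAggregate hunks above), each per-element load now names the element type taken from the struct layout. Roughly, as a hedged sketch with illustrative names:

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Hypothetical sketch: load every field of *Addr individually. The
  // element type comes from the StructType, not from the GEP's pointee.
  static void loadEachField(IRBuilder<> &B, StructType *ST, Value *Addr) {
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Ptr = B.CreateConstInBoundsGEP2_32(ST, Addr, 0, i);
      B.CreateLoad(ST->getElementType(i), Ptr, "unpack");
    }
  }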
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 0ef78214457..e217adec7ed 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -595,7 +595,8 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
Value *InVal = FirstLI->getOperand(0);
NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
- LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);
+ LoadInst *NewLI =
+ new LoadInst(FirstLI->getType(), NewPN, "", isVolatile, LoadAlignment);
unsigned KnownIDs[] = {
LLVMContext::MD_tbaa,
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index b30b3eeadcc..90a54a71a4c 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2686,7 +2686,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
Builder.SetInsertPoint(L);
Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
L->getPointerOperand(), Indices);
- Instruction *NL = Builder.CreateLoad(GEP);
+ Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
// Whatever aliasing information we had for the orignal load must also
// hold for the smaller load, so propagate the annotations.
AAMDNodes Nodes;
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bae04a3d413..2a5a5c20469 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -949,8 +949,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
DynamicAreaOffset);
}
- IRB.CreateCall(AsanAllocasUnpoisonFunc,
- {IRB.CreateLoad(DynamicAllocaLayout), DynamicAreaPtr});
+ IRB.CreateCall(
+ AsanAllocasUnpoisonFunc,
+ {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
}
// Unpoison dynamic allocas redzones.
@@ -1552,7 +1553,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *CmpVal = Constant::getNullValue(ShadowTy);
Value *ShadowValue =
- IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
+ IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t Granularity = 1ULL << Mapping.Scale;
@@ -2444,7 +2445,7 @@ void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
} else {
Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
kAsanShadowMemoryDynamicAddress, IntptrTy);
- LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
+ LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
}
}
@@ -2948,9 +2949,9 @@ void FunctionStackPoisoner::processStaticAllocas() {
// void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
- Value *UseAfterReturnIsEnabled =
- IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUseAfterReturn),
- Constant::getNullValue(IRB.getInt32Ty()));
+ Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
+ IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
+ Constant::getNullValue(IRB.getInt32Ty()));
Instruction *Term =
SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
IRBuilder<> IRBIf(Term);
@@ -3084,7 +3085,7 @@ void FunctionStackPoisoner::processStaticAllocas() {
FakeStack,
ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
Value *SavedFlagPtr = IRBPoison.CreateLoad(
- IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
+ IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
IRBPoison.CreateStore(
Constant::getNullValue(IRBPoison.getInt8Ty()),
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 383e6603762..72f1e2ad253 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1033,7 +1033,8 @@ Value *DFSanFunction::getShadow(Value *V) {
DFS.ArgTLS ? &*F->getEntryBlock().begin()
: cast<Instruction>(ArgTLSPtr)->getNextNode();
IRBuilder<> IRB(ArgTLSPos);
- Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
+ Shadow =
+ IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
break;
}
case DataFlowSanitizer::IA_Args: {
@@ -1183,7 +1184,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
const auto i = AllocaShadowMap.find(AI);
if (i != AllocaShadowMap.end()) {
IRBuilder<> IRB(Pos);
- return IRB.CreateLoad(i->second);
+ return IRB.CreateLoad(DFS.ShadowTy, i->second);
}
}
@@ -1208,7 +1209,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
case 0:
return DFS.ZeroShadow;
case 1: {
- LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
+ LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
LI->setAlignment(ShadowAlign);
return LI;
}
@@ -1216,8 +1217,9 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
ConstantInt::get(DFS.IntptrTy, 1));
- return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
- IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
+ return combineShadows(
+ IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
+ IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
}
}
if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
@@ -1236,7 +1238,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> IRB(Pos);
Value *WideAddr =
IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
- Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
+ Value *WideShadow =
+ IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
@@ -1269,7 +1272,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRBuilder<> NextIRB(NextBB);
WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
ConstantInt::get(DFS.IntptrTy, 1));
- Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
+ Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
+ WideAddr, ShadowAlign);
ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
LastBr->setSuccessor(0, NextBB);
LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
@@ -1646,7 +1650,8 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
}
if (!FT->getReturnType()->isVoidTy()) {
- LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
+ LoadInst *LabelLoad =
+ IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
DFSF.setShadow(CustomCI, LabelLoad);
}
@@ -1684,7 +1689,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
IRBuilder<> NextIRB(Next);
- LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
+ LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.getRetvalTLS());
DFSF.SkipInsts.insert(LI);
DFSF.setShadow(CS.getInstruction(), LI);
DFSF.NonZeroChecks.push_back(LI);
diff --git a/llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
index 14b05662c3e..26c4cbcb63f 100644
--- a/llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
@@ -790,7 +790,7 @@ bool EfficiencySanitizer::insertCounterUpdate(Instruction *I,
ConstantExpr::getGetElementPtr(
ArrayType::get(IRB.getInt64Ty(), getStructCounterSize(StructTy)),
CounterArray, Indices);
- Value *Load = IRB.CreateLoad(Counter);
+ Value *Load = IRB.CreateLoad(IRB.getInt64Ty(), Counter);
IRB.CreateStore(IRB.CreateAdd(Load, ConstantInt::get(IRB.getInt64Ty(), 1)),
Counter);
return true;
@@ -875,7 +875,8 @@ bool EfficiencySanitizer::instrumentFastpathWorkingSet(
// memory access, if they are not already set.
Value *ValueMask = ConstantInt::get(ShadowTy, 0x81); // 10000001B
- Value *OldValue = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
+ Value *OldValue =
+ IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
// The AND and CMP will be turned into a TEST instruction by the compiler.
Value *Cmp = IRB.CreateICmpNE(IRB.CreateAnd(OldValue, ValueMask), ValueMask);
Instruction *CmpTerm = SplitBlockAndInsertIfThen(Cmp, I, false);
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index d71b3b68f6b..69e0ed4394f 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -817,7 +817,7 @@ bool GCOVProfiler::emitProfileArcs() {
// Skip phis, landingpads.
IRBuilder<> Builder(&*BB.getFirstInsertionPt());
- Value *Count = Builder.CreateLoad(Phi);
+ Value *Count = Builder.CreateLoad(Builder.getInt64Ty(), Phi);
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Phi);
@@ -828,7 +828,7 @@ bool GCOVProfiler::emitProfileArcs() {
const unsigned Edge = It->second;
Value *Counter =
Builder.CreateConstInBoundsGEP2_64(Counters, 0, Edge);
- Value *Count = Builder.CreateLoad(Counter);
+ Value *Count = Builder.CreateLoad(Builder.getInt64Ty(), Counter);
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Counter);
}
@@ -1089,17 +1089,20 @@ Function *GCOVProfiler::insertCounterWriteout(
auto *StartFileCallArgsPtr = Builder.CreateStructGEP(FileInfoPtr, 0);
auto *StartFileCall = Builder.CreateCall(
StartFile,
- {Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 0)),
- Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 1)),
- Builder.CreateLoad(Builder.CreateStructGEP(StartFileCallArgsPtr, 2))});
+ {Builder.CreateLoad(StartFileCallArgsTy->getElementType(0),
+ Builder.CreateStructGEP(StartFileCallArgsPtr, 0)),
+ Builder.CreateLoad(StartFileCallArgsTy->getElementType(1),
+ Builder.CreateStructGEP(StartFileCallArgsPtr, 1)),
+ Builder.CreateLoad(StartFileCallArgsTy->getElementType(2),
+ Builder.CreateStructGEP(StartFileCallArgsPtr, 2))});
if (auto AK = TLI->getExtAttrForI32Param(false))
StartFileCall->addParamAttr(2, AK);
- auto *NumCounters =
- Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 1));
- auto *EmitFunctionCallArgsArray =
- Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 2));
- auto *EmitArcsCallArgsArray =
- Builder.CreateLoad(Builder.CreateStructGEP(FileInfoPtr, 3));
+ auto *NumCounters = Builder.CreateLoad(
+ FileInfoTy->getElementType(1), Builder.CreateStructGEP(FileInfoPtr, 1));
+ auto *EmitFunctionCallArgsArray = Builder.CreateLoad(
+ FileInfoTy->getElementType(2), Builder.CreateStructGEP(FileInfoPtr, 2));
+ auto *EmitArcsCallArgsArray = Builder.CreateLoad(
+ FileInfoTy->getElementType(3), Builder.CreateStructGEP(FileInfoPtr, 3));
auto *EnterCounterLoopCond =
Builder.CreateICmpSLT(Builder.getInt32(0), NumCounters);
Builder.CreateCondBr(EnterCounterLoopCond, CounterLoopHeader, FileLoopLatch);
@@ -1111,11 +1114,16 @@ Function *GCOVProfiler::insertCounterWriteout(
Builder.CreateInBoundsGEP(EmitFunctionCallArgsArray, {JV});
auto *EmitFunctionCall = Builder.CreateCall(
EmitFunction,
- {Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 0)),
- Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 1)),
- Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 2)),
- Builder.CreateLoad(Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 3)),
+ {Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(0),
+ Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 0)),
+ Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(1),
+ Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 1)),
+ Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(2),
+ Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 2)),
+ Builder.CreateLoad(EmitFunctionCallArgsTy->getElementType(3),
+ Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 3)),
Builder.CreateLoad(
+ EmitFunctionCallArgsTy->getElementType(4),
Builder.CreateStructGEP(EmitFunctionCallArgsPtr, 4))});
if (auto AK = TLI->getExtAttrForI32Param(false)) {
EmitFunctionCall->addParamAttr(0, AK);
@@ -1127,8 +1135,10 @@ Function *GCOVProfiler::insertCounterWriteout(
Builder.CreateInBoundsGEP(EmitArcsCallArgsArray, {JV});
auto *EmitArcsCall = Builder.CreateCall(
EmitArcs,
- {Builder.CreateLoad(Builder.CreateStructGEP(EmitArcsCallArgsPtr, 0)),
- Builder.CreateLoad(Builder.CreateStructGEP(EmitArcsCallArgsPtr, 1))});
+ {Builder.CreateLoad(EmitArcsCallArgsTy->getElementType(0),
+ Builder.CreateStructGEP(EmitArcsCallArgsPtr, 0)),
+ Builder.CreateLoad(EmitArcsCallArgsTy->getElementType(1),
+ Builder.CreateStructGEP(EmitArcsCallArgsPtr, 1))});
if (auto AK = TLI->getExtAttrForI32Param(false))
EmitArcsCall->addParamAttr(0, AK);
auto *NextJV = Builder.CreateAdd(JV, Builder.getInt32(1));
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 405dc5320cd..581e47ca7c6 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -428,7 +428,7 @@ Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
Value *GlobalDynamicAddress =
IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
- return IRB.CreateLoad(GlobalDynamicAddress);
+ return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
}
}
@@ -557,7 +557,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
IRB.getInt8Ty());
Value *AddrLong = untagPointer(IRB, PtrLong);
Value *Shadow = memToShadow(AddrLong, IRB);
- Value *MemTag = IRB.CreateLoad(Shadow);
+ Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
@@ -841,7 +841,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
assert(SlotPtr);
- Instruction *ThreadLong = IRB.CreateLoad(SlotPtr);
+ Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
Function *F = IRB.GetInsertBlock()->getParent();
if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
@@ -855,7 +855,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
// FIXME: This should call a new runtime function with a custom calling
// convention to avoid needing to spill all arguments here.
IRB.CreateCall(HwasanThreadEnterFunc);
- LoadInst *ReloadThreadLong = IRB.CreateLoad(SlotPtr);
+ LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 47c9af0fa8b..22250c07dd2 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -195,6 +195,7 @@ public:
// block.
Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
+ Type *Ty = LiveInValue->getType();
IRBuilder<> Builder(InsertPos);
if (AtomicCounterUpdatePromoted)
// automic update currently can only be promoted across the current
@@ -202,7 +203,7 @@ public:
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
AtomicOrdering::SequentiallyConsistent);
else {
- LoadInst *OldVal = Builder.CreateLoad(Addr, "pgocount.promoted");
+ LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
auto *NewStore = Builder.CreateStore(NewVal, Addr);
@@ -603,7 +604,8 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
AtomicOrdering::Monotonic);
} else {
- Value *Load = Builder.CreateLoad(Addr, "pgocount");
+ Value *IncStep = Inc->getStep();
+ Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
auto *Count = Builder.CreateAdd(Load, Inc->getStep());
auto *Store = Builder.CreateStore(Count, Addr);
if (isCounterPromotionEnabled())
@@ -950,7 +952,7 @@ bool InstrProfiling::emitRuntimeHook() {
User->setComdat(M->getOrInsertComdat(User->getName()));
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
- auto *Load = IRB.CreateLoad(Var);
+ auto *Load = IRB.CreateLoad(Int32Ty, Var);
IRB.CreateRet(Load);
// Mark the user variable as used so that it isn't stripped out.
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 3050ba09e1f..75c82229309 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1401,7 +1401,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
}
OriginPtr =
- IRB.CreateIntToPtr(OriginLong, PointerType::get(IRB.getInt32Ty(), 0));
+ IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
}
return std::make_pair(ShadowPtr, OriginPtr);
}
@@ -1618,8 +1618,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// ParamTLS overflow.
*ShadowPtr = getCleanShadow(V);
} else {
- *ShadowPtr =
- EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
+ *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
+ kShadowTLSAlignment);
}
}
LLVM_DEBUG(dbgs()
@@ -1627,7 +1627,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins && !Overflow) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
- setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
+ setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
} else {
setOrigin(A, getCleanOrigin());
}
@@ -1758,7 +1758,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
- setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
+ setShadow(&I,
+ IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@@ -1772,7 +1773,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
if (PropagateShadow) {
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
- setOrigin(&I, IRB.CreateAlignedLoad(OriginPtr, OriginAlignment));
+ setOrigin(
+ &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
} else {
setOrigin(&I, getCleanOrigin());
}
@@ -2452,7 +2454,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned Alignment = 1;
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
- setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld"));
+ setShadow(&I,
+ IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
@@ -2462,7 +2465,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
if (PropagateShadow)
- setOrigin(&I, IRB.CreateLoad(OriginPtr));
+ setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
else
setOrigin(&I, getCleanOrigin());
}
@@ -2845,9 +2848,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
- Value *Shadow = IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_ldmxcsr");
- Value *Origin =
- MS.TrackOrigins ? IRB.CreateLoad(OriginPtr) : getCleanOrigin();
+ Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
+ Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
+ : getCleanOrigin();
insertShadowCheck(Shadow, Origin, &I);
}
@@ -2921,7 +2924,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Origin = IRB.CreateSelect(
IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
- getOrigin(PassThru), IRB.CreateLoad(OriginPtr));
+ getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
setOrigin(&I, Origin);
} else {
@@ -3284,12 +3287,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
"Could not find insertion point for retval shadow load");
}
IRBuilder<> IRBAfter(&*NextInsn);
- Value *RetvalShadow =
- IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
- kShadowTLSAlignment, "_msret");
+ Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
+ getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
+ kShadowTLSAlignment, "_msret");
setShadow(&I, RetvalShadow);
if (MS.TrackOrigins)
- setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
+ setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,
+ getOriginPtrForRetval(IRBAfter)));
}
bool isAMustTailRetVal(Value *RetVal) {
@@ -3837,7 +3841,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
- VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ VAArgOverflowSize =
+ IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
VAArgOverflowSize);
@@ -3856,11 +3861,13 @@ struct VarArgAMD64Helper : public VarArgHelper {
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
+ Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 16)),
- PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
- Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ PointerType::get(RegSaveAreaPtrTy, 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 16;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
@@ -3871,11 +3878,13 @@ struct VarArgAMD64Helper : public VarArgHelper {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
+ Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 8)),
- PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
- Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
+ PointerType::get(OverflowArgAreaPtrTy, 0));
+ Value *OverflowArgAreaPtr =
+ IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
@@ -3977,7 +3986,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
- VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
@@ -3994,10 +4003,12 @@ struct VarArgMIPS64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
+ Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
- Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ PointerType::get(RegSaveAreaPtrTy, 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
@@ -4147,7 +4158,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt64PtrTy(*MS.C));
- return IRB.CreateLoad(SaveAreaPtrPtr);
+ return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
// Retrieve a va_list field of 'int' size.
@@ -4157,7 +4168,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt32PtrTy(*MS.C));
- Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
+ Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
@@ -4168,7 +4179,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
- VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ VAArgOverflowSize =
+ IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize =
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
VAArgOverflowSize);
@@ -4411,7 +4423,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
- VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
VAArgSize);
@@ -4428,10 +4440,12 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
IRBuilder<> IRB(OrigInst->getNextNode());
Value *VAListTag = OrigInst->getArgOperand(0);
+ Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
- PointerType::get(Type::getInt64PtrTy(*MS.C), 0));
- Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ PointerType::get(RegSaveAreaPtrTy, 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
unsigned Alignment = 8;
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
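
A recurring detail in the MemorySanitizer va_arg helpers above: the pointee type is given a name (RegSaveAreaPtrTy, OverflowArgAreaPtrTy) so the inttoptr cast and the load agree on a single type. Sketched with assumed surrounding context (loadSavedAreaPtr and AddrInt are illustrative):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Illustrative: AddrInt is an integer holding the address of a slot
  // that itself stores an i64*. Naming the pointee type once keeps the
  // cast and the load consistent.
  static Value *loadSavedAreaPtr(IRBuilder<> &IRB, LLVMContext &C,
                                 Value *AddrInt) {
    Type *SaveAreaPtrTy = Type::getInt64PtrTy(C);
    Value *PtrPtr =
        IRB.CreateIntToPtr(AddrInt, PointerType::get(SaveAreaPtrTy, 0));
    return IRB.CreateLoad(SaveAreaPtrTy, PtrPtr);
  }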
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 4eff5be40bb..11f9d1242c1 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -822,7 +822,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
auto CounterPtr = IRB.CreateGEP(
Function8bitCounterArray,
{ConstantInt::get(IntptrTy, 0), ConstantInt::get(IntptrTy, Idx)});
- auto Load = IRB.CreateLoad(CounterPtr);
+ auto Load = IRB.CreateLoad(Int8Ty, CounterPtr);
auto Inc = IRB.CreateAdd(Load, ConstantInt::get(Int8Ty, 1));
auto Store = IRB.CreateStore(Inc, CounterPtr);
SetNoSanitizeMetadata(Load);
@@ -835,7 +835,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
auto FrameAddrPtr =
IRB.CreateCall(GetFrameAddr, {Constant::getNullValue(Int32Ty)});
auto FrameAddrInt = IRB.CreatePtrToInt(FrameAddrPtr, IntptrTy);
- auto LowestStack = IRB.CreateLoad(SanCovLowestStack);
+ auto LowestStack = IRB.CreateLoad(IntptrTy, SanCovLowestStack);
auto IsStackLower = IRB.CreateICmpULT(FrameAddrInt, LowestStack);
auto ThenTerm = SplitBlockAndInsertIfThen(IsStackLower, &*IP, false);
IRBuilder<> ThenIRB(ThenTerm);
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index b19503d095b..55fc7175154 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1234,10 +1234,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
BasicBlock *UnavailablePred = PredLoad.first;
Value *LoadPtr = PredLoad.second;
- auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
- LI->isVolatile(), LI->getAlignment(),
- LI->getOrdering(), LI->getSyncScopeID(),
- UnavailablePred->getTerminator());
+ auto *NewLoad =
+ new LoadInst(LI->getType(), LoadPtr, LI->getName() + ".pre",
+ LI->isVolatile(), LI->getAlignment(), LI->getOrdering(),
+ LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewLoad->setDebugLoc(LI->getDebugLoc());
// Transfer the old load's AA tags to the new load.
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7c8d362cf5a..54c206444cf 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1445,11 +1445,11 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
if (UnavailablePred) {
assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
"Can't handle critical edge here!");
- LoadInst *NewVal =
- new LoadInst(LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
- LoadI->getName() + ".pr", false, LoadI->getAlignment(),
- LoadI->getOrdering(), LoadI->getSyncScopeID(),
- UnavailablePred->getTerminator());
+ LoadInst *NewVal = new LoadInst(
+ LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
+ LoadI->getName() + ".pr", false, LoadI->getAlignment(),
+ LoadI->getOrdering(), LoadI->getSyncScopeID(),
+ UnavailablePred->getTerminator());
NewVal->setDebugLoc(LoadI->getDebugLoc());
if (AATags)
NewVal->setAAMetadata(AATags);
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index a0920f65a2a..4c63c136b69 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1947,7 +1947,8 @@ bool llvm::promoteLoopAccessesToScalars(
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
LoadInst *PreheaderLoad = new LoadInst(
- SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator());
+ SomePtr->getType()->getPointerElementType(), SomePtr,
+ SomePtr->getName() + ".promoted", Preheader->getTerminator());
if (SawUnorderedAtomic)
PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
PreheaderLoad->setAlignment(Alignment);
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index f8fc76e4b87..b35d3a732b4 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -427,9 +427,9 @@ public:
auto *PH = L->getLoopPreheader();
Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
PH->getTerminator());
- Value *Initial =
- new LoadInst(InitialPtr, "load_initial", /* isVolatile */ false,
- Cand.Load->getAlignment(), PH->getTerminator());
+ Value *Initial = new LoadInst(
+ Cand.Load->getType(), InitialPtr, "load_initial",
+ /* isVolatile */ false, Cand.Load->getAlignment(), PH->getTerminator());
PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
&L->getHeader()->front());
diff --git a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 0f84d59b28f..f39ca239644 100644
--- a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -26,7 +26,7 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Cmp = CXI->getCompareOperand();
Value *Val = CXI->getNewValOperand();
- LoadInst *Orig = Builder.CreateLoad(Ptr);
+ LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
@@ -44,7 +44,7 @@ static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
Value *Ptr = RMWI->getPointerOperand();
Value *Val = RMWI->getValOperand();
- LoadInst *Orig = Builder.CreateLoad(Ptr);
+ LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Res = nullptr;
switch (RMWI->getOperation()) {
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 6e78336cb44..add26d77dea 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1636,7 +1636,7 @@ makeStatepointExplicit(DominatorTree &DT, CallSite CS,
// for sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
- DenseMap<Value *, Value *> &AllocaMap,
+ DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (User *U : GCRelocs) {
GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
@@ -1671,7 +1671,7 @@ insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
// "insertRelocationStores" but works for rematerialized values.
static void insertRematerializationStores(
const RematerializedValueMapTy &RematerializedValues,
- DenseMap<Value *, Value *> &AllocaMap,
+ DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (auto RematerializedValuePair: RematerializedValues) {
Instruction *RematerializedValue = RematerializedValuePair.first;
@@ -1704,7 +1704,7 @@ static void relocationViaAlloca(
#endif
// TODO-PERF: change data structures, reserve
- DenseMap<Value *, Value *> AllocaMap;
+ DenseMap<Value *, AllocaInst *> AllocaMap;
SmallVector<AllocaInst *, 200> PromotableAllocas;
// Used later to chack that we have enough allocas to store all values
std::size_t NumRematerializedValues = 0;
@@ -1774,7 +1774,7 @@ static void relocationViaAlloca(
SmallVector<AllocaInst *, 64> ToClobber;
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
- AllocaInst *Alloca = cast<AllocaInst>(Pair.second);
+ AllocaInst *Alloca = Pair.second;
// This value was relocated
if (VisitedLiveValues.count(Def)) {
@@ -1806,7 +1806,7 @@ static void relocationViaAlloca(
// Update use with load allocas and add store for gc_relocated.
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
- Value *Alloca = Pair.second;
+ AllocaInst *Alloca = Pair.second;
// We pre-record the uses of allocas so that we dont have to worry about
// later update that changes the user information..
@@ -1834,13 +1834,15 @@ static void relocationViaAlloca(
PHINode *Phi = cast<PHINode>(Use);
for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
if (Def == Phi->getIncomingValue(i)) {
- LoadInst *Load = new LoadInst(
- Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
+ LoadInst *Load =
+ new LoadInst(Alloca->getAllocatedType(), Alloca, "",
+ Phi->getIncomingBlock(i)->getTerminator());
Phi->setIncomingValue(i, Load);
}
}
} else {
- LoadInst *Load = new LoadInst(Alloca, "", Use);
+ LoadInst *Load =
+ new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
Use->replaceUsesOfWith(Def, Load);
}
}
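
Besides the load-type change, the RewriteStatepointsForGC hunks above tighten AllocaMap's value type from Value * to AllocaInst *, so the rewrite sites can ask the alloca for its allocated type without a cast. In the same spirit, a sketch with illustrative names:

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // With AllocaInst* stored directly, no cast<AllocaInst>(...) is needed
  // before querying getAllocatedType() for the reload.
  static LoadInst *reloadDef(DenseMap<Value *, AllocaInst *> &AllocaMap,
                             Value *Def, Instruction *InsertBefore) {
    AllocaInst *Alloca = AllocaMap[Def];
    return new LoadInst(Alloca->getAllocatedType(), Alloca, "", InsertBefore);
  }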
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 991e9c7c52f..6be3f7903c9 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1231,15 +1231,14 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
static void speculatePHINodeLoads(PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
- Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
+ LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
+ Type *LoadTy = SomeLoad->getType();
IRBuilderTy PHIBuilder(&PN);
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
// Get the AA tags and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
- LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
-
AAMDNodes AATags;
SomeLoad->getAAMetadata(AATags);
unsigned Align = SomeLoad->getAlignment();
@@ -1270,7 +1269,8 @@ static void speculatePHINodeLoads(PHINode &PN) {
IRBuilderTy PredBuilder(TI);
LoadInst *Load = PredBuilder.CreateLoad(
- InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
+ LoadTy, InVal,
+ (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
if (AATags)
@@ -1330,10 +1330,10 @@ static void speculateSelectInstLoads(SelectInst &SI) {
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
- LoadInst *TL =
- IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
- LoadInst *FL =
- IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
+ LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
+ LI->getName() + ".sroa.speculate.load.true");
+ LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
+ LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
// Transfer alignment and AA info if present.
@@ -2410,14 +2410,16 @@ private:
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
- Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
- Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
@@ -2461,7 +2463,8 @@ private:
(canConvertValue(DL, NewAllocaTy, TargetTy) ||
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy()))) {
- LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
@@ -2497,9 +2500,9 @@ private:
}
} else {
Type *LTy = TargetTy->getPointerTo(AS);
- LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
- getSliceAlign(TargetTy),
- LI.isVolatile(), LI.getName());
+ LoadInst *NewLI = IRB.CreateAlignedLoad(
+ TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
+ LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
@@ -2525,8 +2528,8 @@ private:
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
- Value *Placeholder =
- new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS)));
+ Value *Placeholder = new LoadInst(
+ LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
"insert");
LI.replaceAllUsesWith(V);
@@ -2557,7 +2560,8 @@ private:
V = convertValue(DL, IRB, V, SliceTy);
// Mix in the existing elements.
- Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
@@ -2573,8 +2577,8 @@ private:
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
@@ -2766,8 +2770,8 @@ private:
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
@@ -2779,8 +2783,8 @@ private:
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                    EndOffset != NewAllocaEndOffset)) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, V, Offset, "insert");
@@ -2940,18 +2944,18 @@ private:
// Reset the other pointer type to match the register type we're going to
// use, but using the address space of the original other pointer.
+ Type *OtherTy;
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
- OtherPtrTy = VecTy->getElementType();
+ OtherTy = VecTy->getElementType();
else
- OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
-
- OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
+ OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
} else if (IntTy && !IsWholeAlloca) {
- OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+ OtherTy = SubIntTy;
} else {
- OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
+ OtherTy = NewAllocaTy;
}
+ OtherPtrTy = OtherTy->getPointerTo(OtherAS);
Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
@@ -2965,28 +2969,30 @@ private:
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
- Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
- Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
- LoadInst *Load = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
- "copyload");
+ LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
+ II.isVolatile(), "copyload");
if (AATags)
Load->setAAMetadata(AATags);
Src = Load;
}
if (VecTy && !IsWholeAlloca && IsDest) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
@@ -3293,7 +3299,7 @@ private:
// Load the single value and insert it using the indices.
Value *GEP =
IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
- LoadInst *Load = IRB.CreateAlignedLoad(GEP, Align, Name + ".load");
+ LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@@ -3787,6 +3793,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto AS = LI->getPointerAddressSpace();
auto *PartPtrTy = PartTy->getPointerTo(AS);
LoadInst *PLoad = IRB.CreateAlignedLoad(
+ PartTy,
getAdjustedPtr(IRB, DL, BasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
@@ -3928,6 +3935,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
IRB.SetInsertPoint(LI);
auto AS = LI->getPointerAddressSpace();
PLoad = IRB.CreateAlignedLoad(
+ PartTy,
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),
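
The SROA hunks show the two ways this patch picks an explicit load type: from a load that is being replaced (SomeLoad->getType() in speculatePHINodeLoads, LI.getType() for selects) or from the alloca itself (NewAI.getAllocatedType()). A simplified sketch of the PHI-speculation shape, assuming a single load user and omitting the alignment and AA-metadata transfer the real code performs:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Loosely modeled on speculatePHINodeLoads: the result type comes from a
    // load that already uses the PHI, never from the PHI's pointee type.
    static void speculateOneLoad(PHINode &PN) {
      LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
      Type *LoadTy = SomeLoad->getType();
      IRBuilder<> B(&PN);
      PHINode *NewPN = B.CreatePHI(LoadTy, PN.getNumIncomingValues());
      for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
        BasicBlock *Pred = PN.getIncomingBlock(I);
        IRBuilder<> PredB(Pred->getTerminator());
        NewPN->addIncoming(PredB.CreateLoad(LoadTy, PN.getIncomingValue(I)),
                           Pred);
      }
      SomeLoad->replaceAllUsesWith(NewPN);
    }

Taking the type from an existing user load keeps the transform well defined even once pointee types stop being meaningful.
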
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 8fd32086a3b..856719910ce 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -743,7 +743,8 @@ bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
Res.resize(NumElems);
for (unsigned I = 0; I < NumElems; ++I)
- Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
+ Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
+ Layout.getElemAlign(I),
LI.getName() + ".i" + Twine(I));
gather(&LI, Res);
return true;
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index da227570179..9148c11ba15 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -844,7 +844,8 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
Instruction *TI = newFunction->begin()->getTerminator();
GetElementPtrInst *GEP = GetElementPtrInst::Create(
StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
- RewriteVal = new LoadInst(GEP, "loadgep_" + inputs[i]->getName(), TI);
+ RewriteVal = new LoadInst(StructTy->getElementType(i), GEP,
+ "loadgep_" + inputs[i]->getName(), TI);
} else
RewriteVal = &*AI++;
@@ -1054,7 +1055,8 @@ CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
} else {
Output = ReloadOutputs[i];
}
- LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
+ LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
+ outputs[i]->getName() + ".reload");
Reloads.push_back(load);
codeReplacer->getInstList().push_back(load);
std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
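
In CodeExtractor the reload of a struct-passed input now takes its type from the struct definition instead of from the GEP's result pointer. A compressed sketch; the function name and parameters are illustrative only:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Load field i of the packed-arguments struct. The loaded type is
    // StructTy->getElementType(i), not GEP->getType()->getElementType().
    static Value *loadStructField(StructType *StructTy, Value *FieldGEP,
                                  unsigned i, Instruction *InsertBefore) {
      return new LoadInst(StructTy->getElementType(i), FieldGEP,
                          "loadgep_field", InsertBefore);
    }
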
diff --git a/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index 8ae4da71a3e..5f53d794fe8 100644
--- a/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -72,7 +72,8 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Value *&V = Loads[PN->getIncomingBlock(i)];
if (!V) {
// Insert the load into the predecessor block
- V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
+ V = new LoadInst(I.getType(), Slot, I.getName() + ".reload",
+ VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@@ -80,7 +81,8 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
} else {
// If this is a normal instruction, just insert a load.
- Value *V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads, U);
+ Value *V = new LoadInst(I.getType(), Slot, I.getName() + ".reload",
+ VolatileLoads, U);
U->replaceUsesOfWith(&I, V);
}
}
@@ -141,7 +143,8 @@ AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
/* empty */; // Don't insert before PHI nodes or landingpad instrs.
- Value *V = new LoadInst(Slot, P->getName() + ".reload", &*InsertPt);
+ Value *V =
+ new LoadInst(P->getType(), Slot, P->getName() + ".reload", &*InsertPt);
P->replaceAllUsesWith(V);
// Delete PHI.
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index e6a8f24cbdb..875e3f49684 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -72,7 +72,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
// Loop Body
Value *SrcGEP =
LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
- Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *Load = LoopBuilder.CreateLoad(LoopOpType, SrcGEP, SrcIsVolatile);
Value *DstGEP =
LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@@ -114,7 +114,7 @@ void llvm::createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
: RBuilder.CreateBitCast(SrcAddr, SrcPtrType);
Value *SrcGEP = RBuilder.CreateInBoundsGEP(
OpTy, CastedSrc, ConstantInt::get(TypeOfCopyLen, GepIndex));
- Value *Load = RBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *Load = RBuilder.CreateLoad(OpTy, SrcGEP, SrcIsVolatile);
// Cast destination to operand type and store.
PointerType *DstPtrType = PointerType::get(OpTy, DstAS);
@@ -181,7 +181,7 @@ void llvm::createMemCpyLoopUnknownSize(Instruction *InsertBefore,
LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);
Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
- Value *Load = LoopBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *Load = LoopBuilder.CreateLoad(LoopOpType, SrcGEP, SrcIsVolatile);
Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
LoopBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@@ -234,7 +234,7 @@ void llvm::createMemCpyLoopUnknownSize(Instruction *InsertBefore,
Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
Value *SrcGEP =
ResBuilder.CreateInBoundsGEP(Int8Type, SrcAsInt8, FullOffset);
- Value *Load = ResBuilder.CreateLoad(SrcGEP, SrcIsVolatile);
+ Value *Load = ResBuilder.CreateLoad(Int8Type, SrcGEP, SrcIsVolatile);
Value *DstGEP =
ResBuilder.CreateInBoundsGEP(Int8Type, DstAsInt8, FullOffset);
ResBuilder.CreateStore(Load, DstGEP, DstIsVolatile);
@@ -292,6 +292,8 @@ static void createMemMoveLoop(Instruction *InsertBefore,
BasicBlock *OrigBB = InsertBefore->getParent();
Function *F = OrigBB->getParent();
+ Type *EltTy = cast<PointerType>(SrcAddr->getType())->getElementType();
+
  // Create a comparison of src and dst, based on which we jump to either
// the forward-copy part of the function (if src >= dst) or the backwards-copy
// part (if src < dst).
@@ -330,7 +332,7 @@ static void createMemMoveLoop(Instruction *InsertBefore,
Value *IndexPtr = LoopBuilder.CreateSub(
LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
Value *Element = LoopBuilder.CreateLoad(
- LoopBuilder.CreateInBoundsGEP(SrcAddr, IndexPtr), "element");
+ EltTy, LoopBuilder.CreateInBoundsGEP(SrcAddr, IndexPtr), "element");
LoopBuilder.CreateStore(Element,
LoopBuilder.CreateInBoundsGEP(DstAddr, IndexPtr));
LoopBuilder.CreateCondBr(
@@ -347,7 +349,7 @@ static void createMemMoveLoop(Instruction *InsertBefore,
IRBuilder<> FwdLoopBuilder(FwdLoopBB);
PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
Value *FwdElement = FwdLoopBuilder.CreateLoad(
- FwdLoopBuilder.CreateInBoundsGEP(SrcAddr, FwdCopyPhi), "element");
+ EltTy, FwdLoopBuilder.CreateInBoundsGEP(SrcAddr, FwdCopyPhi), "element");
FwdLoopBuilder.CreateStore(
FwdElement, FwdLoopBuilder.CreateInBoundsGEP(DstAddr, FwdCopyPhi));
Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
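
The memcpy lowering already tracks LoopOpType, so its loop-body load can name the type directly; createMemMoveLoop is the one place above that still derives an element type from the source pointer (the new EltTy), a pointee-type query that presumably needs its own follow-up for opaque pointers. A minimal sketch of one copy iteration under these assumptions:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // One iteration of a lowered memcpy loop: GEP to the element, load it
    // with the loop operand type spelled out, store it to the destination.
    static void emitCopyIteration(IRBuilder<> &B, Type *LoopOpType,
                                  Value *SrcAddr, Value *DstAddr, Value *Index,
                                  bool SrcVolatile, bool DstVolatile) {
      Value *SrcGEP = B.CreateInBoundsGEP(LoopOpType, SrcAddr, Index);
      Value *Load = B.CreateLoad(LoopOpType, SrcGEP, SrcVolatile);
      Value *DstGEP = B.CreateInBoundsGEP(LoopOpType, DstAddr, Index);
      B.CreateStore(Load, DstGEP, DstVolatile);
    }
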
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 28e8cffe81c..3fec17ac8cc 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5090,7 +5090,9 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
Value *GEPIndices[] = {Builder.getInt32(0), Index};
Value *GEP = Builder.CreateInBoundsGEP(Array->getValueType(), Array,
GEPIndices, "switch.gep");
- return Builder.CreateLoad(GEP, "switch.load");
+ return Builder.CreateLoad(
+ cast<ArrayType>(Array->getValueType())->getElementType(), GEP,
+ "switch.load");
}
}
llvm_unreachable("Unknown lookup table kind!");
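
For the switch lookup table the loaded type is recovered from the table's array type rather than from the GEP result. A short sketch, assuming Array is the lookup-table global built earlier; the helper name is invented:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Load one lookup-table entry; the element type comes from the array's
    // own type, not from the GEP's pointer result.
    static Value *loadTableEntry(IRBuilder<> &B, GlobalVariable *Array,
                                 Value *Index) {
      Value *GEPIndices[] = {B.getInt32(0), Index};
      Value *GEP = B.CreateInBoundsGEP(Array->getValueType(), Array,
                                       GEPIndices, "switch.gep");
      return B.CreateLoad(
          cast<ArrayType>(Array->getValueType())->getElementType(), GEP,
          "switch.load");
    }
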
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 859c7b0c6b2..5181f497915 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -333,11 +333,12 @@ Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilder<> &B) {
return ConstantInt::get(CI->getType(), Str1.compare(Str2));
if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
- return B.CreateNeg(
- B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));
+ return B.CreateNeg(B.CreateZExt(
+ B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
- return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
+ return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
+ CI->getType());
// strcmp(P, "x") -> memcmp(P, "x", 2)
uint64_t Len1 = GetStringLength(Str1P);
@@ -397,11 +398,12 @@ Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilder<> &B) {
}
if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
- return B.CreateNeg(
- B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));
+ return B.CreateNeg(B.CreateZExt(
+ B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
- return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
+ return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
+ CI->getType());
uint64_t Len1 = GetStringLength(Str1P);
uint64_t Len2 = GetStringLength(Str2P);
@@ -590,7 +592,8 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilder<> &B,
// strlen(x) != 0 --> *x != 0
// strlen(x) == 0 --> *x == 0
if (isOnlyUsedInZeroEqualityComparison(CI))
- return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
+ return B.CreateZExt(B.CreateLoad(B.getIntNTy(CharSize), Src, "strlenfirst"),
+ CI->getType());
return nullptr;
}
@@ -844,10 +847,12 @@ Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
// memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
if (Len == 1) {
- Value *LHSV = B.CreateZExt(B.CreateLoad(castToCStr(LHS, B), "lhsc"),
- CI->getType(), "lhsv");
- Value *RHSV = B.CreateZExt(B.CreateLoad(castToCStr(RHS, B), "rhsc"),
- CI->getType(), "rhsv");
+ Value *LHSV =
+ B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(LHS, B), "lhsc"),
+ CI->getType(), "lhsv");
+ Value *RHSV =
+ B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(RHS, B), "rhsc"),
+ CI->getType(), "rhsv");
return B.CreateSub(LHSV, RHSV, "chardiff");
}
@@ -877,12 +882,12 @@ Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
if (!LHSV) {
Type *LHSPtrTy =
IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
- LHSV = B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
+ LHSV = B.CreateLoad(IntType, B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
}
if (!RHSV) {
Type *RHSPtrTy =
IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
- RHSV = B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
+ RHSV = B.CreateLoad(IntType, B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
}
return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
}
@@ -2286,7 +2291,8 @@ Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilder<> &B) {
// If this is writing one byte, turn it into fputc.
  // This optimisation is only valid if the return value is unused.
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
- Value *Char = B.CreateLoad(castToCStr(CI->getArgOperand(0), B), "char");
+ Value *Char = B.CreateLoad(B.getInt8Ty(),
+ castToCStr(CI->getArgOperand(0), B), "char");
Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
}
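
The SimplifyLibCalls rewrites all load a single character, so the explicit type is simply i8 (or B.getIntNTy(CharSize) for the wide-char strlen case). A sketch of the strcmp("", x) -> -*x shape, with the helper name invented here:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // strcmp("", x) -> -(int)*x : load the first character as an explicit i8,
    // zero-extend to the call's return type, and negate.
    static Value *emitNegFirstChar(IRBuilder<> &B, Value *Str2P, CallInst *CI) {
      Value *Ch = B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload");
      return B.CreateNeg(B.CreateZExt(Ch, CI->getType()));
    }
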
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 948d9bd5baa..19e5db4eee2 100644
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -386,12 +386,12 @@ Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
// memdep queries will find the new load. We can't easily remove the old
// load completely because it is already in the value numbering table.
IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
- Type *DestPTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
- DestPTy =
- PointerType::get(DestPTy, PtrVal->getType()->getPointerAddressSpace());
+ Type *DestTy = IntegerType::get(LoadTy->getContext(), NewLoadSize * 8);
+ Type *DestPTy =
+ PointerType::get(DestTy, PtrVal->getType()->getPointerAddressSpace());
Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
- LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
+ LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
NewLoad->takeName(SrcVal);
NewLoad->setAlignment(SrcVal->getAlignment());
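
In VNCoercion the widened load previously reused one variable for both the new integer type and the pointer type derived from it; splitting them lets the value type be handed to CreateLoad. A simplified sketch of the widened reload, omitting the debug-location and value-numbering bookkeeping of getLoadValueForLoad:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Re-issue SrcVal as a wider integer load: build iN for the new size in
    // bytes, bitcast the pointer, and pass the integer type to CreateLoad.
    static LoadInst *widenLoad(IRBuilder<> &B, LoadInst *SrcVal,
                               unsigned NewLoadSize) {
      Type *DestTy = IntegerType::get(SrcVal->getContext(), NewLoadSize * 8);
      Type *DestPTy = PointerType::get(
          DestTy,
          SrcVal->getPointerOperand()->getType()->getPointerAddressSpace());
      Value *PtrVal = B.CreateBitCast(SrcVal->getPointerOperand(), DestPTy);
      LoadInst *NewLoad = B.CreateLoad(DestTy, PtrVal);
      NewLoad->takeName(SrcVal);
      NewLoad->setAlignment(SrcVal->getAlignment());
      return NewLoad;
    }
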
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 2aa559323b7..e0b793f08fd 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1181,7 +1181,7 @@ bool Vectorizer::vectorizeLoadChain(
Value *Bitcast =
Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
- LoadInst *LI = Builder.CreateAlignedLoad(Bitcast, Alignment);
+ LoadInst *LI = Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment);
propagateMetadata(LI, Chain);
if (VecLoadTy) {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index da9f01a2f59..6340b8279d9 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2100,8 +2100,8 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
GroupMask, UndefVec, "wide.masked.vec");
}
else
- NewLoad = Builder.CreateAlignedLoad(NewPtrs[Part],
- Group->getAlignment(), "wide.vec");
+ NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part],
+ Group->getAlignment(), "wide.vec");
Group->addMetadata(NewLoad);
NewLoads.push_back(NewLoad);
}
@@ -2312,7 +2312,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
UndefValue::get(DataTy),
"wide.masked.load");
else
- NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
+ NewLI =
+ Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
// Add metadata to the load, but setVectorValue to the reverse shuffle.
addMetadata(NewLI, LI);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index d040f8eba86..446a78be769 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3136,7 +3136,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
Builder.SetInsertPoint(LI);
PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
- LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
+ LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
Value *NewV = propagateMetadata(V, E->Scalars);
if (!E->ReorderIndices.empty()) {
OrdersType Mask;
@@ -3341,7 +3341,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));
unsigned Alignment = LI->getAlignment();
- LI = Builder.CreateLoad(VecPtr);
+ LI = Builder.CreateLoad(VecTy, VecPtr);
if (!Alignment) {
Alignment = DL->getABITypeAlignment(ScalarLoadTy);
}
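
The three vectorizer changes share one shape: bitcast the scalar pointer to a vector pointer, then pass the vector type to the load explicitly. A condensed sketch with invented names, folding the common pattern of LoadStoreVectorizer, LoopVectorize, and SLPVectorizer:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Widen a scalar load: bitcast its pointer to <N x Ty>* and create the
    // wide load with VecTy named explicitly.
    static LoadInst *emitWideLoad(IRBuilder<> &B, LoadInst *LI,
                                  VectorType *VecTy, unsigned Alignment) {
      PointerType *PtrTy =
          PointerType::get(VecTy, LI->getPointerAddressSpace());
      Value *Ptr = B.CreateBitCast(LI->getPointerOperand(), PtrTy);
      return B.CreateAlignedLoad(VecTy, Ptr, Alignment, "wide.load");
    }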