Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp                        8
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpThreading.cpp             10
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp                       3
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp        6
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerAtomic.cpp                4
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp   18
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                      84
-rw-r--r--  llvm/lib/Transforms/Scalar/Scalarizer.cpp                 3
8 files changed, 74 insertions(+), 62 deletions(-)
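
This patch threads an explicit result type into every LoadInst creation site instead of deriving it from the pointer operand's pointee type; this is groundwork for opaque pointers, where a pointer no longer names what it points to. The shape of the migration, as a minimal standalone sketch (the helper name and parameters are illustrative, not part of the patch; the constructor calls mirror the GVN, JumpThreading, and LoopLoadElimination hunks below):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper: re-create a load at a new insertion point.
static LoadInst *cloneLoadAt(LoadInst *LI, Value *Ptr,
                             Instruction *InsertBefore) {
  // Before: the result type was inferred from Ptr's pointee type:
  //   new LoadInst(Ptr, LI->getName() + ".pre", InsertBefore);
  // After: the result type is stated explicitly, taken from the
  // original load being re-created.
  return new LoadInst(LI->getType(), Ptr, LI->getName() + ".pre",
                      InsertBefore);
}
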
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index b19503d095b..55fc7175154 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1234,10 +1234,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
BasicBlock *UnavailablePred = PredLoad.first;
Value *LoadPtr = PredLoad.second;
- auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
- LI->isVolatile(), LI->getAlignment(),
- LI->getOrdering(), LI->getSyncScopeID(),
- UnavailablePred->getTerminator());
+ auto *NewLoad =
+ new LoadInst(LI->getType(), LoadPtr, LI->getName() + ".pre",
+ LI->isVolatile(), LI->getAlignment(), LI->getOrdering(),
+ LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewLoad->setDebugLoc(LI->getDebugLoc());
// Transfer the old load's AA tags to the new load.
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7c8d362cf5a..54c206444cf 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1445,11 +1445,11 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
if (UnavailablePred) {
assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
"Can't handle critical edge here!");
- LoadInst *NewVal =
- new LoadInst(LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
- LoadI->getName() + ".pr", false, LoadI->getAlignment(),
- LoadI->getOrdering(), LoadI->getSyncScopeID(),
- UnavailablePred->getTerminator());
+ LoadInst *NewVal = new LoadInst(
+ LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
+ LoadI->getName() + ".pr", false, LoadI->getAlignment(),
+ LoadI->getOrdering(), LoadI->getSyncScopeID(),
+ UnavailablePred->getTerminator());
NewVal->setDebugLoc(LoadI->getDebugLoc());
if (AATags)
NewVal->setAAMetadata(AATags);
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index a0920f65a2a..4c63c136b69 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1947,7 +1947,8 @@ bool llvm::promoteLoopAccessesToScalars(
// Set up the preheader to have a definition of the value. It is the live-out
// value from the preheader that uses in the loop will use.
LoadInst *PreheaderLoad = new LoadInst(
- SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator());
+ SomePtr->getType()->getPointerElementType(), SomePtr,
+ SomePtr->getName() + ".promoted", Preheader->getTerminator());
if (SawUnorderedAtomic)
PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
PreheaderLoad->setAlignment(Alignment);
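
Unlike GVN, LICM has no original LoadInst to consult here, only the promoted pointer, so the type is still recovered from the pointer's pointee type. A minimal sketch of that idiom (names are stand-ins for the pass's state); note it still relies on typed pointers, so it is transitional:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Illustrative helper: the load type comes from the (typed) pointer itself.
static LoadInst *makePreheaderLoad(Value *SomePtr,
                                   Instruction *InsertBefore) {
  Type *Ty = SomePtr->getType()->getPointerElementType();
  return new LoadInst(Ty, SomePtr, SomePtr->getName() + ".promoted",
                      InsertBefore);
}
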
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index f8fc76e4b87..b35d3a732b4 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -427,9 +427,9 @@ public:
auto *PH = L->getLoopPreheader();
Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
PH->getTerminator());
- Value *Initial =
- new LoadInst(InitialPtr, "load_initial", /* isVolatile */ false,
- Cand.Load->getAlignment(), PH->getTerminator());
+ Value *Initial = new LoadInst(
+ Cand.Load->getType(), InitialPtr, "load_initial",
+ /* isVolatile */ false, Cand.Load->getAlignment(), PH->getTerminator());
PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
&L->getHeader()->front());
diff --git a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 0f84d59b28f..f39ca239644 100644
--- a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -26,7 +26,7 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
Value *Cmp = CXI->getCompareOperand();
Value *Val = CXI->getNewValOperand();
- LoadInst *Orig = Builder.CreateLoad(Ptr);
+ LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
Value *Res = Builder.CreateSelect(Equal, Val, Orig);
Builder.CreateStore(Res, Ptr);
@@ -44,7 +44,7 @@ static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
Value *Ptr = RMWI->getPointerOperand();
Value *Val = RMWI->getValOperand();
- LoadInst *Orig = Builder.CreateLoad(Ptr);
+ LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
Value *Res = nullptr;
switch (RMWI->getOperation()) {
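
In LowerAtomic the loaded type is taken from the instruction's value operand, which already carries the memory's value type. Condensing the cmpxchg hunk above into a standalone sketch:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Condensed from LowerAtomicCmpXchgInst above: load, compare, select, store.
static void lowerCmpXchgSketch(AtomicCmpXchgInst *CXI) {
  IRBuilder<> Builder(CXI);
  Value *Ptr = CXI->getPointerOperand();
  Value *Cmp = CXI->getCompareOperand();
  Value *Val = CXI->getNewValOperand();
  // The new-value operand has the same type as the loaded value.
  LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr);
  Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
  Value *Res = Builder.CreateSelect(Equal, Val, Orig);
  Builder.CreateStore(Res, Ptr);
}
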
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 6e78336cb44..add26d77dea 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1636,7 +1636,7 @@ makeStatepointExplicit(DominatorTree &DT, CallSite CS,
// for sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
- DenseMap<Value *, Value *> &AllocaMap,
+ DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (User *U : GCRelocs) {
GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
@@ -1671,7 +1671,7 @@ insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
// "insertRelocationStores" but works for rematerialized values.
static void insertRematerializationStores(
const RematerializedValueMapTy &RematerializedValues,
- DenseMap<Value *, Value *> &AllocaMap,
+ DenseMap<Value *, AllocaInst *> &AllocaMap,
DenseSet<Value *> &VisitedLiveValues) {
for (auto RematerializedValuePair: RematerializedValues) {
Instruction *RematerializedValue = RematerializedValuePair.first;
@@ -1704,7 +1704,7 @@ static void relocationViaAlloca(
#endif
// TODO-PERF: change data structures, reserve
- DenseMap<Value *, Value *> AllocaMap;
+ DenseMap<Value *, AllocaInst *> AllocaMap;
SmallVector<AllocaInst *, 200> PromotableAllocas;
// Used later to check that we have enough allocas to store all values
std::size_t NumRematerializedValues = 0;
@@ -1774,7 +1774,7 @@ static void relocationViaAlloca(
SmallVector<AllocaInst *, 64> ToClobber;
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
- AllocaInst *Alloca = cast<AllocaInst>(Pair.second);
+ AllocaInst *Alloca = Pair.second;
// This value was relocated
if (VisitedLiveValues.count(Def)) {
@@ -1806,7 +1806,7 @@ static void relocationViaAlloca(
// Update use with load allocas and add store for gc_relocated.
for (auto Pair : AllocaMap) {
Value *Def = Pair.first;
- Value *Alloca = Pair.second;
+ AllocaInst *Alloca = Pair.second;
// We pre-record the uses of allocas so that we don't have to worry about
// a later update that changes the user information.
@@ -1834,13 +1834,15 @@ static void relocationViaAlloca(
PHINode *Phi = cast<PHINode>(Use);
for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
if (Def == Phi->getIncomingValue(i)) {
- LoadInst *Load = new LoadInst(
- Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
+ LoadInst *Load =
+ new LoadInst(Alloca->getAllocatedType(), Alloca, "",
+ Phi->getIncomingBlock(i)->getTerminator());
Phi->setIncomingValue(i, Load);
}
}
} else {
- LoadInst *Load = new LoadInst(Alloca, "", Use);
+ LoadInst *Load =
+ new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
Use->replaceUsesOfWith(Def, Load);
}
}
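
The statepoint change is complementary: typing AllocaMap's values as AllocaInst * (rather than Value *) makes getAllocatedType() available at every reload site, removing the per-use cast. A reduced sketch of the reload step (the helper name is illustrative):

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// With the map value typed as AllocaInst *, the element type is at hand.
static void reloadBeforeUse(DenseMap<Value *, AllocaInst *> &AllocaMap,
                            Value *Def, Instruction *Use) {
  AllocaInst *Alloca = AllocaMap[Def]; // no cast<AllocaInst> needed
  LoadInst *Load = new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
  Use->replaceUsesOfWith(Def, Load);
}
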
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 991e9c7c52f..6be3f7903c9 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1231,15 +1231,14 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
static void speculatePHINodeLoads(PHINode &PN) {
LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
- Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
+ LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
+ Type *LoadTy = SomeLoad->getType();
IRBuilderTy PHIBuilder(&PN);
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
// Get the AA tags and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
- LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
-
AAMDNodes AATags;
SomeLoad->getAAMetadata(AATags);
unsigned Align = SomeLoad->getAlignment();
@@ -1270,7 +1269,8 @@ static void speculatePHINodeLoads(PHINode &PN) {
IRBuilderTy PredBuilder(TI);
LoadInst *Load = PredBuilder.CreateLoad(
- InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
+ LoadTy, InVal,
+ (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
if (AATags)
@@ -1330,10 +1330,10 @@ static void speculateSelectInstLoads(SelectInst &SI) {
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
- LoadInst *TL =
- IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
- LoadInst *FL =
- IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
+ LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
+ LI->getName() + ".sroa.speculate.load.true");
+ LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
+ LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
// Transfer alignment and AA info if present.
@@ -2410,14 +2410,16 @@ private:
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
- Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
- Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
@@ -2461,7 +2463,8 @@ private:
(canConvertValue(DL, NewAllocaTy, TargetTy) ||
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy()))) {
- LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
@@ -2497,9 +2500,9 @@ private:
}
} else {
Type *LTy = TargetTy->getPointerTo(AS);
- LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
- getSliceAlign(TargetTy),
- LI.isVolatile(), LI.getName());
+ LoadInst *NewLI = IRB.CreateAlignedLoad(
+ TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
+ LI.isVolatile(), LI.getName());
if (AATags)
NewLI->setAAMetadata(AATags);
if (LI.isVolatile())
@@ -2525,8 +2528,8 @@ private:
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
- Value *Placeholder =
- new LoadInst(UndefValue::get(LI.getType()->getPointerTo(AS)));
+ Value *Placeholder = new LoadInst(
+ LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
"insert");
LI.replaceAllUsesWith(V);
@@ -2557,7 +2560,8 @@ private:
V = convertValue(DL, IRB, V, SliceTy);
// Mix in the existing elements.
- Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
@@ -2573,8 +2577,8 @@ private:
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
@@ -2766,8 +2770,8 @@ private:
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
@@ -2779,8 +2783,8 @@ private:
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset)) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
V = insertInteger(DL, IRB, Old, V, Offset, "insert");
@@ -2940,18 +2944,18 @@ private:
// Reset the other pointer type to match the register type we're going to
// use, but using the address space of the original other pointer.
+ Type *OtherTy;
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
- OtherPtrTy = VecTy->getElementType();
+ OtherTy = VecTy->getElementType();
else
- OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
-
- OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
+ OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
} else if (IntTy && !IsWholeAlloca) {
- OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+ OtherTy = SubIntTy;
} else {
- OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
+ OtherTy = NewAllocaTy;
}
+ OtherPtrTy = OtherTy->getPointerTo(OtherAS);
Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
@@ -2965,28 +2969,30 @@ private:
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
- Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
- Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load");
+ Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "load");
Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
- LoadInst *Load = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
- "copyload");
+ LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
+ II.isVolatile(), "copyload");
if (AATags)
Load->setAAMetadata(AATags);
Src = Load;
}
if (VecTy && !IsWholeAlloca && IsDest) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
- Value *Old =
- IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload");
+ Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
+ NewAI.getAlignment(), "oldload");
Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
@@ -3293,7 +3299,7 @@ private:
// Load the single value and insert it using the indices.
Value *GEP =
IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep");
- LoadInst *Load = IRB.CreateAlignedLoad(GEP, Align, Name + ".load");
+ LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
if (AATags)
Load->setAAMetadata(AATags);
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@@ -3787,6 +3793,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto AS = LI->getPointerAddressSpace();
auto *PartPtrTy = PartTy->getPointerTo(AS);
LoadInst *PLoad = IRB.CreateAlignedLoad(
+ PartTy,
getAdjustedPtr(IRB, DL, BasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
@@ -3928,6 +3935,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
IRB.SetInsertPoint(LI);
auto AS = LI->getPointerAddressSpace();
PLoad = IRB.CreateAlignedLoad(
+ PartTy,
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),
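
Most SROA sites load the whole new alloca and can name NewAI.getAllocatedType() directly; the memcpy rewrite instead keeps the value type (OtherTy) separate from the pointer type derived from it, so the copy load can state its result type. A reduced sketch (parameters are stand-ins for the pass's state):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Reduced from the memcpy rewrite: OtherTy is the value type being copied;
// in the pass, SrcPtr has already been adjusted to
// OtherTy->getPointerTo(OtherAS), keeping value and pointer types in sync.
static Value *emitCopyLoad(IRBuilder<> &IRB, Type *OtherTy, Value *SrcPtr,
                           unsigned SrcAlign, bool IsVolatile) {
  return IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, IsVolatile,
                               "copyload");
}
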
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 8fd32086a3b..856719910ce 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -743,7 +743,8 @@ bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
Res.resize(NumElems);
for (unsigned I = 0; I < NumElems; ++I)
- Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
+ Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
+ Layout.getElemAlign(I),
LI.getName() + ".i" + Twine(I));
gather(&LI, Res);
return true;
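
The scalarizer emits one scalar load per vector element; each load now takes its type from the layout's vector element type. A reduced sketch (parameter names are stand-ins for the pass's layout state):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// One scalar load per element, typed by the vector's element type.
static void scalarizeLoadSketch(IRBuilder<> &Builder, VectorType *VecTy,
                                ArrayRef<Value *> ElemPtrs,
                                unsigned ElemAlign,
                                SmallVectorImpl<Value *> &Res) {
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I)
    Res.push_back(Builder.CreateAlignedLoad(VecTy->getElementType(),
                                            ElemPtrs[I], ElemAlign,
                                            "elt" + Twine(I)));
}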