Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp |  2
-rw-r--r--  llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp              |  4
-rw-r--r--  llvm/lib/Transforms/ObjCARC/PtrState.h                   |  6
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp                       |  4
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp                      |  4
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInterchange.cpp           | 22
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp            |  4
-rw-r--r--  llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp           | 14
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp   | 38
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                      | 17
-rw-r--r--  llvm/lib/Transforms/Utils/CloneFunction.cpp              |  2
-rw-r--r--  llvm/lib/Transforms/Utils/LowerSwitch.cpp                |  6
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyIndVar.cpp             |  8
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp          | 14
14 files changed, 73 insertions, 72 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 734056d2118..d39b49fca86 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -519,7 +519,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
if (isa<OverflowingBinaryOperator>(Op1))
HasNSW &= Op1->hasNoSignedWrap();
- // We can propogate 'nsw' if we know that
+ // We can propagate 'nsw' if we know that
// %Y = mul nsw i16 %X, C
// %Z = add nsw i16 %Y, %X
// =>
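
The comment fixed in this hunk states a concrete algebraic fact: if `%Y = mul nsw i16 %X, C` and `%Z = add nsw i16 %Y, %X` both hold without signed wrap, then the factored form `%Z = mul nsw i16 %X, C+1` computes the same value and cannot wrap either. Below is a minimal standalone C++ sketch that spot-checks that reasoning for one constant; the 16-bit helpers are hypothetical stand-ins for the IR's nsw semantics, not LLVM APIs.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Signed 16-bit ops that report wraparound, mimicking what the "nsw"
// (no signed wrap) flag promises about an LLVM IR mul/add.
std::optional<int16_t> mulNSW(int16_t a, int16_t b) {
  int32_t wide = int32_t(a) * int32_t(b);
  if (wide < INT16_MIN || wide > INT16_MAX) return std::nullopt;
  return int16_t(wide);
}

std::optional<int16_t> addNSW(int16_t a, int16_t b) {
  int32_t wide = int32_t(a) + int32_t(b);
  if (wide < INT16_MIN || wide > INT16_MAX) return std::nullopt;
  return int16_t(wide);
}

int main() {
  // If X*C and (X*C)+X both avoid wrap, X*(C+1) is the same mathematical
  // value, so it is representable and keeps nsw. Exhaustive check for one C.
  const int16_t C = 123;
  for (int32_t x = INT16_MIN; x <= INT16_MAX; ++x) {
    auto y = mulNSW(int16_t(x), C);
    if (!y) continue;                    // original mul wrapped: no claim made
    auto z = addNSW(*y, int16_t(x));
    if (!z) continue;                    // original add wrapped: no claim made
    auto f = mulNSW(int16_t(x), C + 1);  // factored form X * (C+1)
    if (!f || *f != *z) {
      std::cout << "counterexample at x = " << x << '\n';
      return 1;
    }
  }
  std::cout << "nsw propagation holds for C = " << C << '\n';
}
```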
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index 9edbb17e8d1..b2d11e7035f 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -482,7 +482,7 @@ namespace {
/// A flag indicating whether this optimization pass should run.
bool Run;
- /// Flags which determine whether each of the interesting runtine functions
+ /// Flags which determine whether each of the interesting runtime functions
/// is in fact used in the current function.
unsigned UsedInThisFunction;
@@ -1264,7 +1264,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
Arg = GetArgRCIdentityRoot(Inst);
TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
NestingDetected |= S.InitTopDown(Class, Inst);
- // A retain can be a potential use; procede to the generic checking
+ // A retain can be a potential use; proceed to the generic checking
// code below.
break;
}
diff --git a/llvm/lib/Transforms/ObjCARC/PtrState.h b/llvm/lib/Transforms/ObjCARC/PtrState.h
index e45e1ea96c5..a8c6ff1ed62 100644
--- a/llvm/lib/Transforms/ObjCARC/PtrState.h
+++ b/llvm/lib/Transforms/ObjCARC/PtrState.h
@@ -96,7 +96,7 @@ struct RRInfo {
};
/// \brief This class summarizes several per-pointer runtime properties which
-/// are propogated through the flow graph.
+/// are propagated through the flow graph.
class PtrState {
protected:
/// True if the reference count is known to be incremented.
@@ -172,7 +172,7 @@ struct BottomUpPtrState : PtrState {
bool InitBottomUp(ARCMDKindCache &Cache, Instruction *I);
/// Return true if this set of releases can be paired with a release. Modifies
- /// state appropriately to reflect that the matching occured if it is
+ /// state appropriately to reflect that the matching occurred if it is
/// successful.
///
/// It is assumed that one has already checked that the RCIdentity of the
@@ -194,7 +194,7 @@ struct TopDownPtrState : PtrState {
/// Return true if this set of retains can be paired with the given
/// release. Modifies state appropriately to reflect that the matching
- /// occured.
+ /// occurred.
bool MatchWithRelease(ARCMDKindCache &Cache, Instruction *Release);
void HandlePotentialUse(Instruction *Inst, const Value *Ptr,
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index f0ac76d84b5..f256af81fc8 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1771,7 +1771,7 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
// FIXME: If both the original and replacement value are part of the
// same control-flow region (meaning that the execution of one
- // guarentees the executation of the other), then we can combine the
+ // guarantees the execution of the other), then we can combine the
// noalias scopes here and do better than the general conservative
// answer used in combineMetadata().
@@ -2766,7 +2766,7 @@ void GVN::addDeadBlock(BasicBlock *BB) {
// R be the target of the dead out-coming edge.
// 1) Identify the set of dead blocks implied by the branch's dead outcoming
// edge. The result of this step will be {X| X is dominated by R}
-// 2) Identify those blocks which haves at least one dead prodecessor. The
+// 2) Identify those blocks which haves at least one dead predecessor. The
+// 2) Identify those blocks which haves at least one dead predecessor. The
// result of this step will be dominance-frontier(R).
// 3) Update the PHIs in DF(R) by replacing the operands corresponding to
// dead blocks with "UndefVal" in an hope these PHIs will optimized away.
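
The three numbered steps in this comment are mechanical enough to sketch. Step 1 (every block dominated by R is newly dead) is a plain subtree walk once a dominator tree is in hand. A toy version over an explicit child-list tree follows; the types are hypothetical, not LLVM's DominatorTree API.

```cpp
#include <map>
#include <set>
#include <string>
#include <vector>

// Dominator tree as child lists: DT[B] holds the blocks immediately
// dominated by B.
using DomTree = std::map<std::string, std::vector<std::string>>;

// Step 1 of the scheme above: everything dominated by R is dead, i.e. the
// whole dominator subtree rooted at R.
void collectDominated(const DomTree &DT, const std::string &R,
                      std::set<std::string> &Dead) {
  Dead.insert(R);
  auto It = DT.find(R);
  if (It == DT.end()) return;
  for (const auto &Child : It->second)
    collectDominated(DT, Child, Dead);
}

int main() {
  DomTree DT{{"R", {"X1", "X2"}}, {"X1", {"X3"}}};
  std::set<std::string> Dead;
  collectDominated(DT, "R", Dead); // Dead = {R, X1, X2, X3}
  return Dead.size() == 4 ? 0 : 1;
}
```

Steps 2 and 3, finding dominance-frontier(R) and rewriting its PHI operands to UndefVal, additionally need predecessor lists and are omitted from the sketch.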
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index bcc05c38a04..76848c76a85 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -402,7 +402,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
}
/// Computes loop safety information, checks loop body & header
-/// for the possiblity of may throw exception.
+/// for the possibility of may throw exception.
///
void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) {
assert(CurLoop != nullptr && "CurLoop cant be null");
@@ -410,7 +410,7 @@ void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) {
// Setting default safety values.
SafetyInfo->MayThrow = false;
SafetyInfo->HeaderMayThrow = false;
- // Iterate over header and compute dafety info.
+ // Iterate over header and compute safety info.
for (BasicBlock::iterator I = Header->begin(), E = Header->end();
(I != E) && !SafetyInfo->HeaderMayThrow; ++I)
SafetyInfo->HeaderMayThrow |= I->mayThrow();
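
For orientation, the function being touched has a two-phase shape: scan the header on its own (so callers can distinguish "the header may throw" from "something in the loop may throw"), then scan the remaining blocks. A plain-C++ sketch with toy stand-ins for the LLVM types, an assumed shape for illustration rather than the real API:

```cpp
#include <vector>

struct Inst { bool Throws = false; bool mayThrow() const { return Throws; } };
struct Block { std::vector<Inst> Insts; };
struct SafetyInfo { bool MayThrow = false, HeaderMayThrow = false; };

// Mirrors the scan visible in the hunk above: header first, with the same
// early exit once a throwing instruction is found, then the rest of the loop.
void computeSafety(SafetyInfo &SI, const Block &Header,
                   const std::vector<const Block *> &LoopBlocks) {
  for (const Inst &I : Header.Insts) {
    SI.HeaderMayThrow |= I.mayThrow();
    if (SI.HeaderMayThrow) break;
  }
  SI.MayThrow = SI.HeaderMayThrow;
  for (const Block *BB : LoopBlocks) {
    if (SI.MayThrow) break;
    for (const Inst &I : BB->Insts)
      SI.MayThrow |= I.mayThrow();
  }
}

int main() {
  Block Header{{Inst{false}, Inst{true}}}; // second instruction may throw
  SafetyInfo SI;
  computeSafety(SI, Header, {});
  return SI.HeaderMayThrow && SI.MayThrow ? 0 : 1;
}
```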
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 9d7e57ffeba..2f3b5e97185 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -489,7 +489,7 @@ struct LoopInterchange : public FunctionPass {
unsigned selectLoopForInterchange(LoopVector LoopList) {
// TODO: Add a better heuristic to select the loop to be interchanged based
- // on the dependece matrix. Currently we select the innermost loop.
+ // on the dependence matrix. Currently we select the innermost loop.
return LoopList.size() - 1;
}
@@ -544,7 +544,7 @@ struct LoopInterchange : public FunctionPass {
}
unsigned SelecLoopId = selectLoopForInterchange(LoopList);
- // Move the selected loop outwards to the best posible position.
+ // Move the selected loop outwards to the best possible position.
for (unsigned i = SelecLoopId; i > 0; i--) {
bool Interchanged =
processLoop(LoopList, i, i - 1, LoopNestExit, DependencyMatrix);
@@ -655,7 +655,7 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) {
DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch \n");
// We do not have any basic block in between now make sure the outer header
- // and outer loop latch doesnt contain any unsafe instructions.
+ // and outer loop latch doesn't contain any unsafe instructions.
if (containsUnsafeInstructionsInHeader(OuterLoopHeader) ||
containsUnsafeInstructionsInLatch(OuterLoopLatch))
return false;
@@ -836,7 +836,7 @@ bool LoopInterchangeLegality::currentLimitations() {
else
FoundInduction = true;
}
- // The loop latch ended and we didnt find the induction variable return as
+ // The loop latch ended and we didn't find the induction variable return as
// current limitation.
if (!FoundInduction)
return true;
@@ -966,7 +966,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId,
unsigned OuterLoopId,
CharMatrix &DepMatrix) {
- // TODO: Add Better Profitibility checks.
+ // TODO: Add better profitability checks.
// e.g
// 1) Construct dependency matrix and move the one with no loop carried dep
// inside to enable vectorization.
@@ -980,7 +980,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId,
if (Cost < 0)
return true;
- // It is not profitable as per current cache profitibility model. But check if
+ // It is not profitable as per current cache profitability model. But check if
// we can move this loop outside to improve parallelism.
bool ImprovesPar =
isProfitabileForVectorization(InnerLoopId, OuterLoopId, DepMatrix);
@@ -1045,7 +1045,7 @@ bool LoopInterchangeTransform::transform() {
splitInnerLoopLatch(InnerIndexVar);
DEBUG(dbgs() << "splitInnerLoopLatch Done\n");
- // Splits the inner loops phi nodes out into a seperate basic block.
+ // Splits the inner loops phi nodes out into a separate basic block.
splitInnerLoopHeader();
DEBUG(dbgs() << "splitInnerLoopHeader Done\n");
}
@@ -1181,8 +1181,8 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI)
return false;
- BasicBlock *InnerLoopHeaderSucessor = InnerLoopHeader->getUniqueSuccessor();
- if (!InnerLoopHeaderSucessor)
+ BasicBlock *InnerLoopHeaderSuccessor = InnerLoopHeader->getUniqueSuccessor();
+ if (!InnerLoopHeaderSuccessor)
return false;
// Adjust Loop Preheader and headers
@@ -1198,11 +1198,11 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch)
OuterLoopHeaderBI->setSuccessor(i, LoopExit);
else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader)
- OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSucessor);
+ OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSuccessor);
}
// Adjust reduction PHI's now that the incoming block has changed.
- updateIncomingBlock(InnerLoopHeaderSucessor, InnerLoopHeader,
+ updateIncomingBlock(InnerLoopHeaderSuccessor, InnerLoopHeader,
OuterLoopHeader);
BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI);
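
Both hunks in adjustLoopBranches perform the same two-step rewiring: point a branch at a new successor, then relabel the PHI incoming entries that named the old predecessor. A toy model of those two helpers, using hypothetical string-keyed types; the real pass uses BranchInst::setSuccessor and the PHINode incoming-block API.

```cpp
#include <string>
#include <vector>

struct Branch { std::vector<std::string> Succs; };
struct PhiIncoming { std::string FromBlock; std::string Value; };

// Retarget every edge Old -> New in a terminator, as the loop over
// getSuccessor/setSuccessor above does.
void retarget(Branch &BI, const std::string &Old, const std::string &New) {
  for (auto &S : BI.Succs)
    if (S == Old) S = New;
}

// Relabel PHI entries so they name the block that now jumps here.
void updateIncomingBlock(std::vector<PhiIncoming> &Phi, const std::string &Old,
                         const std::string &New) {
  for (auto &In : Phi)
    if (In.FromBlock == Old) In.FromBlock = New;
}

int main() {
  Branch OuterHeaderBI{{"OuterLoopLatch", "InnerLoopPreHeader"}};
  retarget(OuterHeaderBI, "InnerLoopPreHeader", "InnerLoopHeaderSuccessor");
  std::vector<PhiIncoming> Phi{{"InnerLoopHeader", "%red"}};
  updateIncomingBlock(Phi, "InnerLoopHeader", "OuterLoopHeader");
  return OuterHeaderBI.Succs[1] == "InnerLoopHeaderSuccessor" ? 0 : 1;
}
```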
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 643bf414575..67d5824f0ab 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -373,7 +373,7 @@ private:
/// Try to simplify binary operator I.
///
- /// TODO: Probaly it's worth to hoist the code for estimating the
+ /// TODO: Probably it's worth to hoist the code for estimating the
/// simplifications effects to a separate class, since we have a very similar
/// code in InlineCost already.
bool visitBinaryOperator(BinaryOperator &I) {
@@ -851,7 +851,7 @@ unsigned LoopUnroll::selectUnrollCount(
unsigned Count = UserCount ? CurrentCount : 0;
// If there is no user-specified count, unroll pragmas have the next
- // highest precendence.
+ // highest precedence.
if (Count == 0) {
if (PragmaCount) {
Count = PragmaCount;
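
The hunk above sits inside a precedence chain for picking an unroll count. Here is a compile-and-run sketch of just the two clauses visible in it; the real selectUnrollCount goes on to consult target defaults and the cost model, which are elided.

```cpp
#include <cstdio>

// An explicit user count (e.g. -unroll-count) wins; otherwise "#pragma
// unroll" counts have the next highest precedence, per the comment above.
// Returning 0 means "leave it to the later heuristics".
unsigned selectUnrollCount(bool UserCount, unsigned CurrentCount,
                           unsigned PragmaCount) {
  unsigned Count = UserCount ? CurrentCount : 0;
  if (Count == 0 && PragmaCount)
    Count = PragmaCount;
  return Count;
}

int main() {
  std::printf("%u\n", selectUnrollCount(false, 4, 8)); // no user count: 8
  std::printf("%u\n", selectUnrollCount(true, 4, 8));  // user count wins: 4
}
```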
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index d6a48c783b4..79b4e1a83ee 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -27,7 +27,7 @@
// well defined state for inspection by the collector. In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops. We try to avoid inserting more polls than
-// are neccessary to ensure a finite period between poll sites. This is not
+// are necessary to ensure a finite period between poll sites. This is not
// because the poll itself is expensive in the generated code; it's not. Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
@@ -91,7 +91,7 @@ STATISTIC(FiniteExecution, "Number of loops w/o safepoints finite execution");
using namespace llvm;
-// Ignore oppurtunities to avoid placing safepoints on backedges, useful for
+// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
cl::init(false));
@@ -121,7 +121,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
std::vector<TerminatorInst *> PollLocations;
/// True unless we're running spp-no-calls in which case we need to disable
- /// the call dependend placement opts.
+ /// the call-dependent placement opts.
bool CallSafepointsEnabled;
ScalarEvolution *SE = nullptr;
@@ -220,7 +220,7 @@ static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
// For the moment, we look only for the 'cuts' that consist of a single call
// instruction in a block which is dominated by the Header and dominates the
// loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
- // of such dominating blocks gets substaintially more occurences than just
+ // of such dominating blocks gets substantially more occurrences than just
// checking the Pred and Header blocks themselves. This may be due to the
// density of loop exit conditions caused by range and null checks.
// TODO: structure this as an analysis pass, cache the result for subloops,
@@ -765,7 +765,7 @@ static bool isGCLeafFunction(const CallSite &CS) {
// Most LLVM intrinsics are things which can never take a safepoint.
// As a result, we don't need to have the stack parsable at the
// callsite. This is a highly useful optimization since intrinsic
- // calls are fairly prevelent, particularly in debug builds.
+ // calls are fairly prevalent, particularly in debug builds.
return true;
}
@@ -933,7 +933,7 @@ static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
Token = Call;
- // Put the following gc_result and gc_relocate calls immediately after the
+ // Put the following gc_result and gc_relocate calls immediately after
// the old call (which we're about to delete).
assert(ToReplace->getNextNode() && "not a terminator, must have next");
Builder.SetInsertPoint(ToReplace->getNextNode());
@@ -962,7 +962,7 @@ static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
// We'll insert the gc.result into the normal block
BasicBlock *NormalDest = ToReplace->getNormalDest();
// Can not insert gc.result in case of phi nodes preset.
- // Should have removed this cases prior to runnning this function
+ // Should have removed this cases prior to running this function
assert(!isa<PHINode>(NormalDest->begin()));
Instruction *IP = &*(NormalDest->getFirstInsertionPt());
Builder.SetInsertPoint(IP);
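
The assert in this last hunk encodes an ordering contract: by the time ReplaceWithStatepoint runs, the invoke's normal destination must not start with PHI nodes, so the gc.result can go at the block's first insertion point. A toy restatement of that contract, with hypothetical types rather than the LLVM iterator API:

```cpp
#include <cassert>
#include <string>
#include <vector>

struct Inst { bool IsPhi = false; std::string Name; };

// Index where a new instruction (here, the gc.result) may be inserted. The
// pass assumes earlier normalization already split out any leading PHIs,
// matching the assert in the hunk above.
size_t firstInsertionPoint(const std::vector<Inst> &Block) {
  assert((Block.empty() || !Block.front().IsPhi) &&
         "PHIs must be removed before running this rewrite");
  return 0;
}

int main() {
  std::vector<Inst> NormalDest{{false, "gc.result goes here"}};
  return firstInsertionPoint(NormalDest) == 0 ? 0 : 1;
}
```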
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 6f70147c2a0..062c0d5612b 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -164,7 +164,7 @@ typedef DenseSet<llvm::Value *> StatepointLiveSetTy;
typedef DenseMap<Instruction *, Value *> RematerializedValueMapTy;
struct PartiallyConstructedSafepointRecord {
- /// The set of values known to be live accross this safepoint
+ /// The set of values known to be live across this safepoint
StatepointLiveSetTy liveset;
/// Mapping from live pointers to a base-defining-value
@@ -274,7 +274,7 @@ static void analyzeParsePointLiveness(
if (PrintLiveSet) {
// Note: This output is used by several of the test cases
- // The order of elemtns in a set is not stable, put them in a vec and sort
+ // The order of elements in a set is not stable, put them in a vec and sort
// by name
SmallVector<Value *, 64> temp;
temp.insert(temp.end(), liveset.begin(), liveset.end());
@@ -509,7 +509,7 @@ static Value *findBaseDefiningValue(Value *I) {
return I;
// I have absolutely no idea how to implement this part yet. It's not
- // neccessarily hard, I just haven't really looked at it yet.
+ // necessarily hard, I just haven't really looked at it yet.
assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");
if (isa<AtomicCmpXchgInst>(I))
@@ -533,7 +533,7 @@ static Value *findBaseDefiningValue(Value *I) {
"Base pointer for a struct is meaningless");
// The last two cases here don't return a base pointer. Instead, they
- // return a value which dynamically selects from amoung several base
+ // return a value which dynamically selects from among several base
// derived pointers (each with it's own base potentially). It's the job of
// the caller to resolve these.
assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
@@ -717,7 +717,7 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) {
//
// Note: A simpler form of this would be to add the conflict form of all
// PHIs without running the optimistic algorithm. This would be
- // analougous to pessimistic data flow and would likely lead to an
+ // analogous to pessimistic data flow and would likely lead to an
// overall worse solution.
#ifndef NDEBUG
@@ -915,7 +915,7 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) {
assert(base != nullptr && "unknown BDVState!");
}
- // In essense this assert states: the only way two
+ // In essence this assert states: the only way two
// values incoming from the same basic block may be
// different is by being different bitcasts of the same
// value. A cleanup that remains TODO is changing
@@ -1043,7 +1043,7 @@ findBasePointers(const StatepointLiveSetTy &live,
// If you see this trip and like to live really dangerously, the code should
// be correct, just with idioms the verifier can't handle. You can try
- // disabling the verifier at your own substaintial risk.
+ // disabling the verifier at your own substantial risk.
assert(!isa<ConstantPointerNull>(base) &&
"the relocation code needs adjustment to handle the relocation of "
"a null pointer constant without causing false positives in the "
@@ -1089,7 +1089,7 @@ static void recomputeLiveInValues(
Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate,
MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
// TODO-PERF: reuse the original liveness, then simply run the dataflow
- // again. The old values are still live and will help it stablize quickly.
+ // again. The old values are still live and will help it stabilize quickly.
GCPtrLivenessData RevisedLivenessData;
computeLiveInValues(DT, F, RevisedLivenessData);
for (size_t i = 0; i < records.size(); i++) {
@@ -1131,7 +1131,7 @@ static int find_index(ArrayRef<Value *> livevec, Value *val) {
return index;
}
-// Create new attribute set containing only attributes which can be transfered
+// Create new attribute set containing only attributes which can be transferred
// from original call to the safepoint.
static AttributeSet legalizeCallAttributes(AttributeSet AS) {
AttributeSet ret;
@@ -1263,7 +1263,7 @@ makeStatepointExplicitImpl(const CallSite &CS, /* to replace */
// Currently we will fail on parameter attributes and on certain
// function attributes.
AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
- // In case if we can handle this set of sttributes - set up function attrs
+ // In case if we can handle this set of attributes - set up function attrs
// directly on statepoint and return attrs later for gc_result intrinsic.
call->setAttributes(new_attrs.getFnAttributes());
return_attributes = new_attrs.getRetAttributes();
@@ -1293,7 +1293,7 @@ makeStatepointExplicitImpl(const CallSite &CS, /* to replace */
// Currently we will fail on parameter attributes and on certain
// function attributes.
AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
- // In case if we can handle this set of sttributes - set up function attrs
+ // In case if we can handle this set of attributes - set up function attrs
// directly on statepoint and return attrs later for gc_result intrinsic.
invoke->setAttributes(new_attrs.getFnAttributes());
return_attributes = new_attrs.getRetAttributes();
@@ -1571,7 +1571,7 @@ static void relocationViaAlloca(
VisitedLiveValues);
if (ClobberNonLive) {
- // As a debuging aid, pretend that an unrelocated pointer becomes null at
+ // As a debugging aid, pretend that an unrelocated pointer becomes null at
// the gc.statepoint. This will turn some subtle GC problems into
// slightly easier to debug SEGVs. Note that on large IR files with
// lots of gc.statepoints this is extremely costly both memory and time
@@ -1742,10 +1742,10 @@ static void findLiveReferences(
/// Remove any vector of pointers from the liveset by scalarizing them over the
/// statepoint instruction. Adds the scalarized pieces to the liveset. It
-/// would be preferrable to include the vector in the statepoint itself, but
+/// would be preferable to include the vector in the statepoint itself, but
/// the lowering code currently does not handle that. Extending it would be
/// slightly non-trivial since it requires a format change. Given how rare
-/// such cases are (for the moment?) scalarizing is an acceptable comprimise.
+/// such cases are (for the moment?) scalarizing is an acceptable compromise.
static void splitVectorValues(Instruction *StatepointInst,
StatepointLiveSetTy &LiveSet,
DenseMap<Value *, Value *>& PointerToBase,
@@ -1876,7 +1876,7 @@ static void splitVectorValues(Instruction *StatepointInst,
// Helper function for the "rematerializeLiveValues". It walks use chain
// starting from the "CurrentValue" until it meets "BaseValue". Only "simple"
// values are visited (currently it is GEP's and casts). Returns true if it
-// sucessfully reached "BaseValue" and false otherwise.
+// successfully reached "BaseValue" and false otherwise.
// Fills "ChainToBase" array with all visited values. "BaseValue" is not
// recorded.
static bool findRematerializableChainToBasePointer(
@@ -2128,7 +2128,7 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
}
assert(records.size() == toUpdate.size());
- // A) Identify all gc pointers which are staticly live at the given call
+ // A) Identify all gc pointers which are statically live at the given call
// site.
findLiveReferences(F, DT, P, toUpdate, records);
@@ -2205,7 +2205,7 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
}
// In order to reduce live set of statepoint we might choose to rematerialize
- // some values instead of relocating them. This is purelly an optimization and
+ // some values instead of relocating them. This is purely an optimization and
// does not influence correctness.
TargetTransformInfo &TTI =
P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
@@ -2450,7 +2450,7 @@ static void computeLiveInValues(BasicBlock::reverse_iterator rbegin,
"support for FCA unimplemented");
if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
// The choice to exclude all things constant here is slightly subtle.
- // There are two idependent reasons:
+ // There are two independent reasons:
// - We assume that things which are constant (from LLVM's definition)
// do not move at runtime. For example, the address of a global
// variable is fixed, even though it's contents may not be.
@@ -2588,7 +2588,7 @@ static void computeLiveInValues(DominatorTree &DT, Function &F,
} // while( !worklist.empty() )
#ifndef NDEBUG
- // Sanity check our ouput against SSA properties. This helps catch any
+ // Sanity check our output against SSA properties. This helps catch any
// missing kills during the above iteration.
for (BasicBlock &BB : F) {
checkBasicSSA(DT, Data, BB);
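
computeLiveInValues is a standard backward liveness walk: within a block, live-in equals (live-out minus definitions) plus uses, visited in reverse, and the SSA sanity check above then verifies the fixed point. A self-contained toy of the per-block transfer function, using strings for values; the real pass tracks llvm::Value pointers and filters to GC pointer types.

```cpp
#include <set>
#include <string>
#include <vector>

struct Inst { std::string Def; std::vector<std::string> Uses; };

// Per-block transfer function: LiveIn = (LiveOut \ defs) + uses, walking the
// instructions in reverse, as the reverse_iterator signature above suggests.
std::set<std::string> liveIn(const std::vector<Inst> &Block,
                             std::set<std::string> LiveOut) {
  for (auto It = Block.rbegin(); It != Block.rend(); ++It) {
    LiveOut.erase(It->Def);                                  // kill the def
    for (const std::string &U : It->Uses) LiveOut.insert(U); // gen the uses
  }
  return LiveOut;
}

int main() {
  std::vector<Inst> B{{"p", {"base"}}, {"q", {"p"}}};
  std::set<std::string> In = liveIn(B, {"q"}); // live-in = {"base"}
  return In.count("base") == 1 && In.size() == 1 ? 0 : 1;
}
```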
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 947513a3657..9c69b6c7558 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -270,7 +270,8 @@ public:
friend class AllocaSlices;
friend class AllocaSlices::partition_iterator;
- /// \brief The begining and ending offsets of the alloca for this partition.
+ /// \brief The beginning and ending offsets of the alloca for this
+ /// partition.
uint64_t BeginOffset, EndOffset;
/// \brief The start end end iterators of this partition.
@@ -439,7 +440,7 @@ public:
// OK, we need to consume new slices. Set the end offset based on the
// current slice, and step SJ past it. The beginning offset of the
- // parttion is the beginning offset of the next slice unless we have
+ // partition is the beginning offset of the next slice unless we have
// pre-existing split slices that are continuing, in which case we begin
// at the prior end offset.
P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
@@ -493,7 +494,7 @@ public:
"End iterators don't match between compared partition iterators!");
// The observed positions of partitions is marked by the P.SI iterator and
- // the emptyness of the split slices. The latter is only relevant when
+ // the emptiness of the split slices. The latter is only relevant when
// P.SI == SE, as the end iterator will additionally have an empty split
// slices list, but the prior may have the same P.SI and a tail of split
// slices.
@@ -1934,7 +1935,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
/// \brief Test whether the given slice use can be promoted to a vector.
///
-/// This function is called to test each entry in a partioning which is slated
+/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P,
const Slice &S, VectorType *Ty,
@@ -2130,7 +2131,7 @@ static bool isIntegerWideningViableForSlice(const Slice &S,
uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
// We can't reasonably handle cases where the load or store extends past
- // the end of the aloca's type and into its padding.
+ // the end of the alloca's type and into its padding.
if (RelEnd > Size)
return false;
@@ -3711,7 +3712,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
return true;
}),
Stores.end());
- // Now we have to go *back* through all te stores, because a later store may
+ // Now we have to go *back* through all the stores, because a later store may
// have caused an earlier store's load to become unsplittable and if it is
// unsplittable for the later store, then we can't rely on it being split in
// the earlier store either.
@@ -3972,7 +3973,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
// Mark the original store as dead now that we've split it up and kill its
// slice. Note that we leave the original load in place unless this store
- // was its ownly use. It may in turn be split up if it is an alloca load
+ // was its only use. It may in turn be split up if it is an alloca load
// for some other alloca, but it may be a normal load. This may introduce
// redundant loads, but where those can be merged the rest of the optimizer
// should handle the merging, and this uncovers SSA splits which is more
@@ -4230,7 +4231,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
// Migrate debug information from the old alloca to the new alloca(s)
- // and the individial partitions.
+ // and the individual partitions.
if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) {
auto *Var = DbgDecl->getVariable();
auto *Expr = DbgDecl->getExpression();
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index cf11119086c..52d13b3a94a 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -484,7 +484,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
}
#ifndef NDEBUG
- // If the cloning starts at the begining of the function, verify that
+ // If the cloning starts at the beginning of the function, verify that
// the function arguments are mapped.
if (!StartingInst)
for (Function::const_arg_iterator II = OldFunc->arg_begin(),
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 4acd988691d..3b447098af1 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -163,7 +163,7 @@ static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
I != IE; ++I) {
PHINode *PN = cast<PHINode>(I);
- // Only update the first occurence.
+ // Only update the first occurrence.
unsigned Idx = 0, E = PN->getNumIncomingValues();
unsigned LocalNumMergedCases = NumMergedCases;
for (; Idx != E; ++Idx) {
@@ -173,7 +173,7 @@ static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB,
}
}
- // Remove additional occurences coming from condensed cases and keep the
+ // Remove additional occurrences coming from condensed cases and keep the
// number of incoming values equal to the number of branches to SuccBB.
SmallVector<unsigned, 8> Indices;
for (++Idx; LocalNumMergedCases > 0 && Idx < E; ++Idx)
@@ -424,7 +424,7 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
std::vector<IntRange> UnreachableRanges;
if (isa<UnreachableInst>(Default->getFirstNonPHIOrDbg())) {
- // Make the bounds tightly fitted around the case value range, becase we
+ // Make the bounds tightly fitted around the case value range, because we
// know that the value passed to the switch must be exactly one of the case
// values.
assert(!Cases.empty());
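
The reasoning in that last hunk is worth spelling out: when the default destination is unreachable, the switch operand is guaranteed to equal one of the case values, so the lowered range checks can be clamped to the cases' [min, max] instead of the full integer range. A small sketch of the clamping computation in toy form; the real code works over LowerSwitch's CaseRange vectors.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Tight bounds around the case values, valid only because an unreachable
// default means the operand must hit one of the cases.
std::pair<int64_t, int64_t> caseBounds(const std::vector<int64_t> &Cases) {
  assert(!Cases.empty() && "matches the assert in the hunk above");
  auto [MinIt, MaxIt] = std::minmax_element(Cases.begin(), Cases.end());
  return {*MinIt, *MaxIt};
}

int main() {
  auto [Lo, Hi] = caseBounds({3, 7, 5});
  return (Lo == 3 && Hi == 7) ? 0 : 1;
}
```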
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index fa0459f62c9..10554845e5a 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -63,7 +63,7 @@ namespace {
/// Iteratively perform simplification on a worklist of users of the
/// specified induction variable. This is the top-level driver that applies
- /// all simplicitions to users of an IV.
+ /// all simplifications to users of an IV.
void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);
Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);
@@ -184,7 +184,7 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
InvariantPredicate, InvariantLHS,
InvariantRHS)) {
- // Rewrite the comparision to a loop invariant comparision if it can be done
+ // Rewrite the comparison to a loop invariant comparison if it can be done
// cheaply, where cheaply means "we don't need to emit any new
// instructions".
@@ -482,8 +482,8 @@ static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
-/// top-down, updating the IR only when it encouters a clear optimization
-/// opportunitiy.
+/// top-down, updating the IR only when it encounters a clear optimization
+/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b215a256af0..61773cd2013 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -162,7 +162,7 @@ static bool canCombineAsAltInst(unsigned Op) {
return false;
}
-/// \returns ShuffleVector instruction if intructions in \p VL have
+/// \returns ShuffleVector instruction if instructions in \p VL have
/// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
/// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
static unsigned isAltInst(ArrayRef<Value *> VL) {
@@ -393,7 +393,7 @@ public:
/// \brief Perform LICM and CSE on the newly generated gather sequences.
void optimizeGatherSequence();
- /// \returns true if it is benefitial to reverse the vector order.
+ /// \returns true if it is beneficial to reverse the vector order.
bool shouldReorder() const {
return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
}
@@ -441,7 +441,7 @@ private:
/// \returns a vector from a collection of scalars in \p VL.
Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
- /// \returns whether the VectorizableTree is fully vectoriable and will
+ /// \returns whether the VectorizableTree is fully vectorizable and will
/// be beneficial even the tree height is tiny.
bool isFullyVectorizableTinyTree();
@@ -2898,8 +2898,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
}
} else {
// I'm not sure if this can ever happen. But we need to be safe.
- // This lets the instruction/bundle never be scheduled and eventally
- // disable vectorization.
+ // This lets the instruction/bundle never be scheduled and
+ // eventually disable vectorization.
BundleMember->Dependencies++;
BundleMember->incrementUnscheduledDeps(1);
}
@@ -3005,7 +3005,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
};
std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
- // Ensure that all depencency data is updated and fill the ready-list with
+ // Ensure that all dependency data is updated and fill the ready-list with
// initial instructions.
int Idx = 0;
int NumToSchedule = 0;
@@ -3732,7 +3732,7 @@ public:
private:
- /// \brief Calcuate the cost of a reduction.
+ /// \brief Calculate the cost of a reduction.
int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
Type *ScalarTy = FirstReducedVal->getType();
Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
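
For scale, ReduxWidth here is the number of scalar reduction lanes being costed: VecTy is the type of one vector holding all of them. A plain-C++ stand-in for what a width-4 reduction computes, illustrative only; the actual cost query happens through TTI, not here.

```cpp
#include <array>
#include <cstdio>

// a0+a1+a2+a3 done reduction-style: split the 4 lanes into halves, add the
// halves lane-wise, repeat. That is log2(4) = 2 steps instead of a 3-add
// scalar chain, which is the shape whose cost is being estimated above.
int reduceWidth4(const std::array<int, 4> &V) {
  int s0 = V[0] + V[2], s1 = V[1] + V[3]; // step 1: add the two halves
  return s0 + s1;                         // step 2: final horizontal add
}

int main() {
  std::printf("%d\n", reduceWidth4({1, 2, 3, 4})); // prints 10
}
```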