Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/AliasAnalysisSummary.h      |  2 +-
-rw-r--r--  llvm/lib/Analysis/BranchProbabilityInfo.cpp   |  2 +-
-rw-r--r--  llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp  |  4 ++--
-rw-r--r--  llvm/lib/Analysis/LazyCallGraph.cpp           | 10 +++++-----
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp          |  2 +-
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp |  6 +++---
-rw-r--r--  llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp  |  2 +-
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp           |  2 +-
8 files changed, 15 insertions, 15 deletions
diff --git a/llvm/lib/Analysis/AliasAnalysisSummary.h b/llvm/lib/Analysis/AliasAnalysisSummary.h
index 51a85f4e706..fb93a12420f 100644
--- a/llvm/lib/Analysis/AliasAnalysisSummary.h
+++ b/llvm/lib/Analysis/AliasAnalysisSummary.h
@@ -13,7 +13,7 @@
/// Summary-based analysis, also known as bottom-up analysis, is a style of
/// interprocedrual static analysis that tries to analyze the callees before the
/// callers get analyzed. The key idea of summary-based analysis is to first
-/// process each function indepedently, outline its behavior in a condensed
+/// process each function independently, outline its behavior in a condensed
/// summary, and then instantiate the summary at the callsite when the said
/// function is called elsewhere. This is often in contrast to another style
/// called top-down analysis, in which callers are always analyzed first before
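[Editor's note: the comment above describes the bottom-up pattern well; a minimal, self-contained C++ sketch of that pattern follows. This is not LLVM's machinery; FunctionSummary, Summaries, and instantiateAtCallSite are hypothetical names chosen for illustration.]

```cpp
#include <map>
#include <string>
#include <vector>

// Hypothetical condensed summary: which parameter indices may alias the
// return value. The shape is illustrative only.
struct FunctionSummary {
  std::vector<int> ParamsAliasingReturn;
};

// Bottom-up analysis fills this in once per function, callees first,
// independently of any caller.
std::map<std::string, FunctionSummary> Summaries;

// At a callsite, the callee's summary is instantiated instead of its body
// being re-analyzed: summarized formals are mapped onto actual arguments.
std::vector<std::string>
instantiateAtCallSite(const FunctionSummary &S,
                      const std::vector<std::string> &Args) {
  std::vector<std::string> MayAliasReturn;
  for (int Idx : S.ParamsAliasingReturn)
    MayAliasReturn.push_back(Args[Idx]);
  return MayAliasReturn;
}
```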
diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 4247d9a3528..f4aea51d301 100644
--- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -392,7 +392,7 @@ bool BranchProbabilityInfo::calcColdCallHeuristics(const BasicBlock *BB) {
return true;
}
-// Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
+// Calculate Edge Weights using "Pointer Heuristics". Predict a comparison
// between two pointer or pointer and NULL will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
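[Editor's note: a rough stand-in for the heuristic this hunk documents, outside LLVM. The 0.625/0.375 split is an assumed, illustrative weighting, not a claim about LLVM's actual branch weights.]

```cpp
// Given a branch guarded by a pointer (in)equality test, predict that
// "the pointers are equal" is the unlikely outcome.
struct EdgeProb {
  double TrueEdge;  // probability the branch condition holds
  double FalseEdge; // probability it does not
};

EdgeProb guessPointerCompare(bool IsEqualityTest) {
  constexpr double Likely = 0.625, Unlikely = 1.0 - Likely; // assumed weights
  // p == q (or p == NULL) is predicted to fail, so its true edge is cold;
  // p != q is the mirror image.
  return IsEqualityTest ? EdgeProb{Unlikely, Likely}
                        : EdgeProb{Likely, Unlikely};
}
```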
diff --git a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
index 076a2b205d0..7138f184b73 100644
--- a/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -18,7 +18,7 @@
//
// The algorithm used here is based on recursive state machine matching scheme
// proposed in "Demand-driven alias analysis for C" by Xin Zheng and Radu
-// Rugina. The general idea is to extend the tranditional transitive closure
+// Rugina. The general idea is to extend the traditional transitive closure
// algorithm to perform CFL matching along the way: instead of recording
// "whether X is reachable from Y", we keep track of "whether X is reachable
// from Y at state Z", where the "state" field indicates where we are in the CFL
@@ -645,7 +645,7 @@ static void processWorkListItem(const WorkListItem &Item, const CFLGraph &Graph,
// relations that are symmetric, we could actually cut the storage by half by
// sorting FromNode and ToNode before insertion happens.
- // The newly added value alias pair may pontentially generate more memory
+ // The newly added value alias pair may potentially generate more memory
// alias pairs. Check for them here.
auto FromNodeBelow = getNodeBelow(Graph, FromNode);
auto ToNodeBelow = getNodeBelow(Graph, ToNode);
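[Editor's note: the state-annotated reachability described in the header comment, and the propagation step in the second hunk, can be sketched generically: facts are (From, To, State) triples closed over a worklist, and each new value-alias fact may seed memory-alias facts for the pointee nodes. All names below are toy stand-ins, not the real CFLGraph API.]

```cpp
#include <deque>
#include <optional>
#include <set>
#include <tuple>

enum class State { ValueAlias, MemAlias };
using Fact = std::tuple<int /*From*/, int /*To*/, State>;

// Toy analogue of getNodeBelow: the node holding what N points to, if any.
// Here, even-numbered nodes are given a pointee node N + 1.
std::optional<int> nodeBelow(int N) {
  if (N % 2 == 0)
    return N + 1;
  return std::nullopt;
}

void solve(std::deque<Fact> WorkList, std::set<Fact> &Known) {
  while (!WorkList.empty()) {
    auto [From, To, St] = WorkList.front();
    WorkList.pop_front();
    if (!Known.insert({From, To, St}).second)
      continue; // already derived at this state
    // As the hunk above says: a newly added value alias pair may
    // potentially generate more memory alias pairs. If both sides have a
    // node below, those pointees may alias in memory.
    if (St == State::ValueAlias) {
      auto FromBelow = nodeBelow(From), ToBelow = nodeBelow(To);
      if (FromBelow && ToBelow)
        WorkList.push_back({*FromBelow, *ToBelow, State::MemAlias});
    }
  }
}
```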
diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp
index abd47c0bc79..535f0ae55ab 100644
--- a/llvm/lib/Analysis/LazyCallGraph.cpp
+++ b/llvm/lib/Analysis/LazyCallGraph.cpp
@@ -427,7 +427,7 @@ bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
/// source to target.
///
/// This helper routine, in addition to updating the postorder sequence itself
-/// will also update a map from SCCs to indices within that sequecne.
+/// will also update a map from SCCs to indices within that sequence.
///
/// The sequence and the map must operate on pointers to the SCC type.
///
@@ -713,7 +713,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
//
// However, we specially handle the target node. The target node is known to
// reach all other nodes in the original SCC by definition. This means that
- // we want the old SCC to be replaced with an SCC contaning that node as it
+ // we want the old SCC to be replaced with an SCC containing that node as it
// will be the root of whatever SCC DAG results from the DFS. Assumptions
// about an SCC such as the set of functions called will continue to hold,
// etc.
@@ -822,7 +822,7 @@ LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
// Cleared the DFS early, start another round.
break;
- // We've finished processing N and its descendents, put it on our pending
+ // We've finished processing N and its descendants, put it on our pending
// SCC stack to eventually get merged into an SCC of nodes.
PendingSCCStack.push_back(N);
@@ -1234,7 +1234,7 @@ LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
++I;
}
- // We've finished processing N and its descendents, put it on our pending
+ // We've finished processing N and its descendants, put it on our pending
// stack to eventually get merged into a RefSCC.
PendingRefSCCStack.push_back(N);
@@ -1617,7 +1617,7 @@ void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
++I;
}
- // We've finished processing N and its descendents, put it on our pending
+ // We've finished processing N and its descendants, put it on our pending
// SCC stack to eventually get merged into an SCC of nodes.
PendingSCCStack.push_back(N);
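[Editor's note: the "pending SCC stack" idiom repeated across these hunks is the finishing step of a Tarjan-style SCC walk. A schematic, self-contained version follows, with a toy Node type rather than LazyCallGraph's real nodes, which carry edges and more bookkeeping.]

```cpp
#include <utility>
#include <vector>

struct Node {
  int DFSNum = 0;  // order in which DFS first reached the node
  int LowLink = 0; // smallest DFSNum reachable from the node's subtree
};

// Called once DFS has finished N and all of its descendants: park N on the
// pending stack. If N is an SCC root (its subtree reaches nothing visited
// earlier), everything parked above it, plus N itself, forms one SCC.
void finishNode(Node &N, std::vector<Node *> &PendingSCCStack,
                std::vector<std::vector<Node *>> &SCCs) {
  PendingSCCStack.push_back(&N);
  if (N.LowLink == N.DFSNum) {
    std::vector<Node *> SCC;
    Node *Popped;
    do {
      Popped = PendingSCCStack.back();
      PendingSCCStack.pop_back();
      SCC.push_back(Popped);
    } while (Popped != &N);
    SCCs.push_back(std::move(SCC));
  }
}
```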
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 2aa389e059c..af9ad9ee0a6 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -238,7 +238,7 @@ bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
}
/// \brief Tests if a value is a call or invoke to a library function that
-/// allocates memory similiar to malloc or calloc.
+/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
return getAllocationData(V, MallocOrCallocLike, TLI,
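[Editor's note: a usage sketch for the predicate this hunk documents, using the signature visible in the diff itself; the surrounding helper is hypothetical, and the includes assume LLVM headers of this vintage.]

```cpp
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical caller: decide whether CI returns freshly allocated memory
// the way malloc or calloc would, e.g. before assuming the result does not
// alias anything pre-existing.
static bool returnsFreshAllocation(const CallInst *CI,
                                   const TargetLibraryInfo *TLI) {
  // Signature as it appears in this diff; LookThroughBitCast=false means a
  // bitcast of the callee is not peeled off first.
  return isMallocOrCallocLikeFn(CI, TLI, /*LookThroughBitCast=*/false);
}
```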
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 53ce33bacbe..78a22e338c8 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1387,7 +1387,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
// block. Adjusting IVIncInsertPos before expansion begins cannot handle
- // all cases. Consider a phi outide whose operand is replaced during
+ // all cases. Consider a phi outside whose operand is replaced during
// expansion with the value of the postinc user. Without fundamentally
// changing the way postinc users are tracked, the only remedy is
// inserting an extra IV increment. StepV might fold into PostLoopOffset,
@@ -1407,7 +1407,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
}
// We have decided to reuse an induction variable of a dominating loop. Apply
- // truncation and/or invertion of the step.
+ // truncation and/or inversion of the step.
if (TruncTy) {
Type *ResTy = Result->getType();
// Normalize the result type.
@@ -2209,7 +2209,7 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
// If the backedge taken count type is larger than the AR type,
// check that we don't drop any bits by truncating it. If we are
- // droping bits, then we have overflow (unless the step is zero).
+ // dropping bits, then we have overflow (unless the step is zero).
if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
auto *BackedgeCheck =
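[Editor's note: a fixed-width instance of the truncation check in this hunk, with a 64-bit count and a 32-bit AR type standing in for the SCEV types (so DstBits = 32 and SrcBits = 64).]

```cpp
#include <cstdint>

bool dropsBitsOnTrunc(uint64_t BackedgeTakenCount) {
  // Plays the role of APInt::getMaxValue(DstBits).zext(SrcBits): the
  // largest count that survives truncation to 32 bits unchanged.
  const uint64_t MaxVal = UINT32_MAX;
  // A larger count loses bits when truncated, i.e. the narrower IV would
  // overflow (unless the step is zero, which the real code also handles).
  return BackedgeTakenCount > MaxVal;
}
```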
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 2b19b14407f..25a154edf4a 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -636,7 +636,7 @@ static bool mayBeAccessToSubobjectOf(TBAAStructTagNode BaseTag,
// If the base object has a direct or indirect field of the subobject's type,
// then this may be an access to that field. We need this to check now that
- // we support aggreagtes as access types.
+ // we support aggregates as access types.
if (NewFormat) {
// TBAAStructTypeNode BaseAccessType(BaseTag.getAccessType());
TBAAStructTypeNode FieldType(SubobjectTag.getBaseType());
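[Editor's note: in C++ terms, the direct-or-indirect-field situation this hunk's comment tests looks like the following; the types are illustrative.]

```cpp
// Outer reaches Inner indirectly through Wrapper, so an access typed as
// Inner may be an access to that field of an Outer object.
struct Inner { int X; };
struct Wrapper { Inner I; };
struct Outer { Wrapper W; double D; };

int viaInner(Inner *P) { return P->X; }     // access type: Inner
int viaOuter(Outer *Q) { return Q->W.I.X; } // base object type: Outer
// With aggregates allowed as access types, TBAA cannot declare these two
// accesses no-alias on type grounds alone.
```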
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e8d8dfc6eff..411ede451b2 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4508,7 +4508,7 @@ static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
///
/// The function processes the case when type of true and false values of a
/// select instruction differs from type of the cmp instruction operands because
-/// of a cast instructon. The function checks if it is legal to move the cast
+/// of a cast instruction. The function checks if it is legal to move the cast
/// operation after "select". If yes, it returns the new second value of
/// "select" (with the assumption that cast is moved):
/// 1. As operand of cast instruction when both values of "select" are same cast
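[Editor's note: a scalar analog of the rewrite this comment describes, using sext as the cast. Before the move, the cast feeds one arm of the select and the other arm is a wide constant; after the move, the narrow constant 42 is the new second value of the select. The move is legal here because 42 is itself the sign-extension of a 32-bit constant.]

```cpp
#include <cstdint>

// Cast feeds the select.
int64_t castThenSelect(int32_t X, int32_t Y) {
  return (X < Y) ? static_cast<int64_t>(X) : INT64_C(42);
}

// Cast moved after the select; same result for all inputs.
int64_t selectThenCast(int32_t X, int32_t Y) {
  return static_cast<int64_t>((X < Y) ? X : 42);
}
```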