Diffstat (limited to 'llvm/lib/Transforms')
16 files changed, 35 insertions, 35 deletions
diff --git a/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index d94c0f45323..aefcff95653 100644
--- a/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -77,8 +77,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
 }
 
 // True if A is better than B.
-static bool IsBetterCannonical(const GlobalVariable &A,
-                               const GlobalVariable &B) {
+static bool IsBetterCanonical(const GlobalVariable &A,
+                              const GlobalVariable &B) {
   if (!A.hasLocalLinkage() && B.hasLocalLinkage())
     return true;
 
@@ -160,7 +160,7 @@ bool ConstantMerge::runOnModule(Module &M) {
       // If this is the first constant we find or if the old one is local,
       // replace with the current one. If the current is externally visible
      // it cannot be replace, but can be the canonical constant we merge with.
-      if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
+      if (Slot == 0 || IsBetterCanonical(*GV, *Slot))
        Slot = GV;
    }
 
diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 38614216c3c..33f0707f29a 100644
--- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -723,7 +723,7 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
 
 // Helper for writeThunk,
 // Selects proper bitcast operation,
-// but a bit simplier then CastInst::getCastOpcode.
+// but a bit simpler then CastInst::getCastOpcode.
 static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
   Type *SrcTy = V->getType();
   if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c949720b1f5..7d2fc0a5289 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -77,7 +77,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
   // A single load+store correctly handles overlapping memory in the memmove
   // case.
   uint64_t Size = MemOpLength->getLimitedValue();
-  assert(Size && "0-sized memory transfering should be removed already.");
+  assert(Size && "0-sized memory transferring should be removed already.");
   if (Size > 8 || (Size&(Size-1)))
     return 0;  // If not 1/2/4/8 bytes, exit.
 
@@ -684,7 +684,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
     }
 
-    // Couldn't simplify - cannonicalize constant to the RHS.
+    // Couldn't simplify - canonicalize constant to the RHS.
     std::swap(Arg0, Arg1);
   }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index c85ec29f2fa..cccfd4d49ef 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1193,10 +1193,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
     // will not occur because the result of OpI is exact (as we will for
     // FMul, for example) is hopeless. However, we *can* nonetheless
     // frequently know that double rounding cannot occur (or that it is
-    // innoculous) by taking advantage of the specific structure of
+    // innocuous) by taking advantage of the specific structure of
     // infinitely-precise results that admit double rounding.
     //
-    // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficent
+    // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficient
     // to represent both sources, we can guarantee that the double
     // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
     // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 3bc8ad3c8c4..5dd3325a0bc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2048,7 +2048,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I,
 
 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
 /// should be swapped.
-/// The descision is based on how many times these two operands are reused
+/// The decision is based on how many times these two operands are reused
 /// as subtract operands and their positions in those instructions.
 /// The rational is that several architectures use the same instruction for
 /// both subtract and cmp, thus it is better if the order of those operands
@@ -2064,7 +2064,7 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
   // Each time Op0 is the first operand, count -1: swapping is bad, the
   // subtract has already the same layout as the compare.
   // Each time Op0 is the second operand, count +1: swapping is good, the
-  // subtract has a diffrent layout as the compare.
+  // subtract has a different layout as the compare.
   // At the end, if the benefit is greater than 0, Op0 should come second to
   // expose more CSE opportunities.
   int GlobalSwapBenefits = 0;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 178be61b43e..7ffb01b5b2a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1013,7 +1013,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
       // If newRHS == newLHS, we want to remap any references from newRHS to
       // newLHS so that we can properly identify splats that may occur due to
-      // obfuscation accross the two vectors.
+      // obfuscation across the two vectors.
       if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
         eltMask += newLHSWidth;
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 6a7252fc41e..b453f81de94 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1629,7 +1629,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
     return &BI;
   }
 
-  // Cannonicalize fcmp_one -> fcmp_oeq
+  // Canonicalize fcmp_one -> fcmp_oeq
   FCmpInst::Predicate FPred; Value *Y;
   if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                       TrueDest, FalseDest)) &&
@@ -1645,7 +1645,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
     return &BI;
   }
 
-  // Cannonicalize icmp_ne -> icmp_eq
+  // Canonicalize icmp_ne -> icmp_eq
   ICmpInst::Predicate IPred;
   if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                       TrueDest, FalseDest)) &&
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index fe875192f58..377d0d87d16 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -249,7 +249,7 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
   ShadowMapping Mapping;
 
   // OR-ing shadow offset if more efficient (at least on x86),
-  // but on ppc64 we have to use add since the shadow offset is not neccesary
+  // but on ppc64 we have to use add since the shadow offset is not necessary
   // 1/8-th of the address space.
   Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
 
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 8a52a4444be..93e71cc1161 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1964,7 +1964,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // Now, get the shadow for the RetVal.
     if (!I.getType()->isSized()) return;
     IRBuilder<> IRBBefore(&I);
-    // Untill we have full dynamic coverage, make sure the retval shadow is 0.
+    // Until we have full dynamic coverage, make sure the retval shadow is 0.
     Value *Base = getShadowPtrForRetval(&I, IRBBefore);
     IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
     Instruction *NextInsn = 0;
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 5c188178200..45703113405 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -487,7 +487,7 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
 }
 
 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
-// standards. For background see C++11 standard. A slightly older, publically
+// standards. For background see C++11 standard. A slightly older, publicly
 // available draft of the standard (not entirely up-to-date, but close enough
 // for casual browsing) is available here:
 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index f8b6f15850f..8e2c362ad81 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -382,7 +382,7 @@ namespace {
     void clear();
 
     /// Conservatively merge the two RRInfo. Returns true if a partial merge has
-    /// occured, false otherwise.
+    /// occurred, false otherwise.
     bool Merge(const RRInfo &Other);
 
   };
@@ -659,7 +659,7 @@ namespace {
     /// which pass through this block. This is only valid after both the
     /// top-down and bottom-up traversals are complete.
     ///
-    /// Returns true if overflow occured. Returns false if overflow did not
+    /// Returns true if overflow occurred. Returns false if overflow did not
     /// occur.
     bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
       if (TopDownPathCount == OverflowOccurredValue ||
@@ -667,7 +667,7 @@ namespace {
         return true;
       unsigned long long Product =
         (unsigned long long)TopDownPathCount*BottomUpPathCount;
-      // Overflow occured if any of the upper bits of Product are set or if all
+      // Overflow occurred if any of the upper bits of Product are set or if all
       // the lower bits of Product are all set.
       return (Product >> 32) ||
              ((PathCount = Product) == OverflowOccurredValue);
@@ -711,7 +711,7 @@ void BBState::MergePred(const BBState &Other) {
 
   // In order to be consistent, we clear the top down pointers when by adding
   // TopDownPathCount becomes OverflowOccurredValue even though "true" overflow
-  // has not occured.
+  // has not occurred.
   if (TopDownPathCount == OverflowOccurredValue) {
     clearTopDownPointers();
     return;
@@ -755,7 +755,7 @@ void BBState::MergeSucc(const BBState &Other) {
 
   // In order to be consistent, we clear the top down pointers when by adding
   // BottomUpPathCount becomes OverflowOccurredValue even though "true" overflow
-  // has not occured.
+  // has not occurred.
   if (BottomUpPathCount == OverflowOccurredValue) {
     clearBottomUpPointers();
     return;
@@ -1808,13 +1808,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
   // pointer has multiple owners implying that we must be more conservative.
   //
   // This comes up in the context of a pointer being ``KnownSafe''. In the
-  // presense of a block being initialized, the frontend will emit the
+  // presence of a block being initialized, the frontend will emit the
   // objc_retain on the original pointer and the release on the pointer loaded
   // from the alloca. The optimizer will through the provenance analysis
   // realize that the two are related, but since we only require KnownSafe in
   // one direction, will match the inner retain on the original pointer with
   // the guard release on the original pointer. This is fixed by ensuring that
-  // in the presense of allocas we only unconditionally remove pointers if
+  // in the presence of allocas we only unconditionally remove pointers if
   // both our retain and our release are KnownSafe.
   if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
     if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b4d59fa0eaf..c89cd74c6eb 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -109,8 +109,8 @@ namespace {
     bool preliminaryScreen();
 
     /// Check if the given conditional branch is based on the comparison
-    /// beween a variable and zero, and if the variable is non-zero, the
-    /// control yeilds to the loop entry. If the branch matches the behavior,
+    /// between a variable and zero, and if the variable is non-zero, the
+    /// control yields to the loop entry. If the branch matches the behavior,
     /// the variable involved in the comparion is returned. This function will
     /// be called to see if the precondition and postcondition of the loop
     /// are in desirable form.
@@ -521,7 +521,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
   // TripCnt is exactly the number of iterations the loop has
   TripCnt = NewCount;
 
-  // If the popoulation counter's initial value is not zero, insert Add Inst.
+  // If the population counter's initial value is not zero, insert Add Inst.
   Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
   ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
   if (!InitConst || !InitConst->isZero()) {
diff --git a/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
index 1da226bfcbe..39c80f86b67 100644
--- a/llvm/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
@@ -240,7 +240,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
       BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
       CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
       CmpInst::Predicate Predicate = CI->getPredicate();
-      // Cannonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
+      // Canonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
       if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) {
         CI->setPredicate(ICmpInst::getInversePredicate(Predicate));
         BI->swapSuccessors();
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index a30dcf2fe0c..e43c9e2708d 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -62,9 +62,9 @@ static cl::opt<bool>
 SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
        cl::desc("Sink common instructions down to the end block"));
 
-static cl::opt<bool>
-HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
-                cl::desc("Hoist conditional stores if an unconditional store preceeds"));
+static cl::opt<bool> HoistCondStores(
+    "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
+    cl::desc("Hoist conditional stores if an unconditional store precedes"));
 
 STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
 STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 695ee03ea76..892c42755b4 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2371,7 +2371,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
     setDebugLocFromInst(Builder, RdxDesc.StartValue);
 
     // We need to generate a reduction vector from the incoming scalar.
-    // To do so, we need to generate the 'identity' vector and overide
+    // To do so, we need to generate the 'identity' vector and override
     // one of the elements with the incoming scalar reduction. We need
     // to do it in the vector-loop preheader.
     Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
@@ -3713,8 +3713,8 @@ void AccessAnalysis::processMemAccesses(bool UseDeferred) {
       }
 
       bool NeedDepCheck = false;
-      // Check whether there is the possiblity of dependency because of underlying
-      // objects being the same.
+      // Check whether there is the possibility of dependency because of
+      // underlying objects being the same.
       typedef SmallVector<Value*, 16> ValueVector;
       ValueVector TempObjects;
       GetUnderlyingObjects(Ptr, TempObjects, DL);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 0e2a98e6de1..80d9ffccafb 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1871,7 +1871,7 @@ private:
   StoreListMap StoreRefs;
 };
 
-/// \brief Check that the Values in the slice in VL array are still existant in
+/// \brief Check that the Values in the slice in VL array are still existent in
 /// the WeakVH array.
 /// Vectorization of part of the VL array may cause later values in the VL array
 /// to become invalid. We track when this has happened in the WeakVH array.
@@ -2516,7 +2516,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
         break;
       }
 
-      // Start over at the next instruction of a differnt type (or the end).
+      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }
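A note on the untouched context line "if (Size > 8 || (Size&(Size-1)))" in the SimplifyMemTransfer hunk above: it is the classic power-of-two bit trick, rejecting any length that is not exactly 1, 2, 4 or 8 bytes. A minimal standalone C++ sketch of the same predicate (the helper name and test values are illustrative, not LLVM's):

    #include <cassert>
    #include <cstdint>

    // (Size & (Size - 1)) is zero exactly when Size is a power of two, so
    // together with the Size <= 8 bound only 1/2/4/8-byte transfers qualify
    // for the single load+store rewrite.
    static bool fitsSingleLoadStore(uint64_t Size) {
      return Size != 0 && Size <= 8 && (Size & (Size - 1)) == 0;
    }

    int main() {
      assert(fitsSingleLoadStore(4));    // 1, 2, 4 and 8 qualify
      assert(!fitsSingleLoadStore(3));   // not a power of two
      assert(!fitsSingleLoadStore(16));  // too wide for one scalar load+store
    }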
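Similarly, the GetAllPathCountWithOverflow context quoted in the ObjCARCOpts hunks shows the widening trick that code relies on: two 32-bit path counts are multiplied in 64 bits, and any set high bits, or a low half equal to a reserved sentinel, signals overflow. A self-contained C++ sketch under the assumption that the sentinel stands in for OverflowOccurredValue:

    #include <cassert>

    // Multiply two 32-bit counts in 64 bits; report overflow if the product
    // needs more than 32 bits or collides with the reserved sentinel value.
    static const unsigned OverflowSentinel = 0xffffffff; // assumed stand-in

    static bool productOverflows(unsigned A, unsigned B, unsigned &Out) {
      unsigned long long Product = (unsigned long long)A * B;
      Out = (unsigned)Product;
      return (Product >> 32) != 0 || Out == OverflowSentinel;
    }

    int main() {
      unsigned P;
      assert(!productOverflows(1000, 1000, P) && P == 1000000);
      assert(productOverflows(1u << 20, 1u << 20, P)); // 2^40 needs > 32 bits
    }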