Diffstat (limited to 'llvm/lib')
19 files changed, 21 insertions, 21 deletions
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index f82235d0c26..8274374621c 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1702,7 +1702,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
   unsigned NumElim = 0;
   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
-  // Process phis from wide to narrow. Mapping wide phis to the their truncation
+  // Process phis from wide to narrow. Map wide phis to their truncation
   // so narrow phis can reuse them.
   for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
        PEnd = Phis.end(); PIter != PEnd; ++PIter) {
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index fa17108b2a8..530ab46db03 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -464,7 +464,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   Value *ShouldStore =
       Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
 
-  // If the the cmpxchg doesn't actually need any ordering when it fails, we can
+  // If the cmpxchg doesn't actually need any ordering when it fails, we can
   // jump straight past that fence instruction (if it exists).
   Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 122e23d4a5c..d7644a6676c 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -61,10 +61,10 @@ class ImplicitNullChecks : public MachineFunctionPass {
     // The block the check resides in.
     MachineBasicBlock *CheckBlock;
 
-    // The block branched to if the the pointer is non-null.
+    // The block branched to if the pointer is non-null.
     MachineBasicBlock *NotNullSucc;
 
-    // The block branched to if the the pointer is null.
+    // The block branched to if the pointer is null.
     MachineBasicBlock *NullSucc;
 
     NullCheck()
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 50a9c669ea2..a48e54caf3f 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -2150,7 +2150,7 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                      SchedBoundary &CurrZone,
                                      SchedBoundary *OtherZone) {
-  // Apply preemptive heuristics based on the the total latency and resources
+  // Apply preemptive heuristics based on the total latency and resources
   // inside and outside this zone. Potential stalls should be considered before
   // following this policy.
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
index 7934a4d9da2..d04d93f11e4 100644
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -2296,7 +2296,7 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
       // value for this block but the value is a nullptr. This means that
       // we have previously analyzed the block and determined that it did
       // not contain any cleanup code. Based on the earlier analysis, we
-      // know the the block must end in either an unconditional branch, a
+      // know the block must end in either an unconditional branch, a
       // resume or a conditional branch that is predicated on a comparison
       // with a selector. Either the resume or the selector dispatch
       // would terminate the search for cleanup code, so the unconditional
diff --git a/llvm/lib/Support/Locale.cpp b/llvm/lib/Support/Locale.cpp
index 35ddf7f11bf..d5cb72b5db3 100644
--- a/llvm/lib/Support/Locale.cpp
+++ b/llvm/lib/Support/Locale.cpp
@@ -15,7 +15,7 @@ int columnWidth(StringRef Text) {
 
 bool isPrint(int UCS) {
 #if LLVM_ON_WIN32
-  // Restrict characters that we'll try to print to the the lower part of ASCII
+  // Restrict characters that we'll try to print to the lower part of ASCII
   // except for the control characters (0x20 - 0x7E). In general one can not
   // reliably output code points U+0080 and higher using narrow character C/C++
   // output functions in Windows, because the meaning of the upper 128 codes is
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1fa266a87b9..0165ef9c49c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1424,7 +1424,7 @@ static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
 
-  // The the values aren't constants, this isn't the pattern we're looking for.
+  // The values aren't constants, this isn't the pattern we're looking for.
   if (!CFVal || !CTVal)
     return Op;
@@ -3420,7 +3420,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
     EltVT = MVT::i64;
     VecVT = MVT::v2i64;
 
-    // We want to materialize a mask with the the high bit set, but the AdvSIMD
+    // We want to materialize a mask with the high bit set, but the AdvSIMD
     // immediate moves cannot materialize that in a single instruction for
     // 64-bit elements. Instead, materialize zero and then negate it.
     EltMask = 0;
diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
index eb05ed915dd..82bc949927c 100644
--- a/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
+++ b/llvm/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -52,7 +52,7 @@ getVariant(uint64_t LLVMDisassembler_VariantKind) {
 /// returns zero and isBranch is Success then a symbol look up for
 /// Address + Value is done and if a symbol is found an MCExpr is created with
 /// that, else an MCExpr with Address + Value is created. If GetOpInfo()
-/// returns zero and isBranch is Fail then the the Opcode of the MCInst is
+/// returns zero and isBranch is Fail then the Opcode of the MCInst is
 /// tested and for ADRP an other instructions that help to load of pointers
 /// a symbol look up is done to see it is returns a specific reference type
 /// to add to the comment stream. This function returns Success if it adds
diff --git a/llvm/lib/Target/AMDGPU/AMDKernelCodeT.h b/llvm/lib/Target/AMDGPU/AMDKernelCodeT.h
index 4d3041ff3db..eaffb854793 100644
--- a/llvm/lib/Target/AMDGPU/AMDKernelCodeT.h
+++ b/llvm/lib/Target/AMDGPU/AMDKernelCodeT.h
@@ -132,7 +132,7 @@ enum amd_code_property_mask_t {
   /// private memory do not exceed this size. For example, if the
   /// element size is 4 (32-bits or dword) and a 64-bit value must be
   /// loaded, the finalizer will generate two 32-bit loads. This
-  /// ensures that the interleaving will get the the work-item
+  /// ensures that the interleaving will get the work-item
   /// specific dword for both halves of the 64-bit value. If it just
   /// did a 64-bit load then it would get one dword which belonged to
   /// its own work-item, but the second dword would belong to the
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 3394573b062..47bc17823b3 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1806,7 +1806,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
     }
 
     MachineBasicBlock &MBB = *MI->getParent();
 
-    // Extract the the ptr from the resource descriptor.
+    // Extract the ptr from the resource descriptor.
     // SRsrcPtrLo = srsrc:sub0
     unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 8bcbb1159f8..35387d3e6cf 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -5841,7 +5841,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
   // do and don't have a cc_out optional-def operand. With some spot-checks
   // of the operand list, we can figure out which variant we're trying to
   // parse and adjust accordingly before actually matching. We shouldn't ever
-  // try to remove a cc_out operand that was explicitly set on the the
+  // try to remove a cc_out operand that was explicitly set on the
   // mnemonic, of course (CarrySetting == true). Reason number #317 the
   // table driven matcher doesn't fit well with the ARM instruction set.
   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index bef77f5c24e..b88578309f0 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -1065,7 +1065,7 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
     // it's just a plain immediate expression, previously those evaluated to
     // the lower 16 bits of the expression regardless of whether
     // we have a movt or a movw, but that led to misleadingly results.
-    // This is now disallowed in the the AsmParser in validateInstruction()
+    // This is disallowed in the AsmParser in validateInstruction()
    // so this should never happen.
    llvm_unreachable("expression without :upper16: or :lower16:");
  }
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 85b00068252..ef197f40c27 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -271,7 +271,7 @@ namespace X86II {
     /// register DI/EDI/ESI.
     RawFrmDst = 9,
 
-    /// RawFrmSrc - This form is for instructions that use the the source index
+    /// RawFrmSrc - This form is for instructions that use the source index
     /// register SI/ESI/ERI with a possible segment override, and also the
     /// destination index register DI/ESI/RDI.
     RawFrmDstSrc = 10,
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index b39c5aba30b..5eb4faeedff 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -44,7 +44,7 @@ class FixupLEAPass : public MachineFunctionPass {
     /// \brief Given a machine register, look for the instruction
    /// which writes it in the current basic block. If found,
    /// try to replace it with an equivalent LEA instruction.
-    /// If replacement succeeds, then also process the the newly created
+    /// If replacement succeeds, then also process the newly created
    /// instruction.
    void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
                      MachineFunction::iterator MFI);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 685ca2c3161..1af4c31505e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5446,7 +5446,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
 ///
 /// Otherwise, the first horizontal binop dag node takes as input the lower
 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
-/// dag node takes the the upper 128-bit of V0 and the upper 128-bit of V1.
+/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
 /// Example:
 ///   HADD V0_LO, V1_LO
 ///   HADD V0_HI, V1_HI
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index 528f40eaad1..baca76ba3f2 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -212,7 +212,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
       break;
 
     // Now we know that we have not seen either the store or the release. If I
-    // is the the release, mark that we saw the release and continue.
+    // is the release, mark that we saw the release and continue.
     Instruction *Inst = &*I;
     if (Inst == Release) {
       SawRelease = true;
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 1ebc6a5b1aa..1130d228acb 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -759,7 +759,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
 
   if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
     // If we're branching on a conditional, LVI might be able to determine
-    // it's value at the the branch instruction. We only handle comparisons
+    // it's value at the branch instruction. We only handle comparisons
     // against a constant at this time.
     // TODO: This should be extended to handle switches as well.
     BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
diff --git a/llvm/lib/Transforms/Scalar/SampleProfile.cpp b/llvm/lib/Transforms/Scalar/SampleProfile.cpp
index 3480cd49912..c8dfa54a4aa 100644
--- a/llvm/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/llvm/lib/Transforms/Scalar/SampleProfile.cpp
@@ -282,7 +282,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
 /// \brief Find equivalence classes for the given block.
 ///
 /// This finds all the blocks that are guaranteed to execute the same
-/// number of times as \p BB1. To do this, it traverses all the the
+/// number of times as \p BB1. To do this, it traverses all the
 /// descendants of \p BB1 in the dominator or post-dominator tree.
 ///
 /// A block BB2 will be in the same equivalence class as \p BB1 if
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 60ac271bceb..71aaa8808b0 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -4058,7 +4058,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
     return false;
 
   // Figure out the corresponding result for each case value and phi node in the
-  // common destination, as well as the the min and max case values.
+  // common destination, as well as the min and max case values.
   assert(SI->case_begin() != SI->case_end());
   SwitchInst::CaseIt CI = SI->case_begin();
   ConstantInt *MinCaseVal = CI.getCaseValue();