author     Hiroshi Inoue <inouehrs@jp.ibm.com>    2018-01-29 05:17:03 +0000
committer  Hiroshi Inoue <inouehrs@jp.ibm.com>    2018-01-29 05:17:03 +0000
commit     c8e924581676b4386e4ab6a28e121d3a5b13c025 (patch)
tree       94e44a040ceff8ea1d2c0ecd1fce8cb8c2e7b22d /llvm/lib
parent     1019f8a98e33c56e98592733b2e7e92723b4e404 (diff)
[NFC] fix trivial typos in comments and documents
"to to" -> "to"
llvm-svn: 323628
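
All eleven fixes below apply the same rule: collapse an immediately doubled word. As an illustration only (a hypothetical helper, not part of this commit), such doublings can be found mechanically with a short scan like the following sketch:

```cpp
// Hypothetical doubled-word finder (not part of this commit): reads text on
// stdin and reports lines containing an immediately repeated word such as
// "to to" or "the the". Case-insensitive, so "The the" is also flagged.
#include <iostream>
#include <regex>
#include <string>

int main() {
  // \b(\w+)\s+\1\b matches a word followed by whitespace and the same word.
  const std::regex DoubledWord(R"(\b(\w+)\s+\1\b)", std::regex::icase);
  std::string Line;
  unsigned LineNo = 0;
  while (std::getline(std::cin, Line)) {
    ++LineNo;
    if (std::regex_search(Line, DoubledWord))
      std::cout << LineNo << ": " << Line << '\n';
  }
  return 0;
}
```

Any such scan still needs manual review: some doublings ("that that", "had had") are legitimate English, which is presumably why fixes like these are committed by hand as NFC patches.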
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp         | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp        | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp      | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp           | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInsertWaits.cpp              | 2 +-
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFastISel.cpp               | 2 +-
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp                   | 2 +-
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp               | 2 +-
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNHoist.cpp               | 2 +-
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp     | 2 +-
11 files changed, 11 insertions, 11 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 1295b83fc6c..7df9ee87bff 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7267,7 +7267,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
       continue;

     // If this is a memory input, and if the operand is not indirect, do what we
-    // need to to provide an address for the memory input.
+    // need to provide an address for the memory input.
     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
         !OpInfo.isIndirect) {
       assert((OpInfo.isMultipleAlternative ||
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 0ec2e8ebd34..582d4720401 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1985,7 +1985,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

-  // Extend back to to 64-bits.
+  // Extend back to 64-bits.
   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index e61c8032e67..65020d7bde5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -79,7 +79,7 @@ static cl::opt<bool> EnableLoadStoreVectorizer(
   cl::init(true),
   cl::Hidden);

-// Option to to control global loads scalarization
+// Option to control global loads scalarization
 static cl::opt<bool> ScalarizeGlobal(
   "amdgpu-scalarize-global-loads",
   cl::desc("Enable global load scalarization"),
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
index a0e4f7ff24c..9c6a307ea09 100644
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -452,7 +452,7 @@ unsigned GCNIterativeScheduler::tryMaximizeOccupancy(unsigned TargetOcc) {
   // TODO: assert Regions are sorted descending by pressure
   const auto &ST = MF.getSubtarget<SISubtarget>();
   const auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
-  DEBUG(dbgs() << "Trying to to improve occupancy, target = " << TargetOcc
+  DEBUG(dbgs() << "Trying to improve occupancy, target = " << TargetOcc
                << ", current = " << Occ << '\n');

   auto NewOcc = TargetOcc;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 6bbe5979316..0328686c0a6 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1837,7 +1837,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {

   if (!MFI->isEntryFunction()) {
     // Wait for any outstanding memory operations that the input registers may
-    // depend on. We can't track them and it's better to to the wait after the
+    // depend on. We can't track them and it's better to the wait after the
     // costly call sequence.

     // TODO: Could insert earlier and schedule more liberally with operations
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
index b074b95c2d3..e89f0f855ed 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -687,7 +687,7 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {

   if (!MFI->isEntryFunction()) {
     // Wait for any outstanding memory operations that the input registers may
-    // depend on. We can't track them and it's better to to the wait after the
+    // depend on. We can't track them and it's better to the wait after the
     // costly call sequence.

     // TODO: Could insert earlier and schedule more liberally with operations
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 402e29cdff7..cdfbfaf60ad 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -219,7 +219,7 @@ static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
   // result consists of 4 bits, indicating lt, eq, gt and un (unordered),
   // only one of which will be set. The result is generated by fcmpu
   // instruction. However, bc instruction only inspects one of the first 3
-  // bits, so when un is set, bc instruction may jump to to an undesired
+  // bits, so when un is set, bc instruction may jump to an undesired
   // place.
   //
   // More specifically, if we expect an unordered comparison and un is set, we
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index faeda19f4b6..7e88304ae19 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2675,7 +2675,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
             (FrameReg == X86::EBP && VT == MVT::i32)) &&
            "Invalid Frame Register!");

-    // Always make a copy of the frame register to to a vreg first, so that we
+    // Always make a copy of the frame register to a vreg first, so that we
     // never directly reference the frame register (the TwoAddressInstruction-
     // Pass doesn't like that).
     unsigned SrcReg = createResultReg(RC);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a0ed2934fc4..f9964c2d16c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -13917,7 +13917,7 @@ static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
     return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
   }

-  // Try to lower to to vshuf64x2/vshuf32x4.
+  // Try to lower to vshuf64x2/vshuf32x4.
   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
   unsigned PermMask = 0;
   // Insure elements came from the same Op.
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 026fab5dbd3..214855d5630 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -570,7 +570,7 @@ private:
   // The ides is inspired from:
   // "Partial Redundancy Elimination in SSA Form"
   // ROBERT KENNEDY, SUN CHAN, SHIN-MING LIU, RAYMOND LO, PENG TU and FRED CHOW
-  // They use similar idea in the forward graph to to find fully redundant and
+  // They use similar idea in the forward graph to find fully redundant and
   // partially redundant expressions, here it is used in the inverse graph to
   // find fully anticipable instructions at merge point (post-dominator in
   // the inverse CFG).
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 21551f0a082..0451f774378 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1675,7 +1675,7 @@ void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
   }

   // Step 3: Note that the population count is exactly the trip count of the
-  // loop in question, which enable us to to convert the loop from noncountable
+  // loop in question, which enable us to convert the loop from noncountable
   // loop into a countable one. The benefit is twofold:
   //
   //  - If the loop only counts population, the entire loop becomes dead after