Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp    |  2
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp                      |  4
-rw-r--r--  llvm/lib/CodeGen/GlobalMerge.cpp                         |  2
-rw-r--r--  llvm/lib/CodeGen/PeepholeOptimizer.cpp                   |  4
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp                   |  6
-rw-r--r--  llvm/lib/CodeGen/RegisterPressure.cpp                    |  2
-rw-r--r--  llvm/lib/CodeGen/ScheduleDAGInstrs.cpp                   |  8
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp  |  6
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp           |  2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp    |  8
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp     | 25
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp        |  2
-rw-r--r--  llvm/lib/CodeGen/WinEHPrepare.cpp                        |  2
13 files changed, 37 insertions, 36 deletions
diff --git a/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
index 6610ac78f8c..8b1bea8049e 100644
--- a/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -253,7 +253,7 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
   }
   FilenameSegmentLengths[LastSegmentEnd] = FI.Instrs.size() - LastSegmentEnd;
 
-  // Emit a line table subsection, requred to do PC-to-file:line lookup.
+  // Emit a line table subsection, required to do PC-to-file:line lookup.
   Asm->OutStreamer->AddComment("Line table subsection for " + Twine(FuncName));
   Asm->EmitInt32(COFF::DEBUG_LINE_TABLE_SUBSECTION);
   MCSymbol *LineTableBegin = Asm->MMI->getContext().createTempSymbol(),
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 6e7f525736d..3768807702b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1089,7 +1089,7 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
 // ScalarizeMaskedLoad() translates masked load intrinsic, like
 // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
 //                               <16 x i1> %mask, <16 x i32> %passthru)
-// to a chain of basic blocks, whith loading element one-by-one if
+// to a chain of basic blocks, with loading element one-by-one if
 // the appropriate mask bit is set
 //
 // %1 = bitcast i8* %addr to i32*
@@ -4135,7 +4135,7 @@ class VectorPromoteHelper {
   /// \brief Generate a constant vector with \p Val with the same
   /// number of elements as the transition.
   /// \p UseSplat defines whether or not \p Val should be replicated
-  /// accross the whole vector.
+  /// across the whole vector.
   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
   /// otherwise we generate a vector with as many undef as possible:
   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp
index 04c97f6796c..d477143d62d 100644
--- a/llvm/lib/CodeGen/GlobalMerge.cpp
+++ b/llvm/lib/CodeGen/GlobalMerge.cpp
@@ -211,7 +211,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
 
   // If we want to be smarter, look at all uses of each global, to try to
   // discover all sets of globals used together, and how many times each of
-  // these sets occured.
+  // these sets occurred.
   //
   // Keep this reasonably efficient, by having an append-only list of all sets
   // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index ef213e4347f..21d603b19ef 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -327,7 +327,7 @@ namespace {
 
     /// \brief Following the use-def chain, get the next available source
     /// for the tracked value.
-    /// \return A ValueTrackerResult containing the a set of registers
+    /// \return A ValueTrackerResult containing a set of registers
    /// and sub registers with tracked values. A ValueTrackerResult with
     /// an empty set of registers means no source was found.
     ValueTrackerResult getNextSource();
@@ -1567,7 +1567,7 @@ ValueTrackerResult ValueTracker::getNextSource() {
     Res.setInst(Def);
 
     // If we can still move up in the use-def chain, move to the next
-    // defintion.
+    // definition.
     if (!TargetRegisterInfo::isPhysicalRegister(Reg) && OneRegSrc) {
       Def = MRI.getVRegDef(Reg);
       DefIdx = MRI.def_begin(Reg).getOperandNo();
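Aside (not part of the patch): the ScalarizeMaskedLoad comment fixed above describes expanding a masked vector load into one conditional load per lane. A minimal scalar model of that semantics, sketched in C++ with hypothetical names:

    #include <array>
    #include <cstddef>

    // Per-lane model of llvm.masked.load: load an element only where the mask
    // bit is set, and keep the passthru value elsewhere. CodeGenPrepare gets
    // the same effect by emitting one conditional basic block per lane.
    template <typename T, std::size_t N>
    std::array<T, N> maskedLoadModel(const T *Addr,
                                     const std::array<bool, N> &Mask,
                                     std::array<T, N> Passthru) {
      for (std::size_t I = 0; I != N; ++I)
        if (Mask[I])
          Passthru[I] = Addr[I];
      return Passthru;
    }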
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index c911b9b47ea..292e836c495 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -1822,7 +1822,7 @@ public:
 
   /// Removes subranges starting at copies that get removed. This sometimes
   /// happens when undefined subranges are copied around. These ranges contain
-  /// no usefull information and can be removed.
+  /// no useful information and can be removed.
   void pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask);
 
   /// Erase any machine instructions that have been coalesced away.
@@ -2410,7 +2410,7 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask)
         continue;
       }
       // If a subrange ends at the copy, then a value was copied but only
-      // partially used later. Shrink the subregister range apropriately.
+      // partially used later. Shrink the subregister range appropriately.
       if (Q.valueIn() != nullptr && Q.valueOut() == nullptr) {
         DEBUG(dbgs() << "\t\tDead uses at sublane "
                      << format("%04X", S.LaneMask) << " at " << Def << "\n");
@@ -2799,7 +2799,7 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
       !isTerminalReg(DstReg, Copy, MRI))
     return false;
 
-  // DstReg is a terminal node. Check if it inteferes with any other
+  // DstReg is a terminal node. Check if it interferes with any other
   // copy involving SrcReg.
   const MachineBasicBlock *OrigBB = Copy.getParent();
   const LiveInterval &DstLI = LIS->getInterval(DstReg);
diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp
index c3786e552a1..3456bb6f9f8 100644
--- a/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -966,7 +966,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
 /// This is expensive for an on-the-fly query because it calls
 /// bumpDownwardPressure to recompute the pressure sets based on current
 /// liveness. We don't yet have a fast version of downward pressure tracking
-/// analagous to getUpwardPressureDelta.
+/// analogous to getUpwardPressureDelta.
 void RegPressureTracker::
 getMaxDownwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
                             ArrayRef<PressureChange> CriticalPSets,
diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 9c07f5be8b6..7ce2e0d3beb 100644
--- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -508,7 +508,7 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
   return false;
 }
 
-/// This returns true if the two MIs need a chain edge betwee them.
+/// This returns true if the two MIs need a chain edge between them.
 /// If these are not even memory operations, we still may need
 /// chain deps between them. The question really is - could
 /// these two MIs be reordered during scheduling from memory dependency
@@ -670,7 +670,7 @@ static inline void addChainDependency(AliasAnalysis *AA,
                                       unsigned TrueMemOrderLatency = 0,
                                       bool isNormalMemory = false) {
   // If this is a false dependency,
-  // do not add the edge, but rememeber the rejected node.
+  // do not add the edge, but remember the rejected node.
   if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
     SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
     Dep.setLatency(TrueMemOrderLatency);
@@ -685,7 +685,7 @@ static inline void addChainDependency(AliasAnalysis *AA,
   }
 }
 
-/// Create an SUnit for each real instruction, numbered in top-down toplological
+/// Create an SUnit for each real instruction, numbered in top-down topological
 /// order. The instruction order A < B, implies that no edge exists from B to A.
 ///
 /// Map each real instruction to its SUnit.
@@ -766,7 +766,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   // We build scheduling units by walking a block's instruction list from bottom
   // to top.
 
-  // Remember where a generic side-effecting instruction is as we procede.
+  // Remember where a generic side-effecting instruction is as we proceed.
   SUnit *BarrierChain = nullptr, *AliasChain = nullptr;
 
   // Memory references to specific known memory locations are tracked
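Aside (not part of the patch): MIsNeedChainEdge, named in the hunks above, decides whether two memory instructions must keep their relative order. A simplified model of that rule, using hypothetical types rather than the LLVM API:

    // Two memory operations need a chain edge when they may touch the same
    // location and at least one of them writes; read-read pairs may reorder.
    struct MemOp {
      const void *Loc; // nullptr models an unknown location (may alias anything)
      bool IsStore;
    };

    bool needChainEdgeModel(const MemOp &A, const MemOp &B) {
      bool MayAlias = !A.Loc || !B.Loc || A.Loc == B.Loc;
      return MayAlias && (A.IsStore || B.IsStore);
    }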
diff --git a/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index 6303422b9ae..622e06f0da2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -49,7 +49,7 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
   TII = STI.getInstrInfo();
   ResourcesModel.reset(TII->CreateTargetScheduleState(STI));
   // This hard requirement could be relaxed, but for now
-  // do not let it procede.
+  // do not let it proceed.
   assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
 
   unsigned NumRC = TRI->getNumRegClasses();
@@ -269,12 +269,12 @@ bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
   }
 
   // Now see if there are no other dependencies
-  // to instructions alredy in the packet.
+  // to instructions already in the packet.
   for (unsigned i = 0, e = Packet.size(); i != e; ++i)
     for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
          E = Packet[i]->Succs.end(); I != E; ++I) {
       // Since we do not add pseudos to packets, might as well
-      // ignor order deps.
+      // ignore order deps.
       if (I->isCtrl())
         continue;
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 8a382270b82..61eb4cbbfd1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2677,7 +2677,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
     const int rIndex = Items - 1 -
       cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
 
-    // If the sign portion ends in our element the substraction gives correct
+    // If the sign portion ends in our element the subtraction gives correct
     // result. Otherwise it gives either negative or > bitwidth result
     return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
   }
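Aside (not part of the patch): the clamp in the ComputeNumSignBits hunk above can be checked with concrete numbers. With BitWidth = 32 and KnownSign = 40 sign bits in the packed source, the top element (rIndex = 0) yields min(40, 32) = 32, i.e. the element is entirely sign bits; the next element (rIndex = 1) yields 40 - 32 = 8; and a sign-bit run that ends before the element would go negative and clamp to 0. The same expression as a standalone helper:

    #include <algorithm>

    // The subtraction is only meaningful when the sign-bit run reaches into
    // the extracted element, so the result is forced into [0, BitWidth].
    int signBitsOfElement(int KnownSign, int rIndex, int BitWidth) {
      return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
    }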
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 99a74cdb8e6..e31ba326d09 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3069,7 +3069,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
   SDLoc sdl = getCurSDLoc();
 
-  // llvm.masked.store.*(Src0, Ptr, alignemt, Mask)
+  // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
   Value  *PtrOperand = I.getArgOperand(1);
   SDValue Ptr = getValue(PtrOperand);
   SDValue Src0 = getValue(I.getArgOperand(0));
@@ -3094,13 +3094,13 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
 }
 
 // Gather/scatter receive a vector of pointers.
-// This vector of pointers may be represented as a base pointer + vector of
-// indices, it depends on GEP and instruction preceeding GEP
+// This vector of pointers may be represented as a base pointer + vector of
+// indices, it depends on GEP and instruction preceding GEP
 // that calculates indices
 static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index,
                            SelectionDAGBuilder* SDB) {
-  assert (Ptr->getType()->isVectorTy() && "Uexpected pointer type");
+  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
   GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
   if (!Gep || Gep->getNumOperands() > 2)
     return false;
 
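Aside (not part of the patch): getUniformBase, touched above, tries to rewrite a vector of pointers as one scalar base plus a vector of indices, which is the shape a hardware gather wants. A scalar sketch of that access pattern, with hypothetical names:

    #include <array>
    #include <cstddef>

    // When every lane's pointer is Base + Index[i], a gather reduces to
    // indexed loads off a single base pointer.
    template <typename T, std::size_t N>
    std::array<T, N> gatherModel(const T *Base,
                                 const std::array<std::size_t, N> &Index) {
      std::array<T, N> Result{};
      for (std::size_t I = 0; I != N; ++I)
        Result[I] = Base[Index[I]];
      return Result;
    }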
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index c62428d308e..7eb8728e1fc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -105,8 +105,8 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
       return Builder.DAG.getFrameIndex(FI, ValueType);
     }
     // Note: We deliberately choose to advance this only on the failing path.
-    // Doing so on the suceeding path involes a bit of complexity that caused a
-    // minor bug previously. Unless performance shows this matters, please
+    // Doing so on the succeeding path involves a bit of complexity that caused
+    // a minor bug previously. Unless performance shows this matters, please
     // keep this code as simple as possible.
     NextSlotToAllocate++;
   }
@@ -119,7 +119,7 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
 static Optional<int> findPreviousSpillSlot(const Value *Val,
                                            SelectionDAGBuilder &Builder,
                                            int LookUpDepth) {
-  // Can not look any futher - give up now
+  // Can not look any further - give up now
   if (LookUpDepth <= 0)
     return Optional<int>();
 
@@ -196,7 +196,7 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
 /// Try to find existing copies of the incoming values in stack slots used for
 /// statepoint spilling. If we can find a spill slot for the incoming value,
 /// mark that slot as allocated, and reuse the same slot for this safepoint.
-/// This helps to avoid series of loads and stores that only serve to resuffle
+/// This helps to avoid series of loads and stores that only serve to reshuffle
 /// values on the stack between calls.
 static void reservePreviousStackSlotForValue(const Value *IncomingValue,
                                              SelectionDAGBuilder &Builder) {
@@ -255,7 +255,7 @@ static void removeDuplicatesGCPtrs(SmallVectorImpl<const Value *> &Bases,
                                    SmallVectorImpl<const Value *> &Relocs,
                                    SelectionDAGBuilder &Builder) {
 
-  // This is horribly ineffecient, but I don't care right now
+  // This is horribly inefficient, but I don't care right now
   SmallSet<SDValue, 64> Seen;
   SmallVector<const Value *, 64> NewBases, NewPtrs, NewRelocs;
 
@@ -347,11 +347,11 @@ lowerCallFromStatepoint(ImmutableStatepoint ISP, MachineBasicBlock *LandingPad,
   if (CS.isInvoke()) {
     // Result value will be used in different basic block for invokes
     // so we need to export it now. But statepoint call has a different type
-    // than the actuall call. It means that standart exporting mechanism will
+    // than the actual call. It means that standard exporting mechanism will
     // create register of the wrong type. So instead we need to create
     // register with correct type and save value into it manually.
     // TODO: To eliminate this problem we can remove gc.result intrinsics
-    // completelly and make statepoint call to return a tuple.
+    // completely and make statepoint call to return a tuple.
     unsigned Reg = Builder.FuncInfo.CreateRegs(ISP.getActualReturnType());
     RegsForValue RFV(
         *Builder.DAG.getContext(), Builder.DAG.getTargetLoweringInfo(),
@@ -597,16 +597,17 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
       SpillMap[V] = cast<FrameIndexSDNode>(Loc)->getIndex();
     } else {
       // Record value as visited, but not spilled. This is case for allocas
-      // and constants. For this values we can avoid emiting spill load while
+      // and constants. For this values we can avoid emitting spill load while
       // visiting corresponding gc_relocate.
       // Actually we do not need to record them in this map at all.
-      // We do this only to check that we are not relocating any unvisited value.
+      // We do this only to check that we are not relocating any unvisited
+      // value.
       SpillMap[V] = None;
 
       // Default llvm mechanisms for exporting values which are used in
       // different basic blocks does not work for gc relocates.
       // Note that it would be incorrect to teach llvm that all relocates are
-      // uses of the corresponging values so that it would automatically
+      // uses of the corresponding values so that it would automatically
       // export them. Relocates of the spilled values does not use original
       // value.
       if (StatepointSite.getCallSite().isInvoke())
@@ -806,7 +807,7 @@ void SelectionDAGBuilder::LowerStatepoint(
   // Replace original call
   DAG.ReplaceAllUsesWith(CallNode, SinkNode); // This may update Root
 
-  // Remove originall call node
+  // Remove original call node
   DAG.DeleteNode(CallNode);
 
   // DON'T set the root - under the assumption that it's already set past the
@@ -878,7 +879,7 @@ void SelectionDAGBuilder::visitGCRelocate(const CallInst &CI) {
 
   // Be conservative: flush all pending loads
   // TODO: Probably we can be less restrictive on this,
-  // it may allow more scheduling opprtunities
+  // it may allow more scheduling opportunities.
   SDValue Chain = getRoot();
 
   SDValue SpillLoad =
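Aside (not part of the patch): findPreviousSpillSlot and reservePreviousStackSlotForValue, edited above, reuse a value's earlier spill slot across consecutive safepoints so values are not reshuffled on the stack. A minimal sketch of that caching idea, with hypothetical names rather than the LLVM API:

    #include <map>

    // Map each spilled GC value to the frame index it last occupied; a hit
    // lets the next statepoint reuse the slot instead of emitting a fresh
    // store/load shuffle. Returns -1 when no previous slot is known.
    struct SpillSlotCache {
      std::map<const void *, int> SlotForValue;

      int previousSlot(const void *V) const {
        auto It = SlotForValue.find(V);
        return It == SlotForValue.end() ? -1 : It->second;
      }

      void record(const void *V, int FrameIndex) { SlotForValue[V] = FrameIndex; }
    };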
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index b4383cca1b9..3b205d0760e 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -705,7 +705,7 @@ MCSymbol *TargetLoweringObjectFileMachO::getCFIPersonalitySymbol(
 const MCExpr *TargetLoweringObjectFileMachO::getIndirectSymViaGOTPCRel(
     const MCSymbol *Sym, const MCValue &MV, int64_t Offset,
     MachineModuleInfo *MMI, MCStreamer &Streamer) const {
-  // Although MachO 32-bit targets do not explictly have a GOTPCREL relocation
+  // Although MachO 32-bit targets do not explicitly have a GOTPCREL relocation
   // as 64-bit do, we replace the GOT equivalent by accessing the final symbol
   // through a non_lazy_ptr stub instead. One advantage is that it allows the
   // computation of deltas to final external symbols. Example:
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
index 0d26ed333ca..ff52421fb52 100644
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -2146,7 +2146,7 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad,
       // Under some circumstances optimized IR will flow unconditionally into a
       // handler block without checking the selector. This can only happen if
       // the landing pad has a catch-all handler and the handler for the
-      // preceeding catch clause is identical to the catch-call handler
+      // preceding catch clause is identical to the catch-call handler
       // (typically an empty catch). In this case, the handler must be shared
       // by all remaining clauses.
       if (isa<ConstantPointerNull>(