Diffstat (limited to 'llvm/lib/Target')
38 files changed, 52 insertions, 50 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9bf9d1918c0..ba185a436f1 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -611,7 +611,8 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
                                              int &AccessScale, int &MinOffset,
                                              int &MaxOffset) const {
   switch (MI.getOpcode()) {
-  default: llvm_unreachable("Unkown load/store kind");
+  default:
+    llvm_unreachable("Unknown load/store kind");
   case TargetOpcode::DBG_VALUE:
     AccessScale = 1;
     MinOffset = INT_MIN;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrNEON.td b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
index c673b3adc9d..3c446d5860d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
@@ -6432,7 +6432,7 @@ defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
 
-// Table lookup extention
+// Table lookup extension
 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
              string asmop, string OpS, RegisterOperand OpVPR,
              RegisterOperand VecList>
diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index ff5b23013df..a88cbb2971f 100644
--- a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -1517,7 +1517,7 @@ static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
   unsigned Q = fieldFromInstruction(Insn, 30, 1);
   unsigned S = fieldFromInstruction(Insn, 10, 3);
   unsigned lane = 0;
-  // Calculate the number of lanes by number of vectors and transfered bytes.
+  // Calculate the number of lanes by number of vectors and transferred bytes.
   // NumLanes = 16 bytes / bytes of each lane
   unsigned NumLanes = 16 / (TransferBytes / NumVecs);
   switch (NumLanes) {
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index d561db2f07e..9e827cf1035 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1407,7 +1407,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
 
 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
                                                 SDValue &OffImm) {
-  // This *must* succeed since it's used for the irreplacable ldrex and strex
+  // This *must* succeed since it's used for the irreplaceable ldrex and strex
   // instructions.
   Base = N;
   OffImm = CurDAG->getTargetConstant(0, MVT::i32);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1d2236f2c9f..b851a8ffd89 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -5987,7 +5987,7 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
   if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
     return Op;
 
-  // Aquire/Release load/store is not legal for targets without a
+  // Acquire/Release load/store is not legal for targets without a
   // dmb or equivalent available.
   return SDValue();
 }
@@ -10189,7 +10189,7 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
   case MVT::v2f64: {
     // For any little-endian targets with neon, we can support unaligned ld/st
     // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
-    // A big-endian target may also explictly support unaligned accesses
+    // A big-endian target may also explicitly support unaligned accesses
     if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
       if (Fast)
         *Fast = true;
diff --git a/llvm/lib/Target/ARM/ARMRegisterInfo.td b/llvm/lib/Target/ARM/ARMRegisterInfo.td
index d0457618ef6..7f0fe05738c 100644
--- a/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -214,7 +214,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
 }
 
 // GPRs without the PC but with APSR. Some instructions allow accessing the
-// APSR, while actually encoding PC in the register field. This is usefull
+// APSR, while actually encoding PC in the register field. This is useful
 // for assembly and disassembly only.
 def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
   let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
diff --git a/llvm/lib/Target/ARM/ARMScheduleSwift.td b/llvm/lib/Target/ARM/ARMScheduleSwift.td
index 8d7dbc24609..b03d5ff44c6 100644
--- a/llvm/lib/Target/ARM/ARMScheduleSwift.td
+++ b/llvm/lib/Target/ARM/ARMScheduleSwift.td
@@ -1721,7 +1721,7 @@ let SchedModel = SwiftModel in {
     SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
                                 SwiftWriteLM13CyNo, SwiftWriteP01OneCycle,
                                 SwiftVLDMPerm3]>,
-    // Load of a Q register (not neccessarily true). We should not be mapping to
+    // Load of a Q register (not necessarily true). We should not be mapping to
     // 4 S registers, either.
     SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo,
                                 SwiftWriteLM4CyNo, SwiftWriteLM4CyNo]>,
@@ -1858,7 +1858,7 @@ let SchedModel = SwiftModel in {
     // Assume 5 D registers.
    SchedVar<SwiftLMAddr10Pred, [SwiftWriteSTM6]>,
     SchedVar<SwiftLMAddr11Pred, [SwiftWriteSTM12]>,
-    // Asume three Q registers.
+    // Assume three Q registers.
     SchedVar<SwiftLMAddr12Pred, [SwiftWriteSTM4]>,
     SchedVar<SwiftLMAddr13Pred, [SwiftWriteSTM14]>,
     // Assume 7 D registers.
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 2f2da73162c..5d0b73a191e 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -533,7 +533,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
   // creates a sequence of shift, and, or instructions to construct values.
   // These sequences are recognized by the ISel and have zero-cost. Not so for
   // the vectorized code. Because we have support for v2i64 but not i64 those
-  // sequences look particularily beneficial to vectorize.
+  // sequences look particularly beneficial to vectorize.
   // To work around this we increase the cost of v2i64 operations to make them
   // seem less beneficial.
   if (LT.second == MVT::v2i64 &&
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index bb781ecece0..42a1cbb8c22 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -183,7 +183,8 @@ namespace ARM_ISB {
 
   inline static const char *InstSyncBOptToString(unsigned val) {
     switch (val) {
-    default: llvm_unreachable("Unkown memory operation");
+    default:
+      llvm_unreachable("Unknown memory operation");
     case RESERVED_0: return "#0x0";
     case RESERVED_1: return "#0x1";
     case RESERVED_2: return "#0x2";
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 8e224780d83..abacc1e3126 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -1035,7 +1035,7 @@ void ARMELFStreamer::emitFnStart() {
 }
 
 void ARMELFStreamer::emitFnEnd() {
-  assert(FnStart && ".fnstart must preceeds .fnend");
+  assert(FnStart && ".fnstart must precedes .fnend");
 
   // Emit unwind opcodes if there is no .handlerdata directive
   if (!ExTab && !CantUnwind)
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 97e7b85f55b..3ab796d01c4 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -285,7 +285,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
     // Update the intermediate instruction to with the kill flag.
     if (KillingInstr) {
       bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true);
-      (void)Added; // supress compiler warning
+      (void)Added; // suppress compiler warning
       assert(Added && "Must successfully update kill flag");
       removeKillInfo(I2, KilledOperand);
     }
@@ -343,7 +343,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
       // Update I1 to set the kill flag. This flag will later be picked up by
       // the new COMBINE instruction.
       bool Added = I1->addRegisterKilled(KilledOperand, TRI);
-      (void)Added; // supress compiler warning
+      (void)Added; // suppress compiler warning
       assert(Added && "Must successfully update kill flag");
     }
     DoInsertAtI1 = false;
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 137c4bf0543..fccbcb3d703 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1522,7 +1522,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
     if (PB != Latch) {
       Tmp2.clear();
      bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false);
-      (void)NotAnalyzed; // supress compiler warning
+      (void)NotAnalyzed; // suppress compiler warning
       assert (!NotAnalyzed && "Should be analyzable!");
       if (TB != Header && (Tmp2.empty() || FB != Header))
         TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL);
@@ -1534,7 +1534,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
   // Insert an unconditional branch to the header.
   TB = FB = 0;
   bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false);
-  (void)LatchNotAnalyzed; // supress compiler warning
+  (void)LatchNotAnalyzed; // suppress compiler warning
   assert (!LatchNotAnalyzed && "Should be analyzable!");
   if (!TB && !FB)
     TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL);
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index f9be3192f1f..fff51dda679 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1793,7 +1793,7 @@ bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
     return true;
 
   if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
-    // Check addressing mode and retreive non-ext equivalent instruction.
+    // Check addressing mode and retrieve non-ext equivalent instruction.
     switch (getAddrMode(MI)) {
 
     case HexagonII::Absolute :
@@ -1827,7 +1827,7 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
    return NonExtOpcode;
 
   if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
-    // Check addressing mode and retreive non-ext equivalent instruction.
+    // Check addressing mode and retrieve non-ext equivalent instruction.
     switch (getAddrMode(MI)) {
     case HexagonII::Absolute :
       return Hexagon::getBasedWithImmOffset(MI->getOpcode());
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 594ff4f4411..aae2dcd16e7 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -869,7 +869,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
     TempInst.addOperand(MCOperand::CreateReg(BaseRegNum));
     Instructions.push_back(TempInst);
     TempInst.clear();
-    // And finaly, create original instruction with low part
+    // And finally, create original instruction with low part
     // of offset and new base.
     TempInst.setOpcode(Inst.getOpcode());
     TempInst.addOperand(MCOperand::CreateReg(RegOpNum));
@@ -1247,7 +1247,7 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
       return false;
     }
     // Look for the existing symbol, we should check if
-    // we need to assigne the propper RegisterKind.
+    // we need to assigne the proper RegisterKind.
     if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
       return false;
     // Else drop to expression parsing.
diff --git a/llvm/lib/Target/Mips/MipsMSAInstrInfo.td b/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
index fbcd10fe2b8..a788d60a575 100644
--- a/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/llvm/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -3519,7 +3519,7 @@ class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
   MSAPat<(DstVT (bitconvert SrcVT:$src)),
          (COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
 
-// These are endian-independant because the element size doesnt change
+// These are endian-independent because the element size doesnt change
 def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
 def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
 def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index d8151761e05..8c27c9f409b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1258,7 +1258,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
 
     // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
     // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
-    // stored type to i16 and propogate the "real" type as the memory type.
+    // stored type to i16 and propagate the "real" type as the memory type.
    bool NeedExt = false;
     if (EltVT.getSizeInBits() < 16)
       NeedExt = true;
@@ -2074,7 +2074,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
 
   // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
   // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
-  // loaded type to i16 and propogate the "real" type as the memory type.
+  // loaded type to i16 and propagate the "real" type as the memory type.
   bool NeedTrunc = false;
   if (EltVT.getSizeInBits() < 16) {
     EltVT = MVT::i16;
@@ -2161,7 +2161,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
   // Since LDU/LDG are target nodes, we cannot rely on DAG type
   // legalization.
   // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
-  // loaded type to i16 and propogate the "real" type as the memory type.
+  // loaded type to i16 and propagate the "real" type as the memory type.
   bool NeedTrunc = false;
   if (EltVT.getSizeInBits() < 16) {
     EltVT = MVT::i16;
diff --git a/llvm/lib/Target/NVPTX/NVVMReflect.cpp b/llvm/lib/Target/NVPTX/NVVMReflect.cpp
index bc67cb14ff7..5da8c2ed092 100644
--- a/llvm/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/llvm/lib/Target/NVPTX/NVVMReflect.cpp
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This pass replaces occurences of __nvvm_reflect("string") with an
+// This pass replaces occurrences of __nvvm_reflect("string") with an
 // integer based on -nvvm-reflect-list string=<int> option given to this pass.
 // If an undefined string value is seen in a call to __nvvm_reflect("string"),
 // a default value of 0 will be used.
@@ -84,7 +84,7 @@ NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden,
 
 char NVVMReflect::ID = 0;
 INITIALIZE_PASS(NVVMReflect, "nvvm-reflect",
-                "Replace occurences of __nvvm_reflect() calls with 0/1", false,
+                "Replace occurrences of __nvvm_reflect() calls with 0/1", false,
                 false)
 
 static cl::list<std::string>
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index c5d96127716..1c8f928887c 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7205,7 +7205,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
       // you might suspect (sizeof(vector) bytes after the last requested
      // load), but rather sizeof(vector) - 1 bytes after the last
       // requested vector. The point of this is to avoid a page fault if the
-      // base address happend to be aligned. This works because if the base
+      // base address happened to be aligned. This works because if the base
       // address is aligned, then adding less than a full vector length will
       // cause the last vector in the sequence to be (re)loaded. Otherwise,
       // the next vector will be fetched as you might suspect was necessary.
diff --git a/llvm/lib/Target/R600/AMDGPU.h b/llvm/lib/Target/R600/AMDGPU.h
index 8eb1b695d76..3e1848b5f8e 100644
--- a/llvm/lib/Target/R600/AMDGPU.h
+++ b/llvm/lib/Target/R600/AMDGPU.h
@@ -68,7 +68,7 @@ namespace ShaderType {
 /// various memory regions on the hardware. On the CPU
 /// all of the address spaces point to the same memory,
 /// however on the GPU, each address space points to
-/// a seperate piece of memory that is unique from other
+/// a separate piece of memory that is unique from other
 /// memory locations.
 namespace AMDGPUAS {
 enum AddressSpaces {
diff --git a/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp b/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
index 4ad7eba36e2..69ced3c8f6c 100644
--- a/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/llvm/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -224,7 +224,7 @@ protected:
   /// Compute the reversed DFS post order of Blocks
   void orderBlocks(MachineFunction *MF);
 
-  // Function originaly from CFGStructTraits
+  // Function originally from CFGStructTraits
   void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
                       DebugLoc DL = DebugLoc());
   MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
diff --git a/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
index 5af83209a0d..fc4ed35c189 100644
--- a/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/llvm/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -53,7 +53,7 @@ public:
 
   ~SIMCCodeEmitter() { }
 
-  /// \breif Encode the instruction and write it to the OS.
+  /// \brief Encode the instruction and write it to the OS.
   virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                                  SmallVectorImpl<MCFixup> &Fixups) const;
 
diff --git a/llvm/lib/Target/R600/R600ClauseMergePass.cpp b/llvm/lib/Target/R600/R600ClauseMergePass.cpp
index 33d2ca32577..3d9015c9dfe 100644
--- a/llvm/lib/Target/R600/R600ClauseMergePass.cpp
+++ b/llvm/lib/Target/R600/R600ClauseMergePass.cpp
@@ -50,7 +50,7 @@ private:
 
   /// IfCvt pass can generate "disabled" ALU clause marker that need to be
   /// removed and their content affected to the previous alu clause.
-  /// This function parse instructions after CFAlu untill it find a disabled
+  /// This function parse instructions after CFAlu until it find a disabled
   /// CFAlu and merge the content, or an enabled CFAlu.
   void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
 
diff --git a/llvm/lib/Target/R600/R600Defines.h b/llvm/lib/Target/R600/R600Defines.h
index 1781f2aee16..f2f28fe469b 100644
--- a/llvm/lib/Target/R600/R600Defines.h
+++ b/llvm/lib/Target/R600/R600Defines.h
@@ -52,7 +52,7 @@ namespace R600_InstFlag {
 
 #define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
 
-/// \brief Defines for extracting register infomation from register encoding
+/// \brief Defines for extracting register information from register encoding
 #define HW_REG_MASK 0x1ff
 #define HW_CHAN_SHIFT 9
 
diff --git a/llvm/lib/Target/R600/R600ISelLowering.cpp b/llvm/lib/Target/R600/R600ISelLowering.cpp
index 03feabe23e6..b9b242a6e89 100644
--- a/llvm/lib/Target/R600/R600ISelLowering.cpp
+++ b/llvm/lib/Target/R600/R600ISelLowering.cpp
@@ -990,7 +990,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
       DAG.getCondCode(ISD::SETNE));
 }
 
-/// LLVM generates byte-addresed pointers. For indirect addressing, we need to
+/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
 /// convert these pointers to a register index. Each register holds
 /// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
 /// \p StackWidth, which tells us how many of the 4 sub-registrers will be used
@@ -1389,8 +1389,8 @@ SDValue R600TargetLowering::LowerFormalArguments(
                                     DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
                                     MachinePointerInfo(UndefValue::get(PtrTy)),
                                     MemVT, false, false, 4);
-    // 4 is the prefered alignment for
-    // the CONSTANT memory space.
+    // 4 is the preferred alignment for
+    // the CONSTANT memory space.
     InVals.push_back(Arg);
   }
   return Chain;
diff --git a/llvm/lib/Target/R600/R600ISelLowering.h b/llvm/lib/Target/R600/R600ISelLowering.h
index c10257eeada..3cca93306b5 100644
--- a/llvm/lib/Target/R600/R600ISelLowering.h
+++ b/llvm/lib/Target/R600/R600ISelLowering.h
@@ -43,7 +43,7 @@ private:
   unsigned Gen;
   /// Each OpenCL kernel has nine implicit parameters that are stored in the
   /// first nine dwords of a Vertex Buffer. These implicit parameters are
-  /// lowered to load instructions which retreive the values from the Vertex
+  /// lowered to load instructions which retrieve the values from the Vertex
   /// Buffer.
   SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, SDLoc DL,
                                  unsigned DwordOffset) const;
diff --git a/llvm/lib/Target/R600/R600InstrInfo.h b/llvm/lib/Target/R600/R600InstrInfo.h
index 13d981094ed..d5ff4de7646 100644
--- a/llvm/lib/Target/R600/R600InstrInfo.h
+++ b/llvm/lib/Target/R600/R600InstrInfo.h
@@ -138,7 +138,7 @@ namespace llvm {
   /// Same but using const index set instead of MI set.
   bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
 
-  /// \breif Vector instructions are instructions that must fill all
+  /// \brief Vector instructions are instructions that must fill all
   /// instruction slots within an instruction group.
   bool isVector(const MachineInstr &MI) const;
 
diff --git a/llvm/lib/Target/R600/R600Instructions.td b/llvm/lib/Target/R600/R600Instructions.td
index 4441fa6495e..f7b7488d69c 100644
--- a/llvm/lib/Target/R600/R600Instructions.td
+++ b/llvm/lib/Target/R600/R600Instructions.td
@@ -2263,7 +2263,7 @@ let Inst{63-32} = Word1;
 //===--------------------------------------------------------------------===//
 //===---------------------------------------------------------------------===//
 // Custom Inserter for Branches and returns, this eventually will be a
-// seperate pass
+// separate pass
 //===---------------------------------------------------------------------===//
 let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
   def BRANCH : ILFormat<(outs), (ins brtarget:$target),
diff --git a/llvm/lib/Target/R600/R600Packetizer.cpp b/llvm/lib/Target/R600/R600Packetizer.cpp
index cd9b6eae6ed..9dd4978fb5b 100644
--- a/llvm/lib/Target/R600/R600Packetizer.cpp
+++ b/llvm/lib/Target/R600/R600Packetizer.cpp
@@ -66,7 +66,7 @@ private:
   }
 
   /// \returns register to PV chan mapping for bundle/single instructions that
-  /// immediatly precedes I.
+  /// immediately precedes I.
   DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I)
       const {
     DenseMap<unsigned, unsigned> Result;
diff --git a/llvm/lib/Target/R600/SIISelLowering.cpp b/llvm/lib/Target/R600/SIISelLowering.cpp
index a66f289e9ab..36dd3cf7f0b 100644
--- a/llvm/lib/Target/R600/SIISelLowering.cpp
+++ b/llvm/lib/Target/R600/SIISelLowering.cpp
@@ -1083,7 +1083,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
   else
     return;
 
-  // Nothing todo if they fit naturaly
+  // Nothing to do if they fit naturally
   if (fitsRegClass(DAG, Operand, RegClass))
     return;
 
diff --git a/llvm/lib/Target/R600/SIRegisterInfo.cpp b/llvm/lib/Target/R600/SIRegisterInfo.cpp
index ed0bbaffae6..a784fa42647 100644
--- a/llvm/lib/Target/R600/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/R600/SIRegisterInfo.cpp
@@ -122,7 +122,7 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
     return RC;
 
   // If this register has a sub-register, we can safely assume it is a 32-bit
-  // register, becuase all of SI's sub-registers are 32-bit.
+  // register, because all of SI's sub-registers are 32-bit.
   if (isSGPRClass(RC)) {
     return &AMDGPU::SGPR_32RegClass;
   } else {
diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/llvm/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
index f3caeaa0c23..2e2d4bac797 100644
--- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
+++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
@@ -35,7 +35,7 @@ enum TOF {
   // Assembler: %hi(addr) or %lm(addr)
   MO_HI,
 
-  // Extract bits 43-22 of an adress. Only for sethi.
+  // Extract bits 43-22 of an address. Only for sethi.
   // Assembler: %h44(addr)
   MO_H44,
 
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 0ca145e3a61..19f57ab63ea 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1076,7 +1076,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
     return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
 
-  // The remaing cases are 1, 2, 0/1/3 and 0/2/3.  All these are
+  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All these are
   // can be done by inverting the low CC bit and applying one of the
   // sign-based extractions above.
   if (CCMask == (CCValid & SystemZ::CCMASK_1))
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 90941d3616e..55192f9d4e4 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
   MachineFunction &MF = *MBB->getParent();
 
   // Get two load or store instructions.  Use the original instruction for one
-  // of them (arbitarily the second here) and create a clone for the other.
+  // of them (arbitrarily the second here) and create a clone for the other.
   MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
   MBB->insert(MI, EarlierMI);
 
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index e1af0932c23..033f0d8ee66 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -759,7 +759,7 @@ let Defs = [CC], Uses = [CC] in {
 // Subtraction
 //===----------------------------------------------------------------------===//
 
-// Plain substraction.  Although immediate forms exist, we use the
+// Plain subtraction.  Although immediate forms exist, we use the
 // add-immediate instruction instead.
 let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
   // Subtraction of a register.
diff --git a/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index c4c86ada3fa..ac3b39df547 100644
--- a/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -563,7 +563,7 @@ struct InternalInstruction {
   uint8_t prefixPresent[0x100];
   /* contains the location (for use with the reader) of the prefix byte */
   uint64_t prefixLocations[0x100];
-  /* The value of the vector extention prefix(EVEX/VEX/XOP), if present */
+  /* The value of the vector extension prefix(EVEX/VEX/XOP), if present */
   uint8_t vectorExtensionPrefix[4];
   /* The type of the vector extension prefix */
   VectorExtensionType vectorExtensionType;
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 9fdc58a3116..d653c871b29 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1512,7 +1512,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
   // garbage. Indeed, only the less significant bit is supposed to be accurate.
   // If we read more than the lsb, we may see non-zero values whereas lsb
   // is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
-  // This is acheived by performing TEST against 1.
+  // This is achieved by performing TEST against 1.
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
     .addReg(Op0Reg).addImm(1);
   unsigned ResultReg = createResultReg(RC);
diff --git a/llvm/lib/Target/X86/X86Schedule.td b/llvm/lib/Target/X86/X86Schedule.td
index 0556437b839..ac28d1e5436 100644
--- a/llvm/lib/Target/X86/X86Schedule.td
+++ b/llvm/lib/Target/X86/X86Schedule.td
@@ -577,7 +577,7 @@ def IIC_NOP : InstrItinClass;
 //===----------------------------------------------------------------------===//
 // Processor instruction itineraries.
 
-// IssueWidth is analagous to the number of decode units. Core and its
+// IssueWidth is analogous to the number of decode units. Core and its
 // descendents, including Nehalem and SandyBridge have 4 decoders.
 // Resources beyond the decoder operate on micro-ops and are bufferred
 // so adjacent micro-ops don't directly compete.
diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 784bd66d2e0..3a93d2ac2e7 100644
--- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -154,8 +154,8 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
         return false;
       }
     }
-  } while (CE->hasNUsesOrMore(1)); // We need to check becasue a recursive
-  // sibbling may have used 'CE' when createReplacementInstr was called.
+  } while (CE->hasNUsesOrMore(1)); // We need to check because a recursive
+  // sibling may have used 'CE' when createReplacementInstr was called.
   CE->destroyConstant();
   return true;
 }