author     Benjamin Kramer <benny.kra@googlemail.com>   2015-08-08 18:27:36 +0000
committer  Benjamin Kramer <benny.kra@googlemail.com>   2015-08-08 18:27:36 +0000
commit     df005cbe198160b541d473ebe18aee3572d1cf16 (patch)
tree       e158a70cfc8b09d08c8911ab5378e5b50929c011 /llvm/lib/Target
parent     9bb8ef03a2bf081e26400a3b21e9ff476a22c88f (diff)
Fix some comment typos.
llvm-svn: 244402
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--   llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp                4
-rw-r--r--   llvm/lib/Target/AArch64/AArch64FastISel.cpp                    2
-rw-r--r--   llvm/lib/Target/AArch64/AArch64FrameLowering.cpp               4
-rw-r--r--   llvm/lib/Target/AArch64/AArch64ISelLowering.cpp                4
-rw-r--r--   llvm/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp                 4
-rw-r--r--   llvm/lib/Target/AMDGPU/SIISelLowering.cpp                     10
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInsertWaits.cpp                       2
-rw-r--r--   llvm/lib/Target/AMDGPU/SIInstrInfo.cpp                         2
-rw-r--r--   llvm/lib/Target/ARM/ARMConstantIslandPass.cpp                  2
-rw-r--r--   llvm/lib/Target/ARM/ARMISelLowering.cpp                        4
-rw-r--r--   llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp                2
-rw-r--r--   llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp               6
-rw-r--r--   llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp           2
-rw-r--r--   llvm/lib/Target/Mips/MipsISelLowering.cpp                      2
-rw-r--r--   llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp       2
-rw-r--r--   llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp                     2
-rw-r--r--   llvm/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp                 2
-rw-r--r--   llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp                    2
-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp                       12
19 files changed, 35 insertions, 35 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp
index 398269433ec..6c214f6b411 100644
--- a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp
@@ -216,8 +216,8 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) {
++Idx;
}
- DEBUG(dbgs() << "Scan complete, "<< Sequences.size()
- << " occurences of pattern found.\n");
+ DEBUG(dbgs() << "Scan complete, " << Sequences.size()
+ << " occurrences of pattern found.\n");
// Then update the basic block, inserting nops between the detected sequences.
for (auto &MI : Sequences) {
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index ab24fe4454e..eaa4173e33f 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -969,7 +969,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
// Cannot encode an offset register and an immediate offset in the same
// instruction. Fold the immediate offset into the load/store instruction and
- // emit an additonal add to take care of the offset register.
+ // emit an additional add to take care of the offset register.
if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
RegisterOffsetNeedsLowering = true;
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index f43fb23b937..5b8db3d157f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -72,9 +72,9 @@
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
-// * A base pointer is definitly needed when there are both VLAs and local
+// * A base pointer is definitely needed when there are both VLAs and local
// variables with more-than-default alignment requirements.
-// * A frame pointer is definitly needed when there are local variables with
+// * A frame pointer is definitely needed when there are local variables with
// more-than-default alignment requirements.
//
// In some cases when a base pointer is not strictly needed, it is generated
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fd8ca209c73..3eecaaaabcf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1466,7 +1466,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmn w0, #1
// Fundamental, we're relying on the property that (zext LHS) == (zext RHS)
// if and only if (sext LHS) == (sext RHS). The checks are in place to
- // ensure both the LHS and RHS are truely zero extended and to make sure the
+ // ensure both the LHS and RHS are truly zero extended and to make sure the
// transformation is profitable.
if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
@@ -8385,7 +8385,7 @@ static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) {
unsigned Alignment = std::min(OrigAlignment, EltOffset);
// Create scalar stores. This is at least as good as the code sequence for a
- // split unaligned store wich is a dup.s, ext.b, and two stores.
+ // split unaligned store which is a dup.s, ext.b, and two stores.
// Most of the time the three stores should be replaced by store pair
// instructions (stp).
SDLoc DL(St);
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
index 0c54446b0fb..7fed3afe843 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
@@ -42,7 +42,7 @@
/// ENDIF
/// %use
///
-/// Adding this use will make the def live thoughout the IF branch, which is
+/// Adding this use will make the def live throughout the IF branch, which is
/// what we want.
#include "AMDGPU.h"
@@ -138,7 +138,7 @@ bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
if (MBB.succ_size() < 2)
continue;
- // We have structured control flow, so number of succesors should be two.
+ // We have structured control flow, so number of successors should be two.
assert(MBB.succ_size() == 2);
MachineBasicBlock *SuccA = *MBB.succ_begin();
MachineBasicBlock *SuccB = *(++MBB.succ_begin());
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3d4fbb9fcaa..b90ba886bdd 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -548,7 +548,7 @@ SDValue SITargetLowering::LowerFormalArguments(
assert((PSInputNum <= 15) && "Too many PS inputs!");
if (!Arg.Used) {
- // We can savely skip PS inputs
+ // We can safely skip PS inputs
Skipped.set(i);
++PSInputNum;
continue;
@@ -565,7 +565,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// We REALLY want the ORIGINAL number of vertex elements here, e.g. a
// three or five element vertex only needs three or five registers,
- // NOT four or eigth.
+ // NOT four or eight.
Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
unsigned NumElements = ParamType->getVectorNumElements();
@@ -2248,9 +2248,9 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
}
/// \brief Return a resource descriptor with the 'Add TID' bit enabled
-/// The TID (Thread ID) is multipled by the stride value (bits [61:48]
-/// of the resource descriptor) to create an offset, which is added to the
-/// resource ponter.
+/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
+/// of the resource descriptor) to create an offset, which is added to
+/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
SDLoc DL,
SDValue Ptr,
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
index 90a37f17468..df76b457af3 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -261,7 +261,7 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
if (MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
AMDGPUSubtarget::VOLCANIC_ISLANDS) {
- // Any occurence of consecutive VMEM or SMEM instructions forms a VMEM
+ // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
// or SMEM clause, respectively.
//
// The temporary workaround is to break the clauses with S_NOP.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 68c59a1f6ca..bfd765aa5fe 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1088,7 +1088,7 @@ bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
// TODO: Should we check the address space from the MachineMemOperand? That
// would allow us to distinguish objects we know don't alias based on the
- // underlying addres space, even if it was lowered to a different one,
+ // underlying address space, even if it was lowered to a different one,
// e.g. private accesses lowered to use MUBUF instructions on a scratch
// buffer.
if (isDS(Opc0)) {
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index f3a6027107c..b6a2f7fa0d5 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -2058,7 +2058,7 @@ bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
/// \brief Returns whether CPEMI is the first instruction in the block
/// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
/// we can switch the first register to PC and usually remove the address
-/// calculation that preceeded it.
+/// calculation that preceded it.
static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
MachineFunction::iterator MBB = JTMI->getParent();
MachineFunction *MF = MBB->getParent();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index ea0dd6b2a1c..6b7fea400d4 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -609,7 +609,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::ADDC);
if (Subtarget->isFPOnlySP()) {
- // When targetting a floating-point unit with only single-precision
+ // When targeting a floating-point unit with only single-precision
// operations, f64 is legal for the few double-precision instructions which
// are present However, no double-precision operations other than moves,
// loads and stores are provided by the hardware.
@@ -11597,7 +11597,7 @@ bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
return false;
// Floating point values and vector values map to the same register file.
- // Therefore, althought we could do a store extract of a vector type, this is
+ // Therefore, although we could do a store extract of a vector type, this is
// better to leave at float as we have more freedom in the addressing mode for
// those.
if (VectorTy->isFPOrFPVectorTy())
diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 6905c4f6d12..d9675b5173d 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -250,7 +250,7 @@ Register HexagonGenPredicate::getPredRegFor(const Register &Reg) {
unsigned NewPR = MRI->createVirtualRegister(PredRC);
// For convertible instructions, do not modify them, so that they can
- // be coverted later. Generate a copy from Reg to NewPR.
+ // be converted later. Generate a copy from Reg to NewPR.
if (isConvertibleToPredForm(DefI)) {
MachineBasicBlock::iterator DefIt = DefI;
BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR)
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 53b6bf617e8..e328880c9db 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -727,9 +727,9 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
// Phis that may feed into the loop.
LoopFeederMap LoopFeederPhi;
- // Check if the inital value may be zero and can be decremented in the first
+ // Check if the initial value may be zero and can be decremented in the first
// iteration. If the value is zero, the endloop instruction will not decrement
- // the loop counter, so we shoudn't generate a hardware loop in this case.
+ // the loop counter, so we shouldn't generate a hardware loop in this case.
if (loopCountMayWrapOrUnderFlow(Start, End, Loop->getLoopPreheader(), Loop,
LoopFeederPhi))
return nullptr;
@@ -1440,7 +1440,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow(
if (Comparison::isSigned(Cmp))
return false;
- // Check if there is a comparison of the inital value. If the initial value
+ // Check if there is a comparison of the initial value. If the initial value
// is greater than or not equal to another value, then assume this is a
// range check.
if ((Cmp & Comparison::G) || Cmp == Comparison::NE)
diff --git a/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp b/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp
index 08981b9d49a..b442fc03b25 100644
--- a/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp
@@ -1,4 +1,4 @@
-//===-- MSP430MachineFucntionInfo.cpp - MSP430 machine function info ------===//
+//===-- MSP430MachineFunctionInfo.cpp - MSP430 machine function info ------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index b3f85fd4e61..5f46620e077 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3023,7 +3023,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// We ought to be able to use LocVT directly but O32 sets it to i32
// when allocating floating point values to integer registers.
// This shouldn't influence how we load the value into registers unless
- // we are targetting softfloat.
+ // we are targeting softfloat.
if (VA.getValVT().isFloatingPoint() && !Subtarget.useSoftFloat())
LocVT = VA.getValVT();
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp b/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
index 69a229e32f4..781900a2e79 100644
--- a/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
@@ -98,7 +98,7 @@ private:
/// This reordering exposes to optimizeMemoryInstruction more
/// optimization opportunities on loads and stores.
///
- /// If this function succesfully hoists an eliminable addrspacecast or V is
+ /// If this function successfully hoists an eliminable addrspacecast or V is
/// already such an addrspacecast, it returns the transformed value (which is
/// guaranteed to be an addrspacecast); otherwise, it returns nullptr.
Value *hoistAddrSpaceCastFrom(Value *V, int Depth = 0);
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
index 93d0025d8f5..624052e9b98 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
@@ -81,7 +81,7 @@ bool NVPTXLowerAlloca::runOnBasicBlock(BasicBlock &BB) {
// Check Load, Store, GEP, and BitCast Uses on alloca and make them
// use the converted generic address, in order to expose non-generic
// addrspacecast to NVPTXFavorNonGenericAddrSpace. For other types
- // of instructions this is unecessary and may introduce redudant
+ // of instructions this is unnecessary and may introduce redundant
// address cast.
const auto &AllocaUse = *UI++;
auto LI = dyn_cast<LoadInst>(AllocaUse.getUser());
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
index daf44482934..4cd96f0ce4e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
@@ -138,7 +138,7 @@ INITIALIZE_PASS(NVPTXLowerKernelArgs, "nvptx-lower-kernel-args",
//
// The above code allocates some space in the stack and copies the incoming
// struct from param space to local space.
-// Then replace all occurences of %d by %temp.
+// Then replace all occurrences of %d by %temp.
// =============================================================================
void NVPTXLowerKernelArgs::handleByValParam(Argument *Arg) {
Function *Func = Arg->getParent();
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 01a3acb742e..bf3072f783e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -1028,7 +1028,7 @@ class BitPermutationSelector {
BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
- DEBUG(dbgs() << "\tcombining final bit group with inital one\n");
+ DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
BitGroups.erase(BitGroups.begin());
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 482d2a5ecaa..5b0161df4bd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -15308,11 +15308,11 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
/// \brief Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
-/// The mask is comming as MVT::i8 and it should be truncated
+/// The mask is coming as MVT::i8 and it should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
-/// "X86select" instead of "vselect". We just can't create the "vselect" node for
-/// a scalar instruction.
+/// "X86select" instead of "vselect". We just can't create the "vselect" node
+/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget *Subtarget,
@@ -15454,7 +15454,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDValue Src0 = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// There are 2 kinds of intrinsics in this group:
- // (1) With supress-all-exceptions (sae) or rounding mode- 6 operands
+ // (1) With suppress-all-exceptions (sae) or rounding mode- 6 operands
// (2) With rounding mode and sae - 7 operands.
if (Op.getNumOperands() == 6) {
SDValue Sae = Op.getOperand(5);
@@ -18069,7 +18069,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// lowered to just a load without a fence. A mfence flushes the store buffer,
// making the optimization clearly correct.
// FIXME: it is required if isAtLeastRelease(Order) but it is not clear
- // otherwise, we might be able to be more agressive on relaxed idempotent
+ // otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
if (SynchScope == SingleThread)
@@ -25690,7 +25690,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
}
// Check if we can bypass extracting and re-inserting an element of an input
- // vector. Essentialy:
+ // vector. Essentially:
// (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&