author | Robin Morisset <morisset@google.com> | 2014-08-15 22:17:28 +0000 |
---|---|---|
committer | Robin Morisset <morisset@google.com> | 2014-08-15 22:17:28 +0000 |
commit | d18cda620c2fd8f0f2c38271388de17608eeda28 (patch) | |
tree | 38249ee1905a8c36ae103b1627564207d4f134da /llvm/lib | |
parent | 55048cf1410619bb394ba0b17c9e4c84b8d1699b (diff) | |
download | bcm5719-llvm-d18cda620c2fd8f0f2c38271388de17608eeda28.tar.gz bcm5719-llvm-d18cda620c2fd8f0f2c38271388de17608eeda28.zip |
Fix typos in comments
llvm-svn: 215777
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp | 2
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp | 6
-rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 2
-rw-r--r-- | llvm/lib/Target/ARM/ARMSubtarget.h | 2
-rw-r--r-- | llvm/lib/Target/Hexagon/HexagonSubtarget.h | 2
5 files changed, 7 insertions, 7 deletions
diff --git a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
index 0c37c45b3af..5c40069fd66 100644
--- a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
@@ -105,7 +105,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
                                   ? Monotonic
                                   : LI->getOrdering();
 
-  // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
+  // The only 64-bit load guaranteed to be single-copy atomic by the ARM is
   // an ldrexd (A3.5.3).
   IRBuilder<> Builder(LI);
   Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
diff --git a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index ab2c4b73e67..287989ffaf0 100644
--- a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -19,7 +19,7 @@
 // a = add nsw i64 f, 3
 // e = getelementptr ..., i64 a
 //
-// This is legal to do so if the computations are markers with either nsw or nuw
+// This is legal to do if the computations are marked with either nsw or nuw
 // markers.
 // Moreover, the current heuristic is simple: it does not create new sext
 // operations, i.e., it gives up when a sext would have forked (e.g., if
@@ -223,7 +223,7 @@ AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
 }
 
 // Input:
-// - SExtInsts contains all the sext instructions that are use direclty in
+// - SExtInsts contains all the sext instructions that are used directly in
 //   GetElementPtrInst, i.e., access to memory.
 // Algorithm:
 // - For each sext operation in SExtInsts:
@@ -353,7 +353,7 @@ AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
 
     // If the use is already of the right type, connect its uses to its argument
     // and delete it.
-    // This can happen for an Instruction which all uses are sign extended.
+    // This can happen for an Instruction all uses of which are sign extended.
     if (!ToRemove.count(SExt) &&
         SExt->getType() == SExt->getOperand(0)->getType()) {
       DEBUG(dbgs() << "Sign extension is useless, attach its use to "
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index fa38332ea49..0bb304a94a8 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -743,7 +743,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     // On v8, we have particularly efficient implementations of atomic fences
     // if they can be combined with nearby atomic loads and stores.
     if (!Subtarget->hasV8Ops()) {
-      // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
+      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
       setInsertFencesForAtomic(true);
     }
   } else {
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 065d5d61927..1263e8b7121 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -439,7 +439,7 @@ public:
   // enableAtomicExpandLoadLinked - True if we need to expand our atomics.
   bool enableAtomicExpandLoadLinked() const override;
 
-  /// getInstrItins - Return the instruction itineraies based on subtarget
+  /// getInstrItins - Return the instruction itineraries based on subtarget
   /// selection.
   const InstrItineraryData *getInstrItineraryData() const {
     return &InstrItins;
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 26ff23b853a..e7cbf1b62e7 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -56,7 +56,7 @@ public:
   HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
                    const TargetMachine &TM);
 
-  /// getInstrItins - Return the instruction itineraies based on subtarget
+  /// getInstrItins - Return the instruction itineraries based on subtarget
   /// selection.
   const InstrItineraryData *getInstrItineraryData() const {
     return &InstrItins;