-rw-r--r--  llvm/include/llvm/IR/IntrinsicsARM.td                     2
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp           2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp   6
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp                   2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.h                        2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.h                2
6 files changed, 8 insertions, 8 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index a02d7072d72..22282874409 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -21,7 +21,7 @@ def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
-// Saturating Arithmentic
+// Saturating Arithmetic
def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
diff --git a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
index 0c37c45b3af..5c40069fd66 100644
--- a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
@@ -105,7 +105,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
? Monotonic
: LI->getOrdering();
- // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
+ // The only 64-bit load guaranteed to be single-copy atomic by the ARM is
// an ldrexd (A3.5.3).
IRBuilder<> Builder(LI);
Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
diff --git a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index ab2c4b73e67..287989ffaf0 100644
--- a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -19,7 +19,7 @@
// a = add nsw i64 f, 3
// e = getelementptr ..., i64 a
//
-// This is legal to do so if the computations are markers with either nsw or nuw
+// This is legal to do if the computations are marked with either nsw or nuw
// markers.
// Moreover, the current heuristic is simple: it does not create new sext
// operations, i.e., it gives up when a sext would have forked (e.g., if
@@ -223,7 +223,7 @@ AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
}
// Input:
-// - SExtInsts contains all the sext instructions that are use direclty in
+// - SExtInsts contains all the sext instructions that are used directly in
// GetElementPtrInst, i.e., access to memory.
// Algorithm:
// - For each sext operation in SExtInsts:
@@ -353,7 +353,7 @@ AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
// If the use is already of the right type, connect its uses to its argument
// and delete it.
- // This can happen for an Instruction which all uses are sign extended.
+ // This can happen for an Instruction all uses of which are sign extended.
if (!ToRemove.count(SExt) &&
SExt->getType() == SExt->getOperand(0)->getType()) {
DEBUG(dbgs() << "Sign extension is useless, attach its use to "
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index fa38332ea49..0bb304a94a8 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -743,7 +743,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// On v8, we have particularly efficient implementations of atomic fences
// if they can be combined with nearby atomic loads and stores.
if (!Subtarget->hasV8Ops()) {
- // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
+ // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
setInsertFencesForAtomic(true);
}
} else {
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 065d5d61927..1263e8b7121 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -439,7 +439,7 @@ public:
// enableAtomicExpandLoadLinked - True if we need to expand our atomics.
bool enableAtomicExpandLoadLinked() const override;
- /// getInstrItins - Return the instruction itineraies based on subtarget
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 26ff23b853a..e7cbf1b62e7 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -56,7 +56,7 @@ public:
HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
const TargetMachine &TM);
- /// getInstrItins - Return the instruction itineraies based on subtarget
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;