Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInline.cpp                   | 2
-rw-r--r--  llvm/lib/Target/X86/X86SelectionDAGInfo.cpp               | 2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp          | 2
4 files changed, 4 insertions, 4 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
index a5f9a85f50d..945c9acd379 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
@@ -44,7 +44,7 @@ ArgAllocaCost("amdgpu-inline-arg-alloca-cost", cl::Hidden, cl::init(2200),
cl::desc("Cost of alloca argument"));
// If the amount of scratch memory to eliminate exceeds our ability to allocate
-// it into registers we gain nothing by agressively inlining functions for that
+// it into registers we gain nothing by aggressively inlining functions for that
// heuristic.
static cl::opt<unsigned>
ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden, cl::init(256),
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index a71e5d39595..008a9ec2ba3 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -250,7 +250,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
if (Repeats.BytesLeft() > 0 &&
DAG.getMachineFunction().getFunction().optForMinSize()) {
- // When agressively optimizing for size, avoid generating the code to
+ // When aggressively optimizing for size, avoid generating the code to
// handle BytesLeft.
Repeats.AVT = MVT::i8;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 3e72b1da779..a3962a04b50 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2512,7 +2512,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// Shrink the condition operand if the new type is smaller than the old type.
// But do not shrink to a non-standard type, because backend can't generate
// good code for that yet.
- // TODO: We can make it agressive again after fixing PR39569.
+ // TODO: We can make it aggressive again after fixing PR39569.
if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
shouldChangeType(Known.getBitWidth(), NewWidth)) {
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
diff --git a/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp b/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp
index bac6ef99f03..b9e8e3424cc 100644
--- a/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp
+++ b/llvm/lib/Transforms/Scalar/CallSiteSplitting.cpp
@@ -197,7 +197,7 @@ static bool canSplitCallSite(CallSite CS, TargetTransformInfo &TTI) {
isa<IndirectBrInst>(Preds[1]->getTerminator()))
return false;
- // BasicBlock::canSplitPredecessors is more agressive, so checking for
+ // BasicBlock::canSplitPredecessors is more aggressive, so checking for
// BasicBlock::isEHPad as well.
if (!CallSiteBB->canSplitPredecessors() || CallSiteBB->isEHPad())
return false;