Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.cpp  4
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantIslandPass.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMConstantPoolValue.h  2
-rw-r--r--  llvm/lib/Target/ARM/ARMFrameLowering.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp  8
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp  10
-rw-r--r--  llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMMachineFunctionInfo.h  2
-rw-r--r--  llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp  8
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetTransformInfo.h  2
-rw-r--r--  llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp  22
-rw-r--r--  llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp  10
-rw-r--r--  llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp  6
-rw-r--r--  llvm/lib/Target/ARM/MLxExpansionPass.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/Thumb1FrameLowering.cpp  2
-rw-r--r--  llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h  2
-rw-r--r--  llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp  6
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h  2
-rw-r--r--  llvm/lib/Target/Mips/MipsAsmPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/Mips/MipsConstantIslandPass.cpp  12
-rw-r--r--  llvm/lib/Target/Mips/MipsSubtarget.h  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h  2
-rw-r--r--  llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h  4
-rw-r--r--  llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp  4
-rw-r--r--  llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp  22
-rw-r--r--  llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPC.h  4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchSelector.cpp  16
-rw-r--r--  llvm/lib/Target/PowerPC/PPCEarlyReturn.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFrameLowering.cpp  6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp  4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.h  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp  4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMCInstLower.cpp  6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMIPeephole.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h  4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp  2
-rw-r--r--  llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp  8
-rw-r--r--  llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp  8
-rw-r--r--  llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp  6
-rw-r--r--  llvm/lib/Target/Sparc/Sparc.h  2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.h  4
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.cpp  2
-rw-r--r--  llvm/lib/Target/Sparc/SparcTargetMachine.cpp  4
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp  6
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h  2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp  4
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp  2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZMachineScheduler.h  4
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp  8
-rw-r--r--  llvm/lib/Target/Target.cpp  2
-rw-r--r--  llvm/lib/Target/TargetLoweringObjectFile.cpp  4
-rw-r--r--  llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp  6
-rw-r--r--  llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp  2
-rw-r--r--  llvm/lib/Target/X86/X86CallingConv.h  2
-rw-r--r--  llvm/lib/Target/X86/X86CmovConversion.cpp  2
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp  4
-rw-r--r--  llvm/lib/Target/X86/X86FloatingPoint.cpp  6
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp  10
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h  4
-rw-r--r--  llvm/lib/Target/X86/X86InstrFoldTables.cpp  2
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.h  2
-rw-r--r--  llvm/lib/Target/XCore/XCoreAsmPrinter.cpp  6
-rw-r--r--  llvm/lib/Target/XCore/XCoreInstrInfo.cpp  36
-rw-r--r--  llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h  6
-rw-r--r--  llvm/lib/Target/XCore/XCoreRegisterInfo.cpp  4
-rw-r--r--  llvm/lib/Target/XCore/XCoreRegisterInfo.h  2
-rw-r--r--  llvm/lib/Target/XCore/XCoreSubtarget.h  2
82 files changed, 195 insertions(+), 195 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5e59fa570ed..032d53d1962 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5142,7 +5142,7 @@ AArch64InstrInfo::getMachineOutlinerMBBFlags(MachineBasicBlock &MBB) const {
MBB.rend(),
[&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
- if (!LRU.available(AArch64::LR))
+ if (!LRU.available(AArch64::LR))
Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
return Flags;
@@ -5168,14 +5168,14 @@ AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
// ahead and skip over them.
if (MI.isKill())
return outliner::InstrType::Invisible;
-
+
// Is this a terminator for a basic block?
if (MI.isTerminator()) {
// Is this the end of a function?
if (MI.getParent()->succ_empty())
return outliner::InstrType::Legal;
-
+
// It's not, so don't outline it.
return outliner::InstrType::Illegal;
}
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 798340f8fed..e42214d1569 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -146,7 +146,7 @@ public:
Optional<bool> hasRedZone() const { return HasRedZone; }
void setHasRedZone(bool s) { HasRedZone = s; }
-
+
int getVarArgsStackIndex() const { return VarArgsStackIndex; }
void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index d75fef7b017..96e751e8697 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -577,7 +577,7 @@ int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
unsigned NumVectorInstToHideOverhead = 10;
int MaxMergeDistance = 64;
- if (Ty->isVectorTy() && SE &&
+ if (Ty->isVectorTy() && SE &&
!BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
return NumVectorInstToHideOverhead;
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 19d483ef97e..b227eaed8d6 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -117,7 +117,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// globals from all functions in PromotedGlobals.
for (auto *GV : AFI->getGlobalsPromotedToConstantPool())
PromotedGlobals.insert(GV);
-
+
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
if (F.hasFnAttribute(Attribute::OptimizeNone))
@@ -991,7 +991,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
if (Subtarget->isThumb1Only())
EmitAlignment(2);
-
+
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
OutStreamer->EmitLabel(JTISymbol);
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 43e8b7d66c6..5342e6e2cd1 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -584,7 +584,7 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// don't know for sure yet whether we'll need that, so we guess based
// on whether there are any local variables that would trigger it.
unsigned StackAlign = TFI->getStackAlignment();
- if (TFI->hasFP(MF) &&
+ if (TFI->hasFP(MF) &&
!((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
return false;
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index de08eb8c698..2c4738d3cb7 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -2128,7 +2128,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
unsigned DeadSize = 0;
bool CanDeleteLEA = false;
bool BaseRegKill = false;
-
+
unsigned IdxReg = ~0U;
bool IdxRegKill = true;
if (isThumb2) {
diff --git a/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 5139a18f926..55194ed9453 100644
--- a/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -113,7 +113,7 @@ public:
bool isLSDA() const { return Kind == ARMCP::CPLSDA; }
bool isMachineBasicBlock() const{ return Kind == ARMCP::CPMachineBasicBlock; }
bool isPromotedGlobal() const{ return Kind == ARMCP::CPPromotedGlobal; }
-
+
int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) override;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index af983ce2606..a8c75702d7b 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -372,7 +372,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc dl;
-
+
unsigned FramePtr = RegInfo->getFrameRegister(MF);
// Determine the sizes of each callee-save spill areas and record which frame
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 081d4ff033b..9592dd53c34 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2539,7 +2539,7 @@ void ARMDAGToDAGISel::SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI) {
return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
}
};
-
+
if (Range->second == 0) {
// 1. Mask includes the LSB -> Simply shift the top N bits off
NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
@@ -2633,7 +2633,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
MachineMemOperand::MOLoad, 4, 4);
cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp+1);
-
+
ReplaceNode(N, ResNode);
return;
}
@@ -2920,7 +2920,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
assert(N3.getOpcode() == ISD::Register);
unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
-
+
if (InFlag.getOpcode() == ARMISD::CMPZ) {
bool SwitchEQNEToPLMI;
SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
@@ -3023,7 +3023,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
}
-
+
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 47222a66f79..ede276dd91b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3096,7 +3096,7 @@ static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
// need to be duplicated) or duplicating the constant wouldn't increase code
// size (implying the constant is no larger than 4 bytes).
const Function &F = DAG.getMachineFunction().getFunction();
-
+
// We rely on this decision to inline being idemopotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
// inline it elsewhere too (and reuse the constant pool entry). Fast-isel
@@ -5162,7 +5162,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
return SDValue();
// SoftFP: read half-precision arguments:
//
- // t2: i32,ch = ...
+ // t2: i32,ch = ...
// t7: i16 = truncate t2 <~~~~ Op
// t8: f16 = bitcast t7 <~~~~ N
//
@@ -5173,7 +5173,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
- // Half-precision return values
+ // Half-precision return values
if (SrcVT == MVT::f16 && DstVT == MVT::i16) {
if (!HasFullFP16)
return SDValue();
@@ -13461,13 +13461,13 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
if (!RHS || RHS->getZExtValue() != 4)
return false;
-
+
Offset = Op->getOperand(1);
Base = Op->getOperand(0);
AM = ISD::POST_INC;
return true;
}
-
+
bool isInc;
bool isLegal = false;
if (Subtarget->isThumb2())
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 901138dbdfd..db5f28480e9 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1275,7 +1275,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
// we're minimizing code size.
if (!MBB.getParent()->getFunction().optForMinSize() || !BaseKill)
return false;
-
+
bool HighRegsUsed = false;
for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
if (MI->getOperand(i).getReg() >= ARM::R8) {
diff --git a/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 81611677299..91310e81e39 100644
--- a/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -126,7 +126,7 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// The amount the literal pool has been increasedby due to promoted globals.
int PromotedGlobalsIncrease = 0;
-
+
public:
ARMFunctionInfo() = default;
diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index d4fbf76f299..4d685158e25 100644
--- a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -49,7 +49,7 @@ SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
case RTLIB::MEMMOVE:
AEABILibcall = AEABI_MEMMOVE;
break;
- case RTLIB::MEMSET:
+ case RTLIB::MEMSET:
AEABILibcall = AEABI_MEMSET;
if (ConstantSDNode *ConstantSrc = dyn_cast<ConstantSDNode>(Src))
if (ConstantSrc->getZExtValue() == 0)
@@ -93,14 +93,14 @@ SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
else if (Src.getValueType().bitsLT(MVT::i32))
Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
- Entry.Node = Src;
+ Entry.Node = Src;
Entry.Ty = Type::getInt32Ty(*DAG.getContext());
Entry.IsSExt = false;
Args.push_back(Entry);
} else {
Entry.Node = Src;
Args.push_back(Entry);
-
+
Entry.Node = Size;
Args.push_back(Entry);
}
@@ -121,7 +121,7 @@ SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
std::move(Args))
.setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
-
+
return CallResult.second;
}
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index f8cae31641f..94f9cefe429 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -389,7 +389,7 @@ int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
unsigned NumVectorInstToHideOverhead = 10;
int MaxMergeDistance = 64;
- if (Ty->isVectorTy() && SE &&
+ if (Ty->isVectorTy() && SE &&
!BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
return NumVectorInstToHideOverhead;
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index cd9fa070902..e0cd2d8e26a 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -153,7 +153,7 @@ public:
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
- int getAddressComputationCost(Type *Val, ScalarEvolution *SE,
+ int getAddressComputationCost(Type *Val, ScalarEvolution *SE,
const SCEV *Ptr);
int getArithmeticInstrCost(
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 807d6254733..47319829cb5 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -969,7 +969,7 @@ public:
// checks whether this operand is a memory operand computed as an offset
// applied to PC. the offset may have 8 bits of magnitude and is represented
- // with two bits of shift. textually it may be either [pc, #imm], #imm or
+ // with two bits of shift. textually it may be either [pc, #imm], #imm or
// relocable expression...
bool isThumbMemPC() const {
int64_t Val = 0;
@@ -2284,7 +2284,7 @@ public:
}
const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
-
+
assert(SR && "Unknown value type!");
Inst.addOperand(MCOperand::createExpr(SR));
return;
@@ -2326,7 +2326,7 @@ public:
assert(isImm() && "Not an immediate!");
// If we have an immediate that's not a constant, treat it as a label
- // reference needing a fixup.
+ // reference needing a fixup.
if (!isa<MCConstantExpr>(getImm())) {
Inst.addOperand(MCOperand::createExpr(getImm()));
return;
@@ -3419,7 +3419,7 @@ int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
- return -1;
+ return -1;
std::string lowerCase = Tok.getString().lower();
ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
@@ -4311,7 +4311,7 @@ ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- if (!Tok.is(AsmToken::Identifier))
+ if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
StringRef IFlagsStr = Tok.getString();
@@ -4353,7 +4353,7 @@ ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
return MatchOperand_NoMatch;
}
unsigned SYSmvalue = Val & 0xFF;
- Parser.Lex();
+ Parser.Lex();
Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
return MatchOperand_Success;
}
@@ -4996,7 +4996,7 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
// first decide whether or not the branch should be conditional
// by looking at it's location relative to an IT block
if(inITBlock()) {
- // inside an IT block we cannot have any conditional branches. any
+ // inside an IT block we cannot have any conditional branches. any
// such instructions needs to be converted to unconditional form
switch(Inst.getOpcode()) {
case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
@@ -5008,11 +5008,11 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
switch(Inst.getOpcode()) {
case ARM::tB:
- case ARM::tBcc:
- Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
+ case ARM::tBcc:
+ Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
break;
case ARM::t2B:
- case ARM::t2Bcc:
+ case ARM::t2Bcc:
Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
break;
}
@@ -8882,7 +8882,7 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
case ARM::MOVsi: {
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
// rrx shifts and asr/lsr of #32 is encoded as 0
- if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
+ if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
return false;
if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
// Shifting by zero is accepted as a vanilla 'MOVr'
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 4733cf49827..61bec04678d 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -620,7 +620,7 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
// assume a predicate of AL.
unsigned CC;
CC = ITBlock.getITCC();
- if (CC == 0xF)
+ if (CC == 0xF)
CC = ARMCC::AL;
if (ITBlock.instrInITBlock())
ITBlock.advanceITState();
@@ -888,7 +888,7 @@ DecodeGPRnopcRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
- if (RegNo == 15)
+ if (RegNo == 15)
S = MCDisassembler::SoftFail;
Check(S, DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder));
@@ -2171,7 +2171,7 @@ static DecodeStatus DecodeSETPANInstruction(MCInst &Inst, unsigned Insn,
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
const FeatureBitset &FeatureBits = Dis->getSubtargetInfo().getFeatureBits();
- if (!FeatureBits[ARM::HasV8_1aOps] ||
+ if (!FeatureBits[ARM::HasV8_1aOps] ||
!FeatureBits[ARM::HasV8Ops])
return MCDisassembler::Fail;
@@ -4467,7 +4467,7 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
index = fieldFromInstruction(Insn, 7, 1);
switch (fieldFromInstruction(Insn, 4, 2)) {
- case 0:
+ case 0:
align = 0; break;
case 3:
align = 4; break;
@@ -5279,7 +5279,7 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
- if (!Check(S, DecodeAddrMode7Operand(Inst, Rn, Address, Decoder)))
+ if (!Check(S, DecodeAddrMode7Operand(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodePostIdxReg(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
diff --git a/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 75ed40c18fa..bfc32073ba1 100644
--- a/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -834,7 +834,7 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
return;
}
- O << SYSm;
+ O << SYSm;
return;
}
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index dfa339091a7..7d04c73fb3f 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -64,7 +64,7 @@ bool ARMELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
}
}
-// Need to examine the Fixup when determining whether to
+// Need to examine the Fixup when determining whether to
// emit the relocation as an explicit symbol or as a section relative
// offset
unsigned ARMELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 0dab789505d..b37b8073548 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -740,7 +740,7 @@ getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr()) {
if (HasConditionalBranch(MI))
- return ::getBranchTargetOpValue(MI, OpIdx,
+ return ::getBranchTargetOpValue(MI, OpIdx,
ARM::fixup_arm_condbl, Fixups, STI);
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups, STI);
}
@@ -766,10 +766,10 @@ uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue(
const MCSubtargetInfo &STI) const {
unsigned Val = 0;
const MCOperand MO = MI.getOperand(OpIdx);
-
+
if(MO.isExpr())
return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups, STI);
- else
+ else
Val = MO.getImm() >> 1;
bool I = (Val & 0x800000);
diff --git a/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 637e4a44c42..7f03e1463c1 100644
--- a/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -233,7 +233,7 @@ bool MLxExpansion::FindMLxHazard(MachineInstr *MI) {
// On Swift, we mostly care about hazards from multiplication instructions
// writing the accumulator and the pipelining of loop iterations by out-of-
- // order execution.
+ // order execution.
if (isSwift)
return isFpMulInstruction(DefMI->getOpcode()) || hasLoopHazard(MI);
diff --git a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index a65e22fd86e..5c745e112b2 100644
--- a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -127,7 +127,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc dl;
-
+
unsigned FramePtr = RegInfo->getFrameRegister(MF);
unsigned BasePtr = RegInfo->getBaseRegister();
int CFAOffset = 0;
diff --git a/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h b/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
index 183dee36a04..de486ec4b7b 100644
--- a/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
+++ b/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
@@ -2,7 +2,7 @@
//
// The LLVM Compiler Infrastructure
//
-// This file is distributed under the University of Illinois Open Source
+// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 2acf701b43c..ce7db657f5e 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -7371,7 +7371,7 @@ bool MipsAsmParser::parseDirectiveGpWord() {
getParser().getStreamer().EmitGPRel32Value(Value);
if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(getLexer().getLoc(),
+ return Error(getLexer().getLoc(),
"unexpected token, expected end of statement");
Parser.Lex(); // Eat EndOfStatement token.
return false;
@@ -7506,7 +7506,7 @@ bool MipsAsmParser::parseDirectiveOption() {
}
// Unknown option.
- Warning(Parser.getTok().getLoc(),
+ Warning(Parser.getTok().getLoc(),
"unknown option, expected 'pic0' or 'pic2'");
Parser.eatToEndOfStatement();
return false;
@@ -8193,7 +8193,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
if (IDVal == ".abicalls") {
getTargetStreamer().emitDirectiveAbiCalls();
if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
- Error(Parser.getTok().getLoc(),
+ Error(Parser.getTok().getLoc(),
"unexpected token, expected end of statement");
}
return false;
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index fdb560f3c72..d7f6cf91db7 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -114,7 +114,7 @@ namespace Mips {
// resulting in - R_MIPS_GOT_DISP
fixup_Mips_GOT_DISP,
- // resulting in - R_MIPS_HIGHER/R_MICROMIPS_HIGHER
+ // resulting in - R_MIPS_HIGHER/R_MICROMIPS_HIGHER
fixup_Mips_HIGHER,
fixup_MICROMIPS_HIGHER,
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 8ffc0731abc..2e0c25de2bc 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -1094,7 +1094,7 @@ void MipsAsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind) {
// ALIGN
// B .tmpN
// 11 NOP instructions (44 bytes)
- // ADDIU T9, T9, 52
+ // ADDIU T9, T9, 52
// .tmpN
//
// We need the 44 bytes (11 instructions) because at runtime, we'd
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index 9eb13a68e56..744523cc6cb 100644
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This pass is used to make Pc relative loads of constants.
-// For now, only Mips16 will use this.
+// For now, only Mips16 will use this.
//
// Loading constants inline is expensive on Mips16 and it's in general better
// to place the constant nearby in code space and then it can be loaded with a
@@ -1171,7 +1171,7 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
-/// is set to the WaterList entry.
+/// is set to the WaterList entry.
/// To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
@@ -1231,7 +1231,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
// If the block does not end in an unconditional branch already, and if the
- // end of the block is within range, make new water there.
+ // end of the block is within range, make new water there.
if (BBHasFallthrough(UserMBB)) {
// Size of branch to insert.
unsigned Delta = 2;
@@ -1258,7 +1258,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
}
}
- // What a big block. Find a place within the block to split it.
+ // What a big block. Find a place within the block to split it.
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
@@ -1582,7 +1582,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MachineInstr *BMI = &MBB->back();
bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
unsigned OppositeBranchOpcode = TII->getOppositeBranchOpc(Opcode);
-
+
++NumCBrFixed;
if (BMI != MI) {
if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
@@ -1595,7 +1595,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
// bnez L2
// b L1
unsigned BMITargetOperand = branchTargetOperand(BMI);
- MachineBasicBlock *NewDest =
+ MachineBasicBlock *NewDest =
BMI->getOperand(BMITargetOperand).getMBB();
if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
LLVM_DEBUG(
diff --git a/llvm/lib/Target/Mips/MipsSubtarget.h b/llvm/lib/Target/Mips/MipsSubtarget.h
index 676d702ba63..896dd0eb0a5 100644
--- a/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -163,7 +163,7 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
// HasEVA -- supports EVA ASE.
bool HasEVA;
-
+
// nomadd4 - disables generation of 4-operand madd.s, madd.d and
// related instructions.
bool DisableMadd4;
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
index 3b042c74b26..efe98003b1c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -248,7 +248,7 @@ protected:
private:
bool GlobalsEmitted;
-
+
// This is specific per MachineFunction.
const MachineRegisterInfo *MRI;
// The contents are specific for each
diff --git a/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
index f12ed81b6d9..ad1d7cbb52f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -2,7 +2,7 @@
//
// The LLVM Compiler Infrastructure
//
-// This file is distributed under the University of Illinois Open Source
+// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h b/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
index 10f1135ad84..5a9115f6f7f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
@@ -2,7 +2,7 @@
//
// The LLVM Compiler Infrastructure
//
-// This file is distributed under the University of Illinois Open Source
+// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index ea709a73ebf..fd7f8159142 100644
--- a/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -175,7 +175,7 @@ void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
- raw_ostream &O,
+ raw_ostream &O,
const char *Modifier) {
unsigned Code = MI->getOperand(OpNo).getImm();
diff --git a/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index f000fbb9811..351ccefa2da 100644
--- a/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -35,11 +35,11 @@ public:
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
const MCSubtargetInfo &STI) override;
-
+
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
-
+
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
unsigned PrintMethodIdx,
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index 8ac461b96b8..fb7bf23509c 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -61,7 +61,7 @@ PPCELFMCAsmInfo::PPCELFMCAsmInfo(bool is64Bit, const Triple& T) {
CommentString = "#";
// Uses '.section' before '.bss' directive
- UsesELFSectionDirectiveForBSS = true;
+ UsesELFSectionDirectiveForBSS = true;
// Debug Information
SupportsDebugInformation = true;
@@ -73,7 +73,7 @@ PPCELFMCAsmInfo::PPCELFMCAsmInfo(bool is64Bit, const Triple& T) {
// Exceptions handling
ExceptionsType = ExceptionHandling::DwarfCFI;
-
+
ZeroDirective = "\t.space\t";
Data64bitsDirective = is64Bit ? "\t.quad\t" : nullptr;
AssemblerDialect = 1; // New-Style mnemonics.
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 2b948ca6002..57bda1403c6 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -102,7 +102,7 @@ public:
unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
-
+
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
@@ -138,7 +138,7 @@ public:
default:
llvm_unreachable("Invalid instruction size");
}
-
+
++MCNumEmitted; // Keep track of the # of mi's emitted.
}
@@ -147,7 +147,7 @@ private:
void verifyInstructionPredicates(const MCInst &MI,
uint64_t AvailableFeatures) const;
};
-
+
} // end anonymous namespace
MCCodeEmitter *llvm::createPPCMCCodeEmitter(const MCInstrInfo &MCII,
@@ -162,7 +162,7 @@ getDirectBrEncoding(const MCInst &MI, unsigned OpNo,
const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
-
+
// Add a fixup for the branch target.
Fixups.push_back(MCFixup::create(0, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_br24));
@@ -212,7 +212,7 @@ unsigned PPCMCCodeEmitter::getImm16Encoding(const MCInst &MI, unsigned OpNo,
const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg() || MO.isImm()) return getMachineOpValue(MI, MO, Fixups, STI);
-
+
// Add a fixup for the immediate field.
Fixups.push_back(MCFixup::create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16));
@@ -226,11 +226,11 @@ unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo,
// displacement and the next 5 bits as the register #.
assert(MI.getOperand(OpNo+1).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI) << 16;
-
+
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
return (getMachineOpValue(MI, MO, Fixups, STI) & 0xFFFF) | RegBits;
-
+
// Add a fixup for the displacement field.
Fixups.push_back(MCFixup::create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16));
@@ -244,11 +244,11 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
// displacement and the next 5 bits as the register #.
assert(MI.getOperand(OpNo+1).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI) << 14;
-
+
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isImm())
return ((getMachineOpValue(MI, MO, Fixups, STI) >> 2) & 0x3FFF) | RegBits;
-
+
// Add a fixup for the displacement field.
Fixups.push_back(MCFixup::create(IsLittleEndian? 0 : 2, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_half16ds));
@@ -320,7 +320,7 @@ unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
const MCSubtargetInfo &STI) const {
const MCOperand &MO = MI.getOperand(OpNo);
if (MO.isReg()) return getMachineOpValue(MI, MO, Fixups, STI);
-
+
// Add a fixup for the TLS register, which simply provides a relocation
// hint to the linker that this statement is part of a relocation sequence.
// Return the thread-pointer register's encoding.
@@ -373,7 +373,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
return Encode;
}
-
+
assert(MO.isImm() &&
"Relocation required in an instruction that we cannot encode!");
return MO.getImm();
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
index fe7e7aeeb18..481ba3f09cc 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
@@ -58,7 +58,7 @@ namespace PPC {
PRED_BIT_SET = 1024,
PRED_BIT_UNSET = 1025
};
-
+
// Bit for branch taken (plus) or not-taken (minus) hint
enum BranchHintBit {
BR_NO_HINT = 0x0,
diff --git a/llvm/lib/Target/PowerPC/PPC.h b/llvm/lib/Target/PowerPC/PPC.h
index dfdec246e86..bfc613af3dc 100644
--- a/llvm/lib/Target/PowerPC/PPC.h
+++ b/llvm/lib/Target/PowerPC/PPC.h
@@ -66,7 +66,7 @@ namespace llvm {
extern char &PPCVSXFMAMutateID;
namespace PPCII {
-
+
/// Target Operand Flag enum.
enum TOF {
//===------------------------------------------------------------------===//
@@ -111,7 +111,7 @@ namespace llvm {
MO_TLS = 8 << 4
};
} // end namespace PPCII
-
+
} // end namespace llvm;
#endif
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 64b8f1168be..0d1bb9297bc 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -130,7 +130,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
BlockSizes[MBB->getNumber()].first = BlockSize;
FuncSize += BlockSize;
}
-
+
// If the entire function is smaller than the displacement of a branch field,
// we know we don't need to shrink any branches in this function. This is a
// common case.
@@ -138,7 +138,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
BlockSizes.clear();
return false;
}
-
+
// For each conditional branch, if the offset to its destination is larger
// than the offset field allows, transform it into a long branch sequence
// like this:
@@ -153,7 +153,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
while (MadeChange) {
// Iteratively expand branches until we reach a fixed point.
MadeChange = false;
-
+
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
++MFI) {
MachineBasicBlock &MBB = *MFI;
@@ -175,7 +175,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
MBBStartOffset += TII->getInstSizeInBytes(*I);
continue;
}
-
+
// Determine the offset from the current branch to the destination
// block.
int BranchSize;
@@ -184,7 +184,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
// start of this block to this branch, plus the sizes of all blocks
// from this block to the dest.
BranchSize = MBBStartOffset;
-
+
for (unsigned i = Dest->getNumber(), e = MBB.getNumber(); i != e; ++i)
BranchSize += BlockSizes[i].first;
} else {
@@ -213,7 +213,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
// 2. Target MBB
PPC::Predicate Pred = (PPC::Predicate)I->getOperand(0).getImm();
unsigned CRReg = I->getOperand(1).getReg();
-
+
// Jump over the uncond branch inst (i.e. $PC+8) on opposite condition.
BuildMI(MBB, I, dl, TII->get(PPC::BCC))
.addImm(PPC::InvertPredicate(Pred)).addReg(CRReg).addImm(2);
@@ -234,7 +234,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
} else {
llvm_unreachable("Unhandled branch type!");
}
-
+
// Uncond branch to the real destination.
I = BuildMI(MBB, I, dl, TII->get(PPC::B)).addMBB(Dest);
@@ -277,7 +277,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
EverMadeChange |= MadeChange;
}
-
+
BlockSizes.clear();
return true;
}
diff --git a/llvm/lib/Target/PowerPC/PPCEarlyReturn.cpp b/llvm/lib/Target/PowerPC/PPCEarlyReturn.cpp
index ed5e496b32f..ac931f7d0ec 100644
--- a/llvm/lib/Target/PowerPC/PPCEarlyReturn.cpp
+++ b/llvm/lib/Target/PowerPC/PPCEarlyReturn.cpp
@@ -73,7 +73,7 @@ protected:
if ((*PI)->empty())
continue;
-
+
for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) {
if (J == (*PI)->end())
break;
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index f0000c5bafd..84dacf39646 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -174,7 +174,7 @@ const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
{PPC::V22, -160},
{PPC::V21, -176},
{PPC::V20, -192},
-
+
// SPE register save area (overlaps Vector save area).
{PPC::S31, -8},
{PPC::S30, -16},
@@ -1229,7 +1229,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
if (MBBI != MBB.end())
dl = MBBI->getDebugLoc();
-
+
const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
@@ -1315,7 +1315,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
bool IsReturnBlock = (MBBI != MBB.end() && MBBI->isReturn());
-
+
if (IsReturnBlock) {
unsigned RetOpcode = MBBI->getOpcode();
bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
diff --git a/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 55122046690..793a4dd7f62 100644
--- a/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -50,7 +50,7 @@ bool PPCDispatchGroupSBHazardRecognizer::isLoadAfterStore(SUnit *SU) {
return true;
}
- return false;
+ return false;
}
bool PPCDispatchGroupSBHazardRecognizer::isBCTRAfterSet(SUnit *SU) {
@@ -76,7 +76,7 @@ bool PPCDispatchGroupSBHazardRecognizer::isBCTRAfterSet(SUnit *SU) {
return true;
}
- return false;
+ return false;
}
// FIXME: Remove this when we don't need this:
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 06c7a51b2eb..f5277816cdc 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1143,7 +1143,7 @@ namespace llvm {
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
- bool
+ bool
CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 4669719744b..0930f7d3b8d 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -316,11 +316,11 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
}
// For opcodes with the ReMaterializable flag set, this function is called to
-// verify the instruction is really rematable.
+// verify the instruction is really rematable.
bool PPCInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
switch (MI.getOpcode()) {
- default:
+ default:
// This function should only be called for opcodes with the ReMaterializable
// flag set.
llvm_unreachable("Unknown rematerializable operation!");
diff --git a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index 2217fa4693c..0b57dd9b618 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -360,7 +360,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
// generate direct offsets from both the pre-incremented and
// post-incremented pointer values. Thus, we'll pick the first non-prefetch
// instruction in each bucket, and adjust the recurrence and other offsets
- // accordingly.
+ // accordingly.
for (int j = 0, je = Buckets[i].Elements.size(); j != je; ++j) {
if (auto *II = dyn_cast<IntrinsicInst>(Buckets[i].Elements[j].Instr))
if (II->getIntrinsicID() == Intrinsic::prefetch)
diff --git a/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
index 62a612feb55..e731c0bc0c2 100644
--- a/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -75,7 +75,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO,
}
return Sym;
}
-
+
return Sym;
}
@@ -130,7 +130,7 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
// Subtract off the PIC base if required.
if (MO.getTargetFlags() & PPCII::MO_PIC_FLAG) {
const MachineFunction *MF = MO.getParent()->getParent()->getParent();
-
+
const MCExpr *PB = MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
Expr = MCBinaryExpr::createSub(Expr, PB, Ctx);
}
@@ -151,7 +151,7 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
void llvm::LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
AsmPrinter &AP, bool isDarwin) {
OutMI.setOpcode(MI->getOpcode());
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MCOperand MCOp;
if (LowerPPCMachineOperandToMCOperand(MI->getOperand(i), MCOp, AP,
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index dbe1fe37ddf..0068df19f0c 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -891,7 +891,7 @@ static bool eligibleForCompareElimination(MachineBasicBlock &MBB,
auto BII = BB.getFirstInstrTerminator();
// We optimize BBs ending with a conditional branch.
// We check only for BCC here, not BCCLR, because BCCLR
- // will be formed only later in the pipeline.
+ // will be formed only later in the pipeline.
if (BB.succ_size() == 2 &&
BII != BB.instr_end() &&
(*BII).getOpcode() == PPC::BCC &&
diff --git a/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index b14bbad2039..8a3f50aa956 100644
--- a/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -29,7 +29,7 @@ class PPCFunctionInfo : public MachineFunctionInfo {
/// stored. Also used as an anchor for instructions that need to be altered
/// when using frame pointers (dyna_add, dyna_sub.)
int FramePointerSaveIndex = 0;
-
+
/// ReturnAddrSaveIndex - Frame index of where the return address is stored.
///
int ReturnAddrSaveIndex = 0;
@@ -128,7 +128,7 @@ public:
int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
-
+
int getReturnAddrSaveIndex() const { return ReturnAddrSaveIndex; }
void setReturnAddrSaveIndex(int idx) { ReturnAddrSaveIndex = idx; }
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 6647ceace5e..96923a97a82 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -979,7 +979,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
SReg = MF.getRegInfo().createVirtualRegister(RC);
// Insert a set of rA with the full offset value before the ld, st, or add
- if (isInt<16>(Offset))
+ if (isInt<16>(Offset))
BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
.addImm(Offset);
else {
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 226c75f704f..b0da9b5a6d7 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -201,7 +201,7 @@ unsigned PPCTTIImpl::getUserCost(const User *U,
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
return LT.first * BaseT::getUserCost(U, Operands);
}
-
+
return BaseT::getUserCost(U, Operands);
}
diff --git a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 1e8a1750ec3..1be193e08c0 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -443,7 +443,7 @@ bool PPCVSXSwapRemoval::gatherVectorInstructions() {
// We can handle STXSDX and STXSSPX similarly to LXSDX and LXSSPX,
// by adding special handling for narrowing copies as well as
// widening ones. However, I've experimented with this, and in
- // practice we currently do not appear to use STXSDX fed by
+ // practice we currently do not appear to use STXSDX fed by
// a narrowing copy from a full vector register. Since I can't
// generate any useful test cases, I've left this alone for now.
case PPC::STXSDX:
diff --git a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index c7a5a1e8e6e..35f52f7d279 100644
--- a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -190,7 +190,7 @@ public:
Sparc::C8_C9, Sparc::C10_C11, Sparc::C12_C13, Sparc::C14_C15,
Sparc::C16_C17, Sparc::C18_C19, Sparc::C20_C21, Sparc::C22_C23,
Sparc::C24_C25, Sparc::C26_C27, Sparc::C28_C29, Sparc::C30_C31};
-
+
namespace {
/// SparcOperand - Instances of this class represent a parsed Sparc machine
@@ -459,7 +459,7 @@ public:
Op.Reg.Kind = rk_CoprocPairReg;
return true;
}
-
+
static std::unique_ptr<SparcOperand>
MorphToMEMrr(unsigned Base, std::unique_ptr<SparcOperand> Op) {
unsigned offsetReg = Op->getReg();
@@ -1000,7 +1000,7 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok, unsigned &RegNo,
RegKind = SparcOperand::rk_Special;
return true;
}
-
+
if (name.equals("wim")) {
RegNo = Sparc::WIM;
RegKind = SparcOperand::rk_Special;
@@ -1093,7 +1093,7 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok, unsigned &RegNo,
RegKind = SparcOperand::rk_CoprocReg;
return true;
}
-
+
if (name.equals("tpc")) {
RegNo = Sparc::TPC;
RegKind = SparcOperand::rk_Special;
diff --git a/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
index 8e298e8316d..3e30dae1537 100644
--- a/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
+++ b/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -350,18 +350,18 @@ DecodeStatus SparcDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
return MCDisassembler::Fail;
// Calling the auto-generated decoder function.
-
+
if (STI.getFeatureBits()[Sparc::FeatureV9])
{
Result = decodeInstruction(DecoderTableSparcV932, Instr, Insn, Address, this, STI);
}
else
{
- Result = decodeInstruction(DecoderTableSparcV832, Instr, Insn, Address, this, STI);
+ Result = decodeInstruction(DecoderTableSparcV832, Instr, Insn, Address, this, STI);
}
if (Result != MCDisassembler::Fail)
return Result;
-
+
Result =
decodeInstruction(DecoderTableSparc32, Instr, Insn, Address, this, STI);
@@ -662,7 +662,7 @@ static DecodeStatus DecodeTRAP(MCInst &MI, unsigned insn, uint64_t Address,
if (status != MCDisassembler::Success)
return status;
}
-
+
// Decode CC
MI.addOperand(MCOperand::createImm(cc));
diff --git a/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp b/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
index 4981deae6af..c1512cbdc44 100644
--- a/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
+++ b/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
@@ -118,9 +118,9 @@ void SparcInstPrinter::printOperand(const MCInst *MI, int opNum,
if (MO.isImm()) {
switch (MI->getOpcode()) {
default:
- O << (int)MO.getImm();
+ O << (int)MO.getImm();
return;
-
+
case SP::TICCri: // Fall through
case SP::TICCrr: // Fall through
case SP::TRAPri: // Fall through
@@ -128,7 +128,7 @@ void SparcInstPrinter::printOperand(const MCInst *MI, int opNum,
case SP::TXCCri: // Fall through
case SP::TXCCrr: // Fall through
// Only seven-bit values up to 127.
- O << ((int) MO.getImm() & 0x7f);
+ O << ((int) MO.getImm() & 0x7f);
return;
}
}
diff --git a/llvm/lib/Target/Sparc/Sparc.h b/llvm/lib/Target/Sparc/Sparc.h
index 4135e4e1b61..0cea53b359e 100644
--- a/llvm/lib/Target/Sparc/Sparc.h
+++ b/llvm/lib/Target/Sparc/Sparc.h
@@ -73,7 +73,7 @@ namespace llvm {
FCC_LE = 13+16, // Less or Equal
FCC_ULE = 14+16, // Unordered or Less or Equal
FCC_O = 15+16, // Ordered
-
+
CPCC_A = 8+32, // Always
CPCC_N = 0+32, // Never
CPCC_3 = 7+32,
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index bf700d6a99d..0cbbda78788 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -59,9 +59,9 @@ namespace llvm {
public:
SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI);
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
-
+
bool useSoftFloat() const override;
-
+
/// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 6750763d8ee..47b42444b94 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -115,7 +115,7 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::FCC_UE: return SPCC::FCC_LG;
case SPCC::FCC_NE: return SPCC::FCC_E;
case SPCC::FCC_E: return SPCC::FCC_NE;
-
+
case SPCC::CPCC_A: return SPCC::CPCC_N;
case SPCC::CPCC_N: return SPCC::CPCC_A;
case SPCC::CPCC_3: LLVM_FALLTHROUGH;
diff --git a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index a0d40653fd9..07f9e7250bd 100644
--- a/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -100,7 +100,7 @@ SparcTargetMachine::SparcTargetMachine(
SparcTargetMachine::~SparcTargetMachine() {}
-const SparcSubtarget *
+const SparcSubtarget *
SparcTargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
Attribute FSAttr = F.getFnAttribute("target-features");
@@ -119,7 +119,7 @@ SparcTargetMachine::getSubtargetImpl(const Function &F) const {
F.hasFnAttribute("use-soft-float") &&
F.getFnAttribute("use-soft-float").getValueAsString() == "true";
- if (softFloat)
+ if (softFloat)
FS += FS.empty() ? "+soft-float" : ",+soft-float";
auto &I = SubtargetMap[CPU + FS];
diff --git a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index d300d1d88ab..d01dd9eaaaf 100644
--- a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -55,7 +55,7 @@ getNumDecoderSlots(SUnit *SU) const {
else
return 3; // Expanded/group-alone instruction
}
-
+
return 1; // Normal instruction
}
@@ -142,7 +142,7 @@ void SystemZHazardRecognizer::dumpSU(SUnit *SU, raw_ostream &OS) const {
const MCSchedClassDesc *SC = getSchedClass(SU);
if (!SC->isValid())
return;
-
+
for (TargetSchedModel::ProcResIter
PI = SchedModel->getWriteProcResBegin(SC),
PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
@@ -306,7 +306,7 @@ int SystemZHazardRecognizer::groupingCost(SUnit *SU) const {
const MCSchedClassDesc *SC = getSchedClass(SU);
if (!SC->isValid())
return 0;
-
+
// If SU begins new group, it can either break a current group early
// or fit naturally if current group is empty (negative cost).
if (SC->BeginGroup) {
diff --git a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h
index 40cb3acc700..ad06be978ad 100644
--- a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h
+++ b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h
@@ -134,7 +134,7 @@ public:
/// new decoder group, this is negative if this fits the schedule or
/// positive if it would mean ending a group prematurely. For normal
/// instructions this returns 0.
- int groupingCost(SUnit *SU) const;
+ int groupingCost(SUnit *SU) const;
/// Return the cost of SU in regards to processor resources usage.
/// A positive value means it would be better to wait with SU, while
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 302c7883f97..1ad0e964c1e 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -609,7 +609,7 @@ struct AddressingMode {
// True if use of index register is supported.
bool IndexReg;
-
+
AddressingMode(bool LongDispl, bool IdxReg) :
LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};
@@ -5545,7 +5545,7 @@ SDValue SystemZTargetLowering::combineSHIFTROT(
// The AND mask is constant
if (AndMask) {
auto AmtVal = AndMask->getZExtValue();
-
+
// Bottom 6 bits are set
if ((AmtVal & 0x3f) == 0x3f) {
SDValue AndOp = N1->getOperand(0);
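
The combineSHIFTROT hunk relies on the fact that a 64-bit shift or rotate only consumes the low 6 bits of its amount, so an AND whose mask has the bottom 6 bits all set cannot change the result and can be dropped. A small stand-alone check of that equivalence (plain C++, not DAG code):

#include <cassert>
#include <cstdint>

// Model a shifter that reduces its amount modulo 64, as the comment above
// assumes the hardware does.
uint64_t shlModulo64(uint64_t X, uint64_t Amt) { return X << (Amt & 63); }

int main() {
  const uint64_t Mask = 0xff; // bottom 6 bits are set, plus two extra bits
  for (uint64_t Amt = 0; Amt < 256; ++Amt)
    // Masking first makes no difference: (Amt & Mask) & 63 == Amt & 63.
    assert(shlModulo64(1, Amt & Mask) == shlModulo64(1, Amt));
}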
diff --git a/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp
index fcbf4c4b5fe..11e60158524 100644
--- a/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp
@@ -129,7 +129,7 @@ SystemZPostRASchedStrategy::
SystemZPostRASchedStrategy(const MachineSchedContext *C)
: MLI(C->MLI),
TII(static_cast<const SystemZInstrInfo *>
- (C->MF->getSubtarget().getInstrInfo())),
+ (C->MF->getSubtarget().getInstrInfo())),
MBB(nullptr), HazardRec(nullptr) {
const TargetSubtargetInfo *ST = &C->MF->getSubtarget();
SchedModel.init(ST);
diff --git a/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h b/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h
index cb030482596..ab820e5d3e6 100644
--- a/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h
+++ b/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h
@@ -26,7 +26,7 @@
using namespace llvm;
namespace llvm {
-
+
/// A MachineSchedStrategy implementation for SystemZ post RA scheduling.
class SystemZPostRASchedStrategy : public MachineSchedStrategy {
@@ -37,7 +37,7 @@ class SystemZPostRASchedStrategy : public MachineSchedStrategy {
// non-scheduled instructions, so it would not always be possible to call
// DAG->getSchedClass(SU).
TargetSchedModel SchedModel;
-
+
/// A candidate during instruction evaluation.
struct Candidate {
SUnit *SU = nullptr;
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index e2a3efda5c5..c5cdc22f209 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -329,7 +329,7 @@ bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
}
int SystemZTTIImpl::getArithmeticInstrCost(
- unsigned Opcode, Type *Ty,
+ unsigned Opcode, Type *Ty,
TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo,
@@ -469,7 +469,7 @@ int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
assert (Tp->isVectorTy());
assert (ST->hasVector() && "getShuffleCost() called.");
unsigned NumVectors = getNumberOfParts(Tp);
-
+
// TODO: Since fp32 is expanded, the shuffle cost should always be 0.
// FP128 values are always in scalar registers, so there is no work
@@ -647,7 +647,7 @@ int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
return Cost;
}
}
-
+
if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
// TODO: Fix base implementation which could simplify things a bit here
@@ -704,7 +704,7 @@ int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)
return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/);
-
+
if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
Src->isIntegerTy(1)) {
// This should be extension of a compare i1 result, which is done with
diff --git a/llvm/lib/Target/Target.cpp b/llvm/lib/Target/Target.cpp
index 42d92622d6c..f23ea72eb51 100644
--- a/llvm/lib/Target/Target.cpp
+++ b/llvm/lib/Target/Target.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the common infrastructure (including C bindings) for
+// This file implements the common infrastructure (including C bindings) for
// libLLVMTarget.a, which implements target information.
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/TargetLoweringObjectFile.cpp b/llvm/lib/Target/TargetLoweringObjectFile.cpp
index 907ecf46e8f..6bcf60fafc3 100644
--- a/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -92,10 +92,10 @@ static bool IsNullTerminatedString(const Constant *C) {
if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) {
unsigned NumElts = CDS->getNumElements();
assert(NumElts != 0 && "Can't have an empty CDS");
-
+
if (CDS->getElementAsInteger(NumElts-1) != 0)
return false; // Not null terminated.
-
+
// Verify that the null doesn't occur anywhere else in the string.
for (unsigned i = 0; i != NumElts-1; ++i)
if (CDS->getElementAsInteger(i) == 0)
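
The IsNullTerminatedString hunk accepts a constant only if its last element is zero and no earlier element is. The same test on a raw byte buffer, as a short self-contained sketch:

#include <cstddef>

// True iff Data[0..Len-1] is a simple C string: exactly one NUL, in the
// last slot.  Mirrors the shape of the check above, but on raw bytes
// rather than an LLVM ConstantDataSequential.
bool isNullTerminatedString(const unsigned char *Data, size_t Len) {
  if (Len == 0 || Data[Len - 1] != 0)
    return false;               // empty or not null terminated
  for (size_t I = 0; I + 1 < Len; ++I)
    if (Data[I] == 0)
      return false;             // embedded NUL: not a plain C string
  return true;
}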
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index b84c2d31a63..fafbed0bd93 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2603,11 +2603,11 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
bool HadVerifyError = false;
// Append default arguments to "ins[bwld]"
- if (Name.startswith("ins") &&
+ if (Name.startswith("ins") &&
(Operands.size() == 1 || Operands.size() == 3) &&
(Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
Name == "ins")) {
-
+
AddDefaultSrcDestOperands(TmpOperands,
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
DefaultMemDIOperand(NameLoc));
@@ -2615,7 +2615,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
// Append default arguments to "outs[bwld]"
- if (Name.startswith("outs") &&
+ if (Name.startswith("outs") &&
(Operands.size() == 1 || Operands.size() == 3) &&
(Name == "outsb" || Name == "outsw" || Name == "outsl" ||
Name == "outsd" || Name == "outs")) {
diff --git a/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 82e82fe1efd..0e861d5ddbc 100644
--- a/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -92,7 +92,7 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
// the hex value of the immediate operand when it isn't in the range
// [-256,255].
if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256)) {
- // Don't print unnecessary hex sign bits.
+ // Don't print unnecessary hex sign bits.
if (Imm == (int16_t)(Imm))
*CommentStream << format("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
else if (Imm == (int32_t)(Imm))
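
The instruction-printer hunk emits the immediate as a hex comment using the narrowest width that still round-trips, so small negative values are not printed with a long run of leading F digits. The same idea as a free-standing helper (printf-based sketch rather than the MC comment stream):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Print Imm in hex using the narrowest width that preserves its value.
void printHexImm(int64_t Imm) {
  if (Imm == (int16_t)Imm)
    std::printf("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
  else if (Imm == (int32_t)Imm)
    std::printf("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
  else
    std::printf("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
}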
diff --git a/llvm/lib/Target/X86/X86CallingConv.h b/llvm/lib/Target/X86/X86CallingConv.h
index c49a6838fa4..d0fcbd31331 100644
--- a/llvm/lib/Target/X86/X86CallingConv.h
+++ b/llvm/lib/Target/X86/X86CallingConv.h
@@ -66,7 +66,7 @@ inline bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT,
// not to split i64 and double between a register and stack
static const MCPhysReg RegList[] = {X86::EAX, X86::EDX, X86::ECX};
static const unsigned NumRegs = sizeof(RegList)/sizeof(RegList[0]);
-
+
SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
// If this is the first part of an double/i64/i128, or if we're already
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index f73455cc31b..1c5f110d8c6 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -622,7 +622,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
// If the CMOV group is not packed, e.g., there are debug instructions between
// first CMOV and last CMOV, then pack the group and make the CMOV instruction
- // consecutive by moving the debug instructions to after the last CMOV.
+ // consecutive by moving the debug instructions to after the last CMOV.
packCmovGroup(Group.front(), Group.back());
// To convert a CMOVcc instruction, we actually have to insert the diamond
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index f82e46f0c59..35a15577fe0 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2649,7 +2649,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::VMOVPDI2DIrr), ResultReg)
.addReg(InputReg, RegState::Kill);
-
+
// The result value is in the lower 16-bits of ResultReg.
unsigned RegIdx = X86::sub_16bit;
ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
@@ -3687,7 +3687,7 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
unsigned Reg = getRegForValue(I->getOperand(0));
if (Reg == 0)
return false;
-
+
// No instruction is needed for conversion. Reuse the register used by
// the fist operand.
updateValueMap(I, Reg);
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index ae748901164..f330acff61a 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -347,12 +347,12 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
LiveBundle &Bundle =
LiveBundles[Bundles->getBundle(Entry->getNumber(), false)];
-
+
// In regcall convention, some FP registers may not be passed through
// the stack, so they will need to be assigned to the stack first
if ((Entry->getParent()->getFunction().getCallingConv() ==
CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) {
- // In the register calling convention, up to one FP argument could be
+ // In the register calling convention, up to one FP argument could be
// saved in the first FP register.
// If bundle.mask is non-zero and Bundle.FixCount is zero, it means
// that the FP registers contain arguments.
@@ -991,7 +991,7 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) {
assert(STReturns == 0 || (isMask_32(STReturns) && N <= 2));
// Reset the FP Stack - It is required because of possible leftovers from
- // passed arguments. The caller should assume that the FP stack is
+ // passed arguments. The caller should assume that the FP stack is
// returned empty (unless the callee returns values on FP stack).
while (StackTop > 0)
popReg();
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index a257ec41f75..3122c2a90b2 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -68,7 +68,7 @@ X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
-// not apparent from the title - it resolves callframesetup/destroy
+// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
@@ -607,7 +607,7 @@ void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
int64_t RCXShadowSlot = 0;
int64_t RDXShadowSlot = 0;
- // If inlining in the prolog, save RCX and RDX.
+ // If inlining in the prolog, save RCX and RDX.
// Future optimization: don't save or restore if not live in.
if (InProlog) {
// Compute the offsets. We need to account for things already
@@ -2694,7 +2694,7 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
Regs[FoundRegs++] = Regs[0];
for (int i = 0; i < NumPops; ++i)
- BuildMI(MBB, MBBI, DL,
+ BuildMI(MBB, MBBI, DL,
TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
return true;
@@ -2984,7 +2984,7 @@ struct X86FrameSortingComparator {
// in general. Something to keep in mind, though.
if (DensityAScaled == DensityBScaled)
return A.ObjectAlignment < B.ObjectAlignment;
-
+
return DensityAScaled < DensityBScaled;
}
};
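
The frame-sorting hunk compares stack objects by access density, falling back to alignment when the densities tie. A hedged sketch of that comparison, using a hypothetical FrameObj record and cross-multiplication instead of division:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical per-object bookkeeping; the real pass tracks more fields.
struct FrameObj {
  uint64_t ObjectNumUses = 0;   // how often the object is referenced
  uint64_t ObjectSize = 1;      // in bytes (variable-sized objects use 4)
  unsigned ObjectAlignment = 1;
};

// Order objects by "density" (uses per byte), breaking ties on alignment.
// Comparing A.Uses/A.Size against B.Uses/B.Size by cross-multiplying avoids
// a division; which end of the order lands nearest the stack pointer is
// left to the caller.
void orderFrameObjectsSketch(std::vector<FrameObj> &Objs) {
  std::stable_sort(Objs.begin(), Objs.end(),
                   [](const FrameObj &A, const FrameObj &B) {
                     uint64_t DA = A.ObjectNumUses * B.ObjectSize;
                     uint64_t DB = B.ObjectNumUses * A.ObjectSize;
                     if (DA == DB)
                       return A.ObjectAlignment < B.ObjectAlignment;
                     return DA < DB;
                   });
}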
@@ -3020,7 +3020,7 @@ void X86FrameLowering::orderFrameObjects(
if (ObjectSize == 0)
// Variable size. Just use 4.
SortingObjects[Obj].ObjectSize = 4;
- else
+ else
SortingObjects[Obj].ObjectSize = ObjectSize;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 85e9f41d547..303903b9b84 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23368,7 +23368,7 @@ static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
return DAG.getBuildVector(VT, dl, Elts);
}
- // If the target doesn't support variable shifts, use either FP conversion
+ // If the target doesn't support variable shifts, use either FP conversion
// or integer multiplication to avoid shifting each element individually.
if (VT == MVT::v4i32) {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
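
The convertShiftLeftToScale hunk turns a variable v4i32 shift-left into a multiply by 2^amt when variable shifts are unavailable; shifting the amount left by 23 places it in the exponent field of an IEEE-754 single, and adding it onto the bit pattern of 1.0f yields the power of two after conversion. A scalar illustration of that exponent trick, using a powerOfTwoViaFloat helper name of my own:

#include <cassert>
#include <cstdint>
#include <cstring>

// Build 2^Amt (0 <= Amt <= 31) by constructing the float whose exponent
// field encodes Amt: exponent bits start at bit 23, and the bias is
// already present in the bit pattern of 1.0f (0x3F800000).
uint32_t powerOfTwoViaFloat(uint32_t Amt) {
  uint32_t Bits = (Amt << 23) + 0x3F800000u; // 1.0f with exponent bumped by Amt
  float F;
  std::memcpy(&F, &Bits, sizeof(F));
  return static_cast<uint32_t>(F);
}

int main() {
  for (uint32_t Amt = 0; Amt < 32; ++Amt)
    assert(powerOfTwoViaFloat(Amt) == (1u << Amt));
}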
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index b71e9619d19..ff5006d208e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1126,8 +1126,8 @@ namespace llvm {
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
unsigned Factor) const override;
- SDValue expandIndirectJTBranch(const SDLoc& dl, SDValue Value,
- SDValue Addr, SelectionDAG &DAG)
+ SDValue expandIndirectJTBranch(const SDLoc& dl, SDValue Value,
+ SDValue Addr, SelectionDAG &DAG)
const override;
protected:
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index 5d8400595bf..7d31cfab413 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -1576,7 +1576,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
{ X86::SUBSDrr_Int, X86::SUBSDrm_Int, TB_NO_REVERSE },
{ X86::SUBSSrr, X86::SUBSSrm, 0 },
{ X86::SUBSSrr_Int, X86::SUBSSrm_Int, TB_NO_REVERSE },
- // FIXME: TEST*rr -> swapped operand of TEST *mr.
+ // FIXME: TEST*rr -> swapped operand of TEST *mr.
{ X86::UNPCKHPDrr, X86::UNPCKHPDrm, TB_ALIGN_16 },
{ X86::UNPCKHPSrr, X86::UNPCKHPSrm, TB_ALIGN_16 },
{ X86::UNPCKLPDrr, X86::UNPCKLPDrm, TB_ALIGN_16 },
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index fedb13f89e1..85e8256a6e9 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -51,7 +51,7 @@ enum Style {
} // end namespace PICStyles
class X86Subtarget final : public X86GenSubtargetInfo {
-public:
+public:
enum X86ProcFamilyEnum {
Others,
IntelAtom,
diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
index 8f7c8a82380..916bca6392d 100644
--- a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -146,7 +146,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
}
EmitAlignment(Align > 2 ? Align : 2, GV);
-
+
if (GV->isThreadLocal()) {
report_fatal_error("TLS is not supported by this target!");
}
@@ -162,7 +162,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// are padded to 32 bits.
if (Size < 4)
OutStreamer->EmitZeros(4 - Size);
-
+
// Mark the end of the global
getTargetStreamer().emitCCBottomData(GVSym->getName());
}
@@ -295,6 +295,6 @@ void XCoreAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
// Force static initialization.
-extern "C" void LLVMInitializeXCoreAsmPrinter() {
+extern "C" void LLVMInitializeXCoreAsmPrinter() {
RegisterAsmPrinter<XCoreAsmPrinter> X(getTheXCoreTarget());
}
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index d5e276788f7..b0de048672d 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -63,7 +63,7 @@ static bool isZeroImm(const MachineOperand &op) {
unsigned XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
int Opcode = MI.getOpcode();
- if (Opcode == XCore::LDWFI)
+ if (Opcode == XCore::LDWFI)
{
if ((MI.getOperand(1).isFI()) && // is a stack slot
(MI.getOperand(2).isImm()) && // the imm is zero
@@ -74,7 +74,7 @@ unsigned XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
}
return 0;
}
-
+
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
@@ -129,9 +129,9 @@ static inline bool IsBR_JT(unsigned BrOpc) {
|| BrOpc == XCore::BR_JT32;
}
-/// GetCondFromBranchOpc - Return the XCore CC that matches
+/// GetCondFromBranchOpc - Return the XCore CC that matches
/// the correspondent Branch instruction opcode.
-static XCore::CondCode GetCondFromBranchOpc(unsigned BrOpc)
+static XCore::CondCode GetCondFromBranchOpc(unsigned BrOpc)
{
if (IsBRT(BrOpc)) {
return XCore::COND_TRUE;
@@ -144,7 +144,7 @@ static XCore::CondCode GetCondFromBranchOpc(unsigned BrOpc)
/// GetCondBranchFromCond - Return the Branch instruction
/// opcode that matches the cc.
-static inline unsigned GetCondBranchFromCond(XCore::CondCode CC)
+static inline unsigned GetCondBranchFromCond(XCore::CondCode CC)
{
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
@@ -153,7 +153,7 @@ static inline unsigned GetCondBranchFromCond(XCore::CondCode CC)
}
}
-/// GetOppositeBranchCondition - Return the inverse of the specified
+/// GetOppositeBranchCondition - Return the inverse of the specified
/// condition, e.g. turning COND_E to COND_NE.
static inline XCore::CondCode GetOppositeBranchCondition(XCore::CondCode CC)
{
@@ -209,11 +209,11 @@ bool XCoreInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
TBB = LastInst->getOperand(0).getMBB();
return false;
}
-
+
XCore::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
if (BranchCode == XCore::COND_INVALID)
return true; // Can't handle indirect branch.
-
+
// Conditional branch
// Block ends with fall-through condbranch.
@@ -222,17 +222,17 @@ bool XCoreInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
Cond.push_back(LastInst->getOperand(0));
return false;
}
-
+
// Get the instruction before it if it's a terminator.
MachineInstr *SecondLastInst = &*I;
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
-
+
unsigned SecondLastOpc = SecondLastInst->getOpcode();
XCore::CondCode BranchCode = GetCondFromBranchOpc(SecondLastOpc);
-
+
// If the block ends with conditional branch followed by unconditional,
// handle it.
if (BranchCode != XCore::COND_INVALID
@@ -245,10 +245,10 @@ bool XCoreInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
FBB = LastInst->getOperand(0).getMBB();
return false;
}
-
+
// If the block ends with two unconditional branches, handle it. The second
// one is not executed, so remove it.
- if (IsBRU(SecondLastInst->getOpcode()) &&
+ if (IsBRU(SecondLastInst->getOpcode()) &&
IsBRU(LastInst->getOpcode())) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
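
The XCore analyzeBranch hunks walk the usual analyzeBranch contract: inspect the last one or two terminators, report the taken and fall-through blocks plus the condition, or give up on anything indirect. A compressed, target-independent sketch of that decision tree, with a hypothetical Term record standing in for MachineInstr:

#include <vector>

// Hypothetical terminator description, for illustration only.
struct Term { bool IsUncond; bool IsCond; int Dest; int CondCode; };

// Returns false and fills TBB/FBB/Cond on success, true if the block
// cannot be analyzed -- the same convention the hunks above follow.
bool analyzeBranchSketch(const std::vector<Term> &Terms, int &TBB, int &FBB,
                         std::vector<int> &Cond) {
  if (Terms.empty())
    return false;                      // pure fall-through, nothing to report
  const Term &Last = Terms.back();
  if (Terms.size() == 1) {
    if (Last.IsUncond) { TBB = Last.Dest; return false; }
    if (Last.IsCond) { TBB = Last.Dest; Cond.push_back(Last.CondCode); return false; }
    return true;                       // indirect branch etc.
  }
  const Term &Prev = Terms[Terms.size() - 2];
  if (Prev.IsCond && Last.IsUncond) {  // condbr + br: both edges known
    TBB = Prev.Dest; Cond.push_back(Prev.CondCode); FBB = Last.Dest;
    return false;
  }
  if (Prev.IsUncond && Last.IsUncond) { // br + br: the second never executes
    TBB = Prev.Dest; return false;
  }
  return true;
}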
@@ -293,7 +293,7 @@ unsigned XCoreInstrInfo::insertBranch(MachineBasicBlock &MBB,
}
return 1;
}
-
+
// Two-way Conditional branch.
assert(Cond.size() == 2 && "Unexpected number of components!");
unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm());
@@ -313,17 +313,17 @@ XCoreInstrInfo::removeBranch(MachineBasicBlock &MBB, int *BytesRemoved) const {
if (!IsBRU(I->getOpcode()) && !IsCondBranch(I->getOpcode()))
return 0;
-
+
// Remove the branch.
I->eraseFromParent();
-
+
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (!IsCondBranch(I->getOpcode()))
return 1;
-
+
// Remove the branch.
I->eraseFromParent();
return 2;
@@ -342,7 +342,7 @@ void XCoreInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addImm(0);
return;
}
-
+
if (GRDest && SrcReg == XCore::SP) {
BuildMI(MBB, I, DL, get(XCore::LDAWSP_ru6), DestReg).addImm(0);
return;
diff --git a/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h b/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
index cf469ec3cf1..6c05ab3f10d 100644
--- a/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
+++ b/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
@@ -43,11 +43,11 @@ class XCoreFunctionInfo : public MachineFunctionInfo {
public:
XCoreFunctionInfo() = default;
-
+
explicit XCoreFunctionInfo(MachineFunction &MF) {}
-
+
~XCoreFunctionInfo() override = default;
-
+
void setVarArgsFrameIndex(int off) { VarArgsFrameIndex = off; }
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index 1915aaedc35..e119d9555f9 100644
--- a/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -296,12 +296,12 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// fold constant into offset.
Offset += MI.getOperand(FIOperandNum + 1).getImm();
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
-
+
assert(Offset%4 == 0 && "Misaligned stack offset");
LLVM_DEBUG(errs() << "Offset : " << Offset << "\n"
<< "<--------->\n");
Offset/=4;
-
+
unsigned Reg = MI.getOperand(0).getReg();
assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");
diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index c31f5d5a7c4..9451a05d8d5 100644
--- a/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -32,7 +32,7 @@ public:
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
BitVector getReservedRegs(const MachineFunction &MF) const override;
-
+
bool enableMultipleCopyHints() const override { return true; }
bool requiresRegisterScavenging(const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/XCore/XCoreSubtarget.h b/llvm/lib/Target/XCore/XCoreSubtarget.h
index 140ddba68aa..ed9936ebf2b 100644
--- a/llvm/lib/Target/XCore/XCoreSubtarget.h
+++ b/llvm/lib/Target/XCore/XCoreSubtarget.h
@@ -43,7 +43,7 @@ public:
XCoreSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM);
- /// ParseSubtargetFeatures - Parses features string setting specified
+ /// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
OpenPOWER on IntegriCloud