Diffstat (limited to 'llvm/lib/Target/ARM64')
-rw-r--r--  llvm/lib/Target/ARM64/ARM64AddressTypePromotion.cpp            |  8
-rw-r--r--  llvm/lib/Target/ARM64/ARM64AsmPrinter.cpp                      |  2
-rw-r--r--  llvm/lib/Target/ARM64/ARM64CollectLOH.cpp                      | 14
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp             | 24
-rw-r--r--  llvm/lib/Target/ARM64/ARM64FastISel.cpp                        |  4
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp                    | 28
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ISelLowering.cpp                    | 14
-rw-r--r--  llvm/lib/Target/ARM64/ARM64InstrInfo.cpp                       | 12
-rw-r--r--  llvm/lib/Target/ARM64/ARM64PromoteConstant.cpp                 |  4
-rw-r--r--  llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp                    |  2
-rw-r--r--  llvm/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp                |  2
-rw-r--r--  llvm/lib/Target/ARM64/ARM64StorePairSuppress.cpp               |  2
-rw-r--r--  llvm/lib/Target/ARM64/ARM64TargetTransformInfo.cpp             |  2
-rw-r--r--  llvm/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp             | 14
-rw-r--r--  llvm/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp | 12
-rw-r--r--  llvm/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp         | 10
-rw-r--r--  llvm/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp       |  4
-rw-r--r--  llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp   | 14
18 files changed, 86 insertions, 86 deletions
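The hunks below are a mechanical C++11 cleanup: wherever the ARM64 backend initialized, assigned, returned, or compared a pointer against NULL or a literal 0, the code now uses nullptr (or a plain boolean test where the surrounding code already favored one). A minimal compilable sketch of the idiom being applied follows; the ExamplePass type and its members are hypothetical and not taken from the patch:

// Illustrative only: the before/after shape of the NULL -> nullptr conversion.
struct ExamplePass {
  int *Cache;
  // Old style: ExamplePass() : Cache(NULL) {}
  ExamplePass() : Cache(nullptr) {}   // nullptr is a distinct null-pointer constant, not the integer 0
  // Old style: return Cache != NULL;
  bool hasCache() const { return Cache != nullptr; }
};

int main() {
  ExamplePass P;
  return P.hasCache() ? 1 : 0;        // exits with 0 here, since Cache starts out null
}

Unlike NULL, which expands to an integer constant, nullptr converts only to pointer types, so it cannot be silently treated as an int by overload resolution or template deduction.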
diff --git a/llvm/lib/Target/ARM64/ARM64AddressTypePromotion.cpp b/llvm/lib/Target/ARM64/ARM64AddressTypePromotion.cpp
index b34a81081fc..c6e45def9a4 100644
--- a/llvm/lib/Target/ARM64/ARM64AddressTypePromotion.cpp
+++ b/llvm/lib/Target/ARM64/ARM64AddressTypePromotion.cpp
@@ -71,7 +71,7 @@ class ARM64AddressTypePromotion : public FunctionPass {
public:
static char ID;
ARM64AddressTypePromotion()
- : FunctionPass(ID), Func(NULL), ConsideredSExtType(NULL) {
+ : FunctionPass(ID), Func(nullptr), ConsideredSExtType(nullptr) {
initializeARM64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
}
@@ -344,7 +344,7 @@ ARM64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
SExtForOpnd->moveBefore(Inst);
Inst->setOperand(OpIdx, SExtForOpnd);
// If more sext are required, new instructions will have to be created.
- SExtForOpnd = NULL;
+ SExtForOpnd = nullptr;
}
if (SExtForOpnd == SExt) {
DEBUG(dbgs() << "Sign extension is useless now\n");
@@ -466,10 +466,10 @@ void ARM64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
if (insert || AlreadySeen != SeenChains.end()) {
DEBUG(dbgs() << "Insert\n");
SExtInsts.push_back(SExt);
- if (AlreadySeen != SeenChains.end() && AlreadySeen->second != NULL) {
+ if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) {
DEBUG(dbgs() << "Insert chain member\n");
SExtInsts.push_back(AlreadySeen->second);
- SeenChains[Last] = NULL;
+ SeenChains[Last] = nullptr;
}
} else {
DEBUG(dbgs() << "Record its chain membership\n");
diff --git a/llvm/lib/Target/ARM64/ARM64AsmPrinter.cpp b/llvm/lib/Target/ARM64/ARM64AsmPrinter.cpp
index 6692e662e1c..615cb2884d2 100644
--- a/llvm/lib/Target/ARM64/ARM64AsmPrinter.cpp
+++ b/llvm/lib/Target/ARM64/ARM64AsmPrinter.cpp
@@ -53,7 +53,7 @@ class ARM64AsmPrinter : public AsmPrinter {
public:
ARM64AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
: AsmPrinter(TM, Streamer), Subtarget(&TM.getSubtarget<ARM64Subtarget>()),
- MCInstLowering(OutContext, *Mang, *this), SM(*this), ARM64FI(NULL),
+ MCInstLowering(OutContext, *Mang, *this), SM(*this), ARM64FI(nullptr),
LOHLabelCounter(0) {}
virtual const char *getPassName() const { return "ARM64 Assembly Printer"; }
diff --git a/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp b/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
index abe1b4615d5..7b8a270b897 100644
--- a/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
+++ b/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
@@ -262,7 +262,7 @@ static const SetOfMachineInstr *getUses(const InstrToInstrs *sets, unsigned reg,
InstrToInstrs::const_iterator Res = sets[reg].find(&MI);
if (Res != sets[reg].end())
return &(Res->second);
- return NULL;
+ return nullptr;
}
/// Initialize the reaching definition algorithm:
@@ -335,7 +335,7 @@ static void initReachingDef(MachineFunction &MF,
// Do not register clobbered definition for no ADRP.
// This definition is not used anyway (otherwise register
// allocation is wrong).
- BBGen[Reg] = ADRPMode ? &MI : NULL;
+ BBGen[Reg] = ADRPMode ? &MI : nullptr;
BBKillSet.set(Reg);
}
}
@@ -451,7 +451,7 @@ static void finitReachingDef(BlockToSetOfInstrsPerColor &In,
static void reachingDef(MachineFunction &MF,
InstrToInstrs *ColorOpToReachedUses,
const MapRegToId &RegToId, bool ADRPMode = false,
- const MachineInstr *DummyOp = NULL) {
+ const MachineInstr *DummyOp = nullptr) {
// structures:
// For each basic block.
// Out: a set per color of definitions that reach the
@@ -784,7 +784,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs,
const InstrToInstrs *DefsPerColorToUses,
ARM64FunctionInfo &ARM64FI, const MapRegToId &RegToId,
const MachineDominatorTree *MDT) {
- SetOfMachineInstr *InvolvedInLOHs = NULL;
+ SetOfMachineInstr *InvolvedInLOHs = nullptr;
#ifdef DEBUG
SetOfMachineInstr InvolvedInLOHsStorage;
InvolvedInLOHs = &InvolvedInLOHsStorage;
@@ -837,7 +837,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs,
const MachineInstr *Def = *UseToDefs.find(Candidate)->second.begin();
// Record the elements of the chain.
const MachineInstr *L1 = Def;
- const MachineInstr *L2 = NULL;
+ const MachineInstr *L2 = nullptr;
unsigned ImmediateDefOpc = Def->getOpcode();
if (Def->getOpcode() != ARM64::ADRP) {
// Check the number of users of this node.
@@ -907,7 +907,7 @@ static void computeOthers(const InstrToInstrs &UseToDefs,
SmallVector<const MachineInstr *, 3> Args;
MCLOHType Kind;
if (isCandidateLoad(Candidate)) {
- if (L2 == NULL) {
+ if (!L2) {
// At this point, the candidate LOH indicates that the ldr instruction
// may use a direct access to the symbol. There is not such encoding
// for loads of byte and half.
@@ -1057,7 +1057,7 @@ bool ARM64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
if (RegToId.empty())
return false;
- MachineInstr *DummyOp = NULL;
+ MachineInstr *DummyOp = nullptr;
if (BasicBlockScopeOnly) {
const ARM64InstrInfo *TII =
static_cast<const ARM64InstrInfo *>(TM.getInstrInfo());
diff --git a/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp b/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
index 72848330bdc..16324ffa77b 100644
--- a/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
@@ -298,7 +298,7 @@ static bool parseCond(ArrayRef<MachineOperand> Cond, ARM64CC::CondCode &CC) {
MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
MachineBasicBlock::iterator I = MBB->getFirstTerminator();
if (I == MBB->end())
- return 0;
+ return nullptr;
// The terminator must be controlled by the flags.
if (!I->readsRegister(ARM64::CPSR)) {
switch (I->getOpcode()) {
@@ -311,7 +311,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
}
++NumCmpTermRejs;
DEBUG(dbgs() << "Flags not used by terminator: " << *I);
- return 0;
+ return nullptr;
}
// Now find the instruction controlling the terminator.
@@ -330,7 +330,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
if (I->getOperand(3).getImm() || !isUInt<5>(I->getOperand(2).getImm())) {
DEBUG(dbgs() << "Immediate out of range for ccmp: " << *I);
++NumImmRangeRejs;
- return 0;
+ return nullptr;
}
// Fall through.
case ARM64::SUBSWrr:
@@ -341,7 +341,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
return I;
DEBUG(dbgs() << "Can't convert compare with live destination: " << *I);
++NumLiveDstRejs;
- return 0;
+ return nullptr;
case ARM64::FCMPSrr:
case ARM64::FCMPDrr:
case ARM64::FCMPESrr:
@@ -359,17 +359,17 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
// besides the terminators.
DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
++NumMultCPSRUses;
- return 0;
+ return nullptr;
}
if (PRI.Clobbers) {
DEBUG(dbgs() << "Not convertible compare: " << *I);
++NumUnknCPSRDefs;
- return 0;
+ return nullptr;
}
}
DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
- return 0;
+ return nullptr;
}
/// Determine if all the instructions in MBB can safely
@@ -416,7 +416,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
- if (!I.isSafeToMove(TII, 0, DontMoveAcrossStore)) {
+ if (!I.isSafeToMove(TII, nullptr, DontMoveAcrossStore)) {
DEBUG(dbgs() << "Can't speculate: " << I);
return false;
}
@@ -435,7 +435,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
///
bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
Head = MBB;
- Tail = CmpBB = 0;
+ Tail = CmpBB = nullptr;
if (Head->succ_size() != 2)
return false;
@@ -495,7 +495,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
// The branch we're looking to eliminate must be analyzable.
HeadCond.clear();
- MachineBasicBlock *TBB = 0, *FBB = 0;
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
if (TII->AnalyzeBranch(*Head, TBB, FBB, HeadCond)) {
DEBUG(dbgs() << "Head branch not analyzable.\n");
++NumHeadBranchRejs;
@@ -523,7 +523,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
}
CmpBBCond.clear();
- TBB = FBB = 0;
+ TBB = FBB = nullptr;
if (TII->AnalyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
++NumCmpBranchRejs;
@@ -897,7 +897,7 @@ bool ARM64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
- MinInstr = 0;
+ MinInstr = nullptr;
MinSize = MF.getFunction()->getAttributes().hasAttribute(
AttributeSet::FunctionIndex, Attribute::MinSize);
diff --git a/llvm/lib/Target/ARM64/ARM64FastISel.cpp b/llvm/lib/Target/ARM64/ARM64FastISel.cpp
index ffd56adbf20..459c48030f4 100644
--- a/llvm/lib/Target/ARM64/ARM64FastISel.cpp
+++ b/llvm/lib/Target/ARM64/ARM64FastISel.cpp
@@ -303,7 +303,7 @@ unsigned ARM64FastISel::TargetMaterializeConstant(const Constant *C) {
// Computes the address to get to an object.
bool ARM64FastISel::ComputeAddress(const Value *Obj, Address &Addr) {
- const User *U = NULL;
+ const User *U = nullptr;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
// Don't walk into other basic blocks unless the object is an alloca from
@@ -1281,7 +1281,7 @@ bool ARM64FastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
}
bool ARM64FastISel::SelectCall(const Instruction *I,
- const char *IntrMemName = 0) {
+ const char *IntrMemName = nullptr) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = CI->getCalledValue();
diff --git a/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp b/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
index 43620ef8c38..986c2ad049f 100644
--- a/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
@@ -454,7 +454,7 @@ SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
if (Op1.getOpcode() != ISD::MUL ||
!checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
LaneIdx))
- return 0;
+ return nullptr;
}
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
@@ -490,7 +490,7 @@ SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
LaneIdx))
- return 0;
+ return nullptr;
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
@@ -852,7 +852,7 @@ SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isUnindexed())
- return NULL;
+ return nullptr;
EVT VT = LD->getMemoryVT();
EVT DstVT = N->getValueType(0);
ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -910,7 +910,7 @@ SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
} else if (VT == MVT::f64) {
Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
} else
- return NULL;
+ return nullptr;
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
@@ -929,7 +929,7 @@ SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
- return 0;
+ return nullptr;
}
return Res;
}
@@ -977,7 +977,7 @@ SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
- return 0;
+ return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
@@ -1371,7 +1371,7 @@ SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
unsigned Opc, LSB, MSB;
SDValue Opd0;
if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
- return NULL;
+ return nullptr;
EVT VT = N->getValueType(0);
@@ -1767,14 +1767,14 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
if (N->getOpcode() != ISD::OR)
- return NULL;
+ return nullptr;
unsigned Opc;
unsigned LSB, MSB;
SDValue Opd0, Opd1;
if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
- return NULL;
+ return nullptr;
EVT VT = N->getValueType(0);
SDValue Ops[] = { Opd0,
@@ -1795,14 +1795,14 @@ SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
} else if (VT == MVT::f64) {
Variant = 1;
} else
- return 0; // Unrecognized argument type. Fall back on default codegen.
+ return nullptr; // Unrecognized argument type. Fall back on default codegen.
// Pick the FRINTX variant needed to set the flags.
unsigned FRINTXOpc = FRINTXOpcs[Variant];
switch (N->getOpcode()) {
default:
- return 0; // Unrecognized libm ISD node. Fall back on default codegen.
+ return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
case ISD::FCEIL: {
unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
Opc = FRINTPOpcs[Variant];
@@ -1892,11 +1892,11 @@ SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return NULL;
+ return nullptr;
}
// Few custom selection stuff.
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
EVT VT = Node->getValueType(0);
switch (Node->getOpcode()) {
@@ -2455,7 +2455,7 @@ SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
ResNode = SelectCode(Node);
DEBUG(errs() << "=> ");
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
DEBUG(Node->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));
diff --git a/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp b/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
index 58e425938e1..37eccb1499c 100644
--- a/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
@@ -619,7 +619,7 @@ ARM64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const char *ARM64TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default:
- return 0;
+ return nullptr;
case ARM64ISD::CALL: return "ARM64ISD::CALL";
case ARM64ISD::ADRP: return "ARM64ISD::ADRP";
case ARM64ISD::ADDlow: return "ARM64ISD::ADDlow";
@@ -2565,7 +2565,7 @@ SDValue ARM64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (RHS.getNode() == 0) {
+ if (!RHS.getNode()) {
RHS = DAG.getConstant(0, LHS.getValueType());
CC = ISD::SETNE;
}
@@ -2815,7 +2815,7 @@ SDValue ARM64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
// If softenSetCCOperands returned a scalar, use it.
- if (RHS.getNode() == 0) {
+ if (!RHS.getNode()) {
assert(LHS.getValueType() == Op.getValueType() &&
"Unexpected setcc expansion!");
return LHS;
@@ -2939,7 +2939,7 @@ SDValue ARM64TargetLowering::LowerSELECT_CC(SDValue Op,
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
- if (RHS.getNode() == 0) {
+ if (!RHS.getNode()) {
RHS = DAG.getConstant(0, LHS.getValueType());
CC = ISD::SETNE;
}
@@ -3563,7 +3563,7 @@ ARM64TargetLowering::getSingleConstraintMatchWeight(
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -3617,7 +3617,7 @@ ARM64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
// Not found as a standard register?
- if (Res.second == 0) {
+ if (!Res.second) {
unsigned Size = Constraint.size();
if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
@@ -3642,7 +3642,7 @@ ARM64TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
void ARM64TargetLowering::LowerAsmOperandForConstraint(
SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
- SDValue Result(0, 0);
+ SDValue Result;
// Currently only support length 1 constraints.
if (Constraint.length() != 1)
diff --git a/llvm/lib/Target/ARM64/ARM64InstrInfo.cpp b/llvm/lib/Target/ARM64/ARM64InstrInfo.cpp
index 6a86723e04d..95b247a0a2f 100644
--- a/llvm/lib/Target/ARM64/ARM64InstrInfo.cpp
+++ b/llvm/lib/Target/ARM64/ARM64InstrInfo.cpp
@@ -260,7 +260,7 @@ unsigned ARM64InstrInfo::InsertBranch(
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- if (FBB == 0) {
+ if (!FBB) {
if (Cond.empty()) // Unconditional branch?
BuildMI(&MBB, DL, get(ARM64::B)).addMBB(TBB);
else
@@ -289,7 +289,7 @@ static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
- unsigned *NewVReg = 0) {
+ unsigned *NewVReg = nullptr) {
VReg = removeCopies(MRI, VReg);
if (!TargetRegisterInfo::isVirtualRegister(VReg))
return 0;
@@ -469,7 +469,7 @@ void ARM64InstrInfo::insertSelect(MachineBasicBlock &MBB,
}
unsigned Opc = 0;
- const TargetRegisterClass *RC = 0;
+ const TargetRegisterClass *RC = nullptr;
bool TryFold = false;
if (MRI.constrainRegClass(DstReg, &ARM64::GPR64RegClass)) {
RC = &ARM64::GPR64RegClass;
@@ -1667,16 +1667,16 @@ ARM64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
unsigned SrcReg = MI->getOperand(1).getReg();
if (SrcReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &ARM64::GPR64RegClass);
- return 0;
+ return nullptr;
}
if (DstReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(SrcReg)) {
MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass);
- return 0;
+ return nullptr;
}
}
// Cannot fold.
- return 0;
+ return nullptr;
}
int llvm::isARM64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
diff --git a/llvm/lib/Target/ARM64/ARM64PromoteConstant.cpp b/llvm/lib/Target/ARM64/ARM64PromoteConstant.cpp
index 6fc57505942..2eef90d6cfc 100644
--- a/llvm/lib/Target/ARM64/ARM64PromoteConstant.cpp
+++ b/llvm/lib/Target/ARM64/ARM64PromoteConstant.cpp
@@ -489,8 +489,8 @@ ARM64PromoteConstant::insertDefinitions(Constant *Cst,
ModuleToMergedGV.find(M);
if (MapIt == ModuleToMergedGV.end()) {
PromotedGV = new GlobalVariable(
- *M, Cst->getType(), true, GlobalValue::InternalLinkage, 0,
- "_PromotedConst", 0, GlobalVariable::NotThreadLocal);
+ *M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr,
+ "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
PromotedGV->setInitializer(Cst);
ModuleToMergedGV[M] = PromotedGV;
DEBUG(dbgs() << "Global replacement: ");
diff --git a/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp b/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
index 21d3d955700..aa7d9b70b50 100644
--- a/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
+++ b/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
@@ -136,7 +136,7 @@ ARM64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
const TargetRegisterClass *
ARM64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &ARM64::CCRRegClass)
- return NULL; // Can't copy CPSR.
+ return nullptr; // Can't copy CPSR.
return RC;
}
diff --git a/llvm/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp b/llvm/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp
index 49c3c0c1bf3..a087b407d97 100644
--- a/llvm/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/ARM64/ARM64SelectionDAGInfo.cpp
@@ -30,7 +30,7 @@ SDValue ARM64SelectionDAGInfo::EmitTargetCodeForMemset(
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
ConstantSDNode *SizeValue = dyn_cast<ConstantSDNode>(Size);
const char *bzeroEntry =
- (V && V->isNullValue()) ? Subtarget->getBZeroEntry() : 0;
+ (V && V->isNullValue()) ? Subtarget->getBZeroEntry() : nullptr;
// For small size (< 256), it is not beneficial to use bzero
// instead of memset.
if (bzeroEntry && (!SizeValue || SizeValue->getZExtValue() > 256)) {
diff --git a/llvm/lib/Target/ARM64/ARM64StorePairSuppress.cpp b/llvm/lib/Target/ARM64/ARM64StorePairSuppress.cpp
index 15b465da5ad..5416f11510c 100644
--- a/llvm/lib/Target/ARM64/ARM64StorePairSuppress.cpp
+++ b/llvm/lib/Target/ARM64/ARM64StorePairSuppress.cpp
@@ -126,7 +126,7 @@ bool ARM64StorePairSuppress::runOnMachineFunction(MachineFunction &mf) {
SchedModel.init(*ST.getSchedModel(), &ST, TII);
Traces = &getAnalysis<MachineTraceMetrics>();
- MinInstr = 0;
+ MinInstr = nullptr;
DEBUG(dbgs() << "*** " << getPassName() << ": " << MF->getName() << '\n');
diff --git a/llvm/lib/Target/ARM64/ARM64TargetTransformInfo.cpp b/llvm/lib/Target/ARM64/ARM64TargetTransformInfo.cpp
index 40228021e42..ac7142f3feb 100644
--- a/llvm/lib/Target/ARM64/ARM64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM64/ARM64TargetTransformInfo.cpp
@@ -45,7 +45,7 @@ class ARM64TTI final : public ImmutablePass, public TargetTransformInfo {
unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
public:
- ARM64TTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ ARM64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}
diff --git a/llvm/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp b/llvm/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp
index 0e57565baa0..5fe0acc59da 100644
--- a/llvm/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp
+++ b/llvm/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp
@@ -964,7 +964,7 @@ public:
return false;
if (Mem.Mode != ImmediateOffset)
return false;
- return Mem.OffsetImm == 0;
+ return Mem.OffsetImm == nullptr;
}
bool isMemoryIndexedSImm9() const {
if (!isMem() || Mem.Mode != ImmediateOffset)
@@ -1041,7 +1041,7 @@ public:
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
@@ -1688,7 +1688,7 @@ public:
ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
Op->Mem.BaseRegNum = BaseReg;
Op->Mem.OffsetRegNum = OffsetReg;
- Op->Mem.OffsetImm = 0;
+ Op->Mem.OffsetImm = nullptr;
Op->Mem.ExtType = ExtType;
Op->Mem.ShiftVal = ShiftVal;
Op->Mem.ExplicitShift = ExplicitShift;
@@ -2379,7 +2379,7 @@ bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
StringRef Op = Tok.getString();
SMLoc S = Tok.getLoc();
- const MCExpr *Expr = 0;
+ const MCExpr *Expr = nullptr;
#define SYS_ALIAS(op1, Cn, Cm, op2) \
do { \
@@ -2799,7 +2799,7 @@ ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
Parser.Lex(); // Eat right bracket token.
- Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
+ Operands.push_back(ARM64Operand::CreateMem(Reg, nullptr, S, E, E, getContext()));
return MatchOperand_Success;
}
@@ -2818,7 +2818,7 @@ bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
return Error(BaseRegTok.getLoc(), "register expected");
// If there is an offset expression, parse it.
- const MCExpr *OffsetExpr = 0;
+ const MCExpr *OffsetExpr = nullptr;
SMLoc OffsetLoc;
if (Parser.getTok().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
@@ -3848,7 +3848,7 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
const char *Repl = StringSwitch<const char *>(Tok)
.Case("cmp", "subs")
.Case("cmn", "adds")
- .Default(0);
+ .Default(nullptr);
assert(Repl && "Unknown compare instruction");
delete Operands[0];
Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
diff --git a/llvm/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp b/llvm/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp
index 4ce432372a0..2f8e516d185 100644
--- a/llvm/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp
+++ b/llvm/lib/Target/ARM64/Disassembler/ARM64ExternalSymbolizer.cpp
@@ -167,7 +167,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand(
}
}
- const MCExpr *Add = NULL;
+ const MCExpr *Add = nullptr;
if (SymbolicOp.AddSymbol.Present) {
if (SymbolicOp.AddSymbol.Name) {
StringRef Name(SymbolicOp.AddSymbol.Name);
@@ -182,7 +182,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand(
}
}
- const MCExpr *Sub = NULL;
+ const MCExpr *Sub = nullptr;
if (SymbolicOp.SubtractSymbol.Present) {
if (SymbolicOp.SubtractSymbol.Name) {
StringRef Name(SymbolicOp.SubtractSymbol.Name);
@@ -193,7 +193,7 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand(
}
}
- const MCExpr *Off = NULL;
+ const MCExpr *Off = nullptr;
if (SymbolicOp.Value != 0)
Off = MCConstantExpr::Create(SymbolicOp.Value, Ctx);
@@ -204,17 +204,17 @@ bool ARM64ExternalSymbolizer::tryAddingSymbolicOperand(
LHS = MCBinaryExpr::CreateSub(Add, Sub, Ctx);
else
LHS = MCUnaryExpr::CreateMinus(Sub, Ctx);
- if (Off != 0)
+ if (Off)
Expr = MCBinaryExpr::CreateAdd(LHS, Off, Ctx);
else
Expr = LHS;
} else if (Add) {
- if (Off != 0)
+ if (Off)
Expr = MCBinaryExpr::CreateAdd(Add, Off, Ctx);
else
Expr = Add;
} else {
- if (Off != 0)
+ if (Off)
Expr = Off;
else
Expr = MCConstantExpr::Create(0, Ctx);
diff --git a/llvm/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp b/llvm/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp
index 0dea241ed18..b683ae130c3 100644
--- a/llvm/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp
+++ b/llvm/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp
@@ -85,7 +85,7 @@ void ARM64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
if (Op2.isImm() && Op2.getImm() == 0 && Op3.isImm()) {
bool IsSigned = (Opcode == ARM64::SBFMXri || Opcode == ARM64::SBFMWri);
- const char *AsmMnemonic = 0;
+ const char *AsmMnemonic = nullptr;
switch (Op3.getImm()) {
default:
@@ -115,7 +115,7 @@ void ARM64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
// instruction. In all cases the immediate shift amount shift must be in
// the range 0 to (reg.size -1).
if (Op2.isImm() && Op3.isImm()) {
- const char *AsmMnemonic = 0;
+ const char *AsmMnemonic = nullptr;
int shift = 0;
int64_t immr = Op2.getImm();
int64_t imms = Op3.getImm();
@@ -693,7 +693,7 @@ static LdStNInstrDesc *getLdStNInstrDesc(unsigned Opcode) {
if (LdStNInstInfo[Idx].Opcode == Opcode)
return &LdStNInstInfo[Idx];
- return 0;
+ return nullptr;
}
void ARM64AppleInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
@@ -754,7 +754,7 @@ bool ARM64InstPrinter::printSysAlias(const MCInst *MI, raw_ostream &O) {
assert(Opcode == ARM64::SYSxt && "Invalid opcode for SYS alias!");
#endif
- const char *Asm = 0;
+ const char *Asm = nullptr;
const MCOperand &Op1 = MI->getOperand(0);
const MCOperand &Cn = MI->getOperand(1);
const MCOperand &Cm = MI->getOperand(2);
@@ -968,7 +968,7 @@ bool ARM64InstPrinter::printSysAlias(const MCInst *MI, raw_ostream &O) {
O << ", " << getRegisterName(Reg);
}
- return Asm != 0;
+ return Asm != nullptr;
}
void ARM64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
diff --git a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp
index 30dcda49ff6..9775a471f52 100644
--- a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp
+++ b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp
@@ -71,7 +71,7 @@ static MCAsmInfo *createARM64MCAsmInfo(const MCRegisterInfo &MRI,
// Initial state of the frame pointer is SP.
unsigned Reg = MRI.getDwarfRegNum(ARM64::SP, true);
- MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -119,7 +119,7 @@ static MCInstPrinter *createARM64MCInstPrinter(const Target &T,
if (SyntaxVariant == 1)
return new ARM64AppleInstPrinter(MAI, MII, MRI, STI);
- return 0;
+ return nullptr;
}
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
diff --git a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
index 9d2dcb6c5de..ba725069a37 100644
--- a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
+++ b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
@@ -241,14 +241,14 @@ void ARM64MachObjectWriter::RecordRelocation(
Asm.getContext().FatalError(Fixup.getLoc(),
"unsupported relocation with identical base");
- Value += (A_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress(
- &A_SD, Layout)) -
- (A_Base == NULL || A_Base->getFragment() == NULL
+ Value += (!A_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&A_SD, Layout)) -
+ (!A_Base || !A_Base->getFragment()
? 0
: Writer->getSymbolAddress(A_Base, Layout));
- Value -= (B_SD.getFragment() == NULL ? 0 : Writer->getSymbolAddress(
- &B_SD, Layout)) -
- (B_Base == NULL || B_Base->getFragment() == NULL
+ Value -= (!B_SD.getFragment() ? 0
+ : Writer->getSymbolAddress(&B_SD, Layout)) -
+ (!B_Base || !B_Base->getFragment()
? 0
: Writer->getSymbolAddress(B_Base, Layout));
@@ -302,7 +302,7 @@ void ARM64MachObjectWriter::RecordRelocation(
// have already been fixed up.
if (Symbol->isInSection()) {
if (Section.hasAttribute(MachO::S_ATTR_DEBUG))
- Base = 0;
+ Base = nullptr;
}
// ARM64 uses external relocations as much as possible. For debug sections,