Diffstat (limited to 'llvm/lib/Target/AArch64')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp              |  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp       | 28
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp                | 16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.h             |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp            |  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp            | 16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h              |  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp               |  4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.h                 | 16
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h       | 18
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MacroFusion.cpp             |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.h                 |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp     |  4
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp | 16
14 files changed, 71 insertions, 71 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 994b8436f94..7788fa5d3cc 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -71,7 +71,7 @@ public:
StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
- /// \brief Wrapper for MCInstLowering.lowerOperand() for the
+ /// Wrapper for MCInstLowering.lowerOperand() for the
/// tblgen'erated pseudo lowering.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
return MCInstLowering.lowerOperand(MO, MCOp);
@@ -88,7 +88,7 @@ public:
void EmitSled(const MachineInstr &MI, SledKind Kind);
- /// \brief tblgen'erated driver function for lowering simple MI->MC
+ /// tblgen'erated driver function for lowering simple MI->MC
/// pseudo instructions.
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
@@ -131,7 +131,7 @@ private:
AArch64FunctionInfo *AArch64FI = nullptr;
- /// \brief Emit the LOHs contained in AArch64FI.
+ /// Emit the LOHs contained in AArch64FI.
void EmitLOHs();
/// Emit instruction to set float register to zero.
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 90039adcb20..7992c8793d5 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -83,7 +83,7 @@ char AArch64ExpandPseudo::ID = 0;
INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
AARCH64_EXPAND_PSEUDO_NAME, false, false)
-/// \brief Transfer implicit operands on the pseudo instruction to the
+/// Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
MachineInstrBuilder &DefMI) {
@@ -99,7 +99,7 @@ static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
}
}
-/// \brief Helper function which extracts the specified 16-bit chunk from a
+/// Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
assert(ChunkIdx < 4 && "Out of range chunk index specified!");
@@ -107,7 +107,7 @@ static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}
-/// \brief Helper function which replicates a 16-bit chunk within a 64-bit
+/// Helper function which replicates a 16-bit chunk within a 64-bit
/// value. Indices correspond to element numbers in a v4i16.
static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!");
@@ -121,7 +121,7 @@ static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
return Imm | Chunk;
}
-/// \brief Helper function which tries to materialize a 64-bit value with an
+/// Helper function which tries to materialize a 64-bit value with an
/// ORR + MOVK instruction sequence.
static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
MachineBasicBlock &MBB,
@@ -158,7 +158,7 @@ static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
return false;
}
-/// \brief Check whether the given 16-bit chunk replicated to full 64-bit width
+/// Check whether the given 16-bit chunk replicated to full 64-bit width
/// can be materialized with an ORR instruction.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;
@@ -166,7 +166,7 @@ static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
}
-/// \brief Check for identical 16-bit chunks within the constant and if so
+/// Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
@@ -260,7 +260,7 @@ static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
return false;
}
-/// \brief Check whether this chunk matches the pattern '1...0...'. This pattern
+/// Check whether this chunk matches the pattern '1...0...'. This pattern
/// starts a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
static bool isStartChunk(uint64_t Chunk) {
@@ -270,7 +270,7 @@ static bool isStartChunk(uint64_t Chunk) {
return isMask_64(~Chunk);
}
-/// \brief Check whether this chunk matches the pattern '0...1...' This pattern
+/// Check whether this chunk matches the pattern '0...1...' This pattern
/// ends a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
static bool isEndChunk(uint64_t Chunk) {
@@ -280,7 +280,7 @@ static bool isEndChunk(uint64_t Chunk) {
return isMask_64(Chunk);
}
-/// \brief Clear or set all bits in the chunk at the given index.
+/// Clear or set all bits in the chunk at the given index.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
const uint64_t Mask = 0xFFFF;
@@ -294,7 +294,7 @@ static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
return Imm;
}
-/// \brief Check whether the constant contains a sequence of contiguous ones,
+/// Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction.
/// Materialize the chunks which are either interrupting the sequence or outside
@@ -423,7 +423,7 @@ static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
return true;
}
-/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
+/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -778,7 +778,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128(
return true;
}
-/// \brief If MBBI references a pseudo instruction that should be expanded here,
+/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -990,7 +990,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return false;
}
-/// \brief Iterate over the instructions in basic block MBB and expand any
+/// Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
bool Modified = false;
@@ -1014,7 +1014,7 @@ bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
return Modified;
}
-/// \brief Returns an instance of the pseudo instruction expansion pass.
+/// Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
return new AArch64ExpandPseudo();
}
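(Editorial note: the comments touched in this file describe the 64-bit immediate expansion in terms of four 16-bit chunks. As a rough illustration of the chunk helpers named above — a hedged sketch of the documented behaviour, not the pass's actual code — the manipulation amounts to the following.)

#include <cassert>
#include <cstdint>

// Extract the 16-bit chunk at index ChunkIdx (0 = least significant),
// as documented for getChunk() above.
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}

// Copy the chunk at FromIdx over the chunk at ToIdx, mirroring the
// replicateChunk() description (indices as in a v4i16).
static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
  assert(FromIdx < 4 && ToIdx < 4 && "Out of range chunk index specified!");
  const uint64_t Chunk = getChunk(Imm, FromIdx) << (ToIdx * 16);
  Imm &= ~(0xFFFFull << (ToIdx * 16)); // clear the destination chunk first
  return Imm | Chunk;
}

// Clear or set all bits of the chunk at Idx, as updateImm() is documented to do.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFFull << (Idx * 16);
  return Clear ? (Imm & ~Mask) : (Imm | Mask);
}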
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index b7738c3e33a..43a3ae77a17 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -307,7 +307,7 @@ public:
#include "AArch64GenCallingConv.inc"
-/// \brief Check if the sign-/zero-extend will be a noop.
+/// Check if the sign-/zero-extend will be a noop.
static bool isIntExtFree(const Instruction *I) {
assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
"Unexpected integer extend instruction.");
@@ -326,7 +326,7 @@ static bool isIntExtFree(const Instruction *I) {
return false;
}
-/// \brief Determine the implicit scale factor that is applied by a memory
+/// Determine the implicit scale factor that is applied by a memory
/// operation for a given value type.
static unsigned getImplicitScaleFactor(MVT VT) {
switch (VT.SimpleTy) {
@@ -535,7 +535,7 @@ unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
-/// \brief Check if the multiply is by a power-of-2 constant.
+/// Check if the multiply is by a power-of-2 constant.
static bool isMulPowOf2(const Value *I) {
if (const auto *MI = dyn_cast<MulOperator>(I)) {
if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
@@ -964,7 +964,7 @@ bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
return TLI.isTypeLegal(VT);
}
-/// \brief Determine if the value type is supported by FastISel.
+/// Determine if the value type is supported by FastISel.
///
/// FastISel for AArch64 can handle more value types than are legal. This adds
/// simple value types such as i1, i8, and i16.
@@ -1524,7 +1524,7 @@ unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
IsZExt);
}
-/// \brief This method is a wrapper to simplify add emission.
+/// This method is a wrapper to simplify add emission.
///
/// First try to emit an add with an immediate operand using emitAddSub_ri. If
/// that fails, then try to materialize the immediate into a register and use
@@ -2254,7 +2254,7 @@ static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
}
}
-/// \brief Try to emit a combined compare-and-branch instruction.
+/// Try to emit a combined compare-and-branch instruction.
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
const CmpInst *CI = cast<CmpInst>(BI->getCondition());
@@ -2607,7 +2607,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
return true;
}
-/// \brief Optimize selects of i1 if one of the operands has a 'true' or 'false'
+/// Optimize selects of i1 if one of the operands has a 'true' or 'false'
/// value.
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
if (!SI->getType()->isIntegerTy(1))
@@ -3322,7 +3322,7 @@ bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
return true;
}
-/// \brief Check if it is possible to fold the condition from the XALU intrinsic
+/// Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
const Instruction *I,
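(Editorial note on the getImplicitScaleFactor() comment above: AArch64 loads and stores implicitly scale their immediate offset by the access size, so the helper maps a value type to that size, with 0 for unsupported types. A minimal sketch of the idea, using a simplified type enum rather than MVT.)

// Simplified stand-in for MVT: only the cases this sketch handles.
enum class SimpleVT { i1, i8, i16, i32, i64, f32, f64, Other };

// Return the implicit scale factor a memory operation applies for a given
// value type, or 0 if the type is not supported (mirroring the documented
// behaviour of getImplicitScaleFactor()).
static unsigned getImplicitScaleFactor(SimpleVT VT) {
  switch (VT) {
  case SimpleVT::i1:
  case SimpleVT::i8:  return 1;
  case SimpleVT::i16: return 2;
  case SimpleVT::i32:
  case SimpleVT::f32: return 4;
  case SimpleVT::i64:
  case SimpleVT::f64: return 8;
  default:            return 0; // unsupported type
  }
}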
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index 55a256867fa..104e52b5f1f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -53,7 +53,7 @@ public:
std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
- /// \brief Can this function use the red zone for local allocations.
+ /// Can this function use the red zone for local allocations.
bool canUseRedZone(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index d44eee051aa..3124204fc59 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -336,7 +336,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
}
}
-/// \brief Determine whether it is worth it to fold SHL into the addressing
+/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
assert(V.getOpcode() == ISD::SHL && "invalid opcode");
@@ -360,7 +360,7 @@ static bool isWorthFoldingSHL(SDValue V) {
return true;
}
-/// \brief Determine whether it is worth to fold V into an extended register.
+/// Determine whether it is worth to fold V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
// Trivial if we are optimizing for code size or if there is only
// one use of the value.
@@ -826,7 +826,7 @@ static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
return SDValue(Node, 0);
}
-/// \brief Check if the given SHL node (\p N), can be used to form an
+/// Check if the given SHL node (\p N), can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
bool WantExtend, SDValue &Offset,
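(Editorial note: the isWorthFoldingSHL()/SelectExtendedSHL() comments above concern folding a left shift into a load/store addressing mode. The typical source of such an SHL is array indexing, as in this small illustrative C++ function, where the element-size shift can become part of a register-offset address instead of a separate instruction.)

#include <cstdint>

// Indexing an array of 8-byte elements computes Base + (Index << 3).
// On AArch64 that shift can usually be folded into the load's
// register-offset addressing mode rather than emitted separately.
uint64_t loadElement(const uint64_t *Base, uint64_t Index) {
  return Base[Index];
}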
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e12aeb46765..27dd4249770 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3778,7 +3778,7 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
return Result;
}
-/// \brief Convert a TLS address reference into the correct sequence of loads
+/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address (for Darwin, currently) and
/// return an SDValue containing the final node.
@@ -7863,7 +7863,7 @@ bool AArch64TargetLowering::isLegalInterleavedAccessType(
return VecSize == 64 || VecSize % 128 == 0;
}
-/// \brief Lower an interleaved load into a ldN intrinsic.
+/// Lower an interleaved load into a ldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
/// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
@@ -7975,7 +7975,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
return true;
}
-/// \brief Lower an interleaved store into a stN intrinsic.
+/// Lower an interleaved store into a stN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
/// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
@@ -9159,26 +9159,26 @@ static bool isEssentiallyExtractSubvector(SDValue N) {
N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR;
}
-/// \brief Helper structure to keep track of ISD::SET_CC operands.
+/// Helper structure to keep track of ISD::SET_CC operands.
struct GenericSetCCInfo {
const SDValue *Opnd0;
const SDValue *Opnd1;
ISD::CondCode CC;
};
-/// \brief Helper structure to keep track of a SET_CC lowered into AArch64 code.
+/// Helper structure to keep track of a SET_CC lowered into AArch64 code.
struct AArch64SetCCInfo {
const SDValue *Cmp;
AArch64CC::CondCode CC;
};
-/// \brief Helper structure to keep track of SetCC information.
+/// Helper structure to keep track of SetCC information.
union SetCCInfo {
GenericSetCCInfo Generic;
AArch64SetCCInfo AArch64;
};
-/// \brief Helper structure to be able to read SetCC information. If set to
+/// Helper structure to be able to read SetCC information. If the IsAArch64
/// field is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
/// GenericSetCCInfo.
struct SetCCInfoAndKind {
@@ -9186,7 +9186,7 @@ struct SetCCInfoAndKind {
bool IsAArch64;
};
-/// \brief Check whether or not \p Op is a SET_CC operation, either a generic or
+/// Check whether or not \p Op is a SET_CC operation, either a generic or
/// an AArch64 lowered one.
/// \p SetCCInfo is filled accordingly.
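(Editorial note: the SetCCInfo/SetCCInfoAndKind helpers above form a small tagged union, with IsAArch64 saying which union member is valid. A hedged, self-contained sketch of how such a result is read; the Info member name and the simplified field types are assumptions, since the hunk only shows IsAArch64 and the union itself.)

#include <cstdio>

// Simplified stand-ins for the structures in the hunk above; the real
// members hold SDValue pointers and condition codes.
struct GenericSetCCInfo { int Opnd0, Opnd1, CC; };
struct AArch64SetCCInfo { int Cmp, CC; };
union SetCCInfo { GenericSetCCInfo Generic; AArch64SetCCInfo AArch64; };

struct SetCCInfoAndKind {
  SetCCInfo Info;  // assumed member name; not visible in the hunk
  bool IsAArch64;  // discriminator: which union member may be read
};

// Only the member selected by IsAArch64 may legally be inspected.
void describe(const SetCCInfoAndKind &S) {
  if (S.IsAArch64)
    std::printf("AArch64-lowered SET_CC, cc=%d\n", S.Info.AArch64.CC);
  else
    std::printf("generic SET_CC, cc=%d\n", S.Info.Generic.CC);
}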
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 5754ed97380..e48fa95224c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -345,7 +345,7 @@ public:
unsigned AS,
Instruction *I = nullptr) const override;
- /// \brief Return the cost of the scaling factor used in the addressing
+ /// Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
/// If the AM is supported, the return value must be >= 0.
@@ -360,10 +360,10 @@ public:
const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
- /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
+ /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N) const override;
- /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
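(Editorial note: the isDesirableToCommuteWithShift() comment above refers to a "bit extraction pattern of (X >> C) & Mask". In source form that is simply a shift followed by a mask, which AArch64 can frequently match as a single bitfield-extract instruction; a tiny illustrative example follows.)

// The bit-extraction shape mentioned above: shift right, then mask.
// For a contiguous low Mask this commonly lowers to one UBFX on AArch64.
unsigned extractField(unsigned X, unsigned C, unsigned Mask) {
  return (X >> C) & Mask;
}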
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 53946ea873c..452bb13bf88 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1210,7 +1210,7 @@ static bool UpdateOperandRegClass(MachineInstr &Instr) {
return true;
}
-/// \brief Return the opcode that does not set flags when possible - otherwise
+/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
@@ -4643,7 +4643,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
DelInstrs.push_back(&Root);
}
-/// \brief Replace csincr-branch sequence by simple conditional branch
+/// Replace csincr-branch sequence by simple conditional branch
///
/// Examples:
/// 1. \code
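(Editorial note: earlier in this file's diff, convertToNonFlagSettingOpc() is documented as returning the non-flag-setting twin of an opcode when one exists. At the mnemonic level the mapping looks like the hedged sketch below; the real function maps AArch64 opcode enums, not strings.)

#include <string>

// Toy, mnemonic-level version of the documented behaviour: drop the
// flag-setting "S" variant when a non-flag-setting twin exists,
// otherwise return the original.
std::string toNonFlagSetting(const std::string &Mnemonic) {
  if (Mnemonic == "ADDS") return "ADD";
  if (Mnemonic == "SUBS") return "SUB";
  if (Mnemonic == "ANDS") return "AND";
  return Mnemonic; // caller is responsible for legality checking
}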
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index c517f970adc..33abba1f29d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -70,13 +70,13 @@ public:
/// value is non-zero.
static bool hasExtendedReg(const MachineInstr &MI);
- /// \brief Does this instruction set its full destination register to zero?
+ /// Does this instruction set its full destination register to zero?
static bool isGPRZero(const MachineInstr &MI);
- /// \brief Does this instruction rename a GPR without modifying bits?
+ /// Does this instruction rename a GPR without modifying bits?
static bool isGPRCopy(const MachineInstr &MI);
- /// \brief Does this instruction rename an FPR without modifying bits?
+ /// Does this instruction rename an FPR without modifying bits?
static bool isFPRCopy(const MachineInstr &MI);
/// Return true if this is a load/store that scales or extends its register offset.
@@ -100,7 +100,7 @@ public:
/// Return true if the given load or store may be paired with another.
static bool isPairableLdStInst(const MachineInstr &MI);
- /// \brief Return the opcode that set flags when possible. The caller is
+ /// Return the opcode that set flags when possible. The caller is
/// responsible for ensuring the opc has a flag setting equivalent.
static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
@@ -121,7 +121,7 @@ public:
/// Return the immediate offset of the base register in a load/store \p LdSt.
MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
- /// \brief Returns true if opcode \p Opc is a memory operation. If it is, set
+ /// Returns true if opcode \p Opc is a memory operation. If it is, set
/// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
///
/// For unscaled instructions, \p Scale is set to 1.
@@ -269,7 +269,7 @@ public:
bool isFalkorShiftExtFast(const MachineInstr &MI) const;
private:
- /// \brief Sets the offsets on outlined instructions in \p MBB which use SP
+ /// Sets the offsets on outlined instructions in \p MBB which use SP
/// so that they will be valid post-outlining.
///
/// \param MBB A \p MachineBasicBlock in an outlined function.
@@ -299,14 +299,14 @@ bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const AArch64InstrInfo *TII);
-/// \brief Use to report the frame offset status in isAArch64FrameOffsetLegal.
+/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
};
-/// \brief Check if the @p Offset is a valid frame offset for @p MI.
+/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
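(Editorial note: the AArch64FrameOffsetStatus values above are bit flags — only CannotUpdate is the literal zero — so callers of isAArch64FrameOffsetLegal() usually test them with a mask. A hedged sketch of that interpretation, reusing the enum exactly as shown.)

// Values copied from the enum above.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, // Offset cannot apply.
  AArch64FrameOffsetIsLegal      = 0x1, // Offset is legal.
  AArch64FrameOffsetCanUpdate    = 0x2  // Offset can apply, at least partly.
};

// Illustrative caller-side interpretation of a returned status word
// (hypothetical helper, not LLVM code).
const char *describeFrameOffsetStatus(int Status) {
  if (Status == AArch64FrameOffsetCannotUpdate)
    return "instruction cannot absorb any part of the offset";
  if (Status & AArch64FrameOffsetIsLegal)
    return "offset is fully legal for this instruction";
  if (Status & AArch64FrameOffsetCanUpdate)
    return "offset can be folded only partly; a remainder is left";
  return "unrecognized status";
}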
diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index e7feb021f52..798340f8fed 100644
--- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -49,33 +49,33 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
/// determineCalleeSaves().
bool HasStackFrame = false;
- /// \brief Amount of stack frame size, not including callee-saved registers.
+ /// Amount of stack frame size, not including callee-saved registers.
unsigned LocalStackSize;
- /// \brief Amount of stack frame size used for saving callee-saved registers.
+ /// Amount of stack frame size used for saving callee-saved registers.
unsigned CalleeSavedStackSize;
- /// \brief Number of TLS accesses using the special (combinable)
+ /// Number of TLS accesses using the special (combinable)
/// _TLS_MODULE_BASE_ symbol.
unsigned NumLocalDynamicTLSAccesses = 0;
- /// \brief FrameIndex for start of varargs area for arguments passed on the
+ /// FrameIndex for start of varargs area for arguments passed on the
/// stack.
int VarArgsStackIndex = 0;
- /// \brief FrameIndex for start of varargs area for arguments passed in
+ /// FrameIndex for start of varargs area for arguments passed in
/// general purpose registers.
int VarArgsGPRIndex = 0;
- /// \brief Size of the varargs area for arguments passed in general purpose
+ /// Size of the varargs area for arguments passed in general purpose
/// registers.
unsigned VarArgsGPRSize = 0;
- /// \brief FrameIndex for start of varargs area for arguments passed in
+ /// FrameIndex for start of varargs area for arguments passed in
/// floating-point registers.
int VarArgsFPRIndex = 0;
- /// \brief Size of the varargs area for arguments passed in floating-point
+ /// Size of the varargs area for arguments passed in floating-point
/// registers.
unsigned VarArgsFPRSize = 0;
@@ -91,7 +91,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
/// other stack allocations.
bool CalleeSaveStackHasFreeSpace = false;
- /// \brief Has a value when it is known whether or not the function uses a
+ /// Has a value when it is known whether or not the function uses a
/// redzone, and no value otherwise.
/// Initialized during frame lowering, unless the function has the noredzone
/// attribute, in which case it is set to false at construction.
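(Editorial note: the red-zone comment above describes a tri-state flag — unset until frame lowering decides, except that the noredzone attribute forces it to false at construction. A minimal sketch of that pattern using std::optional; illustrative only, since the actual member declaration is not visible in this hunk.)

#include <optional>

// Tri-state "does this function use a red zone?" flag, mirroring the
// behaviour documented above.
struct FunctionFrameFlags {
  std::optional<bool> HasRedZone; // no value = not decided yet

  explicit FunctionFrameFlags(bool HasNoRedZoneAttr) {
    if (HasNoRedZoneAttr)
      HasRedZone = false; // the attribute settles it at construction
  }

  // Frame lowering records its decision for the remaining functions.
  void recordRedZoneDecision(bool UsesRedZone) {
    if (!HasRedZone)
      HasRedZone = UsesRedZone;
  }
};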
diff --git a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
index b46509d1d65..bc0168e783b 100644
--- a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
@@ -255,7 +255,7 @@ static bool isCCSelectPair(const MachineInstr *FirstMI,
return false;
}
-/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
+/// Check if the instr pair, FirstMI and SecondMI, should be fused
/// together. Given SecondMI, when FirstMI is unspecified, then check if
/// SecondMI may be part of a fused pair at all.
static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index d58f50dd8d7..356e084e856 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -243,7 +243,7 @@ public:
bool hasFuseCCSelect() const { return HasFuseCCSelect; }
bool hasFuseLiterals() const { return HasFuseLiterals; }
- /// \brief Return true if the CPU supports any kind of instruction fusion.
+ /// Return true if the CPU supports any kind of instruction fusion.
bool hasFusion() const {
return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
hasFuseAES() || hasFuseCCSelect() || hasFuseLiterals();
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 337db546658..316ea048436 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -38,7 +38,7 @@ bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
return (CallerBits & CalleeBits) == CalleeBits;
}
-/// \brief Calculate the cost of materializing a 64-bit value. This helper
+/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
@@ -54,7 +54,7 @@ int AArch64TTIImpl::getIntImmCost(int64_t Val) {
return (64 - LZ + 15) / 16;
}
-/// \brief Calculate the cost of materializing the given constant.
+/// Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
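(Editorial note: the expression (64 - LZ + 15) / 16 visible in the hunk above rounds the number of significant bits up to whole 16-bit chunks, matching a cost model of one move-immediate instruction per chunk. A hedged worked example; LZ is taken to be the count of leading zero bits, and the real method has additional early-outs not shown here.)

#include <cstdint>
#include <cstdio>

// Apply the cost formula from the hunk above: leading zeros -> number of
// 16-bit chunks needed to cover the value's significant bits.
static int chunkCost(uint64_t Val) {
  int LZ = Val ? __builtin_clzll(Val) : 64; // GCC/Clang builtin
  return (64 - LZ + 15) / 16;
}

int main() {
  std::printf("%d\n", chunkCost(0x1234));                // 1 chunk  -> 1
  std::printf("%d\n", chunkCost(0x0000123456789ABCull)); // 3 chunks -> 3
  return 0;
}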
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 4d1d3fd5735..7eed296a1b1 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -97,7 +97,7 @@ public:
} // end anonymous namespace
-/// \brief The number of bytes the fixup may change.
+/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
switch (Kind) {
default:
@@ -381,20 +381,20 @@ namespace {
namespace CU {
-/// \brief Compact unwind encoding values.
+/// Compact unwind encoding values.
enum CompactUnwindEncodings {
- /// \brief A "frameless" leaf function, where no non-volatile registers are
+ /// A "frameless" leaf function, where no non-volatile registers are
/// saved. The return address remains in LR throughout the function.
UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,
- /// \brief No compact unwind encoding available. Instead the low 23-bits of
+ /// No compact unwind encoding available. Instead the low 23-bits of
/// the compact unwind encoding is the offset of the DWARF FDE in the
/// __eh_frame section. This mode is never used in object files. It is only
/// generated by the linker in final linked images, which have only DWARF info
/// for a function.
UNWIND_ARM64_MODE_DWARF = 0x03000000,
- /// \brief This is a standard arm64 prologue where FP/LR are immediately
+ /// This is a standard arm64 prologue where FP/LR are immediately
/// pushed on the stack, then SP is copied to FP. If there are any
/// non-volatile registers saved, they are copied into the stack frame in pairs
/// in a contiguous range right below the saved FP/LR pair. Any subset of the
@@ -402,7 +402,7 @@ enum CompactUnwindEncodings {
/// in register number order.
UNWIND_ARM64_MODE_FRAME = 0x04000000,
- /// \brief Frame register pair encodings.
+ /// Frame register pair encodings.
UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
@@ -420,7 +420,7 @@ enum CompactUnwindEncodings {
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
const MCRegisterInfo &MRI;
- /// \brief Encode compact unwind stack adjustment for frameless functions.
+ /// Encode compact unwind stack adjustment for frameless functions.
/// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
/// The stack size always needs to be 16 byte aligned.
uint32_t encodeStackAdjustment(uint32_t StackSize) const {
@@ -438,7 +438,7 @@ public:
MachO::CPU_SUBTYPE_ARM64_ALL);
}
- /// \brief Generate the compact unwind encoding from the CFI directives.
+ /// Generate the compact unwind encoding from the CFI directives.
uint32_t generateCompactUnwindEncoding(
ArrayRef<MCCFIInstruction> Instrs) const override {
if (Instrs.empty())
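(Editorial note: per the encodeStackAdjustment() comment above, the frameless compact unwind mode stores the 16-byte-aligned stack size, scaled down by 16, in the field selected by UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK. A minimal sketch under those assumptions; the mask value 0x00FFF000, i.e. a 12-bit field starting at bit 12, is taken from Darwin's compact_unwind_encoding.h and is not part of this diff.)

#include <cassert>
#include <cstdint>

// Assumed mask from compact_unwind_encoding.h (not shown in this diff).
constexpr uint32_t UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK = 0x00FFF000;

// Encode a 16-byte-aligned stack size into the frameless stack-size field,
// as the comment above describes.
uint32_t encodeStackAdjustment(uint32_t StackSize) {
  assert((StackSize & 0xF) == 0 && "stack size must be 16-byte aligned");
  const uint32_t Encoded = (StackSize / 16) << 12; // field starts at bit 12
  assert((Encoded & ~UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK) == 0 &&
         "stack size too large for the 12-bit field");
  return Encoded;
}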