Diffstat (limited to 'llvm/lib/CodeGen')
48 files changed, 371 insertions, 371 deletions
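Every hunk in this diff follows the same mechanical pattern: the Doxygen \brief command is dropped from a comment while the comment text itself is left unchanged, presumably because the project's Doxygen configuration (autobrief) already treats the first sentence of a comment as the brief description, making the explicit command redundant. A minimal before/after sketch of the pattern, using the getIndex() declaration from the AddressPool.h hunk below:

    class MCSymbol; // forward declaration so the snippet is self-contained

    // Before: an explicit \brief command introduces the brief description.
    /// \brief Returns the index into the address pool with the given
    /// label/symbol.
    unsigned getIndex(const MCSymbol *Sym, bool TLS = false);

    // After: the first sentence of the comment still serves as the brief.
    /// Returns the index into the address pool with the given
    /// label/symbol.
    unsigned getIndex(const MCSymbol *Sym, bool TLS = false);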
diff --git a/llvm/lib/CodeGen/AsmPrinter/AddressPool.h b/llvm/lib/CodeGen/AsmPrinter/AddressPool.h index 990a158d87c..5350006bf74 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AddressPool.h +++ b/llvm/lib/CodeGen/AsmPrinter/AddressPool.h @@ -39,7 +39,7 @@ class AddressPool { public: AddressPool() = default; - /// \brief Returns the index into the address pool with the given + /// Returns the index into the address pool with the given /// label/symbol. unsigned getIndex(const MCSymbol *Sym, bool TLS = false); diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index f81d6d04d16..a1af6fddda3 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1186,7 +1186,7 @@ void AsmPrinter::EmitFunctionBody() { OutStreamer->AddBlankLine(); } -/// \brief Compute the number of Global Variables that uses a Constant. +/// Compute the number of Global Variables that uses a Constant. static unsigned getNumGlobalVariableUses(const Constant *C) { if (!C) return 0; @@ -1201,7 +1201,7 @@ static unsigned getNumGlobalVariableUses(const Constant *C) { return NumUses; } -/// \brief Only consider global GOT equivalents if at least one user is a +/// Only consider global GOT equivalents if at least one user is a /// cstexpr inside an initializer of another global variables. Also, don't /// handle cstexpr inside instructions. During global variable emission, /// candidates are skipped and are emitted later in case at least one cstexpr @@ -1224,7 +1224,7 @@ static bool isGOTEquivalentCandidate(const GlobalVariable *GV, return NumGOTEquivUsers > 0; } -/// \brief Unnamed constant global variables solely contaning a pointer to +/// Unnamed constant global variables solely contaning a pointer to /// another globals variable is equivalent to a GOT table entry; it contains the /// the address of another symbol. Optimize it and replace accesses to these /// "GOT equivalents" by using the GOT entry for the final global instead. @@ -1245,7 +1245,7 @@ void AsmPrinter::computeGlobalGOTEquivs(Module &M) { } } -/// \brief Constant expressions using GOT equivalent globals may not be eligible +/// Constant expressions using GOT equivalent globals may not be eligible /// for PC relative GOT entry conversion, in such cases we need to emit such /// globals we previously omitted in EmitGlobalVariable. void AsmPrinter::emitGlobalGOTEquivs() { @@ -2405,7 +2405,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) { } } -/// \brief Transform a not absolute MCExpr containing a reference to a GOT +/// Transform a not absolute MCExpr containing a reference to a GOT /// equivalent global, by a target specific GOT pc relative access to the /// final symbol. static void handleIndirectSymViaGOTPCRel(AsmPrinter &AP, const MCExpr **ME, diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h index 638226e90a7..f5ac95a20b1 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h @@ -27,29 +27,29 @@ class MCSymbol; typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm); -/// \brief Collects and handles AsmPrinter objects required to build debug +/// Collects and handles AsmPrinter objects required to build debug /// or EH information. class AsmPrinterHandler { public: virtual ~AsmPrinterHandler(); - /// \brief For symbols that have a size designated (e.g. 
common symbols), + /// For symbols that have a size designated (e.g. common symbols), /// this tracks that size. virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0; - /// \brief Emit all sections that should come after the content. + /// Emit all sections that should come after the content. virtual void endModule() = 0; - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. /// Every beginFunction(MF) call should be followed by an endFunction(MF) /// call. virtual void beginFunction(const MachineFunction *MF) = 0; - // \brief Emit any of function marker (like .cfi_endproc). This is called + // Emit any of function marker (like .cfi_endproc). This is called // before endFunction and cannot switch sections. virtual void markFunctionEnd(); - /// \brief Gather post-function debug information. + /// Gather post-function debug information. /// Please note that some AsmPrinter implementations may not call /// beginFunction at all. virtual void endFunction(const MachineFunction *MF) = 0; @@ -58,15 +58,15 @@ public: ExceptionSymbolProvider ESP) {} virtual void endFragment() {} - /// \brief Emit target-specific EH funclet machinery. + /// Emit target-specific EH funclet machinery. virtual void beginFunclet(const MachineBasicBlock &MBB, MCSymbol *Sym = nullptr) {} virtual void endFunclet() {} - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction. virtual void beginInstruction(const MachineInstr *MI) = 0; - /// \brief Process end of an instruction. + /// Process end of an instruction. virtual void endInstruction() = 0; }; } // End of namespace llvm diff --git a/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h b/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h index e5941de69ff..2163cc7e3e1 100644 --- a/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h +++ b/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h @@ -76,7 +76,7 @@ private: SmallVectorImpl<char> &Buffer; SmallVectorImpl<std::string> &Comments; - /// \brief Only verbose textual output needs comments. This will be set to + /// Only verbose textual output needs comments. This will be set to /// true for that case, and false otherwise. If false, comments passed in to /// the emit methods will be ignored. bool GenerateComments; diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h index e16c035cdfd..395a6e37828 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h @@ -48,7 +48,7 @@ class MCStreamer; class MCSymbol; class MachineFunction; -/// \brief Collects and handles line tables information in a CodeView format. +/// Collects and handles line tables information in a CodeView format. class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase { MCStreamer &OS; BumpPtrAllocator Allocator; @@ -379,10 +379,10 @@ class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase { unsigned getPointerSizeInBytes(); protected: - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. void beginFunctionImpl(const MachineFunction *MF) override; - /// \brief Gather post-function debug information. + /// Gather post-function debug information. void endFunctionImpl(const MachineFunction *) override; public: @@ -390,10 +390,10 @@ public: void setSymbolSize(const MCSymbol *, uint64_t) override {} - /// \brief Emit the COFF section that holds the line table information. + /// Emit the COFF section that holds the line table information. 
void endModule() override; - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override; }; diff --git a/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp b/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp index 15ade3c96df..5392b7150a7 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp @@ -28,7 +28,7 @@ using namespace llvm; #define DEBUG_TYPE "dwarfdebug" -/// \brief Grabs the string in whichever attribute is passed in and returns +/// Grabs the string in whichever attribute is passed in and returns /// a reference to it. static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) { // Iterate through all the attributes until we find the one we're @@ -40,7 +40,7 @@ static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) { return StringRef(""); } -/// \brief Adds the string in \p Str to the hash. This also hashes +/// Adds the string in \p Str to the hash. This also hashes /// a trailing NULL with the string. void DIEHash::addString(StringRef Str) { DEBUG(dbgs() << "Adding string " << Str << " to hash.\n"); @@ -51,7 +51,7 @@ void DIEHash::addString(StringRef Str) { // FIXME: The LEB128 routines are copied and only slightly modified out of // LEB128.h. -/// \brief Adds the unsigned in \p Value to the hash encoded as a ULEB128. +/// Adds the unsigned in \p Value to the hash encoded as a ULEB128. void DIEHash::addULEB128(uint64_t Value) { DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n"); do { @@ -77,7 +77,7 @@ void DIEHash::addSLEB128(int64_t Value) { } while (More); } -/// \brief Including \p Parent adds the context of Parent to the hash.. +/// Including \p Parent adds the context of Parent to the hash.. void DIEHash::addParentContext(const DIE &Parent) { DEBUG(dbgs() << "Adding parent context to hash...\n"); diff --git a/llvm/lib/CodeGen/AsmPrinter/DIEHash.h b/llvm/lib/CodeGen/AsmPrinter/DIEHash.h index 29337ae38a9..85f2fea937f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DIEHash.h +++ b/llvm/lib/CodeGen/AsmPrinter/DIEHash.h @@ -23,7 +23,7 @@ namespace llvm { class AsmPrinter; class CompileUnit; -/// \brief An object containing the capability of hashing and adding hash +/// An object containing the capability of hashing and adding hash /// attributes onto a DIE. class DIEHash { // Collection of all attributes used in hashing a particular DIE. @@ -35,66 +35,66 @@ class DIEHash { public: DIEHash(AsmPrinter *A = nullptr) : AP(A) {} - /// \brief Computes the CU signature. + /// Computes the CU signature. uint64_t computeCUSignature(StringRef DWOName, const DIE &Die); - /// \brief Computes the type signature. + /// Computes the type signature. uint64_t computeTypeSignature(const DIE &Die); // Helper routines to process parts of a DIE. private: - /// \brief Adds the parent context of \param Die to the hash. + /// Adds the parent context of \param Die to the hash. void addParentContext(const DIE &Die); - /// \brief Adds the attributes of \param Die to the hash. + /// Adds the attributes of \param Die to the hash. void addAttributes(const DIE &Die); - /// \brief Computes the full DWARF4 7.27 hash of the DIE. + /// Computes the full DWARF4 7.27 hash of the DIE. void computeHash(const DIE &Die); // Routines that add DIEValues to the hash. public: - /// \brief Adds \param Value to the hash. + /// Adds \param Value to the hash. void update(uint8_t Value) { Hash.update(Value); } - /// \brief Encodes and adds \param Value to the hash as a ULEB128. 
+ /// Encodes and adds \param Value to the hash as a ULEB128. void addULEB128(uint64_t Value); - /// \brief Encodes and adds \param Value to the hash as a SLEB128. + /// Encodes and adds \param Value to the hash as a SLEB128. void addSLEB128(int64_t Value); private: - /// \brief Adds \param Str to the hash and includes a NULL byte. + /// Adds \param Str to the hash and includes a NULL byte. void addString(StringRef Str); - /// \brief Collects the attributes of DIE \param Die into the \param Attrs + /// Collects the attributes of DIE \param Die into the \param Attrs /// structure. void collectAttributes(const DIE &Die, DIEAttrs &Attrs); - /// \brief Hashes the attributes in \param Attrs in order. + /// Hashes the attributes in \param Attrs in order. void hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag); - /// \brief Hashes the data in a block like DIEValue, e.g. DW_FORM_block or + /// Hashes the data in a block like DIEValue, e.g. DW_FORM_block or /// DW_FORM_exprloc. void hashBlockData(const DIE::const_value_range &Values); - /// \brief Hashes the contents pointed to in the .debug_loc section. + /// Hashes the contents pointed to in the .debug_loc section. void hashLocList(const DIELocList &LocList); - /// \brief Hashes an individual attribute. + /// Hashes an individual attribute. void hashAttribute(const DIEValue &Value, dwarf::Tag Tag); - /// \brief Hashes an attribute that refers to another DIE. + /// Hashes an attribute that refers to another DIE. void hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag, const DIE &Entry); - /// \brief Hashes a reference to a named type in such a way that is + /// Hashes a reference to a named type in such a way that is /// independent of whether that type is described by a declaration or a /// definition. void hashShallowTypeReference(dwarf::Attribute Attribute, const DIE &Entry, StringRef Name); - /// \brief Hashes a reference to a previously referenced type DIE. + /// Hashes a reference to a previously referenced type DIE. void hashRepeatedTypeReference(dwarf::Attribute Attribute, unsigned DieNumber); diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp index 856758c8e4f..c6c661dddf9 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp @@ -31,7 +31,7 @@ using namespace llvm; #define DEBUG_TYPE "dwarfdebug" -// \brief If @MI is a DBG_VALUE with debug value described by a +// If @MI is a DBG_VALUE with debug value described by a // defined register, returns the number of this register. // In the other case, returns 0. static unsigned isDescribedByReg(const MachineInstr &MI) { @@ -86,7 +86,7 @@ using RegDescribedVarsMap = std::map<unsigned, SmallVector<InlinedVariable, 1>>; } // end anonymous namespace -// \brief Claim that @Var is not described by @RegNo anymore. +// Claim that @Var is not described by @RegNo anymore. static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, InlinedVariable Var) { const auto &I = RegVars.find(RegNo); @@ -100,7 +100,7 @@ static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, RegVars.erase(I); } -// \brief Claim that @Var is now described by @RegNo. +// Claim that @Var is now described by @RegNo. 
static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, InlinedVariable Var) { assert(RegNo != 0U); @@ -109,7 +109,7 @@ static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, VarSet.push_back(Var); } -// \brief Terminate the location range for variables described by register at +// Terminate the location range for variables described by register at // @I by inserting @ClobberingInstr to their history. static void clobberRegisterUses(RegDescribedVarsMap &RegVars, RegDescribedVarsMap::iterator I, @@ -122,7 +122,7 @@ static void clobberRegisterUses(RegDescribedVarsMap &RegVars, RegVars.erase(I); } -// \brief Terminate the location range for variables described by register +// Terminate the location range for variables described by register // @RegNo by inserting @ClobberingInstr to their history. static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo, DbgValueHistoryMap &HistMap, @@ -133,7 +133,7 @@ static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo, clobberRegisterUses(RegVars, I, HistMap, ClobberingInstr); } -// \brief Returns the first instruction in @MBB which corresponds to +// Returns the first instruction in @MBB which corresponds to // the function epilogue, or nullptr if @MBB doesn't contain an epilogue. static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) { auto LastMI = MBB.getLastNonDebugInstr(); @@ -155,7 +155,7 @@ static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) { return &*MBB.begin(); } -// \brief Collect registers that are modified in the function body (their +// Collect registers that are modified in the function body (their // contents is changed outside of the prologue and epilogue). static void collectChangingRegs(const MachineFunction *MF, const TargetRegisterInfo *TRI, diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h index 6dff45dce55..ac49657b68f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h @@ -21,7 +21,7 @@ namespace llvm { class AsmPrinter; -/// \brief This struct describes location entries emitted in the .debug_loc +/// This struct describes location entries emitted in the .debug_loc /// section. class DebugLocEntry { /// Begin and end symbols for the address range that this location is valid. @@ -29,7 +29,7 @@ class DebugLocEntry { const MCSymbol *End; public: - /// \brief A single location or constant. + /// A single location or constant. struct Value { Value(const DIExpression *Expr, int64_t i) : Expression(Expr), EntryKind(E_Integer) { @@ -106,13 +106,13 @@ public: Values.push_back(std::move(Val)); } - /// \brief If this and Next are describing different pieces of the same + /// If this and Next are describing different pieces of the same /// variable, merge them by appending Next's values to the current /// list of values. /// Return true if the merge was successful. bool MergeValues(const DebugLocEntry &Next); - /// \brief Attempt to merge this DebugLocEntry with Next and return + /// Attempt to merge this DebugLocEntry with Next and return /// true if the merge was successful. Entries can be merged if they /// share the same Loc/Constant and if Next immediately follows this /// Entry. @@ -136,7 +136,7 @@ public: }) && "value must be a piece"); } - // \brief Sort the pieces by offset. + // Sort the pieces by offset. // Remove any duplicate entries by dropping all but the first. 
void sortUniqueValues() { llvm::sort(Values.begin(), Values.end()); @@ -148,12 +148,12 @@ public: Values.end()); } - /// \brief Lower this entry into a DWARF expression. + /// Lower this entry into a DWARF expression. void finalize(const AsmPrinter &AP, DebugLocStream::ListBuilder &List, const DIBasicType *BT); }; -/// \brief Compare two Values for equality. +/// Compare two Values for equality. inline bool operator==(const DebugLocEntry::Value &A, const DebugLocEntry::Value &B) { if (A.EntryKind != B.EntryKind) diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h index 0c551dfff9c..8dcf5cbc188 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h @@ -22,7 +22,7 @@ class DwarfCompileUnit; class MachineInstr; class MCSymbol; -/// \brief Byte stream of .debug_loc entries. +/// Byte stream of .debug_loc entries. /// /// Stores a unified stream of .debug_loc entries. There's \a List for each /// variable/inlined-at pair, and an \a Entry for each \a DebugLocEntry. @@ -55,7 +55,7 @@ private: SmallString<256> DWARFBytes; SmallVector<std::string, 32> Comments; - /// \brief Only verbose textual output needs comments. This will be set to + /// Only verbose textual output needs comments. This will be set to /// true for that case, and false otherwise. bool GenerateComments; @@ -69,7 +69,7 @@ public: class EntryBuilder; private: - /// \brief Start a new .debug_loc entry list. + /// Start a new .debug_loc entry list. /// /// Start a new .debug_loc entry list. Return the new list's index so it can /// be retrieved later via \a getList(). @@ -89,7 +89,7 @@ private: /// \return false iff the list is deleted. bool finalizeList(AsmPrinter &Asm); - /// \brief Start a new .debug_loc entry. + /// Start a new .debug_loc entry. /// /// Until the next call, bytes added to the stream will be added to this /// entry. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h index 3325b1a345e..60821c2cc99 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h @@ -83,7 +83,7 @@ class DwarfCompileUnit final : public DwarfUnit { DenseMap<const MDNode *, DIE *> AbstractSPDies; DenseMap<const MDNode *, std::unique_ptr<DbgVariable>> AbstractVariables; - /// \brief Construct a DIE for the given DbgVariable without initializing the + /// Construct a DIE for the given DbgVariable without initializing the /// DbgVariable's DIE reference. DIE *constructVariableDIEImpl(const DbgVariable &DV, bool Abstract); @@ -159,7 +159,7 @@ public: void attachLowHighPC(DIE &D, const MCSymbol *Begin, const MCSymbol *End); - /// \brief Find DIE for the given subprogram and attach appropriate + /// Find DIE for the given subprogram and attach appropriate /// DW_AT_low_pc and DW_AT_high_pc attributes. If there are global /// variables in this scope then create and insert DIEs for these /// variables. @@ -168,7 +168,7 @@ public: void constructScopeDIE(LexicalScope *Scope, SmallVectorImpl<DIE *> &FinalChildren); - /// \brief A helper function to construct a RangeSpanList for a given + /// A helper function to construct a RangeSpanList for a given /// lexical scope. void addScopeRangeList(DIE &ScopeDIE, SmallVector<RangeSpan, 2> Range); @@ -177,11 +177,11 @@ public: void attachRangesOrLowHighPC(DIE &D, const SmallVectorImpl<InsnRange> &Ranges); - /// \brief This scope represents inlined body of a function. 
Construct + /// This scope represents inlined body of a function. Construct /// DIE to represent this concrete inlined copy of the function. DIE *constructInlinedScopeDIE(LexicalScope *Scope); - /// \brief Construct new DW_TAG_lexical_block for this scope and + /// Construct new DW_TAG_lexical_block for this scope and /// attach DW_AT_low_pc/DW_AT_high_pc labels. DIE *constructLexicalScopeDIE(LexicalScope *Scope); @@ -196,14 +196,14 @@ public: SmallVectorImpl<DIE *> &Children, bool *HasNonScopeChildren = nullptr); - /// \brief Construct a DIE for this subprogram scope. + /// Construct a DIE for this subprogram scope. void constructSubprogramScopeDIE(const DISubprogram *Sub, LexicalScope *Scope); DIE *createAndAddScopeChildren(LexicalScope *Scope, DIE &ScopeDIE); void constructAbstractSubprogramScopeDIE(LexicalScope *Scope); - /// \brief Construct import_module DIE. + /// Construct import_module DIE. DIE *constructImportedEntityDIE(const DIImportedEntity *Module); void finishSubprogramDefinition(const DISubprogram *SP); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index a8e36ec66e5..182fbd6a6ce 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -939,7 +939,7 @@ static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) { llvm_unreachable("Unexpected 4-operand DBG_VALUE instruction!"); } -/// \brief If this and Next are describing different fragments of the same +/// If this and Next are describing different fragments of the same /// variable, merge them by appending Next's values to the current /// list of values. /// Return true if the merge was successful. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h b/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h index 23ed043afb9..442b4fc14b5 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h @@ -74,30 +74,30 @@ public: return CUs; } - /// \brief Compute the size and offset of a DIE given an incoming Offset. + /// Compute the size and offset of a DIE given an incoming Offset. unsigned computeSizeAndOffset(DIE &Die, unsigned Offset); - /// \brief Compute the size and offset of all the DIEs. + /// Compute the size and offset of all the DIEs. void computeSizeAndOffsets(); - /// \brief Compute the size and offset of all the DIEs in the given unit. + /// Compute the size and offset of all the DIEs in the given unit. /// \returns The size of the root DIE. unsigned computeSizeAndOffsetsForUnit(DwarfUnit *TheU); - /// \brief Add a unit to the list of CUs. + /// Add a unit to the list of CUs. void addUnit(std::unique_ptr<DwarfCompileUnit> U); /// Emit the string table offsets header. void emitStringOffsetsTableHeader(MCSection *Section); - /// \brief Emit all of the units to the section listed with the given + /// Emit all of the units to the section listed with the given /// abbreviation section. void emitUnits(bool UseOffsets); - /// \brief Emit the given unit to its section. + /// Emit the given unit to its section. void emitUnit(DwarfUnit *U, bool UseOffsets); - /// \brief Emit a set of abbreviations to the specific section. + /// Emit a set of abbreviations to the specific section. void emitAbbrevs(MCSection *); /// Emit all of the strings to the section given. If OffsetSection is @@ -107,7 +107,7 @@ public: void emitStrings(MCSection *StrSection, MCSection *OffsetSection = nullptr, bool UseRelativeOffsets = false); - /// \brief Returns the string pool. + /// Returns the string pool. 
DwarfStringPool &getStringPool() { return StrPool; } MCSymbol *getStringOffsetsStartSym() const { return StringOffsetsStartSym; } diff --git a/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h b/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h index 553b4ae261c..124e8f04bfa 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h +++ b/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h @@ -29,23 +29,23 @@ public: void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {} - /// \brief Emit the Control Flow Guard function ID table + /// Emit the Control Flow Guard function ID table void endModule() override; - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. /// Every beginFunction(MF) call should be followed by an endFunction(MF) /// call. void beginFunction(const MachineFunction *MF) override {} - /// \brief Gather post-function debug information. + /// Gather post-function debug information. /// Please note that some AsmPrinter implementations may not call /// beginFunction at all. void endFunction(const MachineFunction *MF) override {} - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override {} - /// \brief Process end of an instruction. + /// Process end of an instruction. void endInstruction() override {} }; diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.h b/llvm/lib/CodeGen/AsmPrinter/WinException.h index 371061c2c2e..eed3c4453ff 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinException.h +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.h @@ -100,7 +100,7 @@ public: /// Gather and emit post-function exception information. void endFunction(const MachineFunction *) override; - /// \brief Emit target-specific EH funclet machinery. + /// Emit target-specific EH funclet machinery. void beginFunclet(const MachineBasicBlock &MBB, MCSymbol *Sym) override; void endFunclet() override; }; diff --git a/llvm/lib/CodeGen/BranchFolding.h b/llvm/lib/CodeGen/BranchFolding.h index 0f095255013..21e1e2e6134 100644 --- a/llvm/lib/CodeGen/BranchFolding.h +++ b/llvm/lib/CodeGen/BranchFolding.h @@ -132,7 +132,7 @@ class TargetRegisterInfo; LivePhysRegs LiveRegs; public: - /// \brief This class keeps track of branch frequencies of newly created + /// This class keeps track of branch frequencies of newly created /// blocks and tail-merged blocks. class MBFIWrapper { public: diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp index 5e60b7ae32f..1e30a08b9dc 100644 --- a/llvm/lib/CodeGen/BreakFalseDeps.cpp +++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp @@ -74,7 +74,7 @@ private: /// Also break dependencies on partial defs and undef uses. void processDefs(MachineInstr *MI); - /// \brief Helps avoid false dependencies on undef registers by updating the + /// Helps avoid false dependencies on undef registers by updating the /// machine instructions' undef operand to use a register that the instruction /// is truly dependent on, or use a register with clearance higher than Pref. /// Returns true if it was able to find a true dependency, thus not requiring @@ -82,11 +82,11 @@ private: bool pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx, unsigned Pref); - /// \brief Return true to if it makes sense to break dependence on a partial + /// Return true to if it makes sense to break dependence on a partial /// def or undef use. bool shouldBreakDependence(MachineInstr *, unsigned OpIdx, unsigned Pref); - /// \brief Break false dependencies on undefined register reads. 
+ /// Break false dependencies on undefined register reads. /// Walk the block backward computing precise liveness. This is expensive, so /// we only do it on demand. Note that the occurrence of undefined register /// reads that should be broken is very rare, but when they occur we may have diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 1b2bb60ed55..8729db47b7d 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -2022,11 +2022,11 @@ LLVM_DUMP_METHOD void ExtAddrMode::dump() const { namespace { -/// \brief This class provides transaction based operation on the IR. +/// This class provides transaction based operation on the IR. /// Every change made through this class is recorded in the internal state and /// can be undone (rollback) until commit is called. class TypePromotionTransaction { - /// \brief This represents the common interface of the individual transaction. + /// This represents the common interface of the individual transaction. /// Each class implements the logic for doing one specific modification on /// the IR via the TypePromotionTransaction. class TypePromotionAction { @@ -2035,20 +2035,20 @@ class TypePromotionTransaction { Instruction *Inst; public: - /// \brief Constructor of the action. + /// Constructor of the action. /// The constructor performs the related action on the IR. TypePromotionAction(Instruction *Inst) : Inst(Inst) {} virtual ~TypePromotionAction() = default; - /// \brief Undo the modification done by this action. + /// Undo the modification done by this action. /// When this method is called, the IR must be in the same state as it was /// before this action was applied. /// \pre Undoing the action works if and only if the IR is in the exact same /// state as it was directly after this action was applied. virtual void undo() = 0; - /// \brief Advocate every change made by this action. + /// Advocate every change made by this action. /// When the results on the IR of the action are to be kept, it is important /// to call this function, otherwise hidden information may be kept forever. virtual void commit() { @@ -2056,7 +2056,7 @@ class TypePromotionTransaction { } }; - /// \brief Utility to remember the position of an instruction. + /// Utility to remember the position of an instruction. class InsertionHandler { /// Position of an instruction. /// Either an instruction: @@ -2071,7 +2071,7 @@ class TypePromotionTransaction { bool HasPrevInstruction; public: - /// \brief Record the position of \p Inst. + /// Record the position of \p Inst. InsertionHandler(Instruction *Inst) { BasicBlock::iterator It = Inst->getIterator(); HasPrevInstruction = (It != (Inst->getParent()->begin())); @@ -2081,7 +2081,7 @@ class TypePromotionTransaction { Point.BB = Inst->getParent(); } - /// \brief Insert \p Inst at the recorded position. + /// Insert \p Inst at the recorded position. void insert(Instruction *Inst) { if (HasPrevInstruction) { if (Inst->getParent()) @@ -2097,27 +2097,27 @@ class TypePromotionTransaction { } }; - /// \brief Move an instruction before another. + /// Move an instruction before another. class InstructionMoveBefore : public TypePromotionAction { /// Original position of the instruction. InsertionHandler Position; public: - /// \brief Move \p Inst before \p Before. + /// Move \p Inst before \p Before. 
InstructionMoveBefore(Instruction *Inst, Instruction *Before) : TypePromotionAction(Inst), Position(Inst) { DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); Inst->moveBefore(Before); } - /// \brief Move the instruction back to its original position. + /// Move the instruction back to its original position. void undo() override { DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); Position.insert(Inst); } }; - /// \brief Set the operand of an instruction with a new value. + /// Set the operand of an instruction with a new value. class OperandSetter : public TypePromotionAction { /// Original operand of the instruction. Value *Origin; @@ -2126,7 +2126,7 @@ class TypePromotionTransaction { unsigned Idx; public: - /// \brief Set \p Idx operand of \p Inst with \p NewVal. + /// Set \p Idx operand of \p Inst with \p NewVal. OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) : TypePromotionAction(Inst), Idx(Idx) { DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" @@ -2136,7 +2136,7 @@ class TypePromotionTransaction { Inst->setOperand(Idx, NewVal); } - /// \brief Restore the original value of the instruction. + /// Restore the original value of the instruction. void undo() override { DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" @@ -2145,14 +2145,14 @@ class TypePromotionTransaction { } }; - /// \brief Hide the operands of an instruction. + /// Hide the operands of an instruction. /// Do as if this instruction was not using any of its operands. class OperandsHider : public TypePromotionAction { /// The list of original operands. SmallVector<Value *, 4> OriginalValues; public: - /// \brief Remove \p Inst from the uses of the operands of \p Inst. + /// Remove \p Inst from the uses of the operands of \p Inst. OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); unsigned NumOpnds = Inst->getNumOperands(); @@ -2168,7 +2168,7 @@ class TypePromotionTransaction { } } - /// \brief Restore the original list of uses. + /// Restore the original list of uses. void undo() override { DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) @@ -2176,12 +2176,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a truncate instruction. + /// Build a truncate instruction. class TruncBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a truncate instruction of \p Opnd producing a \p Ty + /// Build a truncate instruction of \p Opnd producing a \p Ty /// result. /// trunc Opnd to Ty. TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { @@ -2190,10 +2190,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2201,12 +2201,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a sign extension instruction. + /// Build a sign extension instruction. class SExtBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty + /// Build a sign extension instruction of \p Opnd producing a \p Ty /// result. /// sext Opnd to Ty. 
SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) @@ -2216,10 +2216,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2227,12 +2227,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a zero extension instruction. + /// Build a zero extension instruction. class ZExtBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty + /// Build a zero extension instruction of \p Opnd producing a \p Ty /// result. /// zext Opnd to Ty. ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) @@ -2242,10 +2242,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2253,13 +2253,13 @@ class TypePromotionTransaction { } }; - /// \brief Mutate an instruction to another type. + /// Mutate an instruction to another type. class TypeMutator : public TypePromotionAction { /// Record the original type. Type *OrigTy; public: - /// \brief Mutate the type of \p Inst into \p NewTy. + /// Mutate the type of \p Inst into \p NewTy. TypeMutator(Instruction *Inst, Type *NewTy) : TypePromotionAction(Inst), OrigTy(Inst->getType()) { DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy @@ -2267,7 +2267,7 @@ class TypePromotionTransaction { Inst->mutateType(NewTy); } - /// \brief Mutate the instruction back to its original type. + /// Mutate the instruction back to its original type. void undo() override { DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"); @@ -2275,7 +2275,7 @@ class TypePromotionTransaction { } }; - /// \brief Replace the uses of an instruction by another instruction. + /// Replace the uses of an instruction by another instruction. class UsesReplacer : public TypePromotionAction { /// Helper structure to keep track of the replaced uses. struct InstructionAndIdx { @@ -2295,7 +2295,7 @@ class TypePromotionTransaction { using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; public: - /// \brief Replace all the use of \p Inst by \p New. + /// Replace all the use of \p Inst by \p New. UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"); @@ -2308,7 +2308,7 @@ class TypePromotionTransaction { Inst->replaceAllUsesWith(New); } - /// \brief Reassign the original uses of Inst to Inst. + /// Reassign the original uses of Inst to Inst. void undo() override { DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); for (use_iterator UseIt = OriginalUses.begin(), @@ -2319,7 +2319,7 @@ class TypePromotionTransaction { } }; - /// \brief Remove an instruction from the IR. + /// Remove an instruction from the IR. class InstructionRemover : public TypePromotionAction { /// Original position of the instruction. 
InsertionHandler Inserter; @@ -2335,7 +2335,7 @@ class TypePromotionTransaction { SetOfInstrs &RemovedInsts; public: - /// \brief Remove all reference of \p Inst and optinally replace all its + /// Remove all reference of \p Inst and optinally replace all its /// uses with New. /// \p RemovedInsts Keep track of the instructions removed by this Action. /// \pre If !Inst->use_empty(), then New != nullptr @@ -2355,7 +2355,7 @@ class TypePromotionTransaction { ~InstructionRemover() override { delete Replacer; } - /// \brief Resurrect the instruction and reassign it to the proper uses if + /// Resurrect the instruction and reassign it to the proper uses if /// new value was provided when build this action. void undo() override { DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); @@ -2500,7 +2500,7 @@ void TypePromotionTransaction::rollback( namespace { -/// \brief A helper class for matching addressing modes. +/// A helper class for matching addressing modes. /// /// This encapsulates the logic for matching the target-legal addressing modes. class AddressingModeMatcher { @@ -2586,7 +2586,7 @@ private: Value *PromotedOperand) const; }; -/// \brief Keep track of simplification of Phi nodes. +/// Keep track of simplification of Phi nodes. /// Accept the set of all phi nodes and erase phi node from this set /// if it is simplified. class SimplificationTracker { @@ -2679,7 +2679,7 @@ public: } }; -/// \brief A helper class for combining addressing modes. +/// A helper class for combining addressing modes. class AddressingModeCombiner { typedef std::pair<Value *, BasicBlock *> ValueInBB; typedef DenseMap<ValueInBB, Value *> FoldAddrToValueMapping; @@ -2708,12 +2708,12 @@ public: AddressingModeCombiner(const SimplifyQuery &_SQ, ValueInBB OriginalValue) : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} - /// \brief Get the combined AddrMode + /// Get the combined AddrMode const ExtAddrMode &getAddrMode() const { return AddrModes[0]; } - /// \brief Add a new AddrMode if it's compatible with the AddrModes we already + /// Add a new AddrMode if it's compatible with the AddrModes we already /// have. /// \return True iff we succeeded in doing so. bool addNewAddrMode(ExtAddrMode &NewAddrMode) { @@ -2766,7 +2766,7 @@ public: return CanHandle; } - /// \brief Combine the addressing modes we've collected into a single + /// Combine the addressing modes we've collected into a single /// addressing mode. /// \return True iff we successfully combined them or we only had one so /// didn't need to combine them anyway. @@ -2801,7 +2801,7 @@ public: } private: - /// \brief Initialize Map with anchor values. For address seen in some BB + /// Initialize Map with anchor values. For address seen in some BB /// we set the value of different field saw in this address. /// If address is not an instruction than basic block is set to null. /// At the same time we find a common type for different field we will @@ -2834,7 +2834,7 @@ private: return true; } - /// \brief We have mapping between value A and basic block where value A + /// We have mapping between value A and basic block where value A /// seen to other value B where B was a field in addressing mode represented /// by A. Also we have an original value C representin an address in some /// basic block. Traversing from C through phi and selects we ended up with @@ -2894,7 +2894,7 @@ private: return Result; } - /// \brief Try to match PHI node to Candidate. + /// Try to match PHI node to Candidate. /// Matcher tracks the matched Phi nodes. 
bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, SmallSetVector<PHIPair, 8> &Matcher, @@ -2942,7 +2942,7 @@ private: return true; } - /// \brief For the given set of PHI nodes (in the SimplificationTracker) try + /// For the given set of PHI nodes (in the SimplificationTracker) try /// to find their equivalents. /// Returns false if this matching fails and creation of new Phi is disabled. bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, @@ -2990,7 +2990,7 @@ private: } return true; } - /// \brief Fill the placeholder with values from predecessors and simplify it. + /// Fill the placeholder with values from predecessors and simplify it. void FillPlaceholders(FoldAddrToValueMapping &Map, SmallVectorImpl<ValueInBB> &TraverseOrder, SimplificationTracker &ST) { @@ -3219,7 +3219,7 @@ static bool MightBeFoldableInst(Instruction *I) { } } -/// \brief Check whether or not \p Val is a legal instruction for \p TLI. +/// Check whether or not \p Val is a legal instruction for \p TLI. /// \note \p Val is assumed to be the product of some type promotion. /// Therefore if \p Val has an undefined state in \p TLI, this is assumed /// to be legal, as the non-promoted value would have had the same state. @@ -3239,9 +3239,9 @@ static bool isPromotedInstructionLegal(const TargetLowering &TLI, namespace { -/// \brief Hepler class to perform type promotion. +/// Hepler class to perform type promotion. class TypePromotionHelper { - /// \brief Utility function to check whether or not a sign or zero extension + /// Utility function to check whether or not a sign or zero extension /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by /// either using the operands of \p Inst or promoting \p Inst. /// The type of the extension is defined by \p IsSExt. @@ -3255,13 +3255,13 @@ class TypePromotionHelper { static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, const InstrToOrigTy &PromotedInsts, bool IsSExt); - /// \brief Utility function to determine if \p OpIdx should be promoted when + /// Utility function to determine if \p OpIdx should be promoted when /// promoting \p Inst. static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { return !(isa<SelectInst>(Inst) && OpIdx == 0); } - /// \brief Utility function to promote the operand of \p Ext when this + /// Utility function to promote the operand of \p Ext when this /// operand is a promotable trunc or sext or zext. /// \p PromotedInsts maps the instructions to their type before promotion. /// \p CreatedInstsCost[out] contains the cost of all instructions @@ -3276,7 +3276,7 @@ class TypePromotionHelper { SmallVectorImpl<Instruction *> *Exts, SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); - /// \brief Utility function to promote the operand of \p Ext when this + /// Utility function to promote the operand of \p Ext when this /// operand is promotable and is not a supported trunc or sext. /// \p PromotedInsts maps the instructions to their type before promotion. /// \p CreatedInstsCost[out] contains the cost of all the instructions @@ -3322,7 +3322,7 @@ public: SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); - /// \brief Given a sign/zero extend instruction \p Ext, return the approriate + /// Given a sign/zero extend instruction \p Ext, return the approriate /// action to promote the operand of \p Ext instead of using Ext. /// \return NULL if no promotable action is possible with the current /// sign extension. 
@@ -4585,7 +4585,7 @@ bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { return MadeChange; } -/// \brief Check if all the uses of \p Val are equivalent (or free) zero or +/// Check if all the uses of \p Val are equivalent (or free) zero or /// sign extensions. static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { assert(!Val->use_empty() && "Input must have at least one use"); @@ -4633,7 +4633,7 @@ static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { return true; } -/// \brief Try to speculatively promote extensions in \p Exts and continue +/// Try to speculatively promote extensions in \p Exts and continue /// promoting through newly promoted operands recursively as far as doing so is /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. /// When some promotion happened, \p TPT contains the proper state to revert @@ -5550,7 +5550,7 @@ bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { namespace { -/// \brief Helper class to promote a scalar operation to a vector one. +/// Helper class to promote a scalar operation to a vector one. /// This class is used to move downward extractelement transition. /// E.g., /// a = vector_op <2 x i32> @@ -5587,7 +5587,7 @@ class VectorPromoteHelper { /// Instruction that will be combined with the transition. Instruction *CombineInst = nullptr; - /// \brief The instruction that represents the current end of the transition. + /// The instruction that represents the current end of the transition. /// Since we are faking the promotion until we reach the end of the chain /// of computation, we need a way to get the current end of the transition. Instruction *getEndOfTransition() const { @@ -5596,7 +5596,7 @@ class VectorPromoteHelper { return InstsToBePromoted.back(); } - /// \brief Return the index of the original value in the transition. + /// Return the index of the original value in the transition. /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, /// c, is at index 0. unsigned getTransitionOriginalValueIdx() const { @@ -5605,7 +5605,7 @@ class VectorPromoteHelper { return 0; } - /// \brief Return the index of the index in the transition. + /// Return the index of the index in the transition. /// E.g., for "extractelement <2 x i32> c, i32 0" the index /// is at index 1. unsigned getTransitionIdx() const { @@ -5614,7 +5614,7 @@ class VectorPromoteHelper { return 1; } - /// \brief Get the type of the transition. + /// Get the type of the transition. /// This is the type of the original value. /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the /// transition is <2 x i32>. @@ -5622,7 +5622,7 @@ class VectorPromoteHelper { return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); } - /// \brief Promote \p ToBePromoted by moving \p Def downward through. + /// Promote \p ToBePromoted by moving \p Def downward through. /// I.e., we have the following sequence: /// Def = Transition <ty1> a to <ty2> /// b = ToBePromoted <ty2> Def, ... @@ -5631,7 +5631,7 @@ class VectorPromoteHelper { /// Def = Transition <ty1> ToBePromoted to <ty2> void promoteImpl(Instruction *ToBePromoted); - /// \brief Check whether or not it is profitable to promote all the + /// Check whether or not it is profitable to promote all the /// instructions enqueued to be promoted. 
bool isProfitableToPromote() { Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); @@ -5682,7 +5682,7 @@ class VectorPromoteHelper { return ScalarCost > VectorCost; } - /// \brief Generate a constant vector with \p Val with the same + /// Generate a constant vector with \p Val with the same /// number of elements as the transition. /// \p UseSplat defines whether or not \p Val should be replicated /// across the whole vector. @@ -5717,7 +5717,7 @@ class VectorPromoteHelper { return ConstantVector::get(ConstVec); } - /// \brief Check if promoting to a vector type an operand at \p OperandIdx + /// Check if promoting to a vector type an operand at \p OperandIdx /// in \p Use can trigger undefined behavior. static bool canCauseUndefinedBehavior(const Instruction *Use, unsigned OperandIdx) { @@ -5749,13 +5749,13 @@ public: assert(Transition && "Do not know how to promote null"); } - /// \brief Check if we can promote \p ToBePromoted to \p Type. + /// Check if we can promote \p ToBePromoted to \p Type. bool canPromote(const Instruction *ToBePromoted) const { // We could support CastInst too. return isa<BinaryOperator>(ToBePromoted); } - /// \brief Check if it is profitable to promote \p ToBePromoted + /// Check if it is profitable to promote \p ToBePromoted /// by moving downward the transition through. bool shouldPromote(const Instruction *ToBePromoted) const { // Promote only if all the operands can be statically expanded. @@ -5783,23 +5783,23 @@ public: ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); } - /// \brief Check whether or not \p Use can be combined + /// Check whether or not \p Use can be combined /// with the transition. /// I.e., is it possible to do Use(Transition) => AnotherUse? bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } - /// \brief Record \p ToBePromoted as part of the chain to be promoted. + /// Record \p ToBePromoted as part of the chain to be promoted. void enqueueForPromotion(Instruction *ToBePromoted) { InstsToBePromoted.push_back(ToBePromoted); } - /// \brief Set the instruction that will be combined with the transition. + /// Set the instruction that will be combined with the transition. void recordCombineInstruction(Instruction *ToBeCombined) { assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); CombineInst = ToBeCombined; } - /// \brief Promote all the instructions enqueued for promotion if it is + /// Promote all the instructions enqueued for promotion if it is /// is profitable. /// \return True if the promotion happened, false otherwise. bool promote() { @@ -6420,7 +6420,7 @@ bool CodeGenPrepare::placeDbgValues(Function &F) { return MadeChange; } -/// \brief Scale down both weights to fit into uint32_t. +/// Scale down both weights to fit into uint32_t. static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { uint64_t NewMax = (NewTrue > NewFalse) ? 
NewTrue : NewFalse; uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; @@ -6428,7 +6428,7 @@ static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { NewFalse = NewFalse / Scale; } -/// \brief Some targets prefer to split a conditional branch like: +/// Some targets prefer to split a conditional branch like: /// \code /// %0 = icmp ne i32 %a, 0 /// %1 = icmp ne i32 %b, 0 diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp index ea33ea4b3bf..be4ba4d75a5 100644 --- a/llvm/lib/CodeGen/GlobalMerge.cpp +++ b/llvm/lib/CodeGen/GlobalMerge.cpp @@ -159,13 +159,13 @@ namespace { bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals, Module &M, bool isConst, unsigned AddrSpace) const; - /// \brief Merge everything in \p Globals for which the corresponding bit + /// Merge everything in \p Globals for which the corresponding bit /// in \p GlobalSet is set. bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals, const BitVector &GlobalSet, Module &M, bool isConst, unsigned AddrSpace) const; - /// \brief Check if the given variable has been identified as must keep + /// Check if the given variable has been identified as must keep /// \pre setMustKeepGlobalVariables must have been called on the Module that /// contains GV bool isMustKeepGlobalVariable(const GlobalVariable *GV) const { diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp index 9c906d30963..e3dc9649473 100644 --- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp +++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp @@ -104,15 +104,15 @@ private: /// The maximum supported interleave factor. unsigned MaxFactor; - /// \brief Transform an interleaved load into target specific intrinsics. + /// Transform an interleaved load into target specific intrinsics. bool lowerInterleavedLoad(LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts); - /// \brief Transform an interleaved store into target specific intrinsics. + /// Transform an interleaved store into target specific intrinsics. bool lowerInterleavedStore(StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts); - /// \brief Returns true if the uses of an interleaved load by the + /// Returns true if the uses of an interleaved load by the /// extractelement instructions in \p Extracts can be replaced by uses of the /// shufflevector instructions in \p Shuffles instead. If so, the necessary /// replacements are also performed. @@ -136,7 +136,7 @@ FunctionPass *llvm::createInterleavedAccessPass() { return new InterleavedAccess(); } -/// \brief Check if the mask is a DE-interleave mask of the given factor +/// Check if the mask is a DE-interleave mask of the given factor /// \p Factor like: /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor> static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor, @@ -158,7 +158,7 @@ static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor, return false; } -/// \brief Check if the mask is a DE-interleave mask for an interleaved load. +/// Check if the mask is a DE-interleave mask for an interleaved load. /// /// E.g. DE-interleave masks (Factor = 2) could be: /// <0, 2, 4, 6> (mask of index 0 to extract even elements) @@ -176,7 +176,7 @@ static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor, return false; } -/// \brief Check if the mask can be used in an interleaved store. +/// Check if the mask can be used in an interleaved store. // /// It checks for a more general pattern than the RE-interleave mask. 
/// I.e. <x, y, ... z, x+1, y+1, ...z+1, x+2, y+2, ...z+2, ...> diff --git a/llvm/lib/CodeGen/LiveDebugValues.cpp b/llvm/lib/CodeGen/LiveDebugValues.cpp index 7b224473c5a..0554908584e 100644 --- a/llvm/lib/CodeGen/LiveDebugValues.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues.cpp @@ -65,7 +65,7 @@ using namespace llvm; STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted"); -// \brief If @MI is a DBG_VALUE with debug value described by a defined +// If @MI is a DBG_VALUE with debug value described by a defined // register, returns the number of this register. In the other case, returns 0. static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) { assert(MI.isDebugValue() && "expected a DBG_VALUE"); diff --git a/llvm/lib/CodeGen/LivePhysRegs.cpp b/llvm/lib/CodeGen/LivePhysRegs.cpp index b0cc62d5099..86c6c8e29f9 100644 --- a/llvm/lib/CodeGen/LivePhysRegs.cpp +++ b/llvm/lib/CodeGen/LivePhysRegs.cpp @@ -24,7 +24,7 @@ using namespace llvm; -/// \brief Remove all registers from the set that get clobbered by the register +/// Remove all registers from the set that get clobbered by the register /// mask. /// The clobbers set will be the list of live registers clobbered /// by the regmask. diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp index 167135b56ec..ec43097c23b 100644 --- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -198,10 +198,10 @@ namespace { class BlockChain; -/// \brief Type for our function-wide basic block -> block chain mapping. +/// Type for our function-wide basic block -> block chain mapping. using BlockToChainMapType = DenseMap<const MachineBasicBlock *, BlockChain *>; -/// \brief A chain of blocks which will be laid out contiguously. +/// A chain of blocks which will be laid out contiguously. /// /// This is the datastructure representing a chain of consecutive blocks that /// are profitable to layout together in order to maximize fallthrough @@ -213,13 +213,13 @@ using BlockToChainMapType = DenseMap<const MachineBasicBlock *, BlockChain *>; /// them. They participate in a block-to-chain mapping, which is updated /// automatically as chains are merged together. class BlockChain { - /// \brief The sequence of blocks belonging to this chain. + /// The sequence of blocks belonging to this chain. /// /// This is the sequence of blocks for a particular chain. These will be laid /// out in-order within the function. SmallVector<MachineBasicBlock *, 4> Blocks; - /// \brief A handle to the function-wide basic block to block chain mapping. + /// A handle to the function-wide basic block to block chain mapping. /// /// This is retained in each block chain to simplify the computation of child /// block chains for SCC-formation and iteration. We store the edges to child @@ -228,7 +228,7 @@ class BlockChain { BlockToChainMapType &BlockToChain; public: - /// \brief Construct a new BlockChain. + /// Construct a new BlockChain. /// /// This builds a new block chain representing a single basic block in the /// function. It also registers itself as the chain that block participates @@ -239,15 +239,15 @@ public: BlockToChain[BB] = this; } - /// \brief Iterator over blocks within the chain. + /// Iterator over blocks within the chain. using iterator = SmallVectorImpl<MachineBasicBlock *>::iterator; using const_iterator = SmallVectorImpl<MachineBasicBlock *>::const_iterator; - /// \brief Beginning of blocks within the chain. + /// Beginning of blocks within the chain. 
iterator begin() { return Blocks.begin(); } const_iterator begin() const { return Blocks.begin(); } - /// \brief End of blocks within the chain. + /// End of blocks within the chain. iterator end() { return Blocks.end(); } const_iterator end() const { return Blocks.end(); } @@ -261,7 +261,7 @@ public: return false; } - /// \brief Merge a block chain into this one. + /// Merge a block chain into this one. /// /// This routine merges a block chain into this one. It takes care of forming /// a contiguous sequence of basic blocks, updating the edge list, and @@ -293,14 +293,14 @@ public: } #ifndef NDEBUG - /// \brief Dump the blocks in this chain. + /// Dump the blocks in this chain. LLVM_DUMP_METHOD void dump() { for (MachineBasicBlock *MBB : *this) MBB->dump(); } #endif // NDEBUG - /// \brief Count of predecessors of any block within the chain which have not + /// Count of predecessors of any block within the chain which have not /// yet been scheduled. In general, we will delay scheduling this chain /// until those predecessors are scheduled (or we find a sufficiently good /// reason to override this heuristic.) Note that when forming loop chains, @@ -313,7 +313,7 @@ public: }; class MachineBlockPlacement : public MachineFunctionPass { - /// \brief A type for a block filter set. + /// A type for a block filter set. using BlockFilterSet = SmallSetVector<const MachineBasicBlock *, 16>; /// Pair struct containing basic block and taildup profitiability @@ -329,47 +329,47 @@ class MachineBlockPlacement : public MachineFunctionPass { MachineBasicBlock *Dest; }; - /// \brief work lists of blocks that are ready to be laid out + /// work lists of blocks that are ready to be laid out SmallVector<MachineBasicBlock *, 16> BlockWorkList; SmallVector<MachineBasicBlock *, 16> EHPadWorkList; /// Edges that have already been computed as optimal. DenseMap<const MachineBasicBlock *, BlockAndTailDupResult> ComputedEdges; - /// \brief Machine Function + /// Machine Function MachineFunction *F; - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. const MachineBranchProbabilityInfo *MBPI; - /// \brief A handle to the function-wide block frequency pass. + /// A handle to the function-wide block frequency pass. std::unique_ptr<BranchFolder::MBFIWrapper> MBFI; - /// \brief A handle to the loop info. + /// A handle to the loop info. MachineLoopInfo *MLI; - /// \brief Preferred loop exit. + /// Preferred loop exit. /// Member variable for convenience. It may be removed by duplication deep /// in the call stack. MachineBasicBlock *PreferredLoopExit; - /// \brief A handle to the target's instruction info. + /// A handle to the target's instruction info. const TargetInstrInfo *TII; - /// \brief A handle to the target's lowering info. + /// A handle to the target's lowering info. const TargetLoweringBase *TLI; - /// \brief A handle to the post dominator tree. + /// A handle to the post dominator tree. MachinePostDominatorTree *MPDT; - /// \brief Duplicator used to duplicate tails during placement. + /// Duplicator used to duplicate tails during placement. /// /// Placement decisions can open up new tail duplication opportunities, but /// since tail duplication affects placement decisions of later blocks, it /// must be done inline. TailDuplicator TailDup; - /// \brief Allocator and owner of BlockChain structures. + /// Allocator and owner of BlockChain structures. /// /// We build BlockChains lazily while processing the loop structure of /// a function. 
To reduce malloc traffic, we allocate them using this @@ -378,7 +378,7 @@ class MachineBlockPlacement : public MachineFunctionPass { /// the chains. SpecificBumpPtrAllocator<BlockChain> ChainAllocator; - /// \brief Function wide BasicBlock to BlockChain mapping. + /// Function wide BasicBlock to BlockChain mapping. /// /// This mapping allows efficiently moving from any given basic block to the /// BlockChain it participates in, if any. We use it to, among other things, @@ -441,7 +441,7 @@ class MachineBlockPlacement : public MachineFunctionPass { MachineFunction::iterator &PrevUnplacedBlockIt, const BlockFilterSet *BlockFilter); - /// \brief Add a basic block to the work list if it is appropriate. + /// Add a basic block to the work list if it is appropriate. /// /// If the optional parameter BlockFilter is provided, only MBB /// present in the set will be added to the worklist. If nullptr @@ -545,7 +545,7 @@ INITIALIZE_PASS_END(MachineBlockPlacement, DEBUG_TYPE, "Branch Probability Basic Block Placement", false, false) #ifndef NDEBUG -/// \brief Helper to print the name of a MBB. +/// Helper to print the name of a MBB. /// /// Only used by debug logging. static std::string getBlockName(const MachineBasicBlock *BB) { @@ -558,7 +558,7 @@ static std::string getBlockName(const MachineBasicBlock *BB) { } #endif -/// \brief Mark a chain's successors as having one fewer preds. +/// Mark a chain's successors as having one fewer preds. /// /// When a chain is being merged into the "placed" chain, this routine will /// quickly walk the successors of each block in the chain and mark them as @@ -574,7 +574,7 @@ void MachineBlockPlacement::markChainSuccessors( } } -/// \brief Mark a single block's successors as having one fewer preds. +/// Mark a single block's successors as having one fewer preds. /// /// Under normal circumstances, this is only called by markChainSuccessors, /// but if a block that was to be placed is completely tail-duplicated away, @@ -1439,7 +1439,7 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor( return false; } -/// \brief Select the best successor for a block. +/// Select the best successor for a block. /// /// This looks across all successors of a particular block and attempts to /// select the "best" one to be the layout successor. It only considers direct @@ -1555,7 +1555,7 @@ MachineBlockPlacement::selectBestSuccessor( return BestSucc; } -/// \brief Select the best block from a worklist. +/// Select the best block from a worklist. /// /// This looks through the provided worklist as a list of candidate basic /// blocks and select the most profitable one to place. The definition of @@ -1627,7 +1627,7 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock( return BestBlock; } -/// \brief Retrieve the first unplaced basic block. +/// Retrieve the first unplaced basic block. /// /// This routine is called when we are unable to use the CFG to walk through /// all of the basic blocks and form a chain due to unnatural loops in the CFG. @@ -1754,7 +1754,7 @@ void MachineBlockPlacement::buildChain( << getBlockName(*Chain.begin()) << "\n"); } -/// \brief Find the best loop top block for layout. +/// Find the best loop top block for layout. /// /// Look for a block which is strictly better than the loop header for laying /// out at the top of the loop. This looks for one and only one pattern: @@ -1823,7 +1823,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L, return BestPred; } -/// \brief Find the best loop exiting block for layout. 
+/// Find the best loop exiting block for layout. /// /// This routine implements the logic to analyze the loop looking for the best /// block to layout at the top of the loop. Typically this is done to maximize @@ -1941,7 +1941,7 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L, return ExitingBB; } -/// \brief Attempt to rotate an exiting block to the bottom of the loop. +/// Attempt to rotate an exiting block to the bottom of the loop. /// /// Once we have built a chain, try to rotate it to line up the hot exit block /// with fallthrough out of the loop if doing so doesn't introduce unnecessary @@ -2019,7 +2019,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end()); } -/// \brief Attempt to rotate a loop based on profile data to reduce branch cost. +/// Attempt to rotate a loop based on profile data to reduce branch cost. /// /// With profile data, we can determine the cost in terms of missed fall through /// opportunities when rotating a loop chain and select the best rotation. @@ -2166,7 +2166,7 @@ void MachineBlockPlacement::rotateLoopWithProfile( } } -/// \brief Collect blocks in the given loop that are to be placed. +/// Collect blocks in the given loop that are to be placed. /// /// When profile data is available, exclude cold blocks from the returned set; /// otherwise, collect all blocks in the loop. @@ -2202,7 +2202,7 @@ MachineBlockPlacement::collectLoopBlockSet(const MachineLoop &L) { return LoopBlockSet; } -/// \brief Forms basic block chains from the natural loop structures. +/// Forms basic block chains from the natural loop structures. /// /// These chains are designed to preserve the existing *structure* of the code /// as much as possible. We can then stitch the chains together in a way which @@ -2834,17 +2834,17 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) { namespace { -/// \brief A pass to compute block placement statistics. +/// A pass to compute block placement statistics. /// /// A separate pass to compute interesting statistics for evaluating block /// placement. This is separate from the actual placement pass so that they can /// be computed in the absence of any placement transformations or when using /// alternative placement strategies. class MachineBlockPlacementStats : public MachineFunctionPass { - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. const MachineBranchProbabilityInfo *MBPI; - /// \brief A handle to the function-wide block frequency pass. + /// A handle to the function-wide block frequency pass. const MachineBlockFrequencyInfo *MBFI; public: diff --git a/llvm/lib/CodeGen/MachineOutliner.cpp b/llvm/lib/CodeGen/MachineOutliner.cpp index d0f6f56b3d7..e9000fffdb5 100644 --- a/llvm/lib/CodeGen/MachineOutliner.cpp +++ b/llvm/lib/CodeGen/MachineOutliner.cpp @@ -101,7 +101,7 @@ static cl::opt<bool> EnableLinkOnceODROutlining( namespace { -/// \brief An individual sequence of instructions to be replaced with a call to +/// An individual sequence of instructions to be replaced with a call to /// an outlined function. struct Candidate { private: @@ -118,7 +118,7 @@ public: /// Set to false if the candidate overlapped with another candidate. bool InCandidateList = true; - /// \brief The index of this \p Candidate's \p OutlinedFunction in the list of + /// The index of this \p Candidate's \p OutlinedFunction in the list of /// \p OutlinedFunctions. 
unsigned FunctionIdx; @@ -143,7 +143,7 @@ public: // Return the end index of this candidate. unsigned getEndIdx() const { return StartIdx + Len - 1; } - /// \brief The number of instructions that would be saved by outlining every + /// The number of instructions that would be saved by outlining every /// candidate of this type. /// /// This is a fixed value which is not updated during the candidate pruning @@ -158,14 +158,14 @@ public: Candidate() {} - /// \brief Used to ensure that \p Candidates are outlined in an order that + /// Used to ensure that \p Candidates are outlined in an order that /// preserves the start and end indices of other \p Candidates. bool operator<(const Candidate &RHS) const { return getStartIdx() > RHS.getStartIdx(); } }; -/// \brief The information necessary to create an outlined function for some +/// The information necessary to create an outlined function for some /// class of candidate. struct OutlinedFunction { @@ -183,7 +183,7 @@ public: /// A number assigned to this function which appears at the end of its name. unsigned Name; - /// \brief The sequence of integers corresponding to the instructions in this + /// The sequence of integers corresponding to the instructions in this /// function. std::vector<unsigned> Sequence; @@ -210,14 +210,14 @@ public: return getOccurrenceCount(); } - /// \brief Return the number of instructions it would take to outline this + /// Return the number of instructions it would take to outline this /// function. unsigned getOutliningCost() { return (OccurrenceCount * MInfo.CallOverhead) + Sequence.size() + MInfo.FrameOverhead; } - /// \brief Return the number of instructions that would be saved by outlining + /// Return the number of instructions that would be saved by outlining /// this function. unsigned getBenefit() { unsigned NotOutlinedCost = OccurrenceCount * Sequence.size(); @@ -279,7 +279,7 @@ struct SuffixTreeNode { /// For all other nodes, this is ignored. unsigned SuffixIdx = EmptyIdx; - /// \brief For internal nodes, a pointer to the internal node representing + /// For internal nodes, a pointer to the internal node representing /// the same sequence with the first character chopped off. /// /// This acts as a shortcut in Ukkonen's algorithm. One of the things that @@ -393,7 +393,7 @@ private: /// The end index of each leaf in the tree. unsigned LeafEndIdx = -1; - /// \brief Helper struct which keeps track of the next insertion point in + /// Helper struct which keeps track of the next insertion point in /// Ukkonen's algorithm. struct ActiveState { /// The next node to insert at. @@ -406,7 +406,7 @@ private: unsigned Len = 0; }; - /// \brief The point the next insertion will take place at in the + /// The point the next insertion will take place at in the /// construction algorithm. ActiveState Active; @@ -453,7 +453,7 @@ private: return N; } - /// \brief Set the suffix indices of the leaves to the start indices of their + /// Set the suffix indices of the leaves to the start indices of their /// respective suffixes. Also stores each leaf in \p LeafVector at its /// respective suffix index. /// @@ -491,7 +491,7 @@ private: } } - /// \brief Construct the suffix tree for the prefix of the input ending at + /// Construct the suffix tree for the prefix of the input ending at /// \p EndIdx. /// /// Used to construct the full suffix tree iteratively. At the end of each @@ -652,16 +652,16 @@ public: } }; -/// \brief Maps \p MachineInstrs to unsigned integers and stores the mappings. 
+/// Maps \p MachineInstrs to unsigned integers and stores the mappings. struct InstructionMapper { - /// \brief The next available integer to assign to a \p MachineInstr that + /// The next available integer to assign to a \p MachineInstr that /// cannot be outlined. /// /// Set to -3 for compatability with \p DenseMapInfo<unsigned>. unsigned IllegalInstrNumber = -3; - /// \brief The next available integer to assign to a \p MachineInstr that can + /// The next available integer to assign to a \p MachineInstr that can /// be outlined. unsigned LegalInstrNumber = 0; @@ -676,11 +676,11 @@ struct InstructionMapper { /// The vector of unsigned integers that the module is mapped to. std::vector<unsigned> UnsignedVec; - /// \brief Stores the location of the instruction associated with the integer + /// Stores the location of the instruction associated with the integer /// at index i in \p UnsignedVec for each index i. std::vector<MachineBasicBlock::iterator> InstrList; - /// \brief Maps \p *It to a legal integer. + /// Maps \p *It to a legal integer. /// /// Updates \p InstrList, \p UnsignedVec, \p InstructionIntegerMap, /// \p IntegerInstructionMap, and \p LegalInstrNumber. @@ -743,7 +743,7 @@ struct InstructionMapper { return MINumber; } - /// \brief Transforms a \p MachineBasicBlock into a \p vector of \p unsigneds + /// Transforms a \p MachineBasicBlock into a \p vector of \p unsigneds /// and appends it to \p UnsignedVec and \p InstrList. /// /// Two instructions are assigned the same integer if they are identical. @@ -796,7 +796,7 @@ struct InstructionMapper { } }; -/// \brief An interprocedural pass which finds repeated sequences of +/// An interprocedural pass which finds repeated sequences of /// instructions and replaces them with calls to functions. /// /// Each instruction is mapped to an unsigned integer and placed in a string. @@ -809,7 +809,7 @@ struct MachineOutliner : public ModulePass { static char ID; - /// \brief Set to true if the outliner should consider functions with + /// Set to true if the outliner should consider functions with /// linkonceodr linkage. bool OutlineFromLinkOnceODRs = false; @@ -853,7 +853,7 @@ struct MachineOutliner : public ModulePass { std::vector<std::shared_ptr<Candidate>> &CandidateList, std::vector<OutlinedFunction> &FunctionList); - /// \brief Replace the sequences of instructions represented by the + /// Replace the sequences of instructions represented by the /// \p Candidates in \p CandidateList with calls to \p MachineFunctions /// described in \p FunctionList. /// @@ -893,7 +893,7 @@ struct MachineOutliner : public ModulePass { /// Removes \p C from the candidate list, and updates its \p OutlinedFunction. void prune(Candidate &C, std::vector<OutlinedFunction> &FunctionList); - /// \brief Remove any overlapping candidates that weren't handled by the + /// Remove any overlapping candidates that weren't handled by the /// suffix tree's pruning method. /// /// Pruning from the suffix tree doesn't necessarily remove all overlaps. diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index 5f12cb1dfa9..f80d6a695a5 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -1486,7 +1486,7 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) { namespace { -/// \brief Post-process the DAG to create cluster edges between neighboring +/// Post-process the DAG to create cluster edges between neighboring /// loads or between neighboring stores. 
class BaseMemOpClusterMutation : public ScheduleDAGMutation { struct MemOpInfo { @@ -1590,7 +1590,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps( } } -/// \brief Callback from DAG postProcessing to create cluster edges for loads. +/// Callback from DAG postProcessing to create cluster edges for loads. void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) { ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); @@ -1631,7 +1631,7 @@ void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) { namespace { -/// \brief Post-process the DAG to create weak edges from all uses of a copy to +/// Post-process the DAG to create weak edges from all uses of a copy to /// the one use that defines the copy's source vreg, most likely an induction /// variable increment. class CopyConstrain : public ScheduleDAGMutation { @@ -1806,7 +1806,7 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { } } -/// \brief Callback from DAG postProcessing to create weak edges to encourage +/// Callback from DAG postProcessing to create weak edges to encourage /// copy elimination. void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); @@ -3361,7 +3361,7 @@ ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) { namespace { -/// \brief Order nodes by the ILP metric. +/// Order nodes by the ILP metric. struct ILPOrder { const SchedDFSResult *DFSResult = nullptr; const BitVector *ScheduledTrees = nullptr; @@ -3369,7 +3369,7 @@ struct ILPOrder { ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {} - /// \brief Apply a less-than relation on node priority. + /// Apply a less-than relation on node priority. /// /// (Return true if A comes after B in the Q.) bool operator()(const SUnit *A, const SUnit *B) const { @@ -3394,7 +3394,7 @@ struct ILPOrder { } }; -/// \brief Schedule based on the ILP metric. +/// Schedule based on the ILP metric. class ILPScheduler : public MachineSchedStrategy { ScheduleDAGMILive *DAG = nullptr; ILPOrder Cmp; @@ -3437,7 +3437,7 @@ public: return SU; } - /// \brief Scheduler callback to notify that a new subtree is scheduled. + /// Scheduler callback to notify that a new subtree is scheduled. void scheduleTree(unsigned SubtreeID) override { std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); } diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp index 835c55d8c7c..0e5c839c180 100644 --- a/llvm/lib/CodeGen/MachineSink.cpp +++ b/llvm/lib/CodeGen/MachineSink.cpp @@ -139,7 +139,7 @@ namespace { MachineBasicBlock *From, MachineBasicBlock *To); - /// \brief Postpone the splitting of the given critical + /// Postpone the splitting of the given critical /// edge (\p From, \p To). /// /// We do not split the edges on the fly. Indeed, this invalidates @@ -709,7 +709,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB, return SuccToSinkTo; } -/// \brief Return true if MI is likely to be usable as a memory operation by the +/// Return true if MI is likely to be usable as a memory operation by the /// implicit null check optimization. 
/// /// This is a "best effort" heuristic, and should not be relied upon for diff --git a/llvm/lib/CodeGen/MacroFusion.cpp b/llvm/lib/CodeGen/MacroFusion.cpp index e7f426c469a..5b3523be635 100644 --- a/llvm/lib/CodeGen/MacroFusion.cpp +++ b/llvm/lib/CodeGen/MacroFusion.cpp @@ -105,7 +105,7 @@ static bool fuseInstructionPair(ScheduleDAGMI &DAG, SUnit &FirstSU, namespace { -/// \brief Post-process the DAG to create cluster edges between instrs that may +/// Post-process the DAG to create cluster edges between instrs that may /// be fused by the processor into a single operation. class MacroFusion : public ScheduleDAGMutation { ShouldSchedulePredTy shouldScheduleAdjacent; @@ -135,7 +135,7 @@ void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) { scheduleAdjacentImpl(*DAG, DAG->ExitSU); } -/// \brief Implement the fusion of instr pairs in the scheduling DAG, +/// Implement the fusion of instr pairs in the scheduling DAG, /// anchored at the instr in AnchorSU.. bool MacroFusion::scheduleAdjacentImpl(ScheduleDAGMI &DAG, SUnit &AnchorSU) { const MachineInstr &AnchorMI = *AnchorSU.getInstr(); diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 1320f998555..5ce7da8f6c5 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -202,7 +202,7 @@ namespace { bool foldImmediate(MachineInstr &MI, SmallSet<unsigned, 4> &ImmDefRegs, DenseMap<unsigned, MachineInstr*> &ImmDefMIs); - /// \brief Finds recurrence cycles, but only ones that formulated around + /// Finds recurrence cycles, but only ones that formulated around /// a def operand and a use operand that are tied. If there is a use /// operand commutable with the tied use operand, find recurrence cycle /// along that operand as well. @@ -210,7 +210,7 @@ namespace { const SmallSet<unsigned, 2> &TargetReg, RecurrenceCycle &RC); - /// \brief If copy instruction \p MI is a virtual register copy, track it in + /// If copy instruction \p MI is a virtual register copy, track it in /// the set \p CopySrcRegs and \p CopyMIs. If this virtual register was /// previously seen as a copy, replace the uses of this copy with the /// previously seen copy's destination register. @@ -221,7 +221,7 @@ namespace { /// Is the register \p Reg a non-allocatable physical register? bool isNAPhysCopy(unsigned Reg); - /// \brief If copy instruction \p MI is a non-allocatable virtual<->physical + /// If copy instruction \p MI is a non-allocatable virtual<->physical /// register copy, track it in the \p NAPhysToVirtMIs map. If this /// non-allocatable physical register was previously copied to a virtual /// registered and hasn't been clobbered, the virt->phys copy can be @@ -232,7 +232,7 @@ namespace { bool isLoadFoldable(MachineInstr &MI, SmallSet<unsigned, 16> &FoldAsLoadDefCandidates); - /// \brief Check whether \p MI is understood by the register coalescer + /// Check whether \p MI is understood by the register coalescer /// but may require some rewriting. bool isCoalescableCopy(const MachineInstr &MI) { // SubregToRegs are not interesting, because they are already register @@ -242,7 +242,7 @@ namespace { MI.isExtractSubreg())); } - /// \brief Check whether \p MI is a copy like instruction that is + /// Check whether \p MI is a copy like instruction that is /// not recognized by the register coalescer. 
bool isUncoalescableCopy(const MachineInstr &MI) { return MI.isBitcast() || @@ -345,7 +345,7 @@ namespace { } }; - /// \brief Helper class to track the possible sources of a value defined by + /// Helper class to track the possible sources of a value defined by /// a (chain of) copy related instructions. /// Given a definition (instruction and definition index), this class /// follows the use-def chain to find successive suitable sources. @@ -425,7 +425,7 @@ namespace { } } - /// \brief Following the use-def chain, get the next available source + /// Following the use-def chain, get the next available source /// for the tracked value. /// \return A ValueTrackerResult containing a set of registers /// and sub registers with tracked values. A ValueTrackerResult with @@ -646,7 +646,7 @@ bool PeepholeOptimizer::optimizeCondBranch(MachineInstr &MI) { return TII->optimizeCondBranch(MI); } -/// \brief Try to find the next source that share the same register file +/// Try to find the next source that share the same register file /// for the value defined by \p Reg and \p SubReg. /// When true is returned, the \p RewriteMap can be used by the client to /// retrieve all Def -> Use along the way up to the next source. Any found @@ -746,7 +746,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg, return CurSrcPair.Reg != Reg; } -/// \brief Insert a PHI instruction with incoming edges \p SrcRegs that are +/// Insert a PHI instruction with incoming edges \p SrcRegs that are /// guaranteed to have the same register class. This is necessary whenever we /// successfully traverse a PHI instruction and find suitable sources coming /// from its edges. By inserting a new PHI, we provide a rewritten PHI def @@ -791,7 +791,7 @@ public: Rewriter(MachineInstr &CopyLike) : CopyLike(CopyLike) {} virtual ~Rewriter() {} - /// \brief Get the next rewritable source (SrcReg, SrcSubReg) and + /// Get the next rewritable source (SrcReg, SrcSubReg) and /// the related value that it affects (DstReg, DstSubReg). /// A source is considered rewritable if its register class and the /// register class of the related DstReg may not be register @@ -859,7 +859,7 @@ public: } }; -/// \brief Helper class to rewrite uncoalescable copy like instructions +/// Helper class to rewrite uncoalescable copy like instructions /// into new COPY (coalescable friendly) instructions. class UncoalescableRewriter : public Rewriter { unsigned NumDefs; ///< Number of defs in the bitcast. @@ -1101,7 +1101,7 @@ static Rewriter *getCopyRewriter(MachineInstr &MI, const TargetInstrInfo &TII) { } } -/// \brief Given a \p Def.Reg and Def.SubReg pair, use \p RewriteMap to find +/// Given a \p Def.Reg and Def.SubReg pair, use \p RewriteMap to find /// the new source to use for rewrite. If \p HandleMultipleSources is true and /// multiple sources for a given \p Def are found along the way, we found a /// PHI instructions that needs to be rewritten. @@ -1213,7 +1213,7 @@ bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) { return Changed; } -/// \brief Rewrite the source found through \p Def, by using the \p RewriteMap +/// Rewrite the source found through \p Def, by using the \p RewriteMap /// and create a new COPY instruction. More info about RewriteMap in /// PeepholeOptimizer::findNextSource. 
Right now this is only used to handle /// Uncoalescable copies, since they are copy like instructions that aren't @@ -1254,7 +1254,7 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike, return *NewCopy; } -/// \brief Optimize copy-like instructions to create +/// Optimize copy-like instructions to create /// register coalescer friendly instruction. /// The optimization tries to kill-off the \p MI by looking /// through a chain of copies to find a source that has a compatible diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp index 7a8d4225ad0..78b94a25210 100644 --- a/llvm/lib/CodeGen/RegAllocFast.cpp +++ b/llvm/lib/CodeGen/RegAllocFast.cpp @@ -470,7 +470,7 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI, } } -/// \brief Return the cost of spilling clearing out PhysReg and aliases so it is +/// Return the cost of spilling clearing out PhysReg and aliases so it is /// free for allocation. Returns 0 when PhysReg is free or disabled with all /// aliases disabled - it can be allocated directly. /// \returns spillImpossible when PhysReg or an alias can't be spilled. @@ -519,7 +519,7 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const { return Cost; } -/// \brief This method updates local state so that we know that PhysReg is the +/// This method updates local state so that we know that PhysReg is the /// proper container for VirtReg now. The physical register must not be used /// for anything else when this is called. void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) { diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 80349457783..04b5393d79d 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -300,17 +300,17 @@ class RAGreedy : public MachineFunctionPass, EvicteeInfo Evictees; public: - /// \brief Clear all eviction information. + /// Clear all eviction information. void clear() { Evictees.clear(); } - /// \brief Clear eviction information for the given evictee Vreg. + /// Clear eviction information for the given evictee Vreg. /// E.g. when Vreg get's a new allocation, the old eviction info is no /// longer relevant. /// \param Evictee The evictee Vreg for whom we want to clear collected /// eviction info. void clearEvicteeInfo(unsigned Evictee) { Evictees.erase(Evictee); } - /// \brief Track new eviction. + /// Track new eviction. /// The Evictor vreg has evicted the Evictee vreg from Physreg. /// \praram PhysReg The phisical register Evictee was evicted from. /// \praram Evictor The evictor Vreg that evicted Evictee. @@ -937,7 +937,7 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, return true; } -/// \brief Return true if all interferences between VirtReg and PhysReg between +/// Return true if all interferences between VirtReg and PhysReg between /// Start and End can be evicted. /// /// \param VirtReg Live range that is about to be assigned. @@ -989,7 +989,7 @@ bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg, return true; } -/// \brief Return tthe physical register that will be best +/// Return tthe physical register that will be best /// candidate for eviction by a local split interval that will be created /// between Start and End. 
/// @@ -1381,7 +1381,7 @@ BlockFrequency RAGreedy::calcSpillCost() { return Cost; } -/// \brief Check if splitting Evictee will create a local split interval in +/// Check if splitting Evictee will create a local split interval in /// basic block number BBNumber that may cause a bad eviction chain. This is /// intended to prevent bad eviction sequences like: /// movl %ebp, 8(%esp) # 4-byte Spill @@ -1482,7 +1482,7 @@ bool RAGreedy::splitCanCauseEvictionChain(unsigned Evictee, return true; } -/// \brief Check if splitting VirtRegToSplit will create a local split interval +/// Check if splitting VirtRegToSplit will create a local split interval /// in basic block number BBNumber that may cause a spill. /// /// \param VirtRegToSplit The register considered to be split. @@ -2793,7 +2793,7 @@ void RAGreedy::initializeCSRCost() { CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry); } -/// \brief Collect the hint info for \p Reg. +/// Collect the hint info for \p Reg. /// The results are stored into \p Out. /// \p Out is not cleared before being populated. void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) { @@ -2817,7 +2817,7 @@ void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) { } } -/// \brief Using the given \p List, compute the cost of the broken hints if +/// Using the given \p List, compute the cost of the broken hints if /// \p PhysReg was used. /// \return The cost of \p List for \p PhysReg. BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List, @@ -2830,7 +2830,7 @@ BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List, return Cost; } -/// \brief Using the register assigned to \p VirtReg, try to recolor +/// Using the register assigned to \p VirtReg, try to recolor /// all the live ranges that are copy-related with \p VirtReg. /// The recoloring is then propagated to all the live-ranges that have /// been recolored and so on, until no more copies can be coalesced or @@ -2909,7 +2909,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) { } while (!RecoloringCandidates.empty()); } -/// \brief Try to recolor broken hints. +/// Try to recolor broken hints. /// Broken hints may be repaired by recoloring when an evicted variable /// freed up a register for a larger live-range. /// Consider the following example: diff --git a/llvm/lib/CodeGen/RegAllocPBQP.cpp b/llvm/lib/CodeGen/RegAllocPBQP.cpp index a71f839ccf0..7ce4438e613 100644 --- a/llvm/lib/CodeGen/RegAllocPBQP.cpp +++ b/llvm/lib/CodeGen/RegAllocPBQP.cpp @@ -160,25 +160,25 @@ private: /// always available for the remat of all the siblings of the original reg. SmallPtrSet<MachineInstr *, 32> DeadRemats; - /// \brief Finds the initial set of vreg intervals to allocate. + /// Finds the initial set of vreg intervals to allocate. void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS); - /// \brief Constructs an initial graph. + /// Constructs an initial graph. void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Spill the given VReg. + /// Spill the given VReg. void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals, MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Given a solved PBQP problem maps this solution back to a register + /// Given a solved PBQP problem maps this solution back to a register /// assignment. 
bool mapPBQPToRegAlloc(const PBQPRAGraph &G, const PBQP::Solution &Solution, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Postprocessing before final spilling. Sets basic block "live in" + /// Postprocessing before final spilling. Sets basic block "live in" /// variables. void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM) const; diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index 45ee0d17e25..c0deb11d06d 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -115,11 +115,11 @@ namespace { /// checked for smaller live intervals. bool ShrinkMainRange; - /// \brief True if the coalescer should aggressively coalesce global copies + /// True if the coalescer should aggressively coalesce global copies /// in favor of keeping local copies. bool JoinGlobalCopies; - /// \brief True if the coalescer should aggressively coalesce fall-thru + /// True if the coalescer should aggressively coalesce fall-thru /// blocks exclusively containing copies. bool JoinSplitEdges; diff --git a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp index 1e1f36a35ec..e25e53a24b5 100644 --- a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp +++ b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp @@ -77,20 +77,20 @@ private: /// Split unrelated subregister components and rename them to new vregs. bool renameComponents(LiveInterval &LI) const; - /// \brief Build a vector of SubRange infos and a union find set of + /// Build a vector of SubRange infos and a union find set of /// equivalence classes. /// Returns true if more than 1 equivalence class was found. bool findComponents(IntEqClasses &Classes, SmallVectorImpl<SubRangeInfo> &SubRangeInfos, LiveInterval &LI) const; - /// \brief Distribute the LiveInterval segments into the new LiveIntervals + /// Distribute the LiveInterval segments into the new LiveIntervals /// belonging to their class. void distribute(const IntEqClasses &Classes, const SmallVectorImpl<SubRangeInfo> &SubRangeInfos, const SmallVectorImpl<LiveInterval*> &Intervals) const; - /// \brief Constructs main liverange and add missing undef+dead flags. + /// Constructs main liverange and add missing undef+dead flags. void computeMainRangesFixFlags(const IntEqClasses &Classes, const SmallVectorImpl<SubRangeInfo> &SubRangeInfos, const SmallVectorImpl<LiveInterval*> &Intervals) const; diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp index a8f16a9ec0a..3475bae4990 100644 --- a/llvm/lib/CodeGen/SafeStack.cpp +++ b/llvm/lib/CodeGen/SafeStack.cpp @@ -143,14 +143,14 @@ class SafeStack { /// might expect to appear on the stack on most common targets. enum { StackAlignment = 16 }; - /// \brief Return the value of the stack canary. + /// Return the value of the stack canary. Value *getStackGuard(IRBuilder<> &IRB, Function &F); - /// \brief Load stack guard from the frame and check if it has changed. + /// Load stack guard from the frame and check if it has changed. void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI, AllocaInst *StackGuardSlot, Value *StackGuard); - /// \brief Find all static allocas, dynamic allocas, return instructions and + /// Find all static allocas, dynamic allocas, return instructions and /// stack restore points (exception unwind blocks and setjmp calls) in the /// given function and append them to the respective vectors. 
void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas, @@ -159,11 +159,11 @@ class SafeStack { SmallVectorImpl<ReturnInst *> &Returns, SmallVectorImpl<Instruction *> &StackRestorePoints); - /// \brief Calculate the allocation size of a given alloca. Returns 0 if the + /// Calculate the allocation size of a given alloca. Returns 0 if the /// size can not be statically determined. uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI); - /// \brief Allocate space for all static allocas in \p StaticAllocas, + /// Allocate space for all static allocas in \p StaticAllocas, /// replace allocas with pointers into the unsafe stack and generate code to /// restore the stack pointer before all return instructions in \p Returns. /// @@ -176,7 +176,7 @@ class SafeStack { Instruction *BasePointer, AllocaInst *StackGuardSlot); - /// \brief Generate code to restore the stack after all stack restore points + /// Generate code to restore the stack after all stack restore points /// in \p StackRestorePoints. /// /// \returns A local variable in which to maintain the dynamic top of the @@ -186,7 +186,7 @@ class SafeStack { ArrayRef<Instruction *> StackRestorePoints, Value *StaticTop, bool NeedDynamicTop); - /// \brief Replace all allocas in \p DynamicAllocas with code to allocate + /// Replace all allocas in \p DynamicAllocas with code to allocate /// space dynamically on the unsafe stack and store the dynamic unsafe stack /// top to \p DynamicTop if non-null. void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr, diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp index 2b994422bab..45df5c9d0b5 100644 --- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -267,7 +267,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { } } -/// \brief Adds register dependencies (data, anti, and output) from this SUnit +/// Adds register dependencies (data, anti, and output) from this SUnit /// to following instructions in the same scheduling region that depend the /// physical register referenced at OperIdx. void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { @@ -469,7 +469,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU)); } -/// \brief Adds a register data dependency if the instruction that defines the +/// Adds a register data dependency if the instruction that defines the /// virtual register used at OperIdx is mapped to an SUnit. Add a register /// antidependency from this SUnit to instructions that occur later in the same /// scheduling region if they write the virtual register. @@ -515,7 +515,7 @@ void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, } } -/// \brief Creates an SUnit for each real instruction, numbered in top-down +/// Creates an SUnit for each real instruction, numbered in top-down /// topological order. The instruction order A < B, implies that no edge exists /// from B to A. /// @@ -1213,7 +1213,7 @@ public: RootSet[SU->NodeNum] = RData; } - /// \brief Called once for each tree edge after calling visitPostOrderNode on + /// Called once for each tree edge after calling visitPostOrderNode on /// the predecessor. Increment the parent node's instruction count and /// preemptively join this subtree to its parent's if it is small enough. 
void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 5c47fe21ec8..540e31048c6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -122,7 +122,7 @@ namespace { bool LegalTypes = false; bool ForCodeSize; - /// \brief Worklist of all of the nodes that need to be simplified. + /// Worklist of all of the nodes that need to be simplified. /// /// This must behave as a stack -- new nodes to process are pushed onto the /// back and when processing we pop off of the back. @@ -131,14 +131,14 @@ namespace { /// due to nodes being deleted from the underlying DAG. SmallVector<SDNode *, 64> Worklist; - /// \brief Mapping from an SDNode to its position on the worklist. + /// Mapping from an SDNode to its position on the worklist. /// /// This is used to find and remove nodes from the worklist (by nulling /// them) when they are deleted from the underlying DAG. It relies on /// stable indices of nodes within the worklist. DenseMap<SDNode *, unsigned> WorklistMap; - /// \brief Set of nodes which have been combined (at least once). + /// Set of nodes which have been combined (at least once). /// /// This is used to allow us to reliably add any operands of a DAG node /// which have not yet been combined to the worklist. @@ -249,7 +249,7 @@ namespace { SDValue SplitIndexingFromLoad(LoadSDNode *LD); bool SliceUpLoad(SDNode *N); - /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed + /// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed /// load. /// /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced. @@ -561,7 +561,7 @@ namespace { /// affected nodes are stored as a prefix in \p StoreNodes). bool MergeConsecutiveStores(StoreSDNode *N); - /// \brief Try to transform a truncation where C is a constant: + /// Try to transform a truncation where C is a constant: /// (trunc (and X, C)) -> (and (trunc X), (trunc C)) /// /// \p N needs to be a truncation and its first operand an AND. Other @@ -856,7 +856,7 @@ bool DAGCombiner::isOneUseSetCC(SDValue N) const { return false; } -// \brief Returns the SDNode if it is a constant float BuildVector +// Returns the SDNode if it is a constant float BuildVector // or constant float. static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) { if (isa<ConstantFPSDNode>(N)) @@ -1347,7 +1347,7 @@ bool DAGCombiner::PromoteLoad(SDValue Op) { return false; } -/// \brief Recursively delete a node which has no uses and any operands for +/// Recursively delete a node which has no uses and any operands for /// which it is the only use. /// /// Note that this both deletes the nodes and removes them from the worklist. @@ -6474,7 +6474,7 @@ SDValue DAGCombiner::visitCTPOP(SDNode *N) { return SDValue(); } -/// \brief Generate Min/Max node +/// Generate Min/Max node static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode CC, const TargetLowering &TLI, @@ -11954,7 +11954,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { return false; } -/// \brief Return the base-pointer arithmetic from an indexed \p LD. +/// Return the base-pointer arithmetic from an indexed \p LD. 
SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) { ISD::MemIndexedMode AM = LD->getAddressingMode(); assert(AM != ISD::UNINDEXED); @@ -12116,7 +12116,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) { namespace { -/// \brief Helper structure used to slice a load in smaller loads. +/// Helper structure used to slice a load in smaller loads. /// Basically a slice is obtained from the following sequence: /// Origin = load Ty1, Base /// Shift = srl Ty1 Origin, CstTy Amount @@ -12129,7 +12129,7 @@ namespace { /// SliceTy is deduced from the number of bits that are actually used to /// build Inst. struct LoadedSlice { - /// \brief Helper structure used to compute the cost of a slice. + /// Helper structure used to compute the cost of a slice. struct Cost { /// Are we optimizing for code size. bool ForCodeSize; @@ -12143,7 +12143,7 @@ struct LoadedSlice { Cost(bool ForCodeSize = false) : ForCodeSize(ForCodeSize) {} - /// \brief Get the cost of one isolated slice. + /// Get the cost of one isolated slice. Cost(const LoadedSlice &LS, bool ForCodeSize = false) : ForCodeSize(ForCodeSize), Loads(1) { EVT TruncType = LS.Inst->getValueType(0); @@ -12153,7 +12153,7 @@ struct LoadedSlice { ZExts = 1; } - /// \brief Account for slicing gain in the current cost. + /// Account for slicing gain in the current cost. /// Slicing provide a few gains like removing a shift or a /// truncate. This method allows to grow the cost of the original /// load with the gain from this slice. @@ -12226,7 +12226,7 @@ struct LoadedSlice { unsigned Shift = 0, SelectionDAG *DAG = nullptr) : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {} - /// \brief Get the bits used in a chunk of bits \p BitWidth large. + /// Get the bits used in a chunk of bits \p BitWidth large. /// \return Result is \p BitWidth and has used bits set to 1 and /// not used bits set to 0. APInt getUsedBits() const { @@ -12246,14 +12246,14 @@ struct LoadedSlice { return UsedBits; } - /// \brief Get the size of the slice to be loaded in bytes. + /// Get the size of the slice to be loaded in bytes. unsigned getLoadedSize() const { unsigned SliceSize = getUsedBits().countPopulation(); assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte."); return SliceSize / 8; } - /// \brief Get the type that will be loaded for this slice. + /// Get the type that will be loaded for this slice. /// Note: This may not be the final type for the slice. EVT getLoadedType() const { assert(DAG && "Missing context"); @@ -12261,7 +12261,7 @@ struct LoadedSlice { return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); } - /// \brief Get the alignment of the load used for this slice. + /// Get the alignment of the load used for this slice. unsigned getAlignment() const { unsigned Alignment = Origin->getAlignment(); unsigned Offset = getOffsetFromBase(); @@ -12270,7 +12270,7 @@ struct LoadedSlice { return Alignment; } - /// \brief Check if this slice can be rewritten with legal operations. + /// Check if this slice can be rewritten with legal operations. bool isLegal() const { // An invalid slice is not legal. if (!Origin || !Inst || !DAG) @@ -12314,7 +12314,7 @@ struct LoadedSlice { return true; } - /// \brief Get the offset in bytes of this slice in the original chunk of + /// Get the offset in bytes of this slice in the original chunk of /// bits. /// \pre DAG != nullptr. 
uint64_t getOffsetFromBase() const { @@ -12335,7 +12335,7 @@ struct LoadedSlice { return Offset; } - /// \brief Generate the sequence of instructions to load the slice + /// Generate the sequence of instructions to load the slice /// represented by this object and redirect the uses of this slice to /// this new sequence of instructions. /// \pre this->Inst && this->Origin are valid Instructions and this @@ -12373,7 +12373,7 @@ struct LoadedSlice { return LastInst; } - /// \brief Check if this slice can be merged with an expensive cross register + /// Check if this slice can be merged with an expensive cross register /// bank copy. E.g., /// i = load i32 /// f = bitcast i32 i to float @@ -12422,7 +12422,7 @@ struct LoadedSlice { } // end anonymous namespace -/// \brief Check that all bits set in \p UsedBits form a dense region, i.e., +/// Check that all bits set in \p UsedBits form a dense region, i.e., /// \p UsedBits looks like 0..0 1..1 0..0. static bool areUsedBitsDense(const APInt &UsedBits) { // If all the bits are one, this is dense! @@ -12438,7 +12438,7 @@ static bool areUsedBitsDense(const APInt &UsedBits) { return NarrowedUsedBits.isAllOnesValue(); } -/// \brief Check whether or not \p First and \p Second are next to each other +/// Check whether or not \p First and \p Second are next to each other /// in memory. This means that there is no hole between the bits loaded /// by \p First and the bits loaded by \p Second. static bool areSlicesNextToEachOther(const LoadedSlice &First, @@ -12452,7 +12452,7 @@ static bool areSlicesNextToEachOther(const LoadedSlice &First, return areUsedBitsDense(UsedBits); } -/// \brief Adjust the \p GlobalLSCost according to the target +/// Adjust the \p GlobalLSCost according to the target /// paring capabilities and the layout of the slices. /// \pre \p GlobalLSCost should account for at least as many loads as /// there is in the slices in \p LoadedSlices. @@ -12513,7 +12513,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices, } } -/// \brief Check the profitability of all involved LoadedSlice. +/// Check the profitability of all involved LoadedSlice. /// Currently, it is considered profitable if there is exactly two /// involved slices (1) which are (2) next to each other in memory, and /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3). @@ -12557,7 +12557,7 @@ static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices, return OrigCost > GlobalSlicingCost; } -/// \brief If the given load, \p LI, is used only by trunc or trunc(lshr) +/// If the given load, \p LI, is used only by trunc or trunc(lshr) /// operations, split it in the various pieces being extracted. /// /// This sort of thing is introduced by SROA. diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp index 4b100acc9a5..571fd667cda 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -848,7 +848,7 @@ bool FastISel::selectStackmap(const CallInst *I) { return true; } -/// \brief Lower an argument list according to the target calling convention. +/// Lower an argument list according to the target calling convention. /// /// This is a helper for lowering intrinsics that follow a target calling /// convention or require stack pointer adjustment. 
Only a subset of the diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index fc191c457d9..486b5430537 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -87,11 +87,11 @@ class SelectionDAGLegalize { const TargetLowering &TLI; SelectionDAG &DAG; - /// \brief The set of nodes which have already been legalized. We hold a + /// The set of nodes which have already been legalized. We hold a /// reference to it in order to update as necessary on node deletion. SmallPtrSetImpl<SDNode *> &LegalizedNodes; - /// \brief A set of all the nodes updated during legalization. + /// A set of all the nodes updated during legalization. SmallSetVector<SDNode *, 16> *UpdatedNodes; EVT getSetCCResultType(EVT VT) const { @@ -107,7 +107,7 @@ public: : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG), LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {} - /// \brief Legalizes the given operation. + /// Legalizes the given operation. void LegalizeOp(SDNode *Node); private: diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index a481acd07ca..724a909a210 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -63,7 +63,7 @@ class VectorLegalizer { /// legalizing the same thing more than once. SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes; - /// \brief Adds a node to the translation cache. + /// Adds a node to the translation cache. void AddLegalizedOperand(SDValue From, SDValue To) { LegalizedNodes.insert(std::make_pair(From, To)); // If someone requests legalization of the new node, return itself. @@ -71,55 +71,55 @@ class VectorLegalizer { LegalizedNodes.insert(std::make_pair(To, To)); } - /// \brief Legalizes the given node. + /// Legalizes the given node. SDValue LegalizeOp(SDValue Op); - /// \brief Assuming the node is legal, "legalize" the results. + /// Assuming the node is legal, "legalize" the results. SDValue TranslateLegalizeResults(SDValue Op, SDValue Result); - /// \brief Implements unrolling a VSETCC. + /// Implements unrolling a VSETCC. SDValue UnrollVSETCC(SDValue Op); - /// \brief Implement expand-based legalization of vector operations. + /// Implement expand-based legalization of vector operations. /// /// This is just a high-level routine to dispatch to specific code paths for /// operations to legalize them. SDValue Expand(SDValue Op); - /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if + /// Implements expansion for FNEG; falls back to UnrollVectorOp if /// FSUB isn't legal. /// /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if /// SINT_TO_FLOAT and SHR on vectors isn't legal. SDValue ExpandUINT_TO_FLOAT(SDValue Op); - /// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA. + /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA. SDValue ExpandSEXTINREG(SDValue Op); - /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG. + /// Implement expansion for ANY_EXTEND_VECTOR_INREG. /// /// Shuffles the low lanes of the operand into place and bitcasts to the proper /// type. The contents of the bits in the extended part of each element are /// undef. SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG. + /// Implement expansion for SIGN_EXTEND_VECTOR_INREG. 
/// /// Shuffles the low lanes of the operand into place, bitcasts to the proper /// type, then shifts left and arithmetic shifts right to introduce a sign /// extension. SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG. + /// Implement expansion for ZERO_EXTEND_VECTOR_INREG. /// /// Shuffles the low lanes of the operand into place and blends zeros into /// the remaining lanes, finally bitcasting to the proper type. SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Expand bswap of vectors into a shuffle if legal. + /// Expand bswap of vectors into a shuffle if legal. SDValue ExpandBSWAP(SDValue Op); - /// \brief Implement vselect in terms of XOR, AND, OR when blend is not + /// Implement vselect in terms of XOR, AND, OR when blend is not /// supported by the target. SDValue ExpandVSELECT(SDValue Op); SDValue ExpandSELECT(SDValue Op); @@ -131,18 +131,18 @@ class VectorLegalizer { SDValue ExpandCTLZ(SDValue Op); SDValue ExpandCTTZ_ZERO_UNDEF(SDValue Op); - /// \brief Implements vector promotion. + /// Implements vector promotion. /// /// This is essentially just bitcasting the operands to a different type and /// bitcasting the result back to the original type. SDValue Promote(SDValue Op); - /// \brief Implements [SU]INT_TO_FP vector promotion. + /// Implements [SU]INT_TO_FP vector promotion. /// /// This is a [zs]ext of the input operand to a larger integer type. SDValue PromoteINT_TO_FP(SDValue Op); - /// \brief Implements FP_TO_[SU]INT vector promotion of the result type. + /// Implements FP_TO_[SU]INT vector promotion of the result type. /// /// It is promoted to a larger integer type. The result is then /// truncated back to the original type. @@ -152,7 +152,7 @@ public: VectorLegalizer(SelectionDAG& dag) : DAG(dag), TLI(dag.getTargetLoweringInfo()) {} - /// \brief Begin legalizer the vector operations in the DAG. + /// Begin legalizer the vector operations in the DAG. bool Run(); }; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 0a4e7e6f737..cf6993baef3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -773,7 +773,7 @@ static void VerifySDNode(SDNode *N) { } #endif // NDEBUG -/// \brief Insert a newly allocated node into the DAG. +/// Insert a newly allocated node into the DAG. /// /// Handles insertion into the all nodes list and CSE map, as well as /// verification and other common operations when a new node is allocated. @@ -5446,7 +5446,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); } -/// \brief Lower the call to 'memset' intrinsic function into a series of store +/// Lower the call to 'memset' intrinsic function into a series of store /// operations. /// /// \param DAG Selection DAG where lowered code is placed. @@ -8522,7 +8522,7 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { return true; } -// \brief Returns the SDNode if it is a constant integer BuildVector +// Returns the SDNode if it is a constant integer BuildVector // or constant integer. 
 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
   if (isa<ConstantSDNode>(N))
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 4695374bc7a..2ac4d3a7b24 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7758,7 +7758,7 @@ SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
   return DAG.getMergeValues(Ops, SL);
 }

-/// \brief Populate a CallLowerinInfo (into \p CLI) based on the properties of
+/// Populate a CallLowerinInfo (into \p CLI) based on the properties of
 /// the call being lowered.
 ///
 /// This is a helper for lowering intrinsics that follow a target calling
@@ -7793,7 +7793,7 @@ void SelectionDAGBuilder::populateCallLoweringInfo(
       .setIsPatchPoint(IsPatchPoint);
 }

-/// \brief Add a stack map intrinsic call's live variable operands to a stackmap
+/// Add a stack map intrinsic call's live variable operands to a stackmap
 /// or patchpoint target node's operand list.
 ///
 /// Constants are converted to TargetConstants purely as an optimization to
@@ -7829,7 +7829,7 @@ static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
   }
 }

-/// \brief Lower llvm.experimental.stackmap directly to its target opcode.
+/// Lower llvm.experimental.stackmap directly to its target opcode.
 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
   // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
   //                                  [live variables...])
@@ -7892,7 +7892,7 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
   FuncInfo.MF->getFrameInfo().setHasStackMap();
 }

-/// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
+/// Lower llvm.experimental.patchpoint directly to its target opcode.
 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
                                           const BasicBlock *EHPadBB) {
   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 1334f1b2bf5..c00a72753e1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -197,7 +197,7 @@ defaultListDAGScheduler("default", "Best scheduler for the target",
 namespace llvm {

   //===--------------------------------------------------------------------===//
-  /// \brief This class is used by SelectionDAGISel to temporarily override
+  /// This class is used by SelectionDAGISel to temporarily override
   /// the optimization level on a per-function basis.
   class OptLevelChanger {
     SelectionDAGISel &IS;
@@ -2835,7 +2835,7 @@ struct MatchScope {
   bool HasChainNodesMatched;
 };

-/// \\brief A DAG update listener to keep the matching state
+/// \A DAG update listener to keep the matching state
 /// (i.e. RecordedNodes and MatchScope) uptodate if the target is allowed to
 /// change the DAG while matching. X86 addressing mode matcher is an example
 /// for this.
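The LegalizeVectorOps.cpp comments above describe expanding SIGN_EXTEND_VECTOR_INREG with a shift left followed by an arithmetic shift right. As a rough standalone sketch of that idea only (this is not code from this patch, and the helper name signExtendInReg is invented for the example), the scalar form of the trick looks like this:

#include <cstdint>

// Sign-extend the low FromBits bits of Value within a 64-bit integer:
// shift the field up to the top (as unsigned to avoid overflow), then
// arithmetic-shift it back down so the field's sign bit is replicated
// across the upper bits. The vector legalizer applies the same shift
// pair lane by lane.
int64_t signExtendInReg(int64_t Value, unsigned FromBits) {
  unsigned Shift = 64 - FromBits;
  return static_cast<int64_t>(static_cast<uint64_t>(Value) << Shift) >> Shift;
}

For instance, signExtendInReg(0xFF, 8) yields -1, since the low 8 bits are all ones.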
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d34e12fac7b..6e829a35f7c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -96,7 +96,7 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
   return true;
 }

-/// \brief Set CallLoweringInfo attribute flags based on a call instruction
+/// Set CallLoweringInfo attribute flags based on a call instruction
 /// and called function attributes.
 void TargetLoweringBase::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                                      unsigned ArgIdx) {
@@ -3310,7 +3310,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
     }
   }
 }

-/// \brief Given an exact SDIV by a constant, create a multiplication
+/// Given an exact SDIV by a constant, create a multiplication
 /// with the multiplicative inverse of the constant.
 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
                               const SDLoc &dl, SelectionDAG &DAG,
@@ -3352,7 +3352,7 @@ SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
   return SDValue();
 }

-/// \brief Given an ISD::SDIV node expressing a divide by constant,
+/// Given an ISD::SDIV node expressing a divide by constant,
 /// return a DAG expression to select that will generate the same value by
 /// multiplying by a magic number.
 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
@@ -3416,7 +3416,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
   return DAG.getNode(ISD::ADD, dl, VT, Q, T);
 }

-/// \brief Given an ISD::UDIV node expressing a divide by constant,
+/// Given an ISD::UDIV node expressing a divide by constant,
 /// return a DAG expression to select that will generate the same value by
 /// multiplying by a magic number.
 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp
index e20233917e3..d4fbe0a8df0 100644
--- a/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -99,7 +99,7 @@ EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,

 namespace {

-/// \brief Class to determine where the safe point to insert the
+/// Class to determine where the safe point to insert the
 /// prologue and epilogue are.
 /// Unlike the paper from Fred C. Chow, PLDI'88, that introduces the
 /// shrink-wrapping term for prologue/epilogue placement, this pass
@@ -153,7 +153,7 @@ class ShrinkWrap : public MachineFunctionPass {
   /// Current MachineFunction.
   MachineFunction *MachineFunc;

-  /// \brief Check if \p MI uses or defines a callee-saved register or
+  /// Check if \p MI uses or defines a callee-saved register or
   /// a frame index. If this is the case, this means \p MI must happen
   /// after Save and before Restore.
   bool useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS) const;
@@ -173,14 +173,14 @@ class ShrinkWrap : public MachineFunctionPass {
     return CurrentCSRs;
   }

-  /// \brief Update the Save and Restore points such that \p MBB is in
+  /// Update the Save and Restore points such that \p MBB is in
   /// the region that is dominated by Save and post-dominated by Restore
   /// and Save and Restore still match the safe point definition.
   /// Such point may not exist and Save and/or Restore may be null after
   /// this call.
   void updateSaveRestorePoints(MachineBasicBlock &MBB, RegScavenger *RS);

-  /// \brief Initialize the pass for \p MF.
+  /// Initialize the pass for \p MF.
   void init(MachineFunction &MF) {
     RCI.runOnMachineFunction(MF);
     MDT = &getAnalysis<MachineDominatorTree>();
@@ -206,7 +206,7 @@ class ShrinkWrap : public MachineFunctionPass {
   /// shrink-wrapping.
   bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }

-  /// \brief Check if shrink wrapping is enabled for this target and function.
+  /// Check if shrink wrapping is enabled for this target and function.
   static bool isShrinkWrapEnabled(const MachineFunction &MF);

 public:
@@ -232,7 +232,7 @@ public:

   StringRef getPassName() const override { return "Shrink Wrapping analysis"; }

-  /// \brief Perform the shrink-wrapping analysis and update
+  /// Perform the shrink-wrapping analysis and update
   /// the MachineFrameInfo attached to \p MF with the results.
   bool runOnMachineFunction(MachineFunction &MF) override;
 };
@@ -294,7 +294,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
   return false;
 }

-/// \brief Helper function to find the immediate (post) dominator.
+/// Helper function to find the immediate (post) dominator.
 template <typename ListOfBBs, typename DominanceAnalysis>
 static MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
                                    DominanceAnalysis &Dom) {
diff --git a/llvm/lib/CodeGen/SpillPlacement.cpp b/llvm/lib/CodeGen/SpillPlacement.cpp
index b989b54d419..f6786b30b21 100644
--- a/llvm/lib/CodeGen/SpillPlacement.cpp
+++ b/llvm/lib/CodeGen/SpillPlacement.cpp
@@ -246,7 +246,7 @@ void SpillPlacement::activate(unsigned n) {
   }
 }

-/// \brief Set the threshold for a given entry frequency.
+/// Set the threshold for a given entry frequency.
 ///
 /// Set the threshold relative to \c Entry. Since the threshold is used as a
 /// bound on the open interval (-Threshold;Threshold), 1 is the minimum
diff --git a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
index cc9af92c395..32d6f54f679 100644
--- a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
+++ b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
@@ -39,7 +39,7 @@ STATISTIC(NumBBsHaveNoStackmap, "Number of basic blocks with no stackmap");
 STATISTIC(NumStackMaps, "Number of StackMaps visited");

 namespace {
-/// \brief This pass calculates the liveness information for each basic block in
+/// This pass calculates the liveness information for each basic block in
 /// a function and attaches the register live-out information to a patchpoint
 /// intrinsic if present.
 ///
@@ -54,10 +54,10 @@ class StackMapLiveness : public MachineFunctionPass {
 public:
   static char ID;

-  /// \brief Default construct and initialize the pass.
+  /// Default construct and initialize the pass.
   StackMapLiveness();

-  /// \brief Tell the pass manager which passes we depend on and what
+  /// Tell the pass manager which passes we depend on and what
   /// information we preserve.
   void getAnalysisUsage(AnalysisUsage &AU) const override;

@@ -66,17 +66,17 @@ public:
         MachineFunctionProperties::Property::NoVRegs);
   }

-  /// \brief Calculate the liveness information for the given machine function.
+  /// Calculate the liveness information for the given machine function.
   bool runOnMachineFunction(MachineFunction &MF) override;

 private:
-  /// \brief Performs the actual liveness calculation for the function.
+  /// Performs the actual liveness calculation for the function.
   bool calculateLiveness(MachineFunction &MF);

-  /// \brief Add the current register live set to the instruction.
+  /// Add the current register live set to the instruction.
   void addLiveOutSetToMI(MachineFunction &MF, MachineInstr &MI);

-  /// \brief Create a register mask and initialize it with the registers from
+  /// Create a register mask and initialize it with the registers from
   /// the register live set.
   uint32_t *createRegisterMask(MachineFunction &MF) const;
 };
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 8a7393501d0..9bc0c1fc043 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -225,7 +225,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
   return false;
 }

-/// \brief Check whether or not this function needs a stack protector based
+/// Check whether or not this function needs a stack protector based
 /// upon the stack protector level.
 ///
 /// We use two heuristics: a standard (ssp) and strong (sspstrong).
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index aa071ddbf9f..80622ea7bae 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -345,7 +345,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
   return BestRC;
 }

-/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
+/// Check if the registers defined by the pair (RegisterClass, SubReg)
 /// share the same register file.
 static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass *DefRC,
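The StackMapLivenessAnalysis.cpp hunks above mention building a register mask from the current register live set. As a loose, self-contained sketch of that general idea (not the code this patch touches; makeRegisterMask, LiveRegs, and NumRegs are names invented for the example), such a mask packs one bit per physical register into 32-bit words:

#include <cstdint>
#include <vector>

// Pack a set of physical register numbers into an array of 32-bit words,
// one bit per register, so consumers can test membership with a mask.
std::vector<uint32_t> makeRegisterMask(const std::vector<unsigned> &LiveRegs,
                                       unsigned NumRegs) {
  std::vector<uint32_t> Mask((NumRegs + 31) / 32, 0);
  for (unsigned Reg : LiveRegs)
    Mask[Reg / 32] |= 1u << (Reg % 32);  // set the bit for this register
  return Mask;
}

A consumer can then test a register with (Mask[Reg / 32] >> (Reg % 32)) & 1.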