Diffstat (limited to 'llvm/lib')
411 files changed, 1883 insertions, 1883 deletions
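Every hunk below makes the same mechanical change: the explicit Doxygen \brief command is deleted from the start of a comment, and the rest of the comment is left intact. LLVM runs Doxygen with an autobrief-style option, which already treats the first sentence of a comment as the brief description, so the explicit command is redundant. A minimal before/after sketch of the pattern; the free-standing declaration is illustrative only (in the diff, computeLoopScale is a member of BlockFrequencyInfoImplBase):

    // Before: the brief description is spelled out with an explicit command.
    /// \brief Compute the loop scale for a loop.
    void computeLoopScale(LoopData &Loop);

    // After: with autobrief enabled, the first sentence becomes the brief.
    /// Compute the loop scale for a loop.
    void computeLoopScale(LoopData &Loop);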
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp index 2e77c2f19a1..a499a6e6413 100644 --- a/llvm/lib/Analysis/AliasAnalysis.cpp +++ b/llvm/lib/Analysis/AliasAnalysis.cpp @@ -521,7 +521,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW, return ModRefInfo::ModRef; } -/// \brief Return information about whether a particular call site modifies +/// Return information about whether a particular call site modifies /// or reads the specified memory location \p MemLoc before instruction \p I /// in a BasicBlock. An ordered basic block \p OBB can be used to speed up /// instruction-ordering queries inside the BasicBlock containing \p I. diff --git a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp index 796916d49d3..748736b6080 100644 --- a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp +++ b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp @@ -74,7 +74,7 @@ using LoopData = BlockFrequencyInfoImplBase::LoopData; using Weight = BlockFrequencyInfoImplBase::Weight; using FrequencyData = BlockFrequencyInfoImplBase::FrequencyData; -/// \brief Dithering mass distributer. +/// Dithering mass distributer. /// /// This class splits up a single mass into portions by weight, dithering to /// spread out error. No mass is lost. The dithering precision depends on the @@ -277,7 +277,7 @@ void BlockFrequencyInfoImplBase::clear() { Loops.clear(); } -/// \brief Clear all memory not needed downstream. +/// Clear all memory not needed downstream. /// /// Releases all memory not used downstream. In particular, saves Freqs. static void cleanup(BlockFrequencyInfoImplBase &BFI) { @@ -362,7 +362,7 @@ bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist( return true; } -/// \brief Compute the loop scale for a loop. +/// Compute the loop scale for a loop. void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) { // Compute loop scale. DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n"); @@ -396,7 +396,7 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) { << " - scale = " << Loop.Scale << "\n"); } -/// \brief Package up a loop. +/// Package up a loop. void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) { DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n"); @@ -492,7 +492,7 @@ static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI, } } -/// \brief Unwrap a loop package. +/// Unwrap a loop package. /// /// Visits all the members of a loop, adjusting their BlockData according to /// the loop's pseudo-node. @@ -670,7 +670,7 @@ template <> struct GraphTraits<IrreducibleGraph> { } // end namespace llvm -/// \brief Find extra irreducible headers. +/// Find extra irreducible headers. /// /// Find entry blocks and other blocks with backedges, which exist when \c G /// contains irreducible sub-SCCs. diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp index f4aea51d301..1937e1c20ec 100644 --- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp +++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp @@ -88,14 +88,14 @@ static const uint32_t LBH_NONTAKEN_WEIGHT = 4; // Unlikely edges within a loop are half as likely as other edges static const uint32_t LBH_UNLIKELY_WEIGHT = 62; -/// \brief Unreachable-terminating branch taken probability. +/// Unreachable-terminating branch taken probability. /// /// This is the probability for a branch being taken to a block that terminates /// (eventually) in unreachable. 
These are predicted as unlikely as possible. /// All reachable probability will equally share the remaining part. static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1); -/// \brief Weight for a branch taken going into a cold block. +/// Weight for a branch taken going into a cold block. /// /// This is the weight for a branch taken toward a block marked /// cold. A block is marked cold if it's postdominated by a @@ -103,7 +103,7 @@ static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1); /// are those marked with attribute 'cold'. static const uint32_t CC_TAKEN_WEIGHT = 4; -/// \brief Weight for a branch not-taken into a cold block. +/// Weight for a branch not-taken into a cold block. /// /// This is the weight for a branch not taken toward a block marked /// cold. @@ -118,20 +118,20 @@ static const uint32_t ZH_NONTAKEN_WEIGHT = 12; static const uint32_t FPH_TAKEN_WEIGHT = 20; static const uint32_t FPH_NONTAKEN_WEIGHT = 12; -/// \brief Invoke-terminating normal branch taken weight +/// Invoke-terminating normal branch taken weight /// /// This is the weight for branching to the normal destination of an invoke /// instruction. We expect this to happen most of the time. Set the weight to an /// absurdly high value so that nested loops subsume it. static const uint32_t IH_TAKEN_WEIGHT = 1024 * 1024 - 1; -/// \brief Invoke-terminating normal branch not-taken weight. +/// Invoke-terminating normal branch not-taken weight. /// /// This is the weight for branching to the unwind destination of an invoke /// instruction. This is essentially never taken. static const uint32_t IH_NONTAKEN_WEIGHT = 1; -/// \brief Add \p BB to PostDominatedByUnreachable set if applicable. +/// Add \p BB to PostDominatedByUnreachable set if applicable. void BranchProbabilityInfo::updatePostDominatedByUnreachable(const BasicBlock *BB) { const TerminatorInst *TI = BB->getTerminator(); @@ -162,7 +162,7 @@ BranchProbabilityInfo::updatePostDominatedByUnreachable(const BasicBlock *BB) { PostDominatedByUnreachable.insert(BB); } -/// \brief Add \p BB to PostDominatedByColdCall set if applicable. +/// Add \p BB to PostDominatedByColdCall set if applicable. void BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) { assert(!PostDominatedByColdCall.count(BB)); @@ -196,7 +196,7 @@ BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) { } } -/// \brief Calculate edge weights for successors lead to unreachable. +/// Calculate edge weights for successors lead to unreachable. /// /// Predict that a successor which leads necessarily to an /// unreachable-terminated block as extremely unlikely. @@ -340,7 +340,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) { return true; } -/// \brief Calculate edge weights for edges leading to cold blocks. +/// Calculate edge weights for edges leading to cold blocks. /// /// A cold block is one post-dominated by a block with a call to a /// cold function. Those edges are unlikely to be taken, so we give diff --git a/llvm/lib/Analysis/CFLGraph.h b/llvm/lib/Analysis/CFLGraph.h index e4e92864061..02e1e2b9315 100644 --- a/llvm/lib/Analysis/CFLGraph.h +++ b/llvm/lib/Analysis/CFLGraph.h @@ -46,7 +46,7 @@ namespace llvm { namespace cflaa { -/// \brief The Program Expression Graph (PEG) of CFL analysis +/// The Program Expression Graph (PEG) of CFL analysis /// CFLGraph is auxiliary data structure used by CFL-based alias analysis to /// describe flow-insensitive pointer-related behaviors. 
Given an LLVM function, /// the main purpose of this graph is to abstract away unrelated facts and @@ -154,7 +154,7 @@ public: } }; -///\brief A builder class used to create CFLGraph instance from a given function +///A builder class used to create CFLGraph instance from a given function /// The CFL-AA that uses this builder must provide its own type as a template /// argument. This is necessary for interprocedural processing: CFLGraphBuilder /// needs a way of obtaining the summary of other functions when callinsts are diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp index c575a8be772..c81a66a051a 100644 --- a/llvm/lib/Analysis/InlineCost.cpp +++ b/llvm/lib/Analysis/InlineCost.cpp @@ -311,12 +311,12 @@ public: } // namespace -/// \brief Test whether the given value is an Alloca-derived function argument. +/// Test whether the given value is an Alloca-derived function argument. bool CallAnalyzer::isAllocaDerivedArg(Value *V) { return SROAArgValues.count(V); } -/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to. +/// Lookup the SROA-candidate argument and cost iterator which V maps to. /// Returns false if V does not map to a SROA-candidate. bool CallAnalyzer::lookupSROAArgAndCost( Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) { @@ -332,7 +332,7 @@ bool CallAnalyzer::lookupSROAArgAndCost( return CostIt != SROAArgCosts.end(); } -/// \brief Disable SROA for the candidate marked by this cost iterator. +/// Disable SROA for the candidate marked by this cost iterator. /// /// This marks the candidate as no longer viable for SROA, and adds the cost /// savings associated with it back into the inline cost measurement. @@ -346,7 +346,7 @@ void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) { disableLoadElimination(); } -/// \brief If 'V' maps to a SROA candidate, disable SROA for it. +/// If 'V' maps to a SROA candidate, disable SROA for it. void CallAnalyzer::disableSROA(Value *V) { Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; @@ -354,7 +354,7 @@ void CallAnalyzer::disableSROA(Value *V) { disableSROA(CostIt); } -/// \brief Accumulate the given cost for a particular SROA candidate. +/// Accumulate the given cost for a particular SROA candidate. void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt, int InstructionCost) { CostIt->second += InstructionCost; @@ -369,7 +369,7 @@ void CallAnalyzer::disableLoadElimination() { } } -/// \brief Accumulate a constant GEP offset into an APInt if possible. +/// Accumulate a constant GEP offset into an APInt if possible. /// /// Returns false if unable to compute the offset for any reason. Respects any /// simplified values known during the analysis of this callsite. @@ -402,7 +402,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) { return true; } -/// \brief Use TTI to check whether a GEP is free. +/// Use TTI to check whether a GEP is free. /// /// Respects any simplified values known during the analysis of this callsite. bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) { @@ -543,7 +543,7 @@ bool CallAnalyzer::visitPHI(PHINode &I) { return true; } -/// \brief Check we can fold GEPs of constant-offset call site argument pointers. +/// Check we can fold GEPs of constant-offset call site argument pointers. /// This requires target data and inbounds GEPs. /// /// \return true if the specified GEP can be folded. 
@@ -1163,7 +1163,7 @@ bool CallAnalyzer::visitInsertValue(InsertValueInst &I) { return false; } -/// \brief Try to simplify a call site. +/// Try to simplify a call site. /// /// Takes a concrete function and callsite and tries to actually simplify it by /// analyzing the arguments and call itself with instsimplify. Returns true if @@ -1534,7 +1534,7 @@ bool CallAnalyzer::visitInstruction(Instruction &I) { return false; } -/// \brief Analyze a basic block for its contribution to the inline cost. +/// Analyze a basic block for its contribution to the inline cost. /// /// This method walks the analyzer over every instruction in the given basic /// block and accounts for their cost during inlining at this callsite. It @@ -1611,7 +1611,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB, return true; } -/// \brief Compute the base pointer and cumulative constant offsets for V. +/// Compute the base pointer and cumulative constant offsets for V. /// /// This strips all constant offsets off of V, leaving it the base pointer, and /// accumulates the total constant offset applied in the returned constant. It @@ -1650,7 +1650,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) { return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset)); } -/// \brief Find dead blocks due to deleted CFG edges during inlining. +/// Find dead blocks due to deleted CFG edges during inlining. /// /// If we know the successor of the current block, \p CurrBB, has to be \p /// NextBB, the other successors of \p CurrBB are dead if these successors have @@ -1688,7 +1688,7 @@ void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) { } } -/// \brief Analyze a call site for potential inlining. +/// Analyze a call site for potential inlining. /// /// Returns true if inlining this call is viable, and false if it is not /// viable. It computes the cost and adjusts the threshold based on numerous @@ -1881,7 +1881,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -/// \brief Dump stats about this call's analysis. +/// Dump stats about this call's analysis. LLVM_DUMP_METHOD void CallAnalyzer::dump() { #define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n" DEBUG_PRINT_STAT(NumConstantArgs); @@ -1901,7 +1901,7 @@ LLVM_DUMP_METHOD void CallAnalyzer::dump() { } #endif -/// \brief Test that there are no attribute conflicts between Caller and Callee +/// Test that there are no attribute conflicts between Caller and Callee /// that prevent inlining. static bool functionsHaveCompatibleAttributes(Function *Caller, Function *Callee, diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 25c38c06223..cdcb81b8060 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -588,7 +588,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query, RecursionLimit); } -/// \brief Compute the base pointer and cumulative constant offsets for V. +/// Compute the base pointer and cumulative constant offsets for V. /// /// This strips all constant offsets off of V, leaving it the base pointer, and /// accumulates the total constant offset applied in the returned constant. It @@ -639,7 +639,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, return OffsetIntPtr; } -/// \brief Compute the constant difference between two pointer values. 
+/// Compute the constant difference between two pointer values. /// If the difference is not a constant, returns zero. static Constant *computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS) { @@ -1183,7 +1183,7 @@ static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0, return nullptr; } -/// \brief Given operands for an Shl, LShr or AShr, see if we can +/// Given operands for an Shl, LShr or AShr, see if we can /// fold the result. If not, this returns null. static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool isExact, const SimplifyQuery &Q, @@ -4931,7 +4931,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, return Result == I ? UndefValue::get(I->getType()) : Result; } -/// \brief Implementation of recursive simplification through an instruction's +/// Implementation of recursive simplification through an instruction's /// uses. /// /// This is the common implementation of the recursive simplification routines. diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp index 011c01f2ad8..4bb1e56d086 100644 --- a/llvm/lib/Analysis/LazyValueInfo.cpp +++ b/llvm/lib/Analysis/LazyValueInfo.cpp @@ -1222,7 +1222,7 @@ static ValueLatticeElement constantFoldUser(User *Usr, Value *Op, return ValueLatticeElement::getOverdefined(); } -/// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if +/// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if /// Val is not constrained on the edge. Result is unspecified if return value /// is false. static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom, @@ -1347,7 +1347,7 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom, return false; } -/// \brief Compute the value of Val on the edge BBFrom -> BBTo or the value at +/// Compute the value of Val on the edge BBFrom -> BBTo or the value at /// the basic block if the edge does not constrain Val. bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom, BasicBlock *BBTo, diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp index 327f09a05e9..db919bd233b 100644 --- a/llvm/lib/Analysis/Lint.cpp +++ b/llvm/lib/Analysis/Lint.cpp @@ -165,13 +165,13 @@ namespace { } } - /// \brief A check failed, so printout out the condition and the message. + /// A check failed, so printout out the condition and the message. /// /// This provides a nice place to put a breakpoint if you want to see why /// something is not correct. void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; } - /// \brief A check failed (with values to print). + /// A check failed (with values to print). /// /// This calls the Message-only version so that the above is easier to set /// a breakpoint on. diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index 9be8456963d..10a71de82e3 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -156,7 +156,7 @@ bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL, return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT); } -/// \brief Test if A and B will obviously have the same value. +/// Test if A and B will obviously have the same value. /// /// This includes recognizing that %t0 and %t1 will have the same /// value in code like this: @@ -187,7 +187,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) { return false; } -/// \brief Check if executing a load of this pointer value cannot trap. 
+/// Check if executing a load of this pointer value cannot trap. /// /// If DT and ScanFrom are specified this method performs context-sensitive /// analysis and returns true if it is safe to load immediately before ScanFrom. diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index 8bbe5a25215..6f94d30855c 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -92,7 +92,7 @@ static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); unsigned VectorizerParams::RuntimeMemoryCheckThreshold; -/// \brief The maximum iterations used to merge memory checks +/// The maximum iterations used to merge memory checks static cl::opt<unsigned> MemoryCheckMergeThreshold( "memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " @@ -102,7 +102,7 @@ static cl::opt<unsigned> MemoryCheckMergeThreshold( /// Maximum SIMD width. const unsigned VectorizerParams::MaxVectorWidth = 64; -/// \brief We collect dependences up to this threshold. +/// We collect dependences up to this threshold. static cl::opt<unsigned> MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " @@ -124,7 +124,7 @@ static cl::opt<bool> EnableMemAccessVersioning( "enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning")); -/// \brief Enable store-to-load forwarding conflict detection. This option can +/// Enable store-to-load forwarding conflict detection. This option can /// be disabled for correctness testing. static cl::opt<bool> EnableForwardingConflictDetection( "store-to-load-forwarding-conflict-detection", cl::Hidden, @@ -490,13 +490,13 @@ void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const { namespace { -/// \brief Analyses memory accesses in a loop. +/// Analyses memory accesses in a loop. /// /// Checks whether run time pointer checks are needed and builds sets for data /// dependence checking. class AccessAnalysis { public: - /// \brief Read or write access location. + /// Read or write access location. typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList; @@ -506,7 +506,7 @@ public: : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false), PSE(PSE) {} - /// \brief Register a load and whether it is only read from. + /// Register a load and whether it is only read from. void addLoad(MemoryLocation &Loc, bool IsReadOnly) { Value *Ptr = const_cast<Value*>(Loc.Ptr); AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); @@ -515,14 +515,14 @@ public: ReadOnlyPtr.insert(Ptr); } - /// \brief Register a store. + /// Register a store. void addStore(MemoryLocation &Loc) { Value *Ptr = const_cast<Value*>(Loc.Ptr); AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); Accesses.insert(MemAccessInfo(Ptr, true)); } - /// \brief Check if we can emit a run-time no-alias check for \p Access. + /// Check if we can emit a run-time no-alias check for \p Access. /// /// Returns true if we can emit a run-time no alias check for \p Access. 
/// If we can check this access, this also adds it to a dependence set and @@ -537,7 +537,7 @@ public: unsigned ASId, bool ShouldCheckStride, bool Assume); - /// \brief Check whether we can check the pointers at runtime for + /// Check whether we can check the pointers at runtime for /// non-intersection. /// /// Returns true if we need no check or if we do and we can generate them @@ -546,13 +546,13 @@ public: Loop *TheLoop, const ValueToValueMap &Strides, bool ShouldCheckWrap = false); - /// \brief Goes over all memory accesses, checks whether a RT check is needed + /// Goes over all memory accesses, checks whether a RT check is needed /// and builds sets of dependent accesses. void buildDependenceSets() { processMemAccesses(); } - /// \brief Initial processing of memory accesses determined that we need to + /// Initial processing of memory accesses determined that we need to /// perform dependency checking. /// /// Note that this can later be cleared if we retry memcheck analysis without @@ -570,7 +570,7 @@ public: private: typedef SetVector<MemAccessInfo> PtrAccessSet; - /// \brief Go over all memory access and check whether runtime pointer checks + /// Go over all memory access and check whether runtime pointer checks /// are needed and build sets of dependency check candidates. void processMemAccesses(); @@ -596,7 +596,7 @@ private: /// dependence check. MemoryDepChecker::DepCandidates &DepCands; - /// \brief Initial processing of memory accesses determined that we may need + /// Initial processing of memory accesses determined that we may need /// to add memchecks. Perform the analysis to determine the necessary checks. /// /// Note that, this is different from isDependencyCheckNeeded. When we retry @@ -611,7 +611,7 @@ private: } // end anonymous namespace -/// \brief Check whether a pointer can participate in a runtime bounds check. +/// Check whether a pointer can participate in a runtime bounds check. /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr /// by adding run-time checks (overflow checks) if necessary. static bool hasComputableBounds(PredicatedScalarEvolution &PSE, @@ -634,7 +634,7 @@ static bool hasComputableBounds(PredicatedScalarEvolution &PSE, return AR->isAffine(); } -/// \brief Check whether a pointer address cannot wrap. +/// Check whether a pointer address cannot wrap. static bool isNoWrap(PredicatedScalarEvolution &PSE, const ValueToValueMap &Strides, Value *Ptr, Loop *L) { const SCEV *PtrScev = PSE.getSCEV(Ptr); @@ -931,7 +931,7 @@ static bool isInBoundsGep(Value *Ptr) { return false; } -/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, +/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, /// i.e. monotonically increasing/decreasing. static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L) { @@ -979,7 +979,7 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, return false; } -/// \brief Check whether the access through \p Ptr has a constant stride. +/// Check whether the access through \p Ptr has a constant stride. int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp, const ValueToValueMap &StridesMap, bool Assume, bool ShouldCheckWrap) { @@ -1372,7 +1372,7 @@ static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, return false; } -/// \brief Check the dependence for two accesses with the same stride \p Stride. 
+/// Check the dependence for two accesses with the same stride \p Stride. /// \p Distance is the positive distance and \p TypeByteSize is type size in /// bytes. /// @@ -2025,7 +2025,7 @@ static Instruction *getFirstInst(Instruction *FirstInst, Value *V, namespace { -/// \brief IR Values for the lower and upper bounds of a pointer evolution. We +/// IR Values for the lower and upper bounds of a pointer evolution. We /// need to use value-handles because SCEV expansion can invalidate previously /// expanded values. Thus expansion of a pointer can invalidate the bounds for /// a previous one. @@ -2036,7 +2036,7 @@ struct PointerBounds { } // end anonymous namespace -/// \brief Expand code for the lower and upper bound of the pointer group \p CG +/// Expand code for the lower and upper bound of the pointer group \p CG /// in \p TheLoop. \return the values for the bounds. static PointerBounds expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop, @@ -2074,7 +2074,7 @@ expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop, } } -/// \brief Turns a collection of checks into a collection of expanded upper and +/// Turns a collection of checks into a collection of expanded upper and /// lower bounds for both pointers in the check. static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds( const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks, diff --git a/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp b/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp index 0da90dae3d9..c8b91a7a1a5 100644 --- a/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp +++ b/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp @@ -17,7 +17,7 @@ using namespace llvm; -/// \brief Try to simplify instruction \param I using its SCEV expression. +/// Try to simplify instruction \param I using its SCEV expression. /// /// The idea is that some AddRec expressions become constants, which then /// could trigger folding of other instructions. However, that only happens diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp index 2e5197a8ff1..186fda18886 100644 --- a/llvm/lib/Analysis/MemoryBuiltins.cpp +++ b/llvm/lib/Analysis/MemoryBuiltins.cpp @@ -217,7 +217,7 @@ static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) { return CS && CS.hasRetAttr(Attribute::NoAlias); } -/// \brief Tests if a value is a call or invoke to a library function that +/// Tests if a value is a call or invoke to a library function that /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// like). bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI, @@ -225,7 +225,7 @@ bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI, return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast).hasValue(); } -/// \brief Tests if a value is a call or invoke to a function that returns a +/// Tests if a value is a call or invoke to a function that returns a /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { @@ -235,21 +235,21 @@ bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI, hasNoAliasAttr(V, LookThroughBitCast); } -/// \brief Tests if a value is a call or invoke to a library function that +/// Tests if a value is a call or invoke to a library function that /// allocates uninitialized memory (such as malloc). 
bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, MallocLike, TLI, LookThroughBitCast).hasValue(); } -/// \brief Tests if a value is a call or invoke to a library function that +/// Tests if a value is a call or invoke to a library function that /// allocates zero-filled memory (such as calloc). bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, CallocLike, TLI, LookThroughBitCast).hasValue(); } -/// \brief Tests if a value is a call or invoke to a library function that +/// Tests if a value is a call or invoke to a library function that /// allocates memory similar to malloc or calloc. bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { @@ -257,7 +257,7 @@ bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, LookThroughBitCast).hasValue(); } -/// \brief Tests if a value is a call or invoke to a library function that +/// Tests if a value is a call or invoke to a library function that /// allocates memory (either malloc, calloc, or strdup like). bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { @@ -427,7 +427,7 @@ static APInt getSizeWithOverflow(const SizeOffsetType &Data) { return Data.first - Data.second; } -/// \brief Compute the size of the object pointed by Ptr. Returns true and the +/// Compute the size of the object pointed by Ptr. Returns true and the /// object size in Size if successful, and false otherwise. /// If RoundToAlign is true, then Size is rounded up to the alignment of /// allocas, byval arguments, and global variables. diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp index 5130c21cde9..7341188c719 100644 --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -83,7 +83,7 @@ static cl::opt<bool> namespace llvm { -/// \brief An assembly annotator class to print Memory SSA information in +/// An assembly annotator class to print Memory SSA information in /// comments. class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { friend class MemorySSA; @@ -906,7 +906,7 @@ struct RenamePassData { namespace llvm { -/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no +/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no /// longer does caching on its own, /// but the name has been retained for the moment. class MemorySSA::CachingWalker final : public MemorySSAWalker { @@ -952,7 +952,7 @@ void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, } } -/// \brief Rename a single basic block into MemorySSA form. +/// Rename a single basic block into MemorySSA form. /// Uses the standard SSA renaming algorithm. /// \returns The new incoming value. MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, @@ -975,7 +975,7 @@ MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, return IncomingVal; } -/// \brief This is the standard SSA renaming algorithm. +/// This is the standard SSA renaming algorithm. /// /// We walk the dominator tree in preorder, renaming accesses, and then filling /// in phi nodes in our successors. 
@@ -1024,7 +1024,7 @@ void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, } } -/// \brief This handles unreachable block accesses by deleting phi nodes in +/// This handles unreachable block accesses by deleting phi nodes in /// unreachable blocks, and marking all other unreachable MemoryAccess's as /// being uses of the live on entry definition. void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { @@ -1525,7 +1525,7 @@ static inline bool isOrdered(const Instruction *I) { return false; } -/// \brief Helper function to create new memory accesses +/// Helper function to create new memory accesses MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { // The assume intrinsic has a control dependency which we model by claiming // that it writes arbitrarily. Ignore that fake memory dependency here. @@ -1562,7 +1562,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { return MUD; } -/// \brief Returns true if \p Replacer dominates \p Replacee . +/// Returns true if \p Replacer dominates \p Replacee . bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, const MemoryAccess *Replacee) const { if (isa<MemoryUseOrDef>(Replacee)) @@ -1579,7 +1579,7 @@ bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, return true; } -/// \brief Properly remove \p MA from all of MemorySSA's lookup tables. +/// Properly remove \p MA from all of MemorySSA's lookup tables. void MemorySSA::removeFromLookups(MemoryAccess *MA) { assert(MA->use_empty() && "Trying to remove memory access that still has uses"); @@ -1602,7 +1602,7 @@ void MemorySSA::removeFromLookups(MemoryAccess *MA) { ValueToMemoryAccess.erase(VMA); } -/// \brief Properly remove \p MA from all of MemorySSA's lists. +/// Properly remove \p MA from all of MemorySSA's lists. /// /// Because of the way the intrusive list and use lists work, it is important to /// do removal in the right order. @@ -1648,7 +1648,7 @@ void MemorySSA::verifyMemorySSA() const { Walker->verify(this); } -/// \brief Verify that the order and existence of MemoryAccesses matches the +/// Verify that the order and existence of MemoryAccesses matches the /// order and existence of memory affecting instructions. void MemorySSA::verifyOrdering(Function &F) const { // Walk all the blocks, comparing what the lookups think and what the access @@ -1711,7 +1711,7 @@ void MemorySSA::verifyOrdering(Function &F) const { } } -/// \brief Verify the domination properties of MemorySSA by checking that each +/// Verify the domination properties of MemorySSA by checking that each /// definition dominates all of its uses. void MemorySSA::verifyDomination(Function &F) const { #ifndef NDEBUG @@ -1733,7 +1733,7 @@ void MemorySSA::verifyDomination(Function &F) const { #endif } -/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use +/// Verify the def-use lists in MemorySSA, by verifying that \p Use /// appears in the use list of \p Def. 
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { #ifndef NDEBUG @@ -1747,7 +1747,7 @@ void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { #endif } -/// \brief Verify the immediate use information, by walking all the memory +/// Verify the immediate use information, by walking all the memory /// accesses and verifying that, for each use, it appears in the /// appropriate def's use list void MemorySSA::verifyDefUses(Function &F) const { @@ -1793,7 +1793,7 @@ void MemorySSA::renumberBlock(const BasicBlock *B) const { BlockNumberingValid.insert(B); } -/// \brief Determine, for two memory accesses in the same block, +/// Determine, for two memory accesses in the same block, /// whether \p Dominator dominates \p Dominatee. /// \returns True if \p Dominator dominates \p Dominatee. bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, @@ -2001,7 +2001,7 @@ void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) { MUD->resetOptimized(); } -/// \brief Walk the use-def chains starting at \p MA and find +/// Walk the use-def chains starting at \p MA and find /// the MemoryAccess that actually clobbers Loc. /// /// \returns our clobbering memory access diff --git a/llvm/lib/Analysis/MemorySSAUpdater.cpp b/llvm/lib/Analysis/MemorySSAUpdater.cpp index 57b9cc98a43..e14319bba87 100644 --- a/llvm/lib/Analysis/MemorySSAUpdater.cpp +++ b/llvm/lib/Analysis/MemorySSAUpdater.cpp @@ -424,7 +424,7 @@ void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, return moveTo(What, BB, Where); } -/// \brief If all arguments of a MemoryPHI are defined by the same incoming +/// If all arguments of a MemoryPHI are defined by the same incoming /// argument, return that argument. static MemoryAccess *onlySingleValue(MemoryPhi *MP) { MemoryAccess *MA = nullptr; diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp index 1e922fd44c4..677b5a28222 100644 --- a/llvm/lib/Analysis/MustExecute.cpp +++ b/llvm/lib/Analysis/MustExecute.cpp @@ -198,7 +198,7 @@ static bool isMustExecuteIn(const Instruction &I, Loop *L, DominatorTree *DT) { } namespace { -/// \brief An assembly annotator class to print must execute information in +/// An assembly annotator class to print must execute information in /// comments. class MustExecuteAnnotatedWriter : public AssemblyAnnotationWriter { DenseMap<const Value*, SmallVector<Loop*, 4> > MustExec; diff --git a/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp b/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp index 55335f3a7cb..d6db6386c38 100644 --- a/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp +++ b/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp @@ -19,7 +19,7 @@ using namespace llvm; using namespace llvm::objcarc; -/// \brief A handy option to enable/disable all ARC Optimizations. +/// A handy option to enable/disable all ARC Optimizations. bool llvm::objcarc::EnableARCOpts; static cl::opt<bool, true> EnableARCOptimizations( "enable-objc-arc-opts", cl::desc("enable/disable all ARC Optimizations"), diff --git a/llvm/lib/Analysis/ObjCARCInstKind.cpp b/llvm/lib/Analysis/ObjCARCInstKind.cpp index f374dd33f86..332c9e894de 100644 --- a/llvm/lib/Analysis/ObjCARCInstKind.cpp +++ b/llvm/lib/Analysis/ObjCARCInstKind.cpp @@ -233,7 +233,7 @@ static bool isUseOnlyIntrinsic(unsigned ID) { } } -/// \brief Determine what kind of construct V is. +/// Determine what kind of construct V is. 
ARCInstKind llvm::objcarc::GetARCInstKind(const Value *V) { if (const Instruction *I = dyn_cast<Instruction>(V)) { // Any instruction other than bitcast and gep with a pointer operand have a @@ -331,7 +331,7 @@ ARCInstKind llvm::objcarc::GetARCInstKind(const Value *V) { return ARCInstKind::None; } -/// \brief Test if the given class is a kind of user. +/// Test if the given class is a kind of user. bool llvm::objcarc::IsUser(ARCInstKind Class) { switch (Class) { case ARCInstKind::User: @@ -365,7 +365,7 @@ bool llvm::objcarc::IsUser(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class is objc_retain or equivalent. +/// Test if the given class is objc_retain or equivalent. bool llvm::objcarc::IsRetain(ARCInstKind Class) { switch (Class) { case ARCInstKind::Retain: @@ -401,7 +401,7 @@ bool llvm::objcarc::IsRetain(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class is objc_autorelease or equivalent. +/// Test if the given class is objc_autorelease or equivalent. bool llvm::objcarc::IsAutorelease(ARCInstKind Class) { switch (Class) { case ARCInstKind::Autorelease: @@ -435,7 +435,7 @@ bool llvm::objcarc::IsAutorelease(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class represents instructions which return their +/// Test if the given class represents instructions which return their /// argument verbatim. bool llvm::objcarc::IsForwarding(ARCInstKind Class) { switch (Class) { @@ -470,7 +470,7 @@ bool llvm::objcarc::IsForwarding(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class represents instructions which do nothing if +/// Test if the given class represents instructions which do nothing if /// passed a null pointer. bool llvm::objcarc::IsNoopOnNull(ARCInstKind Class) { switch (Class) { @@ -505,7 +505,7 @@ bool llvm::objcarc::IsNoopOnNull(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class represents instructions which are always safe +/// Test if the given class represents instructions which are always safe /// to mark with the "tail" keyword. bool llvm::objcarc::IsAlwaysTail(ARCInstKind Class) { // ARCInstKind::RetainBlock may be given a stack argument. @@ -541,7 +541,7 @@ bool llvm::objcarc::IsAlwaysTail(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class represents instructions which are never safe +/// Test if the given class represents instructions which are never safe /// to mark with the "tail" keyword. bool llvm::objcarc::IsNeverTail(ARCInstKind Class) { /// It is never safe to tail call objc_autorelease since by tail calling @@ -580,7 +580,7 @@ bool llvm::objcarc::IsNeverTail(ARCInstKind Class) { llvm_unreachable("covered switch isn't covered?"); } -/// \brief Test if the given class represents instructions which are always safe +/// Test if the given class represents instructions which are always safe /// to mark with the nounwind attribute. 
bool llvm::objcarc::IsNoThrow(ARCInstKind Class) { // objc_retainBlock is not nounwind because it calls user copy constructors diff --git a/llvm/lib/Analysis/OrderedBasicBlock.cpp b/llvm/lib/Analysis/OrderedBasicBlock.cpp index a04c0aef04b..6c47651eae9 100644 --- a/llvm/lib/Analysis/OrderedBasicBlock.cpp +++ b/llvm/lib/Analysis/OrderedBasicBlock.cpp @@ -30,7 +30,7 @@ OrderedBasicBlock::OrderedBasicBlock(const BasicBlock *BasicB) LastInstFound = BB->end(); } -/// \brief Given no cached results, find if \p A comes before \p B in \p BB. +/// Given no cached results, find if \p A comes before \p B in \p BB. /// Cache and number out instruction while walking \p BB. bool OrderedBasicBlock::comesBefore(const Instruction *A, const Instruction *B) { @@ -58,7 +58,7 @@ bool OrderedBasicBlock::comesBefore(const Instruction *A, return Inst != B; } -/// \brief Find out whether \p A dominates \p B, meaning whether \p A +/// Find out whether \p A dominates \p B, meaning whether \p A /// comes before \p B in \p BB. This is a simplification that considers /// cached instruction positions and ignores other basic blocks, being /// only relevant to compare relative instructions positions inside \p BB. diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp index e2113d17196..f2ce0f4aa86 100644 --- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp @@ -1051,7 +1051,7 @@ Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L, return IncV; } -/// \brief Hoist the addrec instruction chain rooted in the loop phi above the +/// Hoist the addrec instruction chain rooted in the loop phi above the /// position. This routine assumes that this is possible (has been checked). void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist, Instruction *Pos, PHINode *LoopPhi) { @@ -1067,7 +1067,7 @@ void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist, } while (InstToHoist != LoopPhi); } -/// \brief Check whether we can cheaply express the requested SCEV in terms of +/// Check whether we can cheaply express the requested SCEV in terms of /// the available PHI SCEV by truncation and/or inversion of the step. static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, diff --git a/llvm/lib/Analysis/StratifiedSets.h b/llvm/lib/Analysis/StratifiedSets.h index 772df175b38..2f20cd12506 100644 --- a/llvm/lib/Analysis/StratifiedSets.h +++ b/llvm/lib/Analysis/StratifiedSets.h @@ -29,7 +29,7 @@ typedef unsigned StratifiedIndex; /// NOTE: ^ This can't be a short -- bootstrapping clang has a case where /// ~1M sets exist. -// \brief Container of information related to a value in a StratifiedSet. +// Container of information related to a value in a StratifiedSet. struct StratifiedInfo { StratifiedIndex Index; /// For field sensitivity, etc. we can tack fields on here. @@ -37,7 +37,7 @@ struct StratifiedInfo { /// A "link" between two StratifiedSets. struct StratifiedLink { - /// \brief This is a value used to signify "does not exist" where the + /// This is a value used to signify "does not exist" where the /// StratifiedIndex type is used. 
/// /// This is used instead of Optional<StratifiedIndex> because @@ -63,7 +63,7 @@ struct StratifiedLink { void clearAbove() { Above = SetSentinel; } }; -/// \brief These are stratified sets, as described in "Fast algorithms for +/// These are stratified sets, as described in "Fast algorithms for /// Dyck-CFL-reachability with applications to Alias Analysis" by Zhang Q, Lyu M /// R, Yuan H, and Su Z. -- in short, this is meant to represent different sets /// of Value*s. If two Value*s are in the same set, or if both sets have @@ -172,7 +172,7 @@ private: /// remap has occurred, and use this information so we can defer renumbering set /// elements until build time. template <typename T> class StratifiedSetsBuilder { - /// \brief Represents a Stratified Set, with information about the Stratified + /// Represents a Stratified Set, with information about the Stratified /// Set above it, the set below it, and whether the current set has been /// remapped to another. struct BuilderLink { @@ -263,7 +263,7 @@ template <typename T> class StratifiedSetsBuilder { StratifiedIndex Remap; }; - /// \brief This function performs all of the set unioning/value renumbering + /// This function performs all of the set unioning/value renumbering /// that we've been putting off, and generates a vector<StratifiedLink> that /// may be placed in a StratifiedSets instance. void finalizeSets(std::vector<StratifiedLink> &StratLinks) { @@ -302,7 +302,7 @@ template <typename T> class StratifiedSetsBuilder { } } - /// \brief There's a guarantee in StratifiedLink where all bits set in a + /// There's a guarantee in StratifiedLink where all bits set in a /// Link.externals will be set in all Link.externals "below" it. static void propagateAttrs(std::vector<StratifiedLink> &Links) { const auto getHighestParentAbove = [&Links](StratifiedIndex Idx) { @@ -351,7 +351,7 @@ public: return addAtMerging(Main, NewIndex); } - /// \brief Restructures the stratified sets as necessary to make "ToAdd" in a + /// Restructures the stratified sets as necessary to make "ToAdd" in a /// set above "Main". There are some cases where this is not possible (see /// above), so we merge them such that ToAdd and Main are in the same set. bool addAbove(const T &Main, const T &ToAdd) { @@ -364,7 +364,7 @@ public: return addAtMerging(ToAdd, Above); } - /// \brief Restructures the stratified sets as necessary to make "ToAdd" in a + /// Restructures the stratified sets as necessary to make "ToAdd" in a /// set below "Main". There are some cases where this is not possible (see /// above), so we merge them such that ToAdd and Main are in the same set. bool addBelow(const T &Main, const T &ToAdd) { @@ -437,7 +437,7 @@ private: return *Current; } - /// \brief Merges two sets into one another. Assumes that these sets are not + /// Merges two sets into one another. Assumes that these sets are not /// already one in the same. void merge(StratifiedIndex Idx1, StratifiedIndex Idx2) { assert(inbounds(Idx1) && inbounds(Idx2)); @@ -458,7 +458,7 @@ private: mergeDirect(Idx1, Idx2); } - /// \brief Merges two sets assuming that the set at `Idx1` is unreachable from + /// Merges two sets assuming that the set at `Idx1` is unreachable from /// traversing above or below the set at `Idx2`. 
void mergeDirect(StratifiedIndex Idx1, StratifiedIndex Idx2) { assert(inbounds(Idx1) && inbounds(Idx2)); diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp index 6ac4fbe2dc2..684545243ae 100644 --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -31,7 +31,7 @@ static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::desc("Recognize reduction patterns.")); namespace { -/// \brief No-op implementation of the TTI interface using the utility base +/// No-op implementation of the TTI interface using the utility base /// classes. /// /// This is used when no target specific information is available. diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index a799f7c05b2..f757acfade0 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -1754,7 +1754,7 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, return false; } -/// \brief Test whether a GEP's result is known to be non-null. +/// Test whether a GEP's result is known to be non-null. /// /// Uses properties inherent in a GEP to try to determine whether it is known /// to be non-null. @@ -3380,7 +3380,7 @@ uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { return Len == ~0ULL ? 1 : Len; } -/// \brief \p PN defines a loop-variant pointer to an object. Check if the +/// \p PN defines a loop-variant pointer to an object. Check if the /// previous iteration of the loop was referring to the same object as \p PN. static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI) { @@ -3729,7 +3729,7 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS, return OverflowResult::MayOverflow; } -/// \brief Return true if we can prove that adding the two values of the +/// Return true if we can prove that adding the two values of the /// knownbits will not overflow. /// Otherwise return false. static bool checkRippleForSignedAdd(const KnownBits &LHSKnown, diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp index 7827bcf5c6b..d73d2473643 100644 --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -28,7 +28,7 @@ using namespace llvm; using namespace llvm::PatternMatch; -/// \brief Identify if the intrinsic is trivially vectorizable. +/// Identify if the intrinsic is trivially vectorizable. /// This method returns true if the intrinsic's argument types are all /// scalars for the scalar form of the intrinsic and all vectors for /// the vector form of the intrinsic. @@ -67,7 +67,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) { } } -/// \brief Identifies if the intrinsic has a scalar operand. It check for +/// Identifies if the intrinsic has a scalar operand. It check for /// ctlz,cttz and powi special intrinsics whose argument is scalar. bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx) { @@ -81,7 +81,7 @@ bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, } } -/// \brief Returns intrinsic ID for call. +/// Returns intrinsic ID for call. /// For the input call instruction it finds mapping intrinsic and returns /// its ID, in case it does not found it return not_intrinsic. 
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI, @@ -97,7 +97,7 @@ Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI, return Intrinsic::not_intrinsic; } -/// \brief Find the operand of the GEP that should be checked for consecutive +/// Find the operand of the GEP that should be checked for consecutive /// stores. This ignores trailing indices that have no effect on the final /// pointer. unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) { @@ -121,7 +121,7 @@ unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) { return LastOperand; } -/// \brief If the argument is a GEP, then returns the operand identified by +/// If the argument is a GEP, then returns the operand identified by /// getGEPInductionOperand. However, if there is some other non-loop-invariant /// operand, it returns that instead. Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { @@ -140,7 +140,7 @@ Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { return GEP->getOperand(InductionOperand); } -/// \brief If a value has only one user that is a CastInst, return it. +/// If a value has only one user that is a CastInst, return it. Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) { Value *UniqueCast = nullptr; for (User *U : Ptr->users()) { @@ -155,7 +155,7 @@ Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) { return UniqueCast; } -/// \brief Get the stride of a pointer access in a loop. Looks for symbolic +/// Get the stride of a pointer access in a loop. Looks for symbolic /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise. Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { auto *PtrTy = dyn_cast<PointerType>(Ptr->getType()); @@ -230,7 +230,7 @@ Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { return Stride; } -/// \brief Given a vector and an element number, see if the scalar value is +/// Given a vector and an element number, see if the scalar value is /// already around as a register, for example if it were inserted then extracted /// from the vector. Value *llvm::findScalarElement(Value *V, unsigned EltNo) { @@ -280,7 +280,7 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) { return nullptr; } -/// \brief Get splat value if the input is a splat vector or return nullptr. +/// Get splat value if the input is a splat vector or return nullptr. /// This function is not fully general. It checks only 2 cases: /// the input value is (1) a splat constants vector or (2) a sequence /// of instructions that broadcast a single value into a vector. diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index cbd597f1b15..5f044f41dfd 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -533,7 +533,7 @@ public: Error materializeModule() override; std::vector<StructType *> getIdentifiedStructTypes() const override; - /// \brief Main interface to parsing a bitcode buffer. + /// Main interface to parsing a bitcode buffer. /// \returns true if an error occurred. 
Error parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata = false, bool IsImporting = false); @@ -1202,7 +1202,7 @@ static void addRawAttributeValue(AttrBuilder &B, uint64_t Val) { } } -/// \brief This fills an AttrBuilder object with the LLVM attributes that have +/// This fills an AttrBuilder object with the LLVM attributes that have /// been decoded from the given integer. This function must stay in sync with /// 'encodeLLVMAttributesForBitcode'. static void decodeLLVMAttributesForBitcode(AttrBuilder &B, @@ -5705,7 +5705,7 @@ llvm::getBitcodeFileContents(MemoryBufferRef Buffer) { } } -/// \brief Get a lazy one-at-time loading module from bitcode. +/// Get a lazy one-at-time loading module from bitcode. /// /// This isn't always used in a lazy context. In particular, it's also used by /// \a parseModule(). If this is truly lazy, then we need to eagerly pull diff --git a/llvm/lib/Bitcode/Reader/ValueList.cpp b/llvm/lib/Bitcode/Reader/ValueList.cpp index 6bf510890b6..1ab22b5cc3d 100644 --- a/llvm/lib/Bitcode/Reader/ValueList.cpp +++ b/llvm/lib/Bitcode/Reader/ValueList.cpp @@ -32,7 +32,7 @@ namespace llvm { namespace { -/// \brief A class for maintaining the slot number definition +/// A class for maintaining the slot number definition /// as a placeholder for the actual definition for forward constants defs. class ConstantPlaceHolder : public ConstantExpr { public: @@ -46,7 +46,7 @@ public: // allocate space for exactly one operand void *operator new(size_t s) { return User::operator new(s, 1); } - /// \brief Methods to support type inquiry through isa, cast, and dyn_cast. + /// Methods to support type inquiry through isa, cast, and dyn_cast. static bool classof(const Value *V) { return isa<ConstantExpr>(V) && cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1; diff --git a/llvm/lib/CodeGen/AsmPrinter/AddressPool.h b/llvm/lib/CodeGen/AsmPrinter/AddressPool.h index 990a158d87c..5350006bf74 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AddressPool.h +++ b/llvm/lib/CodeGen/AsmPrinter/AddressPool.h @@ -39,7 +39,7 @@ class AddressPool { public: AddressPool() = default; - /// \brief Returns the index into the address pool with the given + /// Returns the index into the address pool with the given /// label/symbol. unsigned getIndex(const MCSymbol *Sym, bool TLS = false); diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index f81d6d04d16..a1af6fddda3 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1186,7 +1186,7 @@ void AsmPrinter::EmitFunctionBody() { OutStreamer->AddBlankLine(); } -/// \brief Compute the number of Global Variables that uses a Constant. +/// Compute the number of Global Variables that uses a Constant. static unsigned getNumGlobalVariableUses(const Constant *C) { if (!C) return 0; @@ -1201,7 +1201,7 @@ static unsigned getNumGlobalVariableUses(const Constant *C) { return NumUses; } -/// \brief Only consider global GOT equivalents if at least one user is a +/// Only consider global GOT equivalents if at least one user is a /// cstexpr inside an initializer of another global variables. Also, don't /// handle cstexpr inside instructions. 
During global variable emission, /// candidates are skipped and are emitted later in case at least one cstexpr @@ -1224,7 +1224,7 @@ static bool isGOTEquivalentCandidate(const GlobalVariable *GV, return NumGOTEquivUsers > 0; } -/// \brief Unnamed constant global variables solely contaning a pointer to +/// An unnamed constant global variable solely containing a pointer to /// another global variable is equivalent to a GOT table entry; it contains /// the address of another symbol. Optimize it and replace accesses to these /// "GOT equivalents" by using the GOT entry for the final global instead. @@ -1245,7 +1245,7 @@ void AsmPrinter::computeGlobalGOTEquivs(Module &M) { } } -/// \brief Constant expressions using GOT equivalent globals may not be eligible +/// Constant expressions using GOT equivalent globals may not be eligible /// for PC relative GOT entry conversion; in such cases we need to emit such /// globals we previously omitted in EmitGlobalVariable. void AsmPrinter::emitGlobalGOTEquivs() { @@ -2405,7 +2405,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) { } } -/// \brief Transform a not absolute MCExpr containing a reference to a GOT +/// Transform a non-absolute MCExpr containing a reference to a GOT /// equivalent global into a target specific GOT pc relative access to the /// final symbol. static void handleIndirectSymViaGOTPCRel(AsmPrinter &AP, const MCExpr **ME, diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h index 638226e90a7..f5ac95a20b1 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterHandler.h @@ -27,29 +27,29 @@ class MCSymbol; typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm); -/// \brief Collects and handles AsmPrinter objects required to build debug +/// Collects and handles AsmPrinter objects required to build debug /// or EH information. class AsmPrinterHandler { public: virtual ~AsmPrinterHandler(); - /// \brief For symbols that have a size designated (e.g. common symbols), + /// For symbols that have a size designated (e.g. common symbols), /// this tracks that size. virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0; - /// \brief Emit all sections that should come after the content. + /// Emit all sections that should come after the content. virtual void endModule() = 0; - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. /// Every beginFunction(MF) call should be followed by an endFunction(MF) /// call. virtual void beginFunction(const MachineFunction *MF) = 0; - // \brief Emit any of function marker (like .cfi_endproc). This is called + // Emit any end-of-function marker (like .cfi_endproc). This is called // before endFunction and cannot switch sections. virtual void markFunctionEnd(); - /// \brief Gather post-function debug information. + /// Gather post-function debug information. /// Please note that some AsmPrinter implementations may not call /// beginFunction at all. virtual void endFunction(const MachineFunction *MF) = 0; @@ -58,15 +58,15 @@ public: ExceptionSymbolProvider ESP) {} virtual void endFragment() {} - /// \brief Emit target-specific EH funclet machinery. + /// Emit target-specific EH funclet machinery. virtual void beginFunclet(const MachineBasicBlock &MBB, MCSymbol *Sym = nullptr) {} virtual void endFunclet() {} - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction.
virtual void beginInstruction(const MachineInstr *MI) = 0; - /// \brief Process end of an instruction. + /// Process end of an instruction. virtual void endInstruction() = 0; }; } // End of namespace llvm diff --git a/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h b/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h index e5941de69ff..2163cc7e3e1 100644 --- a/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h +++ b/llvm/lib/CodeGen/AsmPrinter/ByteStreamer.h @@ -76,7 +76,7 @@ private: SmallVectorImpl<char> &Buffer; SmallVectorImpl<std::string> &Comments; - /// \brief Only verbose textual output needs comments. This will be set to + /// Only verbose textual output needs comments. This will be set to /// true for that case, and false otherwise. If false, comments passed in to /// the emit methods will be ignored. bool GenerateComments; diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h index e16c035cdfd..395a6e37828 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.h @@ -48,7 +48,7 @@ class MCStreamer; class MCSymbol; class MachineFunction; -/// \brief Collects and handles line tables information in a CodeView format. +/// Collects and handles line tables information in a CodeView format. class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase { MCStreamer &OS; BumpPtrAllocator Allocator; @@ -379,10 +379,10 @@ class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase { unsigned getPointerSizeInBytes(); protected: - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. void beginFunctionImpl(const MachineFunction *MF) override; - /// \brief Gather post-function debug information. + /// Gather post-function debug information. void endFunctionImpl(const MachineFunction *) override; public: @@ -390,10 +390,10 @@ public: void setSymbolSize(const MCSymbol *, uint64_t) override {} - /// \brief Emit the COFF section that holds the line table information. + /// Emit the COFF section that holds the line table information. void endModule() override; - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override; }; diff --git a/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp b/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp index 15ade3c96df..5392b7150a7 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DIEHash.cpp @@ -28,7 +28,7 @@ using namespace llvm; #define DEBUG_TYPE "dwarfdebug" -/// \brief Grabs the string in whichever attribute is passed in and returns +/// Grabs the string in whichever attribute is passed in and returns /// a reference to it. static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) { // Iterate through all the attributes until we find the one we're @@ -40,7 +40,7 @@ static StringRef getDIEStringAttr(const DIE &Die, uint16_t Attr) { return StringRef(""); } -/// \brief Adds the string in \p Str to the hash. This also hashes +/// Adds the string in \p Str to the hash. This also hashes /// a trailing NULL with the string. void DIEHash::addString(StringRef Str) { DEBUG(dbgs() << "Adding string " << Str << " to hash.\n"); @@ -51,7 +51,7 @@ void DIEHash::addString(StringRef Str) { // FIXME: The LEB128 routines are copied and only slightly modified out of // LEB128.h. -/// \brief Adds the unsigned in \p Value to the hash encoded as a ULEB128. +/// Adds the unsigned in \p Value to the hash encoded as a ULEB128. 
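(For context on the two hashing hunks above: ULEB128 emits an integer in base-128 groups, low bits first, with the top bit of each byte flagging continuation; this is the encoding that the LEB128.h logic copied into DIEHash.cpp, per the FIXME, implements. A self-contained sketch with an illustrative name:)

#include <cstdint>
#include <vector>

std::vector<uint8_t> encodeULEB128Sketch(uint64_t Value) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f; // low 7 bits of the remaining value
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;              // set the "more bytes follow" bit
    Out.push_back(Byte);
  } while (Value != 0);
  return Out;                    // e.g. 624485 encodes as 0xE5 0x8E 0x26
}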
void DIEHash::addULEB128(uint64_t Value) { DEBUG(dbgs() << "Adding ULEB128 " << Value << " to hash.\n"); do { @@ -77,7 +77,7 @@ void DIEHash::addSLEB128(int64_t Value) { } while (More); } -/// \brief Including \p Parent adds the context of Parent to the hash.. +/// Including \p Parent adds the context of Parent to the hash. void DIEHash::addParentContext(const DIE &Parent) { DEBUG(dbgs() << "Adding parent context to hash...\n"); diff --git a/llvm/lib/CodeGen/AsmPrinter/DIEHash.h b/llvm/lib/CodeGen/AsmPrinter/DIEHash.h index 29337ae38a9..85f2fea937f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DIEHash.h +++ b/llvm/lib/CodeGen/AsmPrinter/DIEHash.h @@ -23,7 +23,7 @@ namespace llvm { class AsmPrinter; class CompileUnit; -/// \brief An object containing the capability of hashing and adding hash +/// An object containing the capability of hashing and adding hash /// attributes onto a DIE. class DIEHash { // Collection of all attributes used in hashing a particular DIE. @@ -35,66 +35,66 @@ class DIEHash { public: DIEHash(AsmPrinter *A = nullptr) : AP(A) {} - /// \brief Computes the CU signature. + /// Computes the CU signature. uint64_t computeCUSignature(StringRef DWOName, const DIE &Die); - /// \brief Computes the type signature. + /// Computes the type signature. uint64_t computeTypeSignature(const DIE &Die); // Helper routines to process parts of a DIE. private: - /// \brief Adds the parent context of \param Die to the hash. + /// Adds the parent context of \param Die to the hash. void addParentContext(const DIE &Die); - /// \brief Adds the attributes of \param Die to the hash. + /// Adds the attributes of \param Die to the hash. void addAttributes(const DIE &Die); - /// \brief Computes the full DWARF4 7.27 hash of the DIE. + /// Computes the full DWARF4 7.27 hash of the DIE. void computeHash(const DIE &Die); // Routines that add DIEValues to the hash. public: - /// \brief Adds \param Value to the hash. + /// Adds \param Value to the hash. void update(uint8_t Value) { Hash.update(Value); } - /// \brief Encodes and adds \param Value to the hash as a ULEB128. + /// Encodes and adds \param Value to the hash as a ULEB128. void addULEB128(uint64_t Value); - /// \brief Encodes and adds \param Value to the hash as a SLEB128. + /// Encodes and adds \param Value to the hash as a SLEB128. void addSLEB128(int64_t Value); private: - /// \brief Adds \param Str to the hash and includes a NULL byte. + /// Adds \param Str to the hash and includes a NULL byte. void addString(StringRef Str); - /// \brief Collects the attributes of DIE \param Die into the \param Attrs + /// Collects the attributes of DIE \param Die into the \param Attrs /// structure. void collectAttributes(const DIE &Die, DIEAttrs &Attrs); - /// \brief Hashes the attributes in \param Attrs in order. + /// Hashes the attributes in \param Attrs in order. void hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag); - /// \brief Hashes the data in a block like DIEValue, e.g. DW_FORM_block or + /// Hashes the data in a block-like DIEValue, e.g. DW_FORM_block or /// DW_FORM_exprloc. void hashBlockData(const DIE::const_value_range &Values); - /// \brief Hashes the contents pointed to in the .debug_loc section. + /// Hashes the contents pointed to in the .debug_loc section. void hashLocList(const DIELocList &LocList); - /// \brief Hashes an individual attribute. + /// Hashes an individual attribute. void hashAttribute(const DIEValue &Value, dwarf::Tag Tag); - /// \brief Hashes an attribute that refers to another DIE.
+ /// Hashes an attribute that refers to another DIE. void hashDIEEntry(dwarf::Attribute Attribute, dwarf::Tag Tag, const DIE &Entry); - /// \brief Hashes a reference to a named type in such a way that is + /// Hashes a reference to a named type in such a way that is /// independent of whether that type is described by a declaration or a /// definition. void hashShallowTypeReference(dwarf::Attribute Attribute, const DIE &Entry, StringRef Name); - /// \brief Hashes a reference to a previously referenced type DIE. + /// Hashes a reference to a previously referenced type DIE. void hashRepeatedTypeReference(dwarf::Attribute Attribute, unsigned DieNumber); diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp index 856758c8e4f..c6c661dddf9 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp @@ -31,7 +31,7 @@ using namespace llvm; #define DEBUG_TYPE "dwarfdebug" -// \brief If @MI is a DBG_VALUE with debug value described by a +// If @MI is a DBG_VALUE with debug value described by a // defined register, returns the number of this register. // In the other case, returns 0. static unsigned isDescribedByReg(const MachineInstr &MI) { @@ -86,7 +86,7 @@ using RegDescribedVarsMap = std::map<unsigned, SmallVector<InlinedVariable, 1>>; } // end anonymous namespace -// \brief Claim that @Var is not described by @RegNo anymore. +// Claim that @Var is not described by @RegNo anymore. static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, InlinedVariable Var) { const auto &I = RegVars.find(RegNo); @@ -100,7 +100,7 @@ static void dropRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, RegVars.erase(I); } -// \brief Claim that @Var is now described by @RegNo. +// Claim that @Var is now described by @RegNo. static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, InlinedVariable Var) { assert(RegNo != 0U); @@ -109,7 +109,7 @@ static void addRegDescribedVar(RegDescribedVarsMap &RegVars, unsigned RegNo, VarSet.push_back(Var); } -// \brief Terminate the location range for variables described by register at +// Terminate the location range for variables described by register at // @I by inserting @ClobberingInstr to their history. static void clobberRegisterUses(RegDescribedVarsMap &RegVars, RegDescribedVarsMap::iterator I, @@ -122,7 +122,7 @@ static void clobberRegisterUses(RegDescribedVarsMap &RegVars, RegVars.erase(I); } -// \brief Terminate the location range for variables described by register +// Terminate the location range for variables described by register // @RegNo by inserting @ClobberingInstr to their history. static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo, DbgValueHistoryMap &HistMap, @@ -133,7 +133,7 @@ static void clobberRegisterUses(RegDescribedVarsMap &RegVars, unsigned RegNo, clobberRegisterUses(RegVars, I, HistMap, ClobberingInstr); } -// \brief Returns the first instruction in @MBB which corresponds to +// Returns the first instruction in @MBB which corresponds to // the function epilogue, or nullptr if @MBB doesn't contain an epilogue. 
static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) { auto LastMI = MBB.getLastNonDebugInstr(); @@ -155,7 +155,7 @@ static const MachineInstr *getFirstEpilogueInst(const MachineBasicBlock &MBB) { return &*MBB.begin(); } -// \brief Collect registers that are modified in the function body (their +// Collect registers that are modified in the function body (their // contents are changed outside of the prologue and epilogue). static void collectChangingRegs(const MachineFunction *MF, const TargetRegisterInfo *TRI, diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h index 6dff45dce55..ac49657b68f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocEntry.h @@ -21,7 +21,7 @@ namespace llvm { class AsmPrinter; -/// \brief This struct describes location entries emitted in the .debug_loc +/// This struct describes location entries emitted in the .debug_loc /// section. class DebugLocEntry { /// Begin and end symbols for the address range that this location is valid. @@ -29,7 +29,7 @@ class DebugLocEntry { const MCSymbol *End; public: - /// \brief A single location or constant. + /// A single location or constant. struct Value { Value(const DIExpression *Expr, int64_t i) : Expression(Expr), EntryKind(E_Integer) { @@ -106,13 +106,13 @@ public: Values.push_back(std::move(Val)); } - /// \brief If this and Next are describing different pieces of the same + /// If this and Next are describing different pieces of the same /// variable, merge them by appending Next's values to the current /// list of values. /// Return true if the merge was successful. bool MergeValues(const DebugLocEntry &Next); - /// \brief Attempt to merge this DebugLocEntry with Next and return + /// Attempt to merge this DebugLocEntry with Next and return /// true if the merge was successful. Entries can be merged if they /// share the same Loc/Constant and if Next immediately follows this /// Entry. @@ -136,7 +136,7 @@ public: }) && "value must be a piece"); } - // \brief Sort the pieces by offset. + // Sort the pieces by offset. // Remove any duplicate entries by dropping all but the first. void sortUniqueValues() { llvm::sort(Values.begin(), Values.end()); @@ -148,12 +148,12 @@ public: Values.end()); } - /// \brief Lower this entry into a DWARF expression. + /// Lower this entry into a DWARF expression. void finalize(const AsmPrinter &AP, DebugLocStream::ListBuilder &List, const DIBasicType *BT); }; -/// \brief Compare two Values for equality. +/// Compare two Values for equality. inline bool operator==(const DebugLocEntry::Value &A, const DebugLocEntry::Value &B) { if (A.EntryKind != B.EntryKind) diff --git a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h index 0c551dfff9c..8dcf5cbc188 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h +++ b/llvm/lib/CodeGen/AsmPrinter/DebugLocStream.h @@ -22,7 +22,7 @@ class DwarfCompileUnit; class MachineInstr; class MCSymbol; -/// \brief Byte stream of .debug_loc entries. +/// Byte stream of .debug_loc entries. /// /// Stores a unified stream of .debug_loc entries. There's \a List for each /// variable/inlined-at pair, and an \a Entry for each \a DebugLocEntry. @@ -55,7 +55,7 @@ private: SmallString<256> DWARFBytes; SmallVector<std::string, 32> Comments; - /// \brief Only verbose textual output needs comments. This will be set to + /// Only verbose textual output needs comments.
This will be set to /// true for that case, and false otherwise. bool GenerateComments; @@ -69,7 +69,7 @@ public: class EntryBuilder; private: - /// \brief Start a new .debug_loc entry list. + /// Start a new .debug_loc entry list. /// /// Start a new .debug_loc entry list. Return the new list's index so it can /// be retrieved later via \a getList(). @@ -89,7 +89,7 @@ private: /// \return false iff the list is deleted. bool finalizeList(AsmPrinter &Asm); - /// \brief Start a new .debug_loc entry. + /// Start a new .debug_loc entry. /// /// Until the next call, bytes added to the stream will be added to this /// entry. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h index 3325b1a345e..60821c2cc99 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h @@ -83,7 +83,7 @@ class DwarfCompileUnit final : public DwarfUnit { DenseMap<const MDNode *, DIE *> AbstractSPDies; DenseMap<const MDNode *, std::unique_ptr<DbgVariable>> AbstractVariables; - /// \brief Construct a DIE for the given DbgVariable without initializing the + /// Construct a DIE for the given DbgVariable without initializing the /// DbgVariable's DIE reference. DIE *constructVariableDIEImpl(const DbgVariable &DV, bool Abstract); @@ -159,7 +159,7 @@ public: void attachLowHighPC(DIE &D, const MCSymbol *Begin, const MCSymbol *End); - /// \brief Find DIE for the given subprogram and attach appropriate + /// Find DIE for the given subprogram and attach appropriate /// DW_AT_low_pc and DW_AT_high_pc attributes. If there are global /// variables in this scope then create and insert DIEs for these /// variables. @@ -168,7 +168,7 @@ public: void constructScopeDIE(LexicalScope *Scope, SmallVectorImpl<DIE *> &FinalChildren); - /// \brief A helper function to construct a RangeSpanList for a given + /// A helper function to construct a RangeSpanList for a given /// lexical scope. void addScopeRangeList(DIE &ScopeDIE, SmallVector<RangeSpan, 2> Range); @@ -177,11 +177,11 @@ public: void attachRangesOrLowHighPC(DIE &D, const SmallVectorImpl<InsnRange> &Ranges); - /// \brief This scope represents inlined body of a function. Construct + /// This scope represents inlined body of a function. Construct /// DIE to represent this concrete inlined copy of the function. DIE *constructInlinedScopeDIE(LexicalScope *Scope); - /// \brief Construct new DW_TAG_lexical_block for this scope and + /// Construct new DW_TAG_lexical_block for this scope and /// attach DW_AT_low_pc/DW_AT_high_pc labels. DIE *constructLexicalScopeDIE(LexicalScope *Scope); @@ -196,14 +196,14 @@ public: SmallVectorImpl<DIE *> &Children, bool *HasNonScopeChildren = nullptr); - /// \brief Construct a DIE for this subprogram scope. + /// Construct a DIE for this subprogram scope. void constructSubprogramScopeDIE(const DISubprogram *Sub, LexicalScope *Scope); DIE *createAndAddScopeChildren(LexicalScope *Scope, DIE &ScopeDIE); void constructAbstractSubprogramScopeDIE(LexicalScope *Scope); - /// \brief Construct import_module DIE. + /// Construct import_module DIE. 
+ /// Construct import_module DIE.
DIE *constructImportedEntityDIE(const DIImportedEntity *Module); void finishSubprogramDefinition(const DISubprogram *SP); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index a8e36ec66e5..182fbd6a6ce 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -939,7 +939,7 @@ static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) { llvm_unreachable("Unexpected 4-operand DBG_VALUE instruction!"); } -/// \brief If this and Next are describing different fragments of the same +/// If this and Next are describing different fragments of the same /// variable, merge them by appending Next's values to the current /// list of values. /// Return true if the merge was successful. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h b/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h index 23ed043afb9..442b4fc14b5 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfFile.h @@ -74,30 +74,30 @@ public: return CUs; } - /// \brief Compute the size and offset of a DIE given an incoming Offset. + /// Compute the size and offset of a DIE given an incoming Offset. unsigned computeSizeAndOffset(DIE &Die, unsigned Offset); - /// \brief Compute the size and offset of all the DIEs. + /// Compute the size and offset of all the DIEs. void computeSizeAndOffsets(); - /// \brief Compute the size and offset of all the DIEs in the given unit. + /// Compute the size and offset of all the DIEs in the given unit. /// \returns The size of the root DIE. unsigned computeSizeAndOffsetsForUnit(DwarfUnit *TheU); - /// \brief Add a unit to the list of CUs. + /// Add a unit to the list of CUs. void addUnit(std::unique_ptr<DwarfCompileUnit> U); /// Emit the string table offsets header. void emitStringOffsetsTableHeader(MCSection *Section); - /// \brief Emit all of the units to the section listed with the given + /// Emit all of the units to the section listed with the given /// abbreviation section. void emitUnits(bool UseOffsets); - /// \brief Emit the given unit to its section. + /// Emit the given unit to its section. void emitUnit(DwarfUnit *U, bool UseOffsets); - /// \brief Emit a set of abbreviations to the specific section. + /// Emit a set of abbreviations to the specific section. void emitAbbrevs(MCSection *); /// Emit all of the strings to the section given. If OffsetSection is @@ -107,7 +107,7 @@ public: void emitStrings(MCSection *StrSection, MCSection *OffsetSection = nullptr, bool UseRelativeOffsets = false); - /// \brief Returns the string pool. + /// Returns the string pool. DwarfStringPool &getStringPool() { return StrPool; } MCSymbol *getStringOffsetsStartSym() const { return StringOffsetsStartSym; } diff --git a/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h b/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h index 553b4ae261c..124e8f04bfa 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h +++ b/llvm/lib/CodeGen/AsmPrinter/WinCFGuard.h @@ -29,23 +29,23 @@ public: void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {} - /// \brief Emit the Control Flow Guard function ID table + /// Emit the Control Flow Guard function ID table void endModule() override; - /// \brief Gather pre-function debug information. + /// Gather pre-function debug information. /// Every beginFunction(MF) call should be followed by an endFunction(MF) /// call. void beginFunction(const MachineFunction *MF) override {} - /// \brief Gather post-function debug information. 
+ /// Gather post-function debug information. /// Please note that some AsmPrinter implementations may not call /// beginFunction at all. void endFunction(const MachineFunction *MF) override {} - /// \brief Process beginning of an instruction. + /// Process beginning of an instruction. void beginInstruction(const MachineInstr *MI) override {} - /// \brief Process end of an instruction. + /// Process end of an instruction. void endInstruction() override {} }; diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.h b/llvm/lib/CodeGen/AsmPrinter/WinException.h index 371061c2c2e..eed3c4453ff 100644 --- a/llvm/lib/CodeGen/AsmPrinter/WinException.h +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.h @@ -100,7 +100,7 @@ public: /// Gather and emit post-function exception information. void endFunction(const MachineFunction *) override; - /// \brief Emit target-specific EH funclet machinery. + /// Emit target-specific EH funclet machinery. void beginFunclet(const MachineBasicBlock &MBB, MCSymbol *Sym) override; void endFunclet() override; }; diff --git a/llvm/lib/CodeGen/BranchFolding.h b/llvm/lib/CodeGen/BranchFolding.h index 0f095255013..21e1e2e6134 100644 --- a/llvm/lib/CodeGen/BranchFolding.h +++ b/llvm/lib/CodeGen/BranchFolding.h @@ -132,7 +132,7 @@ class TargetRegisterInfo; LivePhysRegs LiveRegs; public: - /// \brief This class keeps track of branch frequencies of newly created + /// This class keeps track of branch frequencies of newly created /// blocks and tail-merged blocks. class MBFIWrapper { public: diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp index 5e60b7ae32f..1e30a08b9dc 100644 --- a/llvm/lib/CodeGen/BreakFalseDeps.cpp +++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp @@ -74,7 +74,7 @@ private: /// Also break dependencies on partial defs and undef uses. void processDefs(MachineInstr *MI); - /// \brief Helps avoid false dependencies on undef registers by updating the + /// Helps avoid false dependencies on undef registers by updating the /// machine instructions' undef operand to use a register that the instruction /// is truly dependent on, or use a register with clearance higher than Pref. /// Returns true if it was able to find a true dependency, thus not requiring @@ -82,11 +82,11 @@ private: bool pickBestRegisterForUndef(MachineInstr *MI, unsigned OpIdx, unsigned Pref); - /// \brief Return true to if it makes sense to break dependence on a partial + /// Return true if it makes sense to break dependence on a partial /// def or undef use. bool shouldBreakDependence(MachineInstr *, unsigned OpIdx, unsigned Pref); - /// \brief Break false dependencies on undefined register reads. + /// Break false dependencies on undefined register reads. /// Walk the block backward computing precise liveness. This is expensive, so /// we only do it on demand. Note that the occurrence of undefined register /// reads that should be broken is very rare, but when they occur we may have diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 1b2bb60ed55..8729db47b7d 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -2022,11 +2022,11 @@ LLVM_DUMP_METHOD void ExtAddrMode::dump() const { namespace { -/// \brief This class provides transaction based operation on the IR. +/// This class provides transaction-based operation on the IR. /// Every change made through this class is recorded in the internal state and /// can be undone (rollback) until commit is called.
class TypePromotionTransaction { - /// \brief This represents the common interface of the individual transaction. + /// This represents the common interface of the individual transaction. /// Each class implements the logic for doing one specific modification on /// the IR via the TypePromotionTransaction. class TypePromotionAction { @@ -2035,20 +2035,20 @@ class TypePromotionTransaction { Instruction *Inst; public: - /// \brief Constructor of the action. + /// Constructor of the action. /// The constructor performs the related action on the IR. TypePromotionAction(Instruction *Inst) : Inst(Inst) {} virtual ~TypePromotionAction() = default; - /// \brief Undo the modification done by this action. + /// Undo the modification done by this action. /// When this method is called, the IR must be in the same state as it was /// before this action was applied. /// \pre Undoing the action works if and only if the IR is in the exact same /// state as it was directly after this action was applied. virtual void undo() = 0; - /// \brief Advocate every change made by this action. + /// Advocate every change made by this action. /// When the results on the IR of the action are to be kept, it is important /// to call this function, otherwise hidden information may be kept forever. virtual void commit() { @@ -2056,7 +2056,7 @@ class TypePromotionTransaction { } }; - /// \brief Utility to remember the position of an instruction. + /// Utility to remember the position of an instruction. class InsertionHandler { /// Position of an instruction. /// Either an instruction: @@ -2071,7 +2071,7 @@ class TypePromotionTransaction { bool HasPrevInstruction; public: - /// \brief Record the position of \p Inst. + /// Record the position of \p Inst. InsertionHandler(Instruction *Inst) { BasicBlock::iterator It = Inst->getIterator(); HasPrevInstruction = (It != (Inst->getParent()->begin())); @@ -2081,7 +2081,7 @@ class TypePromotionTransaction { Point.BB = Inst->getParent(); } - /// \brief Insert \p Inst at the recorded position. + /// Insert \p Inst at the recorded position. void insert(Instruction *Inst) { if (HasPrevInstruction) { if (Inst->getParent()) @@ -2097,27 +2097,27 @@ class TypePromotionTransaction { } }; - /// \brief Move an instruction before another. + /// Move an instruction before another. class InstructionMoveBefore : public TypePromotionAction { /// Original position of the instruction. InsertionHandler Position; public: - /// \brief Move \p Inst before \p Before. + /// Move \p Inst before \p Before. InstructionMoveBefore(Instruction *Inst, Instruction *Before) : TypePromotionAction(Inst), Position(Inst) { DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); Inst->moveBefore(Before); } - /// \brief Move the instruction back to its original position. + /// Move the instruction back to its original position. void undo() override { DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); Position.insert(Inst); } }; - /// \brief Set the operand of an instruction with a new value. + /// Set the operand of an instruction with a new value. class OperandSetter : public TypePromotionAction { /// Original operand of the instruction. Value *Origin; @@ -2126,7 +2126,7 @@ class TypePromotionTransaction { unsigned Idx; public: - /// \brief Set \p Idx operand of \p Inst with \p NewVal. + /// Set \p Idx operand of \p Inst with \p NewVal. 
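(The classes in this hunk all follow one shape: each TypePromotionAction performs its IR change on construction and knows how to undo it, and the transaction replays undos in reverse on rollback. A minimal standalone sketch of that undo-log pattern, illustrative only and not the LLVM class:)

#include <memory>
#include <vector>

struct ActionSketch {
  virtual ~ActionSketch() = default;
  virtual void undo() = 0;   // must restore the exact pre-action state
  virtual void commit() {}   // keep the change, drop hidden bookkeeping
};

class TransactionSketch {
  std::vector<std::unique_ptr<ActionSketch>> Log; // in application order
public:
  void record(std::unique_ptr<ActionSketch> A) { Log.push_back(std::move(A)); }
  void rollback() {
    // Undo in reverse order so each action sees the state it left behind.
    for (auto It = Log.rbegin(); It != Log.rend(); ++It)
      (*It)->undo();
    Log.clear();
  }
  void commit() {
    for (auto &A : Log)
      A->commit();
    Log.clear();
  }
};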
OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) : TypePromotionAction(Inst), Idx(Idx) { DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" @@ -2136,7 +2136,7 @@ class TypePromotionTransaction { Inst->setOperand(Idx, NewVal); } - /// \brief Restore the original value of the instruction. + /// Restore the original value of the instruction. void undo() override { DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" @@ -2145,14 +2145,14 @@ class TypePromotionTransaction { } }; - /// \brief Hide the operands of an instruction. + /// Hide the operands of an instruction. /// Do as if this instruction was not using any of its operands. class OperandsHider : public TypePromotionAction { /// The list of original operands. SmallVector<Value *, 4> OriginalValues; public: - /// \brief Remove \p Inst from the uses of the operands of \p Inst. + /// Remove \p Inst from the uses of the operands of \p Inst. OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); unsigned NumOpnds = Inst->getNumOperands(); @@ -2168,7 +2168,7 @@ class TypePromotionTransaction { } } - /// \brief Restore the original list of uses. + /// Restore the original list of uses. void undo() override { DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) @@ -2176,12 +2176,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a truncate instruction. + /// Build a truncate instruction. class TruncBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a truncate instruction of \p Opnd producing a \p Ty + /// Build a truncate instruction of \p Opnd producing a \p Ty /// result. /// trunc Opnd to Ty. TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { @@ -2190,10 +2190,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2201,12 +2201,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a sign extension instruction. + /// Build a sign extension instruction. class SExtBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty + /// Build a sign extension instruction of \p Opnd producing a \p Ty /// result. /// sext Opnd to Ty. SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) @@ -2216,10 +2216,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2227,12 +2227,12 @@ class TypePromotionTransaction { } }; - /// \brief Build a zero extension instruction. + /// Build a zero extension instruction. class ZExtBuilder : public TypePromotionAction { Value *Val; public: - /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty + /// Build a zero extension instruction of \p Opnd producing a \p Ty /// result. 
/// zext Opnd to Ty. ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) @@ -2242,10 +2242,10 @@ class TypePromotionTransaction { DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); } - /// \brief Get the built value. + /// Get the built value. Value *getBuiltValue() { return Val; } - /// \brief Remove the built instruction. + /// Remove the built instruction. void undo() override { DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); if (Instruction *IVal = dyn_cast<Instruction>(Val)) @@ -2253,13 +2253,13 @@ class TypePromotionTransaction { } }; - /// \brief Mutate an instruction to another type. + /// Mutate an instruction to another type. class TypeMutator : public TypePromotionAction { /// Record the original type. Type *OrigTy; public: - /// \brief Mutate the type of \p Inst into \p NewTy. + /// Mutate the type of \p Inst into \p NewTy. TypeMutator(Instruction *Inst, Type *NewTy) : TypePromotionAction(Inst), OrigTy(Inst->getType()) { DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy @@ -2267,7 +2267,7 @@ class TypePromotionTransaction { Inst->mutateType(NewTy); } - /// \brief Mutate the instruction back to its original type. + /// Mutate the instruction back to its original type. void undo() override { DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"); @@ -2275,7 +2275,7 @@ class TypePromotionTransaction { } }; - /// \brief Replace the uses of an instruction by another instruction. + /// Replace the uses of an instruction by another instruction. class UsesReplacer : public TypePromotionAction { /// Helper structure to keep track of the replaced uses. struct InstructionAndIdx { @@ -2295,7 +2295,7 @@ class TypePromotionTransaction { using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; public: - /// \brief Replace all the use of \p Inst by \p New. + /// Replace all the uses of \p Inst with \p New. UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"); @@ -2308,7 +2308,7 @@ class TypePromotionTransaction { Inst->replaceAllUsesWith(New); } - /// \brief Reassign the original uses of Inst to Inst. + /// Reassign the original uses of Inst to Inst. void undo() override { DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); for (use_iterator UseIt = OriginalUses.begin(), @@ -2319,7 +2319,7 @@ class TypePromotionTransaction { } }; - /// \brief Remove an instruction from the IR. + /// Remove an instruction from the IR. class InstructionRemover : public TypePromotionAction { /// Original position of the instruction. InsertionHandler Inserter; @@ -2335,7 +2335,7 @@ class TypePromotionTransaction { SetOfInstrs &RemovedInsts; public: - /// \brief Remove all reference of \p Inst and optinally replace all its + /// Remove all references to \p Inst and optionally replace all its /// uses with New. /// \p RemovedInsts Keep track of the instructions removed by this Action. /// \pre If !Inst->use_empty(), then New != nullptr @@ -2355,7 +2355,7 @@ class TypePromotionTransaction { ~InstructionRemover() override { delete Replacer; } - /// \brief Resurrect the instruction and reassign it to the proper uses if + /// Resurrect the instruction and reassign it to the proper uses if /// a new value was provided when building this action. void undo() override { DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); @@ -2500,7 +2500,7 @@ void TypePromotionTransaction::rollback( namespace { -/// \brief A helper class for matching addressing modes.
+/// A helper class for matching addressing modes. /// /// This encapsulates the logic for matching the target-legal addressing modes. class AddressingModeMatcher { @@ -2586,7 +2586,7 @@ private: Value *PromotedOperand) const; }; -/// \brief Keep track of simplification of Phi nodes. +/// Keep track of simplification of Phi nodes. /// Accept the set of all phi nodes and erase phi node from this set /// if it is simplified. class SimplificationTracker { @@ -2679,7 +2679,7 @@ public: } }; -/// \brief A helper class for combining addressing modes. +/// A helper class for combining addressing modes. class AddressingModeCombiner { typedef std::pair<Value *, BasicBlock *> ValueInBB; typedef DenseMap<ValueInBB, Value *> FoldAddrToValueMapping; @@ -2708,12 +2708,12 @@ public: AddressingModeCombiner(const SimplifyQuery &_SQ, ValueInBB OriginalValue) : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} - /// \brief Get the combined AddrMode + /// Get the combined AddrMode const ExtAddrMode &getAddrMode() const { return AddrModes[0]; } - /// \brief Add a new AddrMode if it's compatible with the AddrModes we already + /// Add a new AddrMode if it's compatible with the AddrModes we already /// have. /// \return True iff we succeeded in doing so. bool addNewAddrMode(ExtAddrMode &NewAddrMode) { @@ -2766,7 +2766,7 @@ public: return CanHandle; } - /// \brief Combine the addressing modes we've collected into a single + /// Combine the addressing modes we've collected into a single /// addressing mode. /// \return True iff we successfully combined them or we only had one so /// didn't need to combine them anyway. @@ -2801,7 +2801,7 @@ public: } private: - /// \brief Initialize Map with anchor values. For address seen in some BB + /// Initialize Map with anchor values. For an address seen in some BB /// we set the value of each field seen in this address. /// If the address is not an instruction then the basic block is set to null. /// At the same time we find a common type for the different fields we will @@ -2834,7 +2834,7 @@ private: return true; } - /// \brief We have mapping between value A and basic block where value A + /// We have a mapping between value A and the basic block where value A /// was seen, to another value B, where B was a field in the addressing mode /// represented by A. Also we have an original value C representing an address /// in some basic block. Traversing from C through phis and selects we ended up with @@ -2894,7 +2894,7 @@ private: return Result; } - /// \brief Try to match PHI node to Candidate. + /// Try to match PHI node to Candidate. /// Matcher tracks the matched Phi nodes. bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, SmallSetVector<PHIPair, 8> &Matcher, @@ -2942,7 +2942,7 @@ private: return true; } - /// \brief For the given set of PHI nodes (in the SimplificationTracker) try + /// For the given set of PHI nodes (in the SimplificationTracker) try /// to find their equivalents. /// Returns false if this matching fails and creation of new Phi is disabled. bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, @@ -2990,7 +2990,7 @@ private: } return true; } - /// \brief Fill the placeholder with values from predecessors and simplify it. + /// Fill the placeholder with values from predecessors and simplify it. void FillPlaceholders(FoldAddrToValueMapping &Map, SmallVectorImpl<ValueInBB> &TraverseOrder, SimplificationTracker &ST) { @@ -3219,7 +3219,7 @@ static bool MightBeFoldableInst(Instruction *I) { } } -/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
+/// Check whether or not \p Val is a legal instruction for \p TLI. /// \note \p Val is assumed to be the product of some type promotion. /// Therefore if \p Val has an undefined state in \p TLI, this is assumed /// to be legal, as the non-promoted value would have had the same state. static bool isPromotedInstructionLegal(const TargetLowering &TLI, @@ -3239,9 +3239,9 @@ static bool isPromotedInstructionLegal(const TargetLowering &TLI, namespace { -/// \brief Hepler class to perform type promotion. +/// Helper class to perform type promotion. class TypePromotionHelper { - /// \brief Utility function to check whether or not a sign or zero extension + /// Utility function to check whether or not a sign or zero extension /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by /// either using the operands of \p Inst or promoting \p Inst. /// The type of the extension is defined by \p IsSExt. @@ -3255,13 +3255,13 @@ class TypePromotionHelper { static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, const InstrToOrigTy &PromotedInsts, bool IsSExt); - /// \brief Utility function to determine if \p OpIdx should be promoted when + /// Utility function to determine if \p OpIdx should be promoted when /// promoting \p Inst. static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { return !(isa<SelectInst>(Inst) && OpIdx == 0); } - /// \brief Utility function to promote the operand of \p Ext when this + /// Utility function to promote the operand of \p Ext when this /// operand is a promotable trunc or sext or zext. /// \p PromotedInsts maps the instructions to their type before promotion. /// \p CreatedInstsCost[out] contains the cost of all instructions @@ -3276,7 +3276,7 @@ class TypePromotionHelper { SmallVectorImpl<Instruction *> *Exts, SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); - /// \brief Utility function to promote the operand of \p Ext when this + /// Utility function to promote the operand of \p Ext when this /// operand is promotable and is not a supported trunc or sext. /// \p PromotedInsts maps the instructions to their type before promotion. /// \p CreatedInstsCost[out] contains the cost of all the instructions @@ -3322,7 +3322,7 @@ public: SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); - /// \brief Given a sign/zero extend instruction \p Ext, return the approriate + /// Given a sign/zero extend instruction \p Ext, return the appropriate /// action to promote the operand of \p Ext instead of using Ext. /// \return NULL if no promotable action is possible with the current /// sign extension. @@ -4585,7 +4585,7 @@ bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { return MadeChange; } -/// \brief Check if all the uses of \p Val are equivalent (or free) zero or +/// Check if all the uses of \p Val are equivalent (or free) zero or /// sign extensions. static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { assert(!Val->use_empty() && "Input must have at least one use"); @@ -4633,7 +4633,7 @@ static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { return true; } -/// \brief Try to speculatively promote extensions in \p Exts and continue +/// Try to speculatively promote extensions in \p Exts and continue /// promoting through newly promoted operands recursively as far as doing so is /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert @@ -5550,7 +5550,7 @@ bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { namespace { -/// \brief Helper class to promote a scalar operation to a vector one. +/// Helper class to promote a scalar operation to a vector one. /// This class is used to move downward extractelement transition. /// E.g., /// a = vector_op <2 x i32> @@ -5587,7 +5587,7 @@ class VectorPromoteHelper { /// Instruction that will be combined with the transition. Instruction *CombineInst = nullptr; - /// \brief The instruction that represents the current end of the transition. + /// The instruction that represents the current end of the transition. /// Since we are faking the promotion until we reach the end of the chain /// of computation, we need a way to get the current end of the transition. Instruction *getEndOfTransition() const { @@ -5596,7 +5596,7 @@ class VectorPromoteHelper { return InstsToBePromoted.back(); } - /// \brief Return the index of the original value in the transition. + /// Return the index of the original value in the transition. /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, /// c, is at index 0. unsigned getTransitionOriginalValueIdx() const { @@ -5605,7 +5605,7 @@ class VectorPromoteHelper { return 0; } - /// \brief Return the index of the index in the transition. + /// Return the index of the index in the transition. /// E.g., for "extractelement <2 x i32> c, i32 0" the index /// is at index 1. unsigned getTransitionIdx() const { @@ -5614,7 +5614,7 @@ class VectorPromoteHelper { return 1; } - /// \brief Get the type of the transition. + /// Get the type of the transition. /// This is the type of the original value. /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the /// transition is <2 x i32>. @@ -5622,7 +5622,7 @@ class VectorPromoteHelper { return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); } - /// \brief Promote \p ToBePromoted by moving \p Def downward through. + /// Promote \p ToBePromoted by moving \p Def downward through. /// I.e., we have the following sequence: /// Def = Transition <ty1> a to <ty2> /// b = ToBePromoted <ty2> Def, ... @@ -5631,7 +5631,7 @@ class VectorPromoteHelper { /// Def = Transition <ty1> ToBePromoted to <ty2> void promoteImpl(Instruction *ToBePromoted); - /// \brief Check whether or not it is profitable to promote all the + /// Check whether or not it is profitable to promote all the /// instructions enqueued to be promoted. bool isProfitableToPromote() { Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); @@ -5682,7 +5682,7 @@ class VectorPromoteHelper { return ScalarCost > VectorCost; } - /// \brief Generate a constant vector with \p Val with the same + /// Generate a constant vector with \p Val with the same /// number of elements as the transition. /// \p UseSplat defines whether or not \p Val should be replicated /// across the whole vector. @@ -5717,7 +5717,7 @@ class VectorPromoteHelper { return ConstantVector::get(ConstVec); } - /// \brief Check if promoting to a vector type an operand at \p OperandIdx + /// Check if promoting to a vector type an operand at \p OperandIdx /// in \p Use can trigger undefined behavior. static bool canCauseUndefinedBehavior(const Instruction *Use, unsigned OperandIdx) { @@ -5749,13 +5749,13 @@ public: assert(Transition && "Do not know how to promote null"); } - /// \brief Check if we can promote \p ToBePromoted to \p Type. 
+ /// Check if we can promote \p ToBePromoted to \p Type. bool canPromote(const Instruction *ToBePromoted) const { // We could support CastInst too. return isa<BinaryOperator>(ToBePromoted); } - /// \brief Check if it is profitable to promote \p ToBePromoted + /// Check if it is profitable to promote \p ToBePromoted /// by moving the transition downward. bool shouldPromote(const Instruction *ToBePromoted) const { // Promote only if all the operands can be statically expanded. @@ -5783,23 +5783,23 @@ public: ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); } - /// \brief Check whether or not \p Use can be combined + /// Check whether or not \p Use can be combined /// with the transition. /// I.e., is it possible to do Use(Transition) => AnotherUse? bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } - /// \brief Record \p ToBePromoted as part of the chain to be promoted. + /// Record \p ToBePromoted as part of the chain to be promoted. void enqueueForPromotion(Instruction *ToBePromoted) { InstsToBePromoted.push_back(ToBePromoted); } - /// \brief Set the instruction that will be combined with the transition. + /// Set the instruction that will be combined with the transition. void recordCombineInstruction(Instruction *ToBeCombined) { assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); CombineInst = ToBeCombined; } - /// \brief Promote all the instructions enqueued for promotion if it is + /// Promote all the instructions enqueued for promotion if it /// is profitable. /// \return True if the promotion happened, false otherwise. bool promote() { @@ -6420,7 +6420,7 @@ bool CodeGenPrepare::placeDbgValues(Function &F) { return MadeChange; } -/// \brief Scale down both weights to fit into uint32_t. +/// Scale down both weights to fit into uint32_t. static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; @@ -6428,7 +6428,7 @@ static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { NewFalse = NewFalse / Scale; } -/// \brief Some targets prefer to split a conditional branch like: +/// Some targets prefer to split a conditional branch like: /// \code /// %0 = icmp ne i32 %a, 0 /// %1 = icmp ne i32 %b, 0 diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp index ea33ea4b3bf..be4ba4d75a5 100644 --- a/llvm/lib/CodeGen/GlobalMerge.cpp +++ b/llvm/lib/CodeGen/GlobalMerge.cpp @@ -159,13 +159,13 @@ namespace { bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals, Module &M, bool isConst, unsigned AddrSpace) const; - /// \brief Merge everything in \p Globals for which the corresponding bit + /// Merge everything in \p Globals for which the corresponding bit /// in \p GlobalSet is set.
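(The scaleWeights hunk above is easy to sanity-check with concrete numbers. A small self-contained instance with hypothetical inputs; both weights are divided by the same factor, so their ratio survives the narrowing:)

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  uint64_t True = 1ULL << 33, False = 1ULL << 31; // hypothetical weights
  uint64_t Max = True > False ? True : False;
  uint32_t Scale = Max / std::numeric_limits<uint32_t>::max() + 1; // = 3
  True /= Scale;   // 2863311530, now fits in uint32_t
  False /= Scale;  // 715827882
  assert(True <= std::numeric_limits<uint32_t>::max());
  assert(True / False == 4); // the 4:1 ratio is preserved
  return 0;
}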
bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals, const BitVector &GlobalSet, Module &M, bool isConst, unsigned AddrSpace) const; - /// \brief Check if the given variable has been identified as must keep + /// Check if the given variable has been identified as must keep /// \pre setMustKeepGlobalVariables must have been called on the Module that /// contains GV bool isMustKeepGlobalVariable(const GlobalVariable *GV) const { diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp index 9c906d30963..e3dc9649473 100644 --- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp +++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp @@ -104,15 +104,15 @@ private: /// The maximum supported interleave factor. unsigned MaxFactor; - /// \brief Transform an interleaved load into target specific intrinsics. + /// Transform an interleaved load into target specific intrinsics. bool lowerInterleavedLoad(LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts); - /// \brief Transform an interleaved store into target specific intrinsics. + /// Transform an interleaved store into target specific intrinsics. bool lowerInterleavedStore(StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts); - /// \brief Returns true if the uses of an interleaved load by the + /// Returns true if the uses of an interleaved load by the /// extractelement instructions in \p Extracts can be replaced by uses of the /// shufflevector instructions in \p Shuffles instead. If so, the necessary /// replacements are also performed. @@ -136,7 +136,7 @@ FunctionPass *llvm::createInterleavedAccessPass() { return new InterleavedAccess(); } -/// \brief Check if the mask is a DE-interleave mask of the given factor +/// Check if the mask is a DE-interleave mask of the given factor /// \p Factor like: /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor> static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor, @@ -158,7 +158,7 @@ static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor, return false; } -/// \brief Check if the mask is a DE-interleave mask for an interleaved load. +/// Check if the mask is a DE-interleave mask for an interleaved load. /// /// E.g. DE-interleave masks (Factor = 2) could be: /// <0, 2, 4, 6> (mask of index 0 to extract even elements) @@ -176,7 +176,7 @@ static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor, return false; } -/// \brief Check if the mask can be used in an interleaved store. +/// Check if the mask can be used in an interleaved store. // /// It checks for a more general pattern than the RE-interleave mask. /// I.e. <x, y, ... z, x+1, y+1, ...z+1, x+2, y+2, ...z+2, ...> diff --git a/llvm/lib/CodeGen/LiveDebugValues.cpp b/llvm/lib/CodeGen/LiveDebugValues.cpp index 7b224473c5a..0554908584e 100644 --- a/llvm/lib/CodeGen/LiveDebugValues.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues.cpp @@ -65,7 +65,7 @@ using namespace llvm; STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted"); -// \brief If @MI is a DBG_VALUE with debug value described by a defined +// If @MI is a DBG_VALUE with debug value described by a defined // register, returns the number of this register. In the other case, returns 0. 
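(The mask predicates documented in the InterleavedAccess hunks above are simple to state outside of LLVM. A hedged sketch of the factor/index check over a plain integer mask, where negative entries model undef lanes; the name is illustrative and the real helper operates on shuffle masks:)

#include <vector>

bool isDeInterleaveMaskOfFactorSketch(const std::vector<int> &Mask,
                                      unsigned Factor, unsigned Index) {
  // Lane i of the narrow vector must read lane Index + i*Factor of the
  // wide vector, e.g. <0, 2, 4, 6> for Factor = 2, Index = 0.
  for (unsigned i = 0; i < Mask.size(); ++i)
    if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
      return false;
  return true;
}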
static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) { assert(MI.isDebugValue() && "expected a DBG_VALUE"); diff --git a/llvm/lib/CodeGen/LivePhysRegs.cpp b/llvm/lib/CodeGen/LivePhysRegs.cpp index b0cc62d5099..86c6c8e29f9 100644 --- a/llvm/lib/CodeGen/LivePhysRegs.cpp +++ b/llvm/lib/CodeGen/LivePhysRegs.cpp @@ -24,7 +24,7 @@ using namespace llvm; -/// \brief Remove all registers from the set that get clobbered by the register +/// Remove all registers from the set that get clobbered by the register /// mask. /// The clobbers set will be the list of live registers clobbered /// by the regmask. diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp index 167135b56ec..ec43097c23b 100644 --- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -198,10 +198,10 @@ namespace { class BlockChain; -/// \brief Type for our function-wide basic block -> block chain mapping. +/// Type for our function-wide basic block -> block chain mapping. using BlockToChainMapType = DenseMap<const MachineBasicBlock *, BlockChain *>; -/// \brief A chain of blocks which will be laid out contiguously. +/// A chain of blocks which will be laid out contiguously. /// /// This is the datastructure representing a chain of consecutive blocks that /// are profitable to layout together in order to maximize fallthrough @@ -213,13 +213,13 @@ using BlockToChainMapType = DenseMap<const MachineBasicBlock *, BlockChain *>; /// them. They participate in a block-to-chain mapping, which is updated /// automatically as chains are merged together. class BlockChain { - /// \brief The sequence of blocks belonging to this chain. + /// The sequence of blocks belonging to this chain. /// /// This is the sequence of blocks for a particular chain. These will be laid /// out in-order within the function. SmallVector<MachineBasicBlock *, 4> Blocks; - /// \brief A handle to the function-wide basic block to block chain mapping. + /// A handle to the function-wide basic block to block chain mapping. /// /// This is retained in each block chain to simplify the computation of child /// block chains for SCC-formation and iteration. We store the edges to child @@ -228,7 +228,7 @@ class BlockChain { BlockToChainMapType &BlockToChain; public: - /// \brief Construct a new BlockChain. + /// Construct a new BlockChain. /// /// This builds a new block chain representing a single basic block in the /// function. It also registers itself as the chain that block participates @@ -239,15 +239,15 @@ public: BlockToChain[BB] = this; } - /// \brief Iterator over blocks within the chain. + /// Iterator over blocks within the chain. using iterator = SmallVectorImpl<MachineBasicBlock *>::iterator; using const_iterator = SmallVectorImpl<MachineBasicBlock *>::const_iterator; - /// \brief Beginning of blocks within the chain. + /// Beginning of blocks within the chain. iterator begin() { return Blocks.begin(); } const_iterator begin() const { return Blocks.begin(); } - /// \brief End of blocks within the chain. + /// End of blocks within the chain. iterator end() { return Blocks.end(); } const_iterator end() const { return Blocks.end(); } @@ -261,7 +261,7 @@ public: return false; } - /// \brief Merge a block chain into this one. + /// Merge a block chain into this one. /// /// This routine merges a block chain into this one. 
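(The BlockChain comments above rely on one invariant: every block always knows the chain it currently belongs to, so merging chains is a single pass that appends blocks and repoints the mapping. A toy sketch with integer block ids, purely illustrative:)

#include <unordered_map>
#include <vector>

struct ChainSketch {
  std::vector<int> Blocks; // layout order within the chain
};

using BlockToChainSketch = std::unordered_map<int, ChainSketch *>;

void mergeChains(ChainSketch &Into, ChainSketch &From,
                 BlockToChainSketch &Map) {
  for (int BB : From.Blocks) {
    Into.Blocks.push_back(BB); // extend the contiguous layout sequence
    Map[BB] = &Into;           // keep the block -> chain mapping current
  }
  From.Blocks.clear();
}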
It takes care of forming /// a contiguous sequence of basic blocks, updating the edge list, and @@ -293,14 +293,14 @@ } #ifndef NDEBUG - /// \brief Dump the blocks in this chain. + /// Dump the blocks in this chain. LLVM_DUMP_METHOD void dump() { for (MachineBasicBlock *MBB : *this) MBB->dump(); } #endif // NDEBUG - /// \brief Count of predecessors of any block within the chain which have not + /// Count of predecessors of any block within the chain which have not /// yet been scheduled. In general, we will delay scheduling this chain /// until those predecessors are scheduled (or we find a sufficiently good /// reason to override this heuristic.) Note that when forming loop chains, @@ -313,7 +313,7 @@ public: }; class MachineBlockPlacement : public MachineFunctionPass { - /// \brief A type for a block filter set. + /// A type for a block filter set. using BlockFilterSet = SmallSetVector<const MachineBasicBlock *, 16>; /// Pair struct containing basic block and taildup profitability @@ -329,47 +329,47 @@ class MachineBlockPlacement : public MachineFunctionPass { MachineBasicBlock *Dest; }; - /// \brief work lists of blocks that are ready to be laid out + /// work lists of blocks that are ready to be laid out SmallVector<MachineBasicBlock *, 16> BlockWorkList; SmallVector<MachineBasicBlock *, 16> EHPadWorkList; /// Edges that have already been computed as optimal. DenseMap<const MachineBasicBlock *, BlockAndTailDupResult> ComputedEdges; - /// \brief Machine Function + /// Machine Function MachineFunction *F; - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. const MachineBranchProbabilityInfo *MBPI; - /// \brief A handle to the function-wide block frequency pass. + /// A handle to the function-wide block frequency pass. std::unique_ptr<BranchFolder::MBFIWrapper> MBFI; - /// \brief A handle to the loop info. + /// A handle to the loop info. MachineLoopInfo *MLI; - /// \brief Preferred loop exit. + /// Preferred loop exit. /// Member variable for convenience. It may be removed by duplication deep /// in the call stack. MachineBasicBlock *PreferredLoopExit; - /// \brief A handle to the target's instruction info. + /// A handle to the target's instruction info. const TargetInstrInfo *TII; - /// \brief A handle to the target's lowering info. + /// A handle to the target's lowering info. const TargetLoweringBase *TLI; - /// \brief A handle to the post dominator tree. + /// A handle to the post dominator tree. MachinePostDominatorTree *MPDT; - /// \brief Duplicator used to duplicate tails during placement. + /// Duplicator used to duplicate tails during placement. /// /// Placement decisions can open up new tail duplication opportunities, but /// since tail duplication affects placement decisions of later blocks, it /// must be done inline. TailDuplicator TailDup; - /// \brief Allocator and owner of BlockChain structures. + /// Allocator and owner of BlockChain structures. /// /// We build BlockChains lazily while processing the loop structure of /// a function. To reduce malloc traffic, we allocate them using this @@ -378,7 +378,7 @@ class MachineBlockPlacement : public MachineFunctionPass { /// the chains. SpecificBumpPtrAllocator<BlockChain> ChainAllocator; - /// \brief Function wide BasicBlock to BlockChain mapping. + /// Function wide BasicBlock to BlockChain mapping. /// /// This mapping allows efficiently moving from any given basic block to the /// BlockChain it participates in, if any.
We use it to, among other things, @@ -441,7 +441,7 @@ class MachineBlockPlacement : public MachineFunctionPass { MachineFunction::iterator &PrevUnplacedBlockIt, const BlockFilterSet *BlockFilter); - /// \brief Add a basic block to the work list if it is appropriate. + /// Add a basic block to the work list if it is appropriate. /// /// If the optional parameter BlockFilter is provided, only MBB /// present in the set will be added to the worklist. If nullptr @@ -545,7 +545,7 @@ INITIALIZE_PASS_END(MachineBlockPlacement, DEBUG_TYPE, "Branch Probability Basic Block Placement", false, false) #ifndef NDEBUG -/// \brief Helper to print the name of a MBB. +/// Helper to print the name of an MBB. /// /// Only used by debug logging. static std::string getBlockName(const MachineBasicBlock *BB) { @@ -558,7 +558,7 @@ static std::string getBlockName(const MachineBasicBlock *BB) { } #endif -/// \brief Mark a chain's successors as having one fewer preds. +/// Mark a chain's successors as having one fewer preds. /// /// When a chain is being merged into the "placed" chain, this routine will /// quickly walk the successors of each block in the chain and mark them as @@ -574,7 +574,7 @@ void MachineBlockPlacement::markChainSuccessors( } } -/// \brief Mark a single block's successors as having one fewer preds. +/// Mark a single block's successors as having one fewer preds. /// /// Under normal circumstances, this is only called by markChainSuccessors, /// but if a block that was to be placed is completely tail-duplicated away, @@ -1439,7 +1439,7 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor( return false; } -/// \brief Select the best successor for a block. +/// Select the best successor for a block. /// /// This looks across all successors of a particular block and attempts to /// select the "best" one to be the layout successor. It only considers direct @@ -1555,7 +1555,7 @@ MachineBlockPlacement::selectBestSuccessor( return BestSucc; } -/// \brief Select the best block from a worklist. +/// Select the best block from a worklist. /// /// This looks through the provided worklist as a list of candidate basic /// blocks and selects the most profitable one to place. The definition of @@ -1627,7 +1627,7 @@ MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock( return BestBlock; } -/// \brief Retrieve the first unplaced basic block. +/// Retrieve the first unplaced basic block. /// /// This routine is called when we are unable to use the CFG to walk through /// all of the basic blocks and form a chain due to unnatural loops in the CFG. @@ -1754,7 +1754,7 @@ void MachineBlockPlacement::buildChain( << getBlockName(*Chain.begin()) << "\n"); } -/// \brief Find the best loop top block for layout. +/// Find the best loop top block for layout. /// /// Look for a block which is strictly better than the loop header for laying /// out at the top of the loop. This looks for one and only one pattern: @@ -1823,7 +1823,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L, return BestPred; } -/// \brief Find the best loop exiting block for layout. +/// Find the best loop exiting block for layout. /// /// This routine implements the logic to analyze the loop looking for the best /// block to layout at the top of the loop. Typically this is done to maximize @@ -1941,7 +1941,7 @@ MachineBlockPlacement::findBestLoopExit(const MachineLoop &L, return ExitingBB; } -/// \brief Attempt to rotate an exiting block to the bottom of the loop.
+/// Attempt to rotate an exiting block to the bottom of the loop. /// /// Once we have built a chain, try to rotate it to line up the hot exit block /// with fallthrough out of the loop if doing so doesn't introduce unnecessary @@ -2019,7 +2019,7 @@ void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, std::rotate(LoopChain.begin(), std::next(ExitIt), LoopChain.end()); } -/// \brief Attempt to rotate a loop based on profile data to reduce branch cost. +/// Attempt to rotate a loop based on profile data to reduce branch cost. /// /// With profile data, we can determine the cost in terms of missed fall through /// opportunities when rotating a loop chain and select the best rotation. @@ -2166,7 +2166,7 @@ void MachineBlockPlacement::rotateLoopWithProfile( } } -/// \brief Collect blocks in the given loop that are to be placed. +/// Collect blocks in the given loop that are to be placed. /// /// When profile data is available, exclude cold blocks from the returned set; /// otherwise, collect all blocks in the loop. @@ -2202,7 +2202,7 @@ MachineBlockPlacement::collectLoopBlockSet(const MachineLoop &L) { return LoopBlockSet; } -/// \brief Forms basic block chains from the natural loop structures. +/// Forms basic block chains from the natural loop structures. /// /// These chains are designed to preserve the existing *structure* of the code /// as much as possible. We can then stitch the chains together in a way which @@ -2834,17 +2834,17 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) { namespace { -/// \brief A pass to compute block placement statistics. +/// A pass to compute block placement statistics. /// /// A separate pass to compute interesting statistics for evaluating block /// placement. This is separate from the actual placement pass so that they can /// be computed in the absence of any placement transformations or when using /// alternative placement strategies. class MachineBlockPlacementStats : public MachineFunctionPass { - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. const MachineBranchProbabilityInfo *MBPI; - /// \brief A handle to the function-wide block frequency pass. + /// A handle to the function-wide block frequency pass. const MachineBlockFrequencyInfo *MBFI; public: diff --git a/llvm/lib/CodeGen/MachineOutliner.cpp b/llvm/lib/CodeGen/MachineOutliner.cpp index d0f6f56b3d7..e9000fffdb5 100644 --- a/llvm/lib/CodeGen/MachineOutliner.cpp +++ b/llvm/lib/CodeGen/MachineOutliner.cpp @@ -101,7 +101,7 @@ static cl::opt<bool> EnableLinkOnceODROutlining( namespace { -/// \brief An individual sequence of instructions to be replaced with a call to +/// An individual sequence of instructions to be replaced with a call to /// an outlined function. struct Candidate { private: @@ -118,7 +118,7 @@ public: /// Set to false if the candidate overlapped with another candidate. bool InCandidateList = true; - /// \brief The index of this \p Candidate's \p OutlinedFunction in the list of + /// The index of this \p Candidate's \p OutlinedFunction in the list of /// \p OutlinedFunctions. unsigned FunctionIdx; @@ -143,7 +143,7 @@ public: // Return the end index of this candidate. unsigned getEndIdx() const { return StartIdx + Len - 1; } - /// \brief The number of instructions that would be saved by outlining every + /// The number of instructions that would be saved by outlining every /// candidate of this type. 
/// /// This is a fixed value which is not updated during the candidate pruning @@ -158,14 +158,14 @@ public: Candidate() {} - /// \brief Used to ensure that \p Candidates are outlined in an order that + /// Used to ensure that \p Candidates are outlined in an order that /// preserves the start and end indices of other \p Candidates. bool operator<(const Candidate &RHS) const { return getStartIdx() > RHS.getStartIdx(); } }; -/// \brief The information necessary to create an outlined function for some +/// The information necessary to create an outlined function for some /// class of candidate. struct OutlinedFunction { @@ -183,7 +183,7 @@ public: /// A number assigned to this function which appears at the end of its name. unsigned Name; - /// \brief The sequence of integers corresponding to the instructions in this + /// The sequence of integers corresponding to the instructions in this /// function. std::vector<unsigned> Sequence; @@ -210,14 +210,14 @@ public: return getOccurrenceCount(); } - /// \brief Return the number of instructions it would take to outline this + /// Return the number of instructions it would take to outline this /// function. unsigned getOutliningCost() { return (OccurrenceCount * MInfo.CallOverhead) + Sequence.size() + MInfo.FrameOverhead; } - /// \brief Return the number of instructions that would be saved by outlining + /// Return the number of instructions that would be saved by outlining /// this function. unsigned getBenefit() { unsigned NotOutlinedCost = OccurrenceCount * Sequence.size(); @@ -279,7 +279,7 @@ struct SuffixTreeNode { /// For all other nodes, this is ignored. unsigned SuffixIdx = EmptyIdx; - /// \brief For internal nodes, a pointer to the internal node representing + /// For internal nodes, a pointer to the internal node representing /// the same sequence with the first character chopped off. /// /// This acts as a shortcut in Ukkonen's algorithm. One of the things that @@ -393,7 +393,7 @@ private: /// The end index of each leaf in the tree. unsigned LeafEndIdx = -1; - /// \brief Helper struct which keeps track of the next insertion point in + /// Helper struct which keeps track of the next insertion point in /// Ukkonen's algorithm. struct ActiveState { /// The next node to insert at. @@ -406,7 +406,7 @@ private: unsigned Len = 0; }; - /// \brief The point the next insertion will take place at in the + /// The point the next insertion will take place at in the /// construction algorithm. ActiveState Active; @@ -453,7 +453,7 @@ private: return N; } - /// \brief Set the suffix indices of the leaves to the start indices of their + /// Set the suffix indices of the leaves to the start indices of their /// respective suffixes. Also stores each leaf in \p LeafVector at its /// respective suffix index. /// @@ -491,7 +491,7 @@ private: } } - /// \brief Construct the suffix tree for the prefix of the input ending at + /// Construct the suffix tree for the prefix of the input ending at /// \p EndIdx. /// /// Used to construct the full suffix tree iteratively. At the end of each @@ -652,16 +652,16 @@ public: } }; -/// \brief Maps \p MachineInstrs to unsigned integers and stores the mappings. +/// Maps \p MachineInstrs to unsigned integers and stores the mappings. struct InstructionMapper { - /// \brief The next available integer to assign to a \p MachineInstr that + /// The next available integer to assign to a \p MachineInstr that /// cannot be outlined. /// /// Set to -3 for compatibility with \p DenseMapInfo<unsigned>.
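The cost model quoted above is plain arithmetic: outlining a sequence of SeqLen instructions that occurs Occurrences times costs one call per occurrence plus one shared copy of the body and its frame, and the benefit is whatever that saves over leaving every copy inline. A hedged sketch of that bookkeeping (illustrative names, not the pass's types):

  #include <cstdio>

  unsigned outliningCost(unsigned Occurrences, unsigned SeqLen,
                         unsigned CallOverhead, unsigned FrameOverhead) {
    // One call per occurrence, plus one shared copy of the body.
    return Occurrences * CallOverhead + SeqLen + FrameOverhead;
  }

  unsigned outliningBenefit(unsigned Occurrences, unsigned SeqLen,
                            unsigned CallOverhead, unsigned FrameOverhead) {
    unsigned NotOutlinedCost = Occurrences * SeqLen;
    unsigned OutlinedCost =
        outliningCost(Occurrences, SeqLen, CallOverhead, FrameOverhead);
    return NotOutlinedCost > OutlinedCost ? NotOutlinedCost - OutlinedCost : 0;
  }

  int main() {
    // A 10-instruction sequence appearing 4 times, a 1-instruction call and
    // a 2-instruction frame: 40 inline instructions shrink to 4 + 10 + 2.
    printf("%u\n", outliningBenefit(4, 10, 1, 2)); // prints 24
  }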
unsigned IllegalInstrNumber = -3; - /// \brief The next available integer to assign to a \p MachineInstr that can + /// The next available integer to assign to a \p MachineInstr that can /// be outlined. unsigned LegalInstrNumber = 0; @@ -676,11 +676,11 @@ struct InstructionMapper { /// The vector of unsigned integers that the module is mapped to. std::vector<unsigned> UnsignedVec; - /// \brief Stores the location of the instruction associated with the integer + /// Stores the location of the instruction associated with the integer /// at index i in \p UnsignedVec for each index i. std::vector<MachineBasicBlock::iterator> InstrList; - /// \brief Maps \p *It to a legal integer. + /// Maps \p *It to a legal integer. /// /// Updates \p InstrList, \p UnsignedVec, \p InstructionIntegerMap, /// \p IntegerInstructionMap, and \p LegalInstrNumber. @@ -743,7 +743,7 @@ struct InstructionMapper { return MINumber; } - /// \brief Transforms a \p MachineBasicBlock into a \p vector of \p unsigneds + /// Transforms a \p MachineBasicBlock into a \p vector of \p unsigneds /// and appends it to \p UnsignedVec and \p InstrList. /// /// Two instructions are assigned the same integer if they are identical. @@ -796,7 +796,7 @@ struct InstructionMapper { } }; -/// \brief An interprocedural pass which finds repeated sequences of +/// An interprocedural pass which finds repeated sequences of /// instructions and replaces them with calls to functions. /// /// Each instruction is mapped to an unsigned integer and placed in a string. @@ -809,7 +809,7 @@ struct MachineOutliner : public ModulePass { static char ID; - /// \brief Set to true if the outliner should consider functions with + /// Set to true if the outliner should consider functions with /// linkonceodr linkage. bool OutlineFromLinkOnceODRs = false; @@ -853,7 +853,7 @@ struct MachineOutliner : public ModulePass { std::vector<std::shared_ptr<Candidate>> &CandidateList, std::vector<OutlinedFunction> &FunctionList); - /// \brief Replace the sequences of instructions represented by the + /// Replace the sequences of instructions represented by the /// \p Candidates in \p CandidateList with calls to \p MachineFunctions /// described in \p FunctionList. /// @@ -893,7 +893,7 @@ struct MachineOutliner : public ModulePass { /// Removes \p C from the candidate list, and updates its \p OutlinedFunction. void prune(Candidate &C, std::vector<OutlinedFunction> &FunctionList); - /// \brief Remove any overlapping candidates that weren't handled by the + /// Remove any overlapping candidates that weren't handled by the /// suffix tree's pruning method. /// /// Pruning from the suffix tree doesn't necessarily remove all overlaps. diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index 5f12cb1dfa9..f80d6a695a5 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -1486,7 +1486,7 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) { namespace { -/// \brief Post-process the DAG to create cluster edges between neighboring +/// Post-process the DAG to create cluster edges between neighboring /// loads or between neighboring stores. class BaseMemOpClusterMutation : public ScheduleDAGMutation { struct MemOpInfo { @@ -1590,7 +1590,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps( } } -/// \brief Callback from DAG postProcessing to create cluster edges for loads. +/// Callback from DAG postProcessing to create cluster edges for loads. 
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) { ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); @@ -1631,7 +1631,7 @@ void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) { namespace { -/// \brief Post-process the DAG to create weak edges from all uses of a copy to +/// Post-process the DAG to create weak edges from all uses of a copy to /// the one use that defines the copy's source vreg, most likely an induction /// variable increment. class CopyConstrain : public ScheduleDAGMutation { @@ -1806,7 +1806,7 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) { } } -/// \brief Callback from DAG postProcessing to create weak edges to encourage +/// Callback from DAG postProcessing to create weak edges to encourage /// copy elimination. void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) { ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs); @@ -3361,7 +3361,7 @@ ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) { namespace { -/// \brief Order nodes by the ILP metric. +/// Order nodes by the ILP metric. struct ILPOrder { const SchedDFSResult *DFSResult = nullptr; const BitVector *ScheduledTrees = nullptr; @@ -3369,7 +3369,7 @@ struct ILPOrder { ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {} - /// \brief Apply a less-than relation on node priority. + /// Apply a less-than relation on node priority. /// /// (Return true if A comes after B in the Q.) bool operator()(const SUnit *A, const SUnit *B) const { @@ -3394,7 +3394,7 @@ struct ILPOrder { } }; -/// \brief Schedule based on the ILP metric. +/// Schedule based on the ILP metric. class ILPScheduler : public MachineSchedStrategy { ScheduleDAGMILive *DAG = nullptr; ILPOrder Cmp; @@ -3437,7 +3437,7 @@ public: return SU; } - /// \brief Scheduler callback to notify that a new subtree is scheduled. + /// Scheduler callback to notify that a new subtree is scheduled. void scheduleTree(unsigned SubtreeID) override { std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); } diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp index 835c55d8c7c..0e5c839c180 100644 --- a/llvm/lib/CodeGen/MachineSink.cpp +++ b/llvm/lib/CodeGen/MachineSink.cpp @@ -139,7 +139,7 @@ namespace { MachineBasicBlock *From, MachineBasicBlock *To); - /// \brief Postpone the splitting of the given critical + /// Postpone the splitting of the given critical /// edge (\p From, \p To). /// /// We do not split the edges on the fly. Indeed, this invalidates @@ -709,7 +709,7 @@ MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB, return SuccToSinkTo; } -/// \brief Return true if MI is likely to be usable as a memory operation by the +/// Return true if MI is likely to be usable as a memory operation by the /// implicit null check optimization. /// /// This is a "best effort" heuristic, and should not be relied upon for diff --git a/llvm/lib/CodeGen/MacroFusion.cpp b/llvm/lib/CodeGen/MacroFusion.cpp index e7f426c469a..5b3523be635 100644 --- a/llvm/lib/CodeGen/MacroFusion.cpp +++ b/llvm/lib/CodeGen/MacroFusion.cpp @@ -105,7 +105,7 @@ static bool fuseInstructionPair(ScheduleDAGMI &DAG, SUnit &FirstSU, namespace { -/// \brief Post-process the DAG to create cluster edges between instrs that may +/// Post-process the DAG to create cluster edges between instrs that may /// be fused by the processor into a single operation. 
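The ILPScheduler above keeps its ready queue as a binary heap ordered by ILPOrder, and the scheduleTree callback rebuilds the heap wholesale when a subtree completes and priorities shift. A minimal sketch of that pattern, with a hypothetical numeric priority standing in for the SchedDFSResult metric:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  struct Node { unsigned Id; unsigned ILP; };

  struct ILPCompare {
    // Strict weak order: return true if A comes after B in the queue, so
    // the highest-priority node surfaces at the heap's front.
    bool operator()(const Node &A, const Node &B) const {
      return A.ILP < B.ILP;
    }
  };

  int main() {
    std::vector<Node> ReadyQ = {{0, 3}, {1, 7}, {2, 5}};
    ILPCompare Cmp;
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); // best node now at back
    printf("picked node %u\n", ReadyQ.back().Id);     // node 1 (ILP 7)
    ReadyQ.pop_back();
    // When priorities change (a subtree got scheduled), rebuild in one go,
    // mirroring the std::make_heap call in scheduleTree above.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }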
class MacroFusion : public ScheduleDAGMutation { ShouldSchedulePredTy shouldScheduleAdjacent; @@ -135,7 +135,7 @@ void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) { scheduleAdjacentImpl(*DAG, DAG->ExitSU); } -/// \brief Implement the fusion of instr pairs in the scheduling DAG, +/// Implement the fusion of instr pairs in the scheduling DAG, /// anchored at the instr in AnchorSU. bool MacroFusion::scheduleAdjacentImpl(ScheduleDAGMI &DAG, SUnit &AnchorSU) { const MachineInstr &AnchorMI = *AnchorSU.getInstr(); diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 1320f998555..5ce7da8f6c5 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -202,7 +202,7 @@ namespace { bool foldImmediate(MachineInstr &MI, SmallSet<unsigned, 4> &ImmDefRegs, DenseMap<unsigned, MachineInstr*> &ImmDefMIs); - /// \brief Finds recurrence cycles, but only ones that formulated around + /// Finds recurrence cycles, but only ones that are formulated around /// a def operand and a use operand that are tied. If there is a use /// operand commutable with the tied use operand, find recurrence cycle /// along that operand as well. @@ -210,7 +210,7 @@ namespace { const SmallSet<unsigned, 2> &TargetReg, RecurrenceCycle &RC); - /// \brief If copy instruction \p MI is a virtual register copy, track it in + /// If copy instruction \p MI is a virtual register copy, track it in /// the set \p CopySrcRegs and \p CopyMIs. If this virtual register was /// previously seen as a copy, replace the uses of this copy with the /// previously seen copy's destination register. @@ -221,7 +221,7 @@ namespace { /// Is the register \p Reg a non-allocatable physical register? bool isNAPhysCopy(unsigned Reg); - /// \brief If copy instruction \p MI is a non-allocatable virtual<->physical + /// If copy instruction \p MI is a non-allocatable virtual<->physical /// register copy, track it in the \p NAPhysToVirtMIs map. If this /// non-allocatable physical register was previously copied to a virtual /// register and hasn't been clobbered, the virt->phys copy can be @@ -232,7 +232,7 @@ namespace { bool isLoadFoldable(MachineInstr &MI, SmallSet<unsigned, 16> &FoldAsLoadDefCandidates); - /// \brief Check whether \p MI is understood by the register coalescer + /// Check whether \p MI is understood by the register coalescer /// but may require some rewriting. bool isCoalescableCopy(const MachineInstr &MI) { // SubregToRegs are not interesting, because they are already register @@ -242,7 +242,7 @@ namespace { MI.isExtractSubreg())); } - /// \brief Check whether \p MI is a copy like instruction that is + /// Check whether \p MI is a copy like instruction that is /// not recognized by the register coalescer. bool isUncoalescableCopy(const MachineInstr &MI) { return MI.isBitcast() || @@ -345,7 +345,7 @@ namespace { } }; - /// \brief Helper class to track the possible sources of a value defined by + /// Helper class to track the possible sources of a value defined by /// a (chain of) copy related instructions. /// Given a definition (instruction and definition index), this class /// follows the use-def chain to find successive suitable sources. @@ -425,7 +425,7 @@ namespace { } } - /// \brief Following the use-def chain, get the next available source + /// Following the use-def chain, get the next available source /// for the tracked value. /// \return A ValueTrackerResult containing a set of registers /// and sub registers with tracked values.
A ValueTrackerResult with @@ -646,7 +646,7 @@ bool PeepholeOptimizer::optimizeCondBranch(MachineInstr &MI) { return TII->optimizeCondBranch(MI); } -/// \brief Try to find the next source that share the same register file +/// Try to find the next source that shares the same register file /// for the value defined by \p Reg and \p SubReg. /// When true is returned, the \p RewriteMap can be used by the client to /// retrieve all Def -> Use along the way up to the next source. Any found @@ -746,7 +746,7 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg, return CurSrcPair.Reg != Reg; } -/// \brief Insert a PHI instruction with incoming edges \p SrcRegs that are +/// Insert a PHI instruction with incoming edges \p SrcRegs that are /// guaranteed to have the same register class. This is necessary whenever we /// successfully traverse a PHI instruction and find suitable sources coming /// from its edges. By inserting a new PHI, we provide a rewritten PHI def @@ -791,7 +791,7 @@ public: Rewriter(MachineInstr &CopyLike) : CopyLike(CopyLike) {} virtual ~Rewriter() {} - /// \brief Get the next rewritable source (SrcReg, SrcSubReg) and + /// Get the next rewritable source (SrcReg, SrcSubReg) and /// the related value that it affects (DstReg, DstSubReg). /// A source is considered rewritable if its register class and the /// register class of the related DstReg may not be register @@ -859,7 +859,7 @@ public: } }; -/// \brief Helper class to rewrite uncoalescable copy like instructions +/// Helper class to rewrite uncoalescable copy like instructions /// into new COPY (coalescable friendly) instructions. class UncoalescableRewriter : public Rewriter { unsigned NumDefs; ///< Number of defs in the bitcast. @@ -1101,7 +1101,7 @@ static Rewriter *getCopyRewriter(MachineInstr &MI, const TargetInstrInfo &TII) { } } -/// \brief Given a \p Def.Reg and Def.SubReg pair, use \p RewriteMap to find +/// Given a \p Def.Reg and Def.SubReg pair, use \p RewriteMap to find /// the new source to use for rewrite. If \p HandleMultipleSources is true and /// multiple sources for a given \p Def are found along the way, we found a /// PHI instruction that needs to be rewritten. @@ -1213,7 +1213,7 @@ bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) { return Changed; } -/// \brief Rewrite the source found through \p Def, by using the \p RewriteMap +/// Rewrite the source found through \p Def, by using the \p RewriteMap /// and create a new COPY instruction. More info about RewriteMap in /// PeepholeOptimizer::findNextSource. Right now this is only used to handle /// Uncoalescable copies, since they are copy like instructions that aren't @@ -1254,7 +1254,7 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike, return *NewCopy; } -/// \brief Optimize copy-like instructions to create +/// Optimize copy-like instructions to create /// register coalescer friendly instructions. /// The optimization tries to kill off the \p MI by looking /// through a chain of copies to find a source that has a compatible diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp index 7a8d4225ad0..78b94a25210 100644 --- a/llvm/lib/CodeGen/RegAllocFast.cpp +++ b/llvm/lib/CodeGen/RegAllocFast.cpp @@ -470,7 +470,7 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI, } } -/// \brief Return the cost of spilling clearing out PhysReg and aliases so it is +/// Return the cost of spilling to clear out PhysReg and aliases so it is /// free for allocation.
Returns 0 when PhysReg is free or disabled with all /// aliases disabled - it can be allocated directly. /// \returns spillImpossible when PhysReg or an alias can't be spilled. @@ -519,7 +519,7 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const { return Cost; } -/// \brief This method updates local state so that we know that PhysReg is the +/// This method updates local state so that we know that PhysReg is the /// proper container for VirtReg now. The physical register must not be used /// for anything else when this is called. void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) { diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 80349457783..04b5393d79d 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -300,17 +300,17 @@ class RAGreedy : public MachineFunctionPass, EvicteeInfo Evictees; public: - /// \brief Clear all eviction information. + /// Clear all eviction information. void clear() { Evictees.clear(); } - /// \brief Clear eviction information for the given evictee Vreg. + /// Clear eviction information for the given evictee Vreg. /// E.g. when Vreg gets a new allocation, the old eviction info is no /// longer relevant. /// \param Evictee The evictee Vreg for whom we want to clear collected /// eviction info. void clearEvicteeInfo(unsigned Evictee) { Evictees.erase(Evictee); } - /// \brief Track new eviction. + /// Track new eviction. /// The Evictor vreg has evicted the Evictee vreg from Physreg. /// \param PhysReg The physical register Evictee was evicted from. /// \param Evictor The evictor Vreg that evicted Evictee. @@ -937,7 +937,7 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, return true; } -/// \brief Return true if all interferences between VirtReg and PhysReg between +/// Return true if all interferences between VirtReg and PhysReg between /// Start and End can be evicted. /// /// \param VirtReg Live range that is about to be assigned. @@ -989,7 +989,7 @@ bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg, return true; } -/// \brief Return tthe physical register that will be best +/// Return the physical register that will be the best /// candidate for eviction by a local split interval that will be created /// between Start and End. /// @@ -1381,7 +1381,7 @@ BlockFrequency RAGreedy::calcSpillCost() { return Cost; } -/// \brief Check if splitting Evictee will create a local split interval in +/// Check if splitting Evictee will create a local split interval in /// basic block number BBNumber that may cause a bad eviction chain. This is /// intended to prevent bad eviction sequences like: /// movl %ebp, 8(%esp) # 4-byte Spill @@ -1482,7 +1482,7 @@ bool RAGreedy::splitCanCauseEvictionChain(unsigned Evictee, return true; } -/// \brief Check if splitting VirtRegToSplit will create a local split interval +/// Check if splitting VirtRegToSplit will create a local split interval /// in basic block number BBNumber that may cause a spill. /// /// \param VirtRegToSplit The register considered to be split. @@ -2793,7 +2793,7 @@ void RAGreedy::initializeCSRCost() { CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry); } -/// \brief Collect the hint info for \p Reg. +/// Collect the hint info for \p Reg. /// The results are stored into \p Out. /// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) { @@ -2817,7 +2817,7 @@ void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) { } } -/// \brief Using the given \p List, compute the cost of the broken hints if +/// Using the given \p List, compute the cost of the broken hints if /// \p PhysReg was used. /// \return The cost of \p List for \p PhysReg. BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List, @@ -2830,7 +2830,7 @@ BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List, return Cost; } -/// \brief Using the register assigned to \p VirtReg, try to recolor +/// Using the register assigned to \p VirtReg, try to recolor /// all the live ranges that are copy-related with \p VirtReg. /// The recoloring is then propagated to all the live-ranges that have /// been recolored and so on, until no more copies can be coalesced or @@ -2909,7 +2909,7 @@ void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) { } while (!RecoloringCandidates.empty()); } -/// \brief Try to recolor broken hints. +/// Try to recolor broken hints. /// Broken hints may be repaired by recoloring when an evicted variable /// freed up a register for a larger live-range. /// Consider the following example: diff --git a/llvm/lib/CodeGen/RegAllocPBQP.cpp b/llvm/lib/CodeGen/RegAllocPBQP.cpp index a71f839ccf0..7ce4438e613 100644 --- a/llvm/lib/CodeGen/RegAllocPBQP.cpp +++ b/llvm/lib/CodeGen/RegAllocPBQP.cpp @@ -160,25 +160,25 @@ private: /// always available for the remat of all the siblings of the original reg. SmallPtrSet<MachineInstr *, 32> DeadRemats; - /// \brief Finds the initial set of vreg intervals to allocate. + /// Finds the initial set of vreg intervals to allocate. void findVRegIntervalsToAlloc(const MachineFunction &MF, LiveIntervals &LIS); - /// \brief Constructs an initial graph. + /// Constructs an initial graph. void initializeGraph(PBQPRAGraph &G, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Spill the given VReg. + /// Spill the given VReg. void spillVReg(unsigned VReg, SmallVectorImpl<unsigned> &NewIntervals, MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Given a solved PBQP problem maps this solution back to a register + /// Given a solved PBQP problem maps this solution back to a register /// assignment. bool mapPBQPToRegAlloc(const PBQPRAGraph &G, const PBQP::Solution &Solution, VirtRegMap &VRM, Spiller &VRegSpiller); - /// \brief Postprocessing before final spilling. Sets basic block "live in" + /// Postprocessing before final spilling. Sets basic block "live in" /// variables. void finalizeAlloc(MachineFunction &MF, LiveIntervals &LIS, VirtRegMap &VRM) const; diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index 45ee0d17e25..c0deb11d06d 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -115,11 +115,11 @@ namespace { /// checked for smaller live intervals. bool ShrinkMainRange; - /// \brief True if the coalescer should aggressively coalesce global copies + /// True if the coalescer should aggressively coalesce global copies /// in favor of keeping local copies. bool JoinGlobalCopies; - /// \brief True if the coalescer should aggressively coalesce fall-thru + /// True if the coalescer should aggressively coalesce fall-thru /// blocks exclusively containing copies. 
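Reading getBrokenHintFreq together with collectHintInfo above suggests the accounting is simple: a hint in \p List is "broken" when its recorded physical register differs from the candidate \p PhysReg, and each broken hint contributes the frequency of its copy. A sketch under that assumption (the HintInfo fields here are illustrative, not the allocator's real type):

  #include <cstdint>
  #include <vector>

  struct HintInfo {
    uint64_t Freq;    // block frequency of the copy instruction
    unsigned PhysReg; // physical register the hint asks for
  };

  uint64_t brokenHintFreq(const std::vector<HintInfo> &List, unsigned PhysReg) {
    uint64_t Cost = 0;
    for (const HintInfo &Info : List)
      if (Info.PhysReg != PhysReg)
        Cost += Info.Freq; // this copy would survive, costing its frequency
    return Cost;
  }

The recoloring loop can then compare this cost across candidate registers and keep the assignment whose surviving copies are cheapest.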
bool JoinSplitEdges; diff --git a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp index 1e1f36a35ec..e25e53a24b5 100644 --- a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp +++ b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp @@ -77,20 +77,20 @@ private: /// Split unrelated subregister components and rename them to new vregs. bool renameComponents(LiveInterval &LI) const; - /// \brief Build a vector of SubRange infos and a union find set of + /// Build a vector of SubRange infos and a union find set of /// equivalence classes. /// Returns true if more than 1 equivalence class was found. bool findComponents(IntEqClasses &Classes, SmallVectorImpl<SubRangeInfo> &SubRangeInfos, LiveInterval &LI) const; - /// \brief Distribute the LiveInterval segments into the new LiveIntervals + /// Distribute the LiveInterval segments into the new LiveIntervals /// belonging to their class. void distribute(const IntEqClasses &Classes, const SmallVectorImpl<SubRangeInfo> &SubRangeInfos, const SmallVectorImpl<LiveInterval*> &Intervals) const; - /// \brief Constructs main liverange and add missing undef+dead flags. + /// Constructs the main liverange and adds missing undef+dead flags. void computeMainRangesFixFlags(const IntEqClasses &Classes, const SmallVectorImpl<SubRangeInfo> &SubRangeInfos, const SmallVectorImpl<LiveInterval*> &Intervals) const; diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp index a8f16a9ec0a..3475bae4990 100644 --- a/llvm/lib/CodeGen/SafeStack.cpp +++ b/llvm/lib/CodeGen/SafeStack.cpp @@ -143,14 +143,14 @@ class SafeStack { /// might expect to appear on the stack on most common targets. enum { StackAlignment = 16 }; - /// \brief Return the value of the stack canary. + /// Return the value of the stack canary. Value *getStackGuard(IRBuilder<> &IRB, Function &F); - /// \brief Load stack guard from the frame and check if it has changed. + /// Load stack guard from the frame and check if it has changed. void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI, AllocaInst *StackGuardSlot, Value *StackGuard); - /// \brief Find all static allocas, dynamic allocas, return instructions and + /// Find all static allocas, dynamic allocas, return instructions and /// stack restore points (exception unwind blocks and setjmp calls) in the /// given function and append them to the respective vectors. void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas, @@ -159,11 +159,11 @@ class SafeStack { SmallVectorImpl<ReturnInst *> &Returns, SmallVectorImpl<Instruction *> &StackRestorePoints); - /// \brief Calculate the allocation size of a given alloca. Returns 0 if the + /// Calculate the allocation size of a given alloca. Returns 0 if the /// size cannot be statically determined. uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI); - /// \brief Allocate space for all static allocas in \p StaticAllocas, + /// Allocate space for all static allocas in \p StaticAllocas, /// replace allocas with pointers into the unsafe stack and generate code to /// restore the stack pointer before all return instructions in \p Returns. /// @@ -176,7 +176,7 @@ class SafeStack { Instruction *BasePointer, AllocaInst *StackGuardSlot); - /// \brief Generate code to restore the stack after all stack restore points + /// Generate code to restore the stack after all stack restore points /// in \p StackRestorePoints.
/// /// \returns A local variable in which to maintain the dynamic top of the @@ -186,7 +186,7 @@ class SafeStack { ArrayRef<Instruction *> StackRestorePoints, Value *StaticTop, bool NeedDynamicTop); - /// \brief Replace all allocas in \p DynamicAllocas with code to allocate + /// Replace all allocas in \p DynamicAllocas with code to allocate /// space dynamically on the unsafe stack and store the dynamic unsafe stack /// top to \p DynamicTop if non-null. void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr, diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp index 2b994422bab..45df5c9d0b5 100644 --- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -267,7 +267,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { } } -/// \brief Adds register dependencies (data, anti, and output) from this SUnit +/// Adds register dependencies (data, anti, and output) from this SUnit /// to following instructions in the same scheduling region that depend on the /// physical register referenced at OperIdx. void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { @@ -469,7 +469,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU)); } -/// \brief Adds a register data dependency if the instruction that defines the +/// Adds a register data dependency if the instruction that defines the /// virtual register used at OperIdx is mapped to an SUnit. Add a register /// antidependency from this SUnit to instructions that occur later in the same /// scheduling region if they write the virtual register. @@ -515,7 +515,7 @@ void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, } } -/// \brief Creates an SUnit for each real instruction, numbered in top-down +/// Creates an SUnit for each real instruction, numbered in top-down /// topological order. The instruction order A < B implies that no edge exists /// from B to A. /// @@ -1213,7 +1213,7 @@ public: RootSet[SU->NodeNum] = RData; } - /// \brief Called once for each tree edge after calling visitPostOrderNode on + /// Called once for each tree edge after calling visitPostOrderNode on /// the predecessor. Increment the parent node's instruction count and /// preemptively join this subtree to its parent's if it is small enough. void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 5c47fe21ec8..540e31048c6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -122,7 +122,7 @@ namespace { bool LegalTypes = false; bool ForCodeSize; - /// \brief Worklist of all of the nodes that need to be simplified. + /// Worklist of all of the nodes that need to be simplified. /// /// This must behave as a stack -- new nodes to process are pushed onto the /// back and when processing we pop off of the back. @@ -131,14 +131,14 @@ namespace { /// due to nodes being deleted from the underlying DAG. SmallVector<SDNode *, 64> Worklist; - /// \brief Mapping from an SDNode to its position on the worklist. + /// Mapping from an SDNode to its position on the worklist. /// /// This is used to find and remove nodes from the worklist (by nulling /// them) when they are deleted from the underlying DAG. It relies on /// stable indices of nodes within the worklist.
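The Worklist/WorklistMap pair described above is a reusable pattern: a vector used as a stack, plus a side map of stable indices so a node deleted from the underlying graph can be nulled out in place instead of searched for. A generic sketch of that pattern (simplified types, not the DAGCombiner's own):

  #include <unordered_map>
  #include <vector>

  struct Node { int Id; };

  class Worklist {
    std::vector<Node *> Stack;                 // push/pop from the back
    std::unordered_map<Node *, size_t> IndexOf; // stable position per node

  public:
    void push(Node *N) {
      if (IndexOf.count(N))
        return; // no duplicates on the worklist
      IndexOf[N] = Stack.size();
      Stack.push_back(N);
    }

    // Called when N is erased from the underlying graph.
    void erase(Node *N) {
      auto It = IndexOf.find(N);
      if (It == IndexOf.end())
        return;
      Stack[It->second] = nullptr; // tombstone keeps other indices stable
      IndexOf.erase(It);
    }

    Node *pop() {
      while (!Stack.empty()) {
        Node *N = Stack.back();
        Stack.pop_back();
        if (!N)
          continue; // skip tombstones left by erase()
        IndexOf.erase(N);
        return N;
      }
      return nullptr;
    }
  };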
DenseMap<SDNode *, unsigned> WorklistMap; - /// \brief Set of nodes which have been combined (at least once). + /// Set of nodes which have been combined (at least once). /// /// This is used to allow us to reliably add any operands of a DAG node /// which have not yet been combined to the worklist. @@ -249,7 +249,7 @@ namespace { SDValue SplitIndexingFromLoad(LoadSDNode *LD); bool SliceUpLoad(SDNode *N); - /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed + /// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed /// load. /// /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced. @@ -561,7 +561,7 @@ namespace { /// affected nodes are stored as a prefix in \p StoreNodes). bool MergeConsecutiveStores(StoreSDNode *N); - /// \brief Try to transform a truncation where C is a constant: + /// Try to transform a truncation where C is a constant: /// (trunc (and X, C)) -> (and (trunc X), (trunc C)) /// /// \p N needs to be a truncation and its first operand an AND. Other @@ -856,7 +856,7 @@ bool DAGCombiner::isOneUseSetCC(SDValue N) const { return false; } -// \brief Returns the SDNode if it is a constant float BuildVector +// Returns the SDNode if it is a constant float BuildVector // or constant float. static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) { if (isa<ConstantFPSDNode>(N)) @@ -1347,7 +1347,7 @@ bool DAGCombiner::PromoteLoad(SDValue Op) { return false; } -/// \brief Recursively delete a node which has no uses and any operands for +/// Recursively delete a node which has no uses and any operands for /// which it is the only use. /// /// Note that this both deletes the nodes and removes them from the worklist. @@ -6474,7 +6474,7 @@ SDValue DAGCombiner::visitCTPOP(SDNode *N) { return SDValue(); } -/// \brief Generate Min/Max node +/// Generate Min/Max node static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode CC, const TargetLowering &TLI, @@ -11954,7 +11954,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { return false; } -/// \brief Return the base-pointer arithmetic from an indexed \p LD. +/// Return the base-pointer arithmetic from an indexed \p LD. SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) { ISD::MemIndexedMode AM = LD->getAddressingMode(); assert(AM != ISD::UNINDEXED); @@ -12116,7 +12116,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) { namespace { -/// \brief Helper structure used to slice a load in smaller loads. +/// Helper structure used to slice a load in smaller loads. /// Basically a slice is obtained from the following sequence: /// Origin = load Ty1, Base /// Shift = srl Ty1 Origin, CstTy Amount @@ -12129,7 +12129,7 @@ namespace { /// SliceTy is deduced from the number of bits that are actually used to /// build Inst. struct LoadedSlice { - /// \brief Helper structure used to compute the cost of a slice. + /// Helper structure used to compute the cost of a slice. struct Cost { /// Are we optimizing for code size. bool ForCodeSize; @@ -12143,7 +12143,7 @@ struct LoadedSlice { Cost(bool ForCodeSize = false) : ForCodeSize(ForCodeSize) {} - /// \brief Get the cost of one isolated slice. + /// Get the cost of one isolated slice. Cost(const LoadedSlice &LS, bool ForCodeSize = false) : ForCodeSize(ForCodeSize), Loads(1) { EVT TruncType = LS.Inst->getValueType(0); @@ -12153,7 +12153,7 @@ struct LoadedSlice { ZExts = 1; } - /// \brief Account for slicing gain in the current cost. + /// Account for slicing gain in the current cost. 
/// Slicing provides a few gains like removing a shift or a /// truncate. This method allows growing the cost of the original /// load with the gain from this slice. @@ -12226,7 +12226,7 @@ struct LoadedSlice { unsigned Shift = 0, SelectionDAG *DAG = nullptr) : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {} - /// \brief Get the bits used in a chunk of bits \p BitWidth large. + /// Get the bits used in a chunk of bits \p BitWidth large. /// \return Result is \p BitWidth and has used bits set to 1 and /// not used bits set to 0. APInt getUsedBits() const { @@ -12246,14 +12246,14 @@ struct LoadedSlice { return UsedBits; } - /// \brief Get the size of the slice to be loaded in bytes. + /// Get the size of the slice to be loaded in bytes. unsigned getLoadedSize() const { unsigned SliceSize = getUsedBits().countPopulation(); assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte."); return SliceSize / 8; } - /// \brief Get the type that will be loaded for this slice. + /// Get the type that will be loaded for this slice. /// Note: This may not be the final type for the slice. EVT getLoadedType() const { assert(DAG && "Missing context"); @@ -12261,7 +12261,7 @@ struct LoadedSlice { return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); } - /// \brief Get the alignment of the load used for this slice. + /// Get the alignment of the load used for this slice. unsigned getAlignment() const { unsigned Alignment = Origin->getAlignment(); unsigned Offset = getOffsetFromBase(); @@ -12270,7 +12270,7 @@ struct LoadedSlice { return Alignment; } - /// \brief Check if this slice can be rewritten with legal operations. + /// Check if this slice can be rewritten with legal operations. bool isLegal() const { // An invalid slice is not legal. if (!Origin || !Inst || !DAG) @@ -12314,7 +12314,7 @@ struct LoadedSlice { return true; } - /// \brief Get the offset in bytes of this slice in the original chunk of + /// Get the offset in bytes of this slice in the original chunk of /// bits. /// \pre DAG != nullptr. uint64_t getOffsetFromBase() const { @@ -12335,7 +12335,7 @@ struct LoadedSlice { return Offset; } - /// \brief Generate the sequence of instructions to load the slice + /// Generate the sequence of instructions to load the slice /// represented by this object and redirect the uses of this slice to /// this new sequence of instructions. /// \pre this->Inst && this->Origin are valid Instructions and this @@ -12373,7 +12373,7 @@ struct LoadedSlice { return LastInst; } - /// \brief Check if this slice can be merged with an expensive cross register + /// Check if this slice can be merged with an expensive cross register /// bank copy. E.g., /// i = load i32 /// f = bitcast i32 i to float @@ -12422,7 +12422,7 @@ struct LoadedSlice { } // end anonymous namespace -/// \brief Check that all bits set in \p UsedBits form a dense region, i.e., +/// Check that all bits set in \p UsedBits form a dense region, i.e., /// \p UsedBits looks like 0..0 1..1 0..0. static bool areUsedBitsDense(const APInt &UsedBits) { // If all the bits are one, this is dense! @@ -12438,7 +12438,7 @@ static bool areUsedBitsDense(const APInt &UsedBits) { return NarrowedUsedBits.isAllOnesValue(); } -/// \brief Check whether or not \p First and \p Second are next to each other +/// Check whether or not \p First and \p Second are next to each other /// in memory. This means that there is no hole between the bits loaded /// by \p First and the bits loaded by \p Second.
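The 0..0 1..1 0..0 shape that areUsedBitsDense tests for can be checked with two bit tricks: shift out the trailing zeros, then verify the remainder is a solid run of ones. A sketch on a plain uint64_t instead of APInt (GCC/Clang builtins assumed; the all-zero case is arbitrarily treated as not dense here):

  #include <cassert>
  #include <cstdint>

  bool usedBitsAreDense(uint64_t UsedBits) {
    if (UsedBits == 0)
      return false;                             // nothing used at all
    UsedBits >>= __builtin_ctzll(UsedBits);     // drop the trailing 0..0
    return (UsedBits & (UsedBits + 1)) == 0;    // 1..1 has no holes
  }

  int main() {
    assert(usedBitsAreDense(0x0FF0));  // 0..0 1..1 0..0
    assert(!usedBitsAreDense(0x0F0F)); // hole in the middle
  }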
static bool areSlicesNextToEachOther(const LoadedSlice &First, @@ -12452,7 +12452,7 @@ static bool areSlicesNextToEachOther(const LoadedSlice &First, return areUsedBitsDense(UsedBits); } -/// \brief Adjust the \p GlobalLSCost according to the target +/// Adjust the \p GlobalLSCost according to the target /// pairing capabilities and the layout of the slices. /// \pre \p GlobalLSCost should account for at least as many loads as /// there are in the slices in \p LoadedSlices. @@ -12513,7 +12513,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices, } } -/// \brief Check the profitability of all involved LoadedSlice. +/// Check the profitability of all involved LoadedSlice. /// Currently, it is considered profitable if there are exactly two /// involved slices (1) which are (2) next to each other in memory, and /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3). @@ -12557,7 +12557,7 @@ static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices, return OrigCost > GlobalSlicingCost; } -/// \brief If the given load, \p LI, is used only by trunc or trunc(lshr) +/// If the given load, \p LI, is used only by trunc or trunc(lshr) /// operations, split it in the various pieces being extracted. /// /// This sort of thing is introduced by SROA. diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp index 4b100acc9a5..571fd667cda 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -848,7 +848,7 @@ bool FastISel::selectStackmap(const CallInst *I) { return true; } -/// \brief Lower an argument list according to the target calling convention. +/// Lower an argument list according to the target calling convention. /// /// This is a helper for lowering intrinsics that follow a target calling /// convention or require stack pointer adjustment. Only a subset of the diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index fc191c457d9..486b5430537 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -87,11 +87,11 @@ class SelectionDAGLegalize { const TargetLowering &TLI; SelectionDAG &DAG; - /// \brief The set of nodes which have already been legalized. We hold a + /// The set of nodes which have already been legalized. We hold a /// reference to it in order to update as necessary on node deletion. SmallPtrSetImpl<SDNode *> &LegalizedNodes; - /// \brief A set of all the nodes updated during legalization. + /// A set of all the nodes updated during legalization. SmallSetVector<SDNode *, 16> *UpdatedNodes; EVT getSetCCResultType(EVT VT) const { @@ -107,7 +107,7 @@ public: : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG), LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {} - /// \brief Legalizes the given operation. + /// Legalizes the given operation. void LegalizeOp(SDNode *Node); private: diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index a481acd07ca..724a909a210 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -63,7 +63,7 @@ class VectorLegalizer { /// legalizing the same thing more than once. SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes; - /// \brief Adds a node to the translation cache. + /// Adds a node to the translation cache.
void AddLegalizedOperand(SDValue From, SDValue To) { LegalizedNodes.insert(std::make_pair(From, To)); // If someone requests legalization of the new node, return itself. @@ -71,55 +71,55 @@ class VectorLegalizer { LegalizedNodes.insert(std::make_pair(To, To)); } - /// \brief Legalizes the given node. + /// Legalizes the given node. SDValue LegalizeOp(SDValue Op); - /// \brief Assuming the node is legal, "legalize" the results. + /// Assuming the node is legal, "legalize" the results. SDValue TranslateLegalizeResults(SDValue Op, SDValue Result); - /// \brief Implements unrolling a VSETCC. + /// Implements unrolling a VSETCC. SDValue UnrollVSETCC(SDValue Op); - /// \brief Implement expand-based legalization of vector operations. + /// Implement expand-based legalization of vector operations. /// /// This is just a high-level routine to dispatch to specific code paths for /// operations to legalize them. SDValue Expand(SDValue Op); - /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if + /// Implements expansion for FNEG; falls back to UnrollVectorOp if /// FSUB isn't legal. /// /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if /// SINT_TO_FLOAT and SHR on vectors isn't legal. SDValue ExpandUINT_TO_FLOAT(SDValue Op); - /// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA. + /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA. SDValue ExpandSEXTINREG(SDValue Op); - /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG. + /// Implement expansion for ANY_EXTEND_VECTOR_INREG. /// /// Shuffles the low lanes of the operand into place and bitcasts to the proper /// type. The contents of the bits in the extended part of each element are /// undef. SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG. + /// Implement expansion for SIGN_EXTEND_VECTOR_INREG. /// /// Shuffles the low lanes of the operand into place, bitcasts to the proper /// type, then shifts left and arithmetic shifts right to introduce a sign /// extension. SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG. + /// Implement expansion for ZERO_EXTEND_VECTOR_INREG. /// /// Shuffles the low lanes of the operand into place and blends zeros into /// the remaining lanes, finally bitcasting to the proper type. SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op); - /// \brief Expand bswap of vectors into a shuffle if legal. + /// Expand bswap of vectors into a shuffle if legal. SDValue ExpandBSWAP(SDValue Op); - /// \brief Implement vselect in terms of XOR, AND, OR when blend is not + /// Implement vselect in terms of XOR, AND, OR when blend is not /// supported by the target. SDValue ExpandVSELECT(SDValue Op); SDValue ExpandSELECT(SDValue Op); @@ -131,18 +131,18 @@ class VectorLegalizer { SDValue ExpandCTLZ(SDValue Op); SDValue ExpandCTTZ_ZERO_UNDEF(SDValue Op); - /// \brief Implements vector promotion. + /// Implements vector promotion. /// /// This is essentially just bitcasting the operands to a different type and /// bitcasting the result back to the original type. SDValue Promote(SDValue Op); - /// \brief Implements [SU]INT_TO_FP vector promotion. + /// Implements [SU]INT_TO_FP vector promotion. /// /// This is a [zs]ext of the input operand to a larger integer type. SDValue PromoteINT_TO_FP(SDValue Op); - /// \brief Implements FP_TO_[SU]INT vector promotion of the result type. 
+ /// Implements FP_TO_[SU]INT vector promotion of the result type. /// /// It is promoted to a larger integer type. The result is then /// truncated back to the original type. @@ -152,7 +152,7 @@ public: VectorLegalizer(SelectionDAG& dag) : DAG(dag), TLI(dag.getTargetLoweringInfo()) {} - /// \brief Begin legalizer the vector operations in the DAG. + /// Begin legalizing the vector operations in the DAG. bool Run(); }; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 0a4e7e6f737..cf6993baef3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -773,7 +773,7 @@ static void VerifySDNode(SDNode *N) { } #endif // NDEBUG -/// \brief Insert a newly allocated node into the DAG. +/// Insert a newly allocated node into the DAG. /// /// Handles insertion into the all nodes list and CSE map, as well as /// verification and other common operations when a new node is allocated. @@ -5446,7 +5446,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); } -/// \brief Lower the call to 'memset' intrinsic function into a series of store +/// Lower the call to 'memset' intrinsic function into a series of store /// operations. /// /// \param DAG Selection DAG where lowered code is placed. @@ -8522,7 +8522,7 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { return true; } -// \brief Returns the SDNode if it is a constant integer BuildVector +// Returns the SDNode if it is a constant integer BuildVector // or constant integer. SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) { if (isa<ConstantSDNode>(N)) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 4695374bc7a..2ac4d3a7b24 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7758,7 +7758,7 @@ SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG, return DAG.getMergeValues(Ops, SL); } -/// \brief Populate a CallLowerinInfo (into \p CLI) based on the properties of +/// Populate a CallLoweringInfo (into \p CLI) based on the properties of /// the call being lowered. /// /// This is a helper for lowering intrinsics that follow a target calling @@ -7793,7 +7793,7 @@ void SelectionDAGBuilder::populateCallLoweringInfo( .setIsPatchPoint(IsPatchPoint); } -/// \brief Add a stack map intrinsic call's live variable operands to a stackmap +/// Add a stack map intrinsic call's live variable operands to a stackmap /// or patchpoint target node's operand list. /// /// Constants are converted to TargetConstants purely as an optimization to @@ -7829,7 +7829,7 @@ static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx, } } -/// \brief Lower llvm.experimental.stackmap directly to its target opcode. +/// Lower llvm.experimental.stackmap directly to its target opcode. void SelectionDAGBuilder::visitStackmap(const CallInst &CI) { // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>, // [live variables...]) @@ -7892,7 +7892,7 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) { FuncInfo.MF->getFrameInfo().setHasStackMap(); } -/// \brief Lower llvm.experimental.patchpoint directly to its target opcode. +/// Lower llvm.experimental.patchpoint directly to its target opcode.
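The memset lowering described in the SelectionDAG.cpp hunk above amounts to splatting the fill byte across a wider integer and emitting a short run of wide stores. A freestanding illustration of the idea, with alignment legality and target store-size selection deliberately elided:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  void memsetByStores(void *Dst, uint8_t Value, size_t Size) {
    uint64_t Splat = 0x0101010101010101ull * Value; // byte repeated 8 times
    auto *P = static_cast<uint8_t *>(Dst);
    size_t I = 0;
    for (; I + 8 <= Size; I += 8)
      std::memcpy(P + I, &Splat, 8); // one 64-bit store per iteration
    for (; I < Size; ++I)
      P[I] = Value; // tail bytes that don't fill a word
  }

  int main() {
    char Buf[20];
    memsetByStores(Buf, 0xAB, sizeof Buf);
    printf("%02x %02x\n", (unsigned char)Buf[0], (unsigned char)Buf[19]);
  }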
void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
const BasicBlock *EHPadBB) {
// void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 1334f1b2bf5..c00a72753e1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -197,7 +197,7 @@ defaultListDAGScheduler("default", "Best scheduler for the target",
namespace llvm {
//===--------------------------------------------------------------------===//
- /// \brief This class is used by SelectionDAGISel to temporarily override
+ /// This class is used by SelectionDAGISel to temporarily override
/// the optimization level on a per-function basis.
class OptLevelChanger {
SelectionDAGISel &IS;
@@ -2835,7 +2835,7 @@ struct MatchScope {
bool HasChainNodesMatched;
};
-/// \\brief A DAG update listener to keep the matching state
+/// A DAG update listener to keep the matching state
/// (i.e. RecordedNodes and MatchScope) uptodate if the target is allowed to
/// change the DAG while matching. X86 addressing mode matcher is an example
/// for this.
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d34e12fac7b..6e829a35f7c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -96,7 +96,7 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
return true;
}
-/// \brief Set CallLoweringInfo attribute flags based on a call instruction
+/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(ImmutableCallSite *CS,
unsigned ArgIdx) {
@@ -3310,7 +3310,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
}
}
-/// \brief Given an exact SDIV by a constant, create a multiplication
+/// Given an exact SDIV by a constant, create a multiplication
/// with the multiplicative inverse of the constant.
static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
const SDLoc &dl, SelectionDAG &DAG,
@@ -3352,7 +3352,7 @@ SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
return SDValue();
}
-/// \brief Given an ISD::SDIV node expressing a divide by constant,
+/// Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
@@ -3416,7 +3416,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
-/// \brief Given an ISD::UDIV node expressing a divide by constant,
+/// Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp
index e20233917e3..d4fbe0a8df0 100644
--- a/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -99,7 +99,7 @@ EnableShrinkWrapOpt("enable-shrink-wrap", cl::Hidden,
namespace {
-/// \brief Class to determine where the safe point to insert the
+/// Class to determine where the safe point to insert the
/// prologue and epilogue are.
/// Unlike the paper from Fred C. Chow, PLDI'88, that introduces the
/// shrink-wrapping term for prologue/epilogue placement, this pass
@@ -153,7 +153,7 @@ class ShrinkWrap : public MachineFunctionPass {
/// Current MachineFunction.
MachineFunction *MachineFunc;
- /// \brief Check if \p MI uses or defines a callee-saved register or
+ /// Check if \p MI uses or defines a callee-saved register or
/// a frame index. If this is the case, this means \p MI must happen
/// after Save and before Restore.
bool useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS) const;
@@ -173,14 +173,14 @@ class ShrinkWrap : public MachineFunctionPass {
return CurrentCSRs;
}
- /// \brief Update the Save and Restore points such that \p MBB is in
+ /// Update the Save and Restore points such that \p MBB is in
/// the region that is dominated by Save and post-dominated by Restore
/// and Save and Restore still match the safe point definition.
/// Such point may not exist and Save and/or Restore may be null after
/// this call.
void updateSaveRestorePoints(MachineBasicBlock &MBB, RegScavenger *RS);
- /// \brief Initialize the pass for \p MF.
+ /// Initialize the pass for \p MF.
void init(MachineFunction &MF) {
RCI.runOnMachineFunction(MF);
MDT = &getAnalysis<MachineDominatorTree>();
@@ -206,7 +206,7 @@ class ShrinkWrap : public MachineFunctionPass {
/// shrink-wrapping.
bool ArePointsInteresting() const { return Save != Entry && Save && Restore; }
- /// \brief Check if shrink wrapping is enabled for this target and function.
+ /// Check if shrink wrapping is enabled for this target and function.
static bool isShrinkWrapEnabled(const MachineFunction &MF);
public:
@@ -232,7 +232,7 @@ public:
StringRef getPassName() const override { return "Shrink Wrapping analysis"; }
- /// \brief Perform the shrink-wrapping analysis and update
+ /// Perform the shrink-wrapping analysis and update
/// the MachineFrameInfo attached to \p MF with the results.
bool runOnMachineFunction(MachineFunction &MF) override;
};
@@ -294,7 +294,7 @@ bool ShrinkWrap::useOrDefCSROrFI(const MachineInstr &MI,
return false;
}
-/// \brief Helper function to find the immediate (post) dominator.
+/// Helper function to find the immediate (post) dominator.
template <typename ListOfBBs, typename DominanceAnalysis>
static MachineBasicBlock *FindIDom(MachineBasicBlock &Block, ListOfBBs BBs,
DominanceAnalysis &Dom) {
diff --git a/llvm/lib/CodeGen/SpillPlacement.cpp b/llvm/lib/CodeGen/SpillPlacement.cpp
index b989b54d419..f6786b30b21 100644
--- a/llvm/lib/CodeGen/SpillPlacement.cpp
+++ b/llvm/lib/CodeGen/SpillPlacement.cpp
@@ -246,7 +246,7 @@ void SpillPlacement::activate(unsigned n) {
}
}
-/// \brief Set the threshold for a given entry frequency.
+/// Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry.
/// Since the threshold is used as a
/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
diff --git a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
index cc9af92c395..32d6f54f679 100644
--- a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
+++ b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
@@ -39,7 +39,7 @@ STATISTIC(NumBBsHaveNoStackmap, "Number of basic blocks with no stackmap");
STATISTIC(NumStackMaps, "Number of StackMaps visited");
namespace {
-/// \brief This pass calculates the liveness information for each basic block in
+/// This pass calculates the liveness information for each basic block in
/// a function and attaches the register live-out information to a patchpoint
/// intrinsic if present.
///
@@ -54,10 +54,10 @@ class StackMapLiveness : public MachineFunctionPass {
public:
static char ID;
- /// \brief Default construct and initialize the pass.
+ /// Default construct and initialize the pass.
StackMapLiveness();
- /// \brief Tell the pass manager which passes we depend on and what
+ /// Tell the pass manager which passes we depend on and what
/// information we preserve.
void getAnalysisUsage(AnalysisUsage &AU) const override;
@@ -66,17 +66,17 @@ public:
MachineFunctionProperties::Property::NoVRegs);
}
- /// \brief Calculate the liveness information for the given machine function.
+ /// Calculate the liveness information for the given machine function.
bool runOnMachineFunction(MachineFunction &MF) override;
private:
- /// \brief Performs the actual liveness calculation for the function.
+ /// Performs the actual liveness calculation for the function.
bool calculateLiveness(MachineFunction &MF);
- /// \brief Add the current register live set to the instruction.
+ /// Add the current register live set to the instruction.
void addLiveOutSetToMI(MachineFunction &MF, MachineInstr &MI);
- /// \brief Create a register mask and initialize it with the registers from
+ /// Create a register mask and initialize it with the registers from
/// the register live set.
uint32_t *createRegisterMask(MachineFunction &MF) const;
};
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 8a7393501d0..9bc0c1fc043 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -225,7 +225,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
return false;
}
-/// \brief Check whether or not this function needs a stack protector based
+/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index aa071ddbf9f..80622ea7bae 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -345,7 +345,7 @@ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
return BestRC;
}
-/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
+/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
const TargetRegisterClass *DefRC,
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index e4efc15f2ae..2cc6c460288 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -96,14 +96,14 @@ ExecutionEngine::~ExecutionEngine() {
}
namespace {
-/// \brief Helper class which uses a value handler to automatically deletes the
+/// Helper class which uses a value handle to automatically delete the
/// memory block when the GlobalVariable is destroyed.
class GVMemoryBlock final : public CallbackVH {
GVMemoryBlock(const GlobalVariable *GV)
: CallbackVH(const_cast<GlobalVariable*>(GV)) {}
public:
- /// \brief Returns the address the GlobalVariable should be written into. The
+ /// Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
Type *ElTy = GV->getValueType();
@@ -589,7 +589,7 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
return getPointerToGlobalIfAvailable(GV);
}
-/// \brief Converts a Constant* into a GenericValue, including handling of
+/// Converts a Constant* into a GenericValue, including handling of
/// ConstantExpr values.
GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
// If its undefined, return the garbage.
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 766a9b21cb1..0e27e51c93e 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -87,7 +87,7 @@ public:
uint8_t *getAddress() const { return Address; }
- /// \brief Return the address of this section with an offset.
+ /// Return the address of this section with an offset.
uint8_t *getAddressWithOffset(unsigned OffsetBytes) const {
assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
return Address + OffsetBytes;
@@ -98,7 +98,7 @@ public:
uint64_t getLoadAddress() const { return LoadAddress; }
void setLoadAddress(uint64_t LA) { LoadAddress = LA; }
- /// \brief Return the load address of this section with an offset.
+ /// Return the load address of this section with an offset.
uint64_t getLoadAddressWithOffset(unsigned OffsetBytes) const {
assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
return LoadAddress + OffsetBytes;
@@ -381,14 +381,14 @@ protected:
return Addr;
}
- /// \brief Given the common symbols discovered in the object file, emit a
+ /// Given the common symbols discovered in the object file, emit a
/// new section for them and update the symbol mappings in the object and
/// symbol table.
Error emitCommonSymbols(const ObjectFile &Obj,
CommonSymbolList &CommonSymbols,
uint64_t CommonSize, uint32_t CommonAlign);
- /// \brief Emits section data from the object file to the MemoryManager.
+ /// Emits section data from the object file to the MemoryManager.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
@@ -396,7 +396,7 @@ protected:
const SectionRef &Section,
bool IsCode);
- /// \brief Find Section in LocalSections. If the secton is not found - emit
+ /// Find Section in LocalSections. If the section is not found, emit
/// it and store in LocalSections.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emmits, else allocateDataSection() will be used.
@@ -405,26 +405,26 @@ protected:
const SectionRef &Section,
bool IsCode,
ObjSectionToIDMap &LocalSections);
- // \brief Add a relocation entry that uses the given section.
+ // Add a relocation entry that uses the given section.
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
- // \brief Add a relocation entry that uses the given symbol. This symbol may
+ // Add a relocation entry that uses the given symbol. This symbol may
// be found in the global symbol table, or it may be external.
void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
- /// \brief Emits long jump instruction to Addr.
+ /// Emits a long jump instruction to Addr.
/// \return Pointer to the memory area for emitting target address.
uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
- /// \brief Resolves relocations from Relocs list with address from Value.
+ /// Resolves relocations from Relocs list with address from Value.
void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
- /// \brief A object file specific relocation resolver
+ /// An object file specific relocation resolver
/// \param RE The relocation to be resolved
/// \param Value Target symbol address to apply the relocation action
virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
- /// \brief Parses one or more object file relocations (some object files use
+ /// Parses one or more object file relocations (some object files use
/// relocation pairs) and stores it to Relocations or SymbolRelocations
/// (this depends on the object file type).
/// \return Iterator to the next relocation that needs to be parsed.
@@ -433,35 +433,35 @@ protected:
const ObjectFile &Obj,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) = 0;
- /// \brief Resolve relocations to external symbols.
+ /// Resolve relocations to external symbols.
Error resolveExternalSymbols();
- // \brief Compute an upper bound of the memory that is required to load all
+ // Compute an upper bound of the memory that is required to load all
// sections
Error computeTotalAllocSize(const ObjectFile &Obj,
uint64_t &CodeSize, uint32_t &CodeAlign,
uint64_t &RODataSize, uint32_t &RODataAlign,
uint64_t &RWDataSize, uint32_t &RWDataAlign);
- // \brief Compute GOT size
+ // Compute GOT size
unsigned computeGOTSize(const ObjectFile &Obj);
- // \brief Compute the stub buffer size required for a section
+ // Compute the stub buffer size required for a section
unsigned computeSectionStubBufSize(const ObjectFile &Obj,
const SectionRef &Section);
- // \brief Implementation of the generic part of the loadObject algorithm.
+ // Implementation of the generic part of the loadObject algorithm.
Expected<ObjSectionToIDMap> loadObjectImpl(const object::ObjectFile &Obj);
- // \brief Return size of Global Offset Table (GOT) entry
+ // Return size of Global Offset Table (GOT) entry
virtual size_t getGOTEntrySize() { return 0; }
- // \brief Return true if the relocation R may require allocating a GOT entry.
+ // Return true if the relocation R may require allocating a GOT entry.
virtual bool relocationNeedsGot(const RelocationRef &R) const {
return false;
}
- // \brief Return true if the relocation R may require allocating a stub.
+ // Return true if the relocation R may require allocating a stub.
virtual bool relocationNeedsStub(const RelocationRef &R) const {
return true; // Conservative answer
}
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
index ce54a271767..f53b9e6bd75 100644
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
@@ -39,13 +39,13 @@ protected:
uint64_t SymOffset, SID SectionID);
private:
- /// \brief A object file specific relocation resolver
+ /// An object file specific relocation resolver
/// \param RE The relocation to be resolved
/// \param Value Target symbol address to apply the relocation action
uint64_t evaluateRelocation(const RelocationEntry &RE, uint64_t Value,
uint64_t Addend);
- /// \brief A object file specific relocation resolver
+ /// An object file specific relocation resolver
/// \param RE The relocation to be resolved
/// \param Value Target symbol address to apply the relocation action
void applyRelocation(const RelocationEntry &RE, uint64_t Value);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 8b97e2756cb..08a14934b57 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -763,7 +763,7 @@ private:
/// CreateFunctionSlot - Insert the specified Value* into the slot table.
void CreateFunctionSlot(const Value *V);
- /// \brief Insert the specified AttributeSet into the slot table.
+ /// Insert the specified AttributeSet into the slot table.
void CreateAttributeSetSlot(AttributeSet AS);
/// Add all of the module level global variables (and their initializers)
@@ -2236,7 +2236,7 @@ public:
void printUseLists(const Function *F);
private:
- /// \brief Print out metadata attachments.
+ /// Print out metadata attachments.
void printMetadataAttachments(
const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
StringRef Separator);
@@ -3695,7 +3695,7 @@ void Module::dump() const {
/*ShouldPreserveUseListOrder=*/false, /*IsForDebug=*/true);
}
-// \brief Allow printing of Comdats from the debugger.
+// Allow printing of Comdats from the debugger.
LLVM_DUMP_METHOD
void Comdat::dump() const { print(dbgs(), /*IsForDebug=*/true); }
diff --git a/llvm/lib/IR/AttributeImpl.h b/llvm/lib/IR/AttributeImpl.h
index 9c7b61f6792..bb0c072e478 100644
--- a/llvm/lib/IR/AttributeImpl.h
+++ b/llvm/lib/IR/AttributeImpl.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file defines various helper methods and classes used by
+/// This file defines various helper methods and classes used by
/// LLVMContextImpl for creating and managing attributes.
///
//===----------------------------------------------------------------------===//
@@ -33,7 +33,7 @@ class LLVMContext;
//===----------------------------------------------------------------------===//
/// \class
-/// \brief This class represents a single, uniqued attribute. That attribute
+/// This class represents a single, uniqued attribute. That attribute
/// could be a single enum, a tuple, or a string.
class AttributeImpl : public FoldingSetNode {
unsigned char KindID; ///< Holds the AttrEntryKind of the attribute
@@ -67,7 +67,7 @@ public:
StringRef getKindAsString() const;
StringRef getValueAsString() const;
- /// \brief Used when sorting the attributes.
+ /// Used when sorting the attributes.
bool operator<(const AttributeImpl &AI) const;
void Profile(FoldingSetNodeID &ID) const {
@@ -93,7 +93,7 @@ public:
//===----------------------------------------------------------------------===//
/// \class
-/// \brief A set of classes that contain the value of the
+/// A set of classes that contain the value of the
/// attribute object. There are three main categories: enum attribute entries,
/// represented by Attribute::AttrKind; alignment attribute entries; and string
/// attribute enties, which are for target-dependent attributes.
@@ -148,7 +148,7 @@ public:
//===----------------------------------------------------------------------===//
/// \class
-/// \brief This class represents a group of attributes that apply to one
+/// This class represents a group of attributes that apply to one
/// element: function, return type, or parameter.
class AttributeSetNode final
: public FoldingSetNode,
@@ -172,7 +172,7 @@ public:
static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs);
- /// \brief Return the number of attributes this AttributeList contains.
+ /// Return the number of attributes this AttributeList contains.
unsigned getNumAttributes() const { return NumAttrs; }
bool hasAttribute(Attribute::AttrKind Kind) const {
@@ -210,7 +210,7 @@ using IndexAttrPair = std::pair<unsigned, AttributeSet>;
//===----------------------------------------------------------------------===//
/// \class
-/// \brief This class represents a set of attributes that apply to the function,
+/// This class represents a set of attributes that apply to the function,
/// return type, and parameters.
class AttributeListImpl final
: public FoldingSetNode,
@@ -236,10 +236,10 @@ public:
void operator delete(void *p) { ::operator delete(p); }
- /// \brief Get the context that created this AttributeListImpl.
+ /// Get the context that created this AttributeListImpl.
LLVMContext &getContext() { return Context; }
- /// \brief Return true if the AttributeSet or the FunctionIndex has an
+ /// Return true if the AttributeSet or the FunctionIndex has an
/// enum attribute of the given kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const {
return AvailableFunctionAttrs & ((uint64_t)1) << Kind;
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index 3fe63e44091..dbe38c2095d 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// \file
-// \brief This file implements the Attribute, AttributeImpl, AttrBuilder,
+// This file implements the Attribute, AttributeImpl, AttrBuilder,
// AttributeListImpl, and AttributeList classes.
//
//===----------------------------------------------------------------------===//
@@ -1569,7 +1569,7 @@ bool AttrBuilder::operator==(const AttrBuilder &B) {
// AttributeFuncs Function Defintions
//===----------------------------------------------------------------------===//
-/// \brief Which attributes cannot be applied to a type.
+/// Which attributes cannot be applied to a type.
AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
AttrBuilder Incompatible;
@@ -1601,7 +1601,7 @@ static bool isEqual(const Function &Caller, const Function &Callee) {
Callee.getFnAttribute(AttrClass::getKind());
}
-/// \brief Compute the logical AND of the attributes of the caller and the
+/// Compute the logical AND of the attributes of the caller and the
/// callee.
///
/// This function sets the caller's attribute to false if the callee's attribute
@@ -1613,7 +1613,7 @@ static void setAND(Function &Caller, const Function &Callee) {
AttrClass::set(Caller, AttrClass::getKind(), false);
}
-/// \brief Compute the logical OR of the attributes of the caller and the
+/// Compute the logical OR of the attributes of the caller and the
/// callee.
///
/// This function sets the caller's attribute to true if the callee's attribute
@@ -1625,7 +1625,7 @@ static void setOR(Function &Caller, const Function &Callee) {
AttrClass::set(Caller, AttrClass::getKind(), true);
}
-/// \brief If the inlined function had a higher stack protection level than the
+/// If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
// If upgrading the SSP attribute, clear out the old SSP Attributes first.
@@ -1649,7 +1649,7 @@ static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
Caller.addFnAttr(Attribute::StackProtect);
}
-/// \brief If the inlined function required stack probes, then ensure that
+/// If the inlined function required stack probes, then ensure that
/// the calling function has those too.
static void adjustCallerStackProbes(Function &Caller, const Function &Callee) {
if (!Caller.hasFnAttribute("probe-stack") &&
@@ -1658,7 +1658,7 @@ static void adjustCallerStackProbes(Function &Caller, const Function &Callee) {
}
}
-/// \brief If the inlined function defines the size of guard region
+/// If the inlined function defines the size of guard region
/// on the stack, then ensure that the calling function defines a guard region
/// that is no larger.
static void
diff --git a/llvm/lib/IR/DiagnosticHandler.cpp b/llvm/lib/IR/DiagnosticHandler.cpp
index fb1ac438ffb..8f972785cf9 100644
--- a/llvm/lib/IR/DiagnosticHandler.cpp
+++ b/llvm/lib/IR/DiagnosticHandler.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
namespace {
-/// \brief Regular expression corresponding to the value given in one of the
+/// Regular expression corresponding to the value given in one of the
/// -pass-remarks* command line flags. Passes whose name matches this regexp
/// will emit a diagnostic when calling the associated diagnostic function
/// (emitOptimizationRemark, emitOptimizationRemarkMissed or
diff --git a/llvm/lib/IR/Dominators.cpp b/llvm/lib/IR/Dominators.cpp
index 8979ca37103..d8971e05f47 100644
--- a/llvm/lib/IR/Dominators.cpp
+++ b/llvm/lib/IR/Dominators.cpp
@@ -381,7 +381,7 @@ void DominatorTreeWrapperPass::print(raw_ostream &OS, const Module *) const {
//
//===----------------------------------------------------------------------===//
-/// \brief Queues multiple updates and discards duplicates.
+/// Queues multiple updates and discards duplicates.
void DeferredDominance::applyUpdates(
ArrayRef<DominatorTree::UpdateType> Updates) {
SmallVector<DominatorTree::UpdateType, 8> Seen;
@@ -394,7 +394,7 @@ void DeferredDominance::applyUpdates(
}
}
-/// \brief Helper method for a single edge insertion. It's almost always better
+/// Helper method for a single edge insertion. It's almost always better
/// to batch updates and call applyUpdates to quickly remove duplicate edges.
/// This is best used when there is only a single insertion needed to update
/// Dominators.
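The DeferredDominance comments above describe a batching pattern: CFG edge updates are queued, duplicates are discarded, and the dominator tree is only brought up to date at flush() time. A minimal standalone C++ sketch of that pattern follows; the class and member names (DeferredUpdates, applyUpdates, flush) are illustrative stand-ins, not LLVM's actual API.

// A minimal sketch of the deferred-update pattern described above: queue
// (Insert/Delete, From, To) edge updates, drop duplicates, and apply the
// whole batch at flush() time. Names are illustrative, not LLVM's API.
#include <cstdio>
#include <set>
#include <string>
#include <tuple>
#include <vector>

enum class UpdateKind { Insert, Delete };
using Update = std::tuple<UpdateKind, std::string, std::string>;

class DeferredUpdates {
  std::vector<Update> Pending;
  std::set<Update> Seen; // Discards exact duplicate updates.

public:
  // Queue a batch of updates; duplicates of already-pending updates are dropped.
  void applyUpdates(const std::vector<Update> &Updates) {
    for (const Update &U : Updates)
      if (Seen.insert(U).second)
        Pending.push_back(U);
  }

  // Single-edge helpers; batching via applyUpdates is preferred.
  void insertEdge(const std::string &From, const std::string &To) {
    applyUpdates({{UpdateKind::Insert, From, To}});
  }
  void deleteEdge(const std::string &From, const std::string &To) {
    applyUpdates({{UpdateKind::Delete, From, To}});
  }

  bool pending() const { return !Pending.empty(); }

  // Flush applies every queued update at once, then clears the queue.
  void flush() {
    for (const auto &[K, From, To] : Pending)
      std::printf("%s edge %s -> %s\n",
                  K == UpdateKind::Insert ? "insert" : "delete",
                  From.c_str(), To.c_str());
    Pending.clear();
    Seen.clear();
  }
};

int main() {
  DeferredUpdates DDT;
  DDT.applyUpdates({{UpdateKind::Insert, "entry", "loop"},
                    {UpdateKind::Insert, "entry", "loop"}, // duplicate, dropped
                    {UpdateKind::Delete, "loop", "exit"}});
  DDT.insertEdge("loop", "latch");
  if (DDT.pending())
    DDT.flush(); // One recalculation instead of one per CFG edit.
}

Batching this way trades a little bookkeeping for a single tree update per flush, which is exactly the motivation the insertEdge/deleteEdge comments spell out.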
@@ -402,7 +402,7 @@ void DeferredDominance::insertEdge(BasicBlock *From, BasicBlock *To) {
applyUpdate(DominatorTree::Insert, From, To);
}
-/// \brief Helper method for a single edge deletion. It's almost always better
+/// Helper method for a single edge deletion. It's almost always better
/// to batch updates and call applyUpdates to quickly remove duplicate edges.
/// This is best used when there is only a single deletion needed to update
/// Dominators.
@@ -410,7 +410,7 @@ void DeferredDominance::deleteEdge(BasicBlock *From, BasicBlock *To) {
applyUpdate(DominatorTree::Delete, From, To);
}
-/// \brief Delays the deletion of a basic block until a flush() event.
+/// Delays the deletion of a basic block until a flush() event.
void DeferredDominance::deleteBB(BasicBlock *DelBB) {
assert(DelBB && "Invalid push_back of nullptr DelBB.");
assert(pred_empty(DelBB) && "DelBB has one or more predecessors.");
@@ -428,17 +428,17 @@ void DeferredDominance::deleteBB(BasicBlock *DelBB) {
DeletedBBs.insert(DelBB);
}
-/// \brief Returns true if DelBB is awaiting deletion at a flush() event.
+/// Returns true if DelBB is awaiting deletion at a flush() event.
bool DeferredDominance::pendingDeletedBB(BasicBlock *DelBB) {
if (DeletedBBs.empty())
return false;
return DeletedBBs.count(DelBB) != 0;
}
-/// \brief Returns true if pending DT updates are queued for a flush() event.
+/// Returns true if pending DT updates are queued for a flush() event.
bool DeferredDominance::pending() { return !PendUpdates.empty(); }
-/// \brief Flushes all pending updates and block deletions. Returns a
+/// Flushes all pending updates and block deletions. Returns a
/// correct DominatorTree reference to be used by the caller for analysis.
DominatorTree &DeferredDominance::flush() {
// Updates to DT must happen before blocks are deleted below. Otherwise the
@@ -451,7 +451,7 @@ DominatorTree &DeferredDominance::flush() {
return DT;
}
-/// \brief Drops all internal state and forces a (slow) recalculation of the
+/// Drops all internal state and forces a (slow) recalculation of the
/// DominatorTree based on the current state of the LLVM IR in F. This should
/// only be used in corner cases such as the Entry block of F being deleted.
void DeferredDominance::recalculate(Function &F) {
@@ -464,7 +464,7 @@ void DeferredDominance::recalculate(Function &F) {
}
}
-/// \brief Debug method to help view the state of pending updates.
+/// Debug method to help view the state of pending updates.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void DeferredDominance::dump() const {
raw_ostream &OS = llvm::dbgs();
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index dfe50f60a8f..bcc654e868a 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -508,7 +508,7 @@ static ArrayRef<const char *> findTargetSubtable(StringRef Name) {
return makeArrayRef(&IntrinsicNameTable[1] + TI.Offset, TI.Count);
}
-/// \brief This does the actual lookup of an intrinsic ID which
+/// This does the actual lookup of an intrinsic ID which
/// matches the given function name.
Intrinsic::ID Function::lookupIntrinsicID(StringRef Name) {
ArrayRef<const char *> NameTable = findTargetSubtable(Name);
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 485b62139ac..71bdc35d4c1 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -389,7 +389,7 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
return createCallHelper(FnAssume, Ops, this);
}
-/// \brief Create a call to a Masked Load intrinsic.
+/// Create a call to a Masked Load intrinsic.
/// \p Ptr - base pointer for the load
/// \p Align - alignment of the source location
/// \p Mask - vector of booleans which indicates what vector lanes should
@@ -412,7 +412,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
OverloadedTypes, Name);
}
-/// \brief Create a call to a Masked Store intrinsic.
+/// Create a call to a Masked Store intrinsic.
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
/// \p Align - alignment of the destination location
@@ -441,7 +441,7 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
return createCallHelper(TheFn, Ops, this, Name);
}
-/// \brief Create a call to a Masked Gather intrinsic.
+/// Create a call to a Masked Gather intrinsic.
/// \p Ptrs - vector of pointers for loading
/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
@@ -473,7 +473,7 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
Name);
}
-/// \brief Create a call to a Masked Scatter intrinsic.
+/// Create a call to a Masked Scatter intrinsic.
/// \p Data - data to be stored,
/// \p Ptrs - the vector of pointers, where the \p Data elements should be
/// stored
diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp
index 53dd4d57ab8..f0ac4583dea 100644
--- a/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/llvm/lib/IR/LLVMContextImpl.cpp
@@ -155,7 +155,7 @@ void Module::dropTriviallyDeadConstantArrays() {
namespace llvm {
-/// \brief Make MDOperand transparent for hashing.
+/// Make MDOperand transparent for hashing.
///
/// This overload of an implementation detail of the hashing library makes
/// MDOperand hash to the same value as a \a Metadata pointer.
diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h
index b2ba9d38a81..13579dcab08 100644
--- a/llvm/lib/IR/LLVMContextImpl.h
+++ b/llvm/lib/IR/LLVMContextImpl.h
@@ -202,7 +202,7 @@ struct FunctionTypeKeyInfo {
}
};
-/// \brief Structure for hashing arbitrary MDNode operands.
+/// Structure for hashing arbitrary MDNode operands.
class MDNodeOpsKey {
ArrayRef<Metadata *> RawOps;
ArrayRef<MDOperand> Ops;
@@ -257,7 +257,7 @@ template <class NodeTy> struct MDNodeSubsetEqualImpl {
}
};
-/// \brief DenseMapInfo for MDTuple.
+/// DenseMapInfo for MDTuple.
///
/// Note that we don't need the is-function-local bit, since that's implicit in
/// the operands.
@@ -274,7 +274,7 @@ template <> struct MDNodeKeyImpl<MDTuple> : MDNodeOpsKey {
}
};
-/// \brief DenseMapInfo for DILocation.
+/// DenseMapInfo for DILocation.
template <> struct MDNodeKeyImpl<DILocation> {
unsigned Line;
unsigned Column;
@@ -298,7 +298,7 @@ template <> struct MDNodeKeyImpl<DILocation> {
}
};
-/// \brief DenseMapInfo for GenericDINode.
+/// DenseMapInfo for GenericDINode.
template <> struct MDNodeKeyImpl<GenericDINode> : MDNodeOpsKey {
unsigned Tag;
MDString *Header;
@@ -1084,7 +1084,7 @@ template <> struct MDNodeKeyImpl<DIMacroFile> {
}
};
-/// \brief DenseMapInfo for MDNode subclasses.
+/// DenseMapInfo for MDNode subclasses.
template <class NodeTy> struct MDNodeInfo {
using KeyTy = MDNodeKeyImpl<NodeTy>;
using SubsetEqualTy = MDNodeSubsetEqualImpl<NodeTy>;
@@ -1121,7 +1121,7 @@ template <class NodeTy> struct MDNodeInfo {
#define HANDLE_MDNODE_LEAF(CLASS) using CLASS##Info = MDNodeInfo<CLASS>;
#include "llvm/IR/Metadata.def"
-/// \brief Map-like storage for metadata attachments.
+/// Map-like storage for metadata attachments.
class MDAttachmentMap {
SmallVector<std::pair<unsigned, TrackingMDNodeRef>, 2> Attachments;
@@ -1129,27 +1129,27 @@ public:
bool empty() const { return Attachments.empty(); }
size_t size() const { return Attachments.size(); }
- /// \brief Get a particular attachment (if any).
+ /// Get a particular attachment (if any).
MDNode *lookup(unsigned ID) const;
- /// \brief Set an attachment to a particular node.
+ /// Set an attachment to a particular node.
///
/// Set the \c ID attachment to \c MD, replacing the current attachment at \c
/// ID (if anyway).
void set(unsigned ID, MDNode &MD);
- /// \brief Remove an attachment.
+ /// Remove an attachment.
///
/// Remove the attachment at \c ID, if any.
void erase(unsigned ID);
- /// \brief Copy out all the attachments.
+ /// Copy out all the attachments.
///
/// Copies all the current attachments into \c Result, sorting by attachment
/// ID. This function does \em not clear \c Result.
void getAll(SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const;
- /// \brief Erase matching attachments.
+ /// Erase matching attachments.
///
/// Erases all attachments matching the \c shouldRemove predicate.
template <class PredTy> void remove_if(PredTy shouldRemove) {
@@ -1314,7 +1314,7 @@ public:
int getOrAddScopeRecordIdxEntry(MDNode *N, int ExistingIdx);
int getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,int ExistingIdx);
- /// \brief A set of interned tags for operand bundles. The StringMap maps
+ /// A set of interned tags for operand bundles. The StringMap maps
/// bundle tags to their IDs.
///
/// \see LLVMContext::getOperandBundleTagID
@@ -1357,11 +1357,11 @@ public:
mutable OptPassGate *OPG = nullptr;
- /// \brief Access the object which can disable optional passes and individual
+ /// Access the object which can disable optional passes and individual
/// optimizations at compile time.
OptPassGate &getOptPassGate() const;
- /// \brief Set the object which can disable optional passes and individual
+ /// Set the object which can disable optional passes and individual
/// optimizations at compile time.
///
/// The lifetime of the object must be guaranteed to extend as long as the
diff --git a/llvm/lib/IR/MDBuilder.cpp b/llvm/lib/IR/MDBuilder.cpp
index a2bba4c3a10..1bb23c0330f 100644
--- a/llvm/lib/IR/MDBuilder.cpp
+++ b/llvm/lib/IR/MDBuilder.cpp
@@ -133,7 +133,7 @@ MDNode *MDBuilder::createTBAARoot(StringRef Name) {
return MDNode::get(Context, createString(Name));
}
-/// \brief Return metadata for a non-root TBAA node with the given
+/// Return metadata for a non-root TBAA node with the given
/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
MDNode *MDBuilder::createTBAANode(StringRef Name, MDNode *Parent,
bool isConstant) {
@@ -153,7 +153,7 @@ MDNode *MDBuilder::createAliasScope(StringRef Name, MDNode *Domain) {
return MDNode::get(Context, {createString(Name), Domain});
}
-/// \brief Return metadata for a tbaa.struct node with the given
+/// Return metadata for a tbaa.struct node with the given
/// struct field descriptions.
MDNode *MDBuilder::createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
SmallVector<Metadata *, 4> Vals(Fields.size() * 3);
@@ -166,7 +166,7 @@ MDNode *MDBuilder::createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
return MDNode::get(Context, Vals);
}
-/// \brief Return metadata for a TBAA struct node in the type DAG
+/// Return metadata for a TBAA struct node in the type DAG
/// with the given name, a list of pairs (offset, field type in the type DAG).
MDNode *MDBuilder::createTBAAStructTypeNode(
StringRef Name, ArrayRef<std::pair<MDNode *, uint64_t>> Fields) {
@@ -180,7 +180,7 @@ MDNode *MDBuilder::createTBAAStructTypeNode(
return MDNode::get(Context, Ops);
}
-/// \brief Return metadata for a TBAA scalar type node with the
+/// Return metadata for a TBAA scalar type node with the
/// given name, an offset and a parent in the TBAA type DAG.
MDNode *MDBuilder::createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
uint64_t Offset) {
@@ -189,7 +189,7 @@ MDNode *MDBuilder::createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
{createString(Name), Parent, createConstant(Off)});
}
-/// \brief Return metadata for a TBAA tag node with the given
+/// Return metadata for a TBAA tag node with the given
/// base type, access type and offset relative to the base type.
MDNode *MDBuilder::createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
uint64_t Offset, bool IsConstant) {
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 0ffa5dc72b0..770d21dfb7b 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -207,7 +207,7 @@ private:
template <typename... Ts> void WriteTs() {}
public:
- /// \brief A check failed, so printout out the condition and the message.
+ /// A check failed, so print out the condition and the message.
///
/// This provides a nice place to put a breakpoint if you want to see why
/// something is not correct.
@@ -217,7 +217,7 @@ public:
Broken = true;
}
- /// \brief A check failed (with values to print).
+ /// A check failed (with values to print).
///
/// This calls the Message-only version so that the above is easier to set a
/// breakpoint on.
@@ -255,14 +255,14 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
DominatorTree DT;
- /// \brief When verifying a basic block, keep track of all of the
+ /// When verifying a basic block, keep track of all of the
/// instructions we have seen so far.
///
/// This allows us to do efficient dominance checks for the case when an
/// instruction has an operand that is an instruction in the same block.
SmallPtrSet<Instruction *, 16> InstsInThisBlock;
- /// \brief Keep track of the metadata nodes that have been checked already.
+ /// Keep track of the metadata nodes that have been checked already.
SmallPtrSet<const Metadata *, 32> MDNodes;
/// Keep track which DISubprogram is attached to which function.
@@ -271,10 +271,10 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// Track all DICompileUnits visited.
SmallPtrSet<const Metadata *, 2> CUVisited;
- /// \brief The result type for a landingpad.
+ /// The result type for a landingpad.
Type *LandingPadResultTy;
- /// \brief Whether we've seen a call to @llvm.localescape in this function
+ /// Whether we've seen a call to @llvm.localescape in this function
/// already.
bool SawFrameEscape;
@@ -4421,7 +4421,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
};
}
-/// \brief Carefully grab the subprogram from a local scope.
+/// Carefully grab the subprogram from a local scope.
///
/// This carefully grabs the subprogram from a local scope, avoiding the
/// built-in assertions that would typically fire.
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 154736a53f9..b6af11d09ba 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -476,7 +476,7 @@ void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
}
}
-/// \brief Write the fragment \p F to the output file.
+/// Write the fragment \p F to the output file.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment &F) {
MCObjectWriter *OW = Asm.getWriterPtr();
diff --git a/llvm/lib/MC/MCDisassembler/Disassembler.cpp b/llvm/lib/MC/MCDisassembler/Disassembler.cpp
index 2e700b3b2ea..30e0bb56264 100644
--- a/llvm/lib/MC/MCDisassembler/Disassembler.cpp
+++ b/llvm/lib/MC/MCDisassembler/Disassembler.cpp
@@ -130,7 +130,7 @@ void LLVMDisasmDispose(LLVMDisasmContextRef DCR){
delete DC;
}
-/// \brief Emits the comments that are stored in \p DC comment stream.
+/// Emits the comments that are stored in \p DC comment stream.
/// Each comment in the comment stream must end with a newline.
static void emitComments(LLVMDisasmContext *DC,
formatted_raw_ostream &FormattedOS) {
@@ -158,7 +158,7 @@ static void emitComments(LLVMDisasmContext *DC,
DC->CommentsToEmit.clear();
}
-/// \brief Gets latency information for \p Inst from the itinerary
+/// Gets latency information for \p Inst from the itinerary
/// scheduling model, based on \p DC information.
/// \return The maximum expected latency over all the operands or -1
/// if no information is available.
@@ -184,7 +184,7 @@ static int getItineraryLatency(LLVMDisasmContext *DC, const MCInst &Inst) {
return Latency;
}
-/// \brief Gets latency information for \p Inst, based on \p DC information.
+/// Gets latency information for \p Inst, based on \p DC information.
/// \return The maximum expected latency over all the definitions or -1
/// if no information is available.
static int getLatency(LLVMDisasmContext *DC, const MCInst &Inst) {
@@ -221,7 +221,7 @@ static int getLatency(LLVMDisasmContext *DC, const MCInst &Inst) {
return Latency;
}
-/// \brief Emits latency information in DC->CommentStream for \p Inst, based
+/// Emits latency information in DC->CommentStream for \p Inst, based
/// on the information available in \p DC.
static void emitLatency(LLVMDisasmContext *DC, const MCInst &Inst) {
int Latency = getLatency(DC, Inst);
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 2ad19a793ad..f9b88895245 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -480,7 +480,7 @@ bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
return IsRelocatable && Value.isAbsolute();
}
-/// \brief Helper method for \see EvaluateSymbolAdd().
+/// Helper method for \see EvaluateSymbolAdd().
static void AttemptToFoldSymbolOffsetDifference(
const MCAssembler *Asm, const MCAsmLayout *Layout,
const SectionAddrMap *Addrs, bool InSet, const MCSymbolRefExpr *&A,
@@ -537,7 +537,7 @@ static void AttemptToFoldSymbolOffsetDifference(
A = B = nullptr;
}
-/// \brief Evaluate the result of an add between (conceptually) two MCValues.
+/// Evaluate the result of an add between (conceptually) two MCValues.
///
/// This routine conceptually attempts to construct an MCValue:
/// Result = (Result_A - Result_B + Result_Cst)
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index c2410413a62..82994c06898 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -80,11 +80,11 @@ static cl::opt<unsigned> AsmMacroMaxNestingDepth(
namespace {
-/// \brief Helper types for tracking macro definitions.
+/// Helper types for tracking macro definitions.
typedef std::vector<AsmToken> MCAsmMacroArgument;
typedef std::vector<MCAsmMacroArgument> MCAsmMacroArguments;
-/// \brief Helper class for storing information about an active macro
+/// Helper class for storing information about an active macro
/// instantiation.
struct MacroInstantiation {
/// The location of the instantiation.
@@ -104,13 +104,13 @@ public:
};
struct ParseStatementInfo {
- /// \brief The parsed operands from the last parsed statement.
+ /// The parsed operands from the last parsed statement.
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> ParsedOperands;
- /// \brief The opcode from the last parsed instruction.
+ /// The opcode from the last parsed instruction.
unsigned Opcode = ~0U;
- /// \brief Was there an error parsing the inline assembly?
+ /// Was there an error parsing the inline assembly?
bool ParseError = false;
SmallVectorImpl<AsmRewrite> *AsmRewrites = nullptr;
@@ -120,7 +120,7 @@ struct ParseStatementInfo {
: AsmRewrites(rewrites) {}
};
-/// \brief The concrete assembly parser instance.
+/// The concrete assembly parser instance.
class AsmParser : public MCAsmParser {
private:
AsmLexer Lexer;
@@ -139,21 +139,21 @@ private:
AsmCond TheCondState;
std::vector<AsmCond> TheCondStack;
- /// \brief maps directive names to handler methods in parser
+ /// Maps directive names to handler methods in parser
/// extensions. Extensions register themselves in this map by calling
/// addDirectiveHandler.
StringMap<ExtensionDirectiveHandler> ExtensionDirectiveMap;
- /// \brief Stack of active macro instantiations.
+ /// Stack of active macro instantiations.
std::vector<MacroInstantiation*> ActiveMacros;
- /// \brief List of bodies of anonymous macros.
+ /// List of bodies of anonymous macros.
std::deque<MCAsmMacro> MacroLikeBodies;
/// Boolean tracking whether macro substitution is enabled.
unsigned MacrosEnabledFlag : 1;
- /// \brief Keeps track of how many .macro's have been instantiated.
+ /// Keeps track of how many .macro's have been instantiated.
unsigned NumOfMacroInstantiations;
/// The values from the last parsed cpp hash file line comment if any.
@@ -165,16 +165,16 @@ private:
};
CppHashInfoTy CppHashInfo;
- /// \brief List of forward directional labels for diagnosis at the end.
+ /// List of forward directional labels for diagnosis at the end.
SmallVector<std::tuple<SMLoc, CppHashInfoTy, MCSymbol *>, 4> DirLabels;
/// AssemblerDialect. ~OU means unset value and use value provided by MAI.
unsigned AssemblerDialect = ~0U;
- /// \brief is Darwin compatibility enabled?
+ /// Is Darwin compatibility enabled?
bool IsDarwin = false;
- /// \brief Are we parsing ms-style inline assembly?
+ /// Are we parsing ms-style inline assembly?
bool ParsingInlineAsm = false;
public:
@@ -243,11 +243,11 @@ public:
SMLoc &EndLoc) override;
bool parseAbsoluteExpression(int64_t &Res) override;
- /// \brief Parse a floating point expression using the float \p Semantics
+ /// Parse a floating point expression using the float \p Semantics
/// and set \p Res to the value.
bool parseRealValue(const fltSemantics &Semantics, APInt &Res);
- /// \brief Parse an identifier or string (as a quoted identifier)
+ /// Parse an identifier or string (as a quoted identifier)
/// and set \p Res to the identifier contents.
bool parseIdentifier(StringRef &Res) override;
void eatToEndOfStatement() override;
@@ -271,28 +271,28 @@ private:
ArrayRef<MCAsmMacroArgument> A,
bool EnableAtPseudoVariable, SMLoc L);
- /// \brief Are macros enabled in the parser?
+ /// Are macros enabled in the parser?
bool areMacrosEnabled() {return MacrosEnabledFlag;}
- /// \brief Control a flag in the parser that enables or disables macros.
+ /// Control a flag in the parser that enables or disables macros.
void setMacrosEnabled(bool Flag) {MacrosEnabledFlag = Flag;}
- /// \brief Are we inside a macro instantiation?
+ /// Are we inside a macro instantiation?
bool isInsideMacroInstantiation() {return !ActiveMacros.empty();}
- /// \brief Handle entry to macro instantiation.
+ /// Handle entry to macro instantiation.
///
/// \param M The macro.
/// \param NameLoc Instantiation location.
bool handleMacroEntry(const MCAsmMacro *M, SMLoc NameLoc);
- /// \brief Handle exit from macro instantiation.
+ /// Handle exit from macro instantiation.
void handleMacroExit();
- /// \brief Extract AsmTokens for a macro argument.
+ /// Extract AsmTokens for a macro argument.
bool parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg);
- /// \brief Parse all macro arguments for a given macro.
+ /// Parse all macro arguments for a given macro.
bool parseMacroArguments(const MCAsmMacro *M, MCAsmMacroArguments &A);
void printMacroInstantiations();
@@ -308,15 +308,15 @@ private:
/// info describing the assembler source itself.)
bool enabledGenDwarfForAssembly();
- /// \brief Enter the specified file. This returns true on failure.
+ /// Enter the specified file. This returns true on failure.
bool enterIncludeFile(const std::string &Filename);
- /// \brief Process the specified file for the .incbin directive.
+ /// Process the specified file for the .incbin directive.
/// This returns true on failure.
bool processIncbinFile(const std::string &Filename, int64_t Skip = 0,
const MCExpr *Count = nullptr, SMLoc Loc = SMLoc());
- /// \brief Reset the current lexer position to that given by \p Loc. The
+ /// Reset the current lexer position to that given by \p Loc. The
/// current token is not set; clients should ensure Lex() is called
/// subsequently.
///
@@ -324,12 +324,12 @@ private:
/// location.
void jumpToLoc(SMLoc Loc, unsigned InBuffer = 0);
- /// \brief Parse up to the end of statement and a return the contents from the
+ /// Parse up to the end of statement and return the contents from the
/// current token until the end of the statement; the current token on exit
/// will be either the EndOfStatement or EOF.
StringRef parseStringToEndOfStatement() override;
- /// \brief Parse until the end of a statement or a comma is encountered,
+ /// Parse until the end of a statement or a comma is encountered,
/// return the contents from the current token up to the end or comma.
StringRef parseStringToComma();
@@ -506,7 +506,7 @@ private:
DK_END
};
- /// \brief Maps directive name --> DirectiveKind enum, for
+ /// Maps directive name --> DirectiveKind enum, for
/// directives parsed by this class.
StringMap<DirectiveKind> DirectiveKindMap;
@@ -595,7 +595,7 @@ private:
// .sleb128 (Signed=true) and .uleb128 (Signed=false)
bool parseDirectiveLEB128(bool Signed);
- /// \brief Parse a directive like ".globl" which
+ /// Parse a directive like ".globl" which
/// accepts a single symbol (which should be a label or an external).
bool parseDirectiveSymbolAttribute(MCSymbolAttr Attr);
@@ -957,7 +957,7 @@ bool AsmParser::checkForValidSection() {
return false;
}
-/// \brief Throw away the rest of the line for testing purposes.
+/// Throw away the rest of the line for testing purposes.
void AsmParser::eatToEndOfStatement() {
while (Lexer.isNot(AsmToken::EndOfStatement) && Lexer.isNot(AsmToken::Eof))
Lexer.Lex();
@@ -988,7 +988,7 @@ StringRef AsmParser::parseStringToComma() {
return StringRef(Start, End - Start);
}
-/// \brief Parse a paren expression and return it.
+/// Parse a paren expression and return it.
/// NOTE: This assumes the leading '(' has already been consumed.
///
/// parenexpr ::= expr)
@@ -1003,7 +1003,7 @@ bool AsmParser::parseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
return false;
}
-/// \brief Parse a bracket expression and return it.
+/// Parse a bracket expression and return it.
/// NOTE: This assumes the leading '[' has already been consumed.
///
/// bracketexpr ::= expr]
@@ -1017,7 +1017,7 @@ bool AsmParser::parseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc) {
return false;
}
-/// \brief Parse a primary expression and return it.
+/// Parse a primary expression and return it.
/// primaryexpr ::= (parenexpr
/// primaryexpr ::= symbol
/// primaryexpr ::= number
@@ -1329,7 +1329,7 @@ bool AsmParser::isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc) {
return false;
}
-/// \brief creating a string without the escape characters '!'.
+/// Create a string without the escape character '!'.
void AsmParser::altMacroString(StringRef AltMacroStr,std::string &Res) {
for (size_t Pos = 0; Pos < AltMacroStr.size(); Pos++) {
if (AltMacroStr[Pos] == '!')
@@ -1338,7 +1338,7 @@ void AsmParser::altMacroString(StringRef AltMacroStr,std::string &Res) {
}
}
-/// \brief Parse an expression and return it.
+/// Parse an expression and return it.
///
/// expr ::= expr &&,|| expr -> lowest.
/// expr ::= expr |,^,&,! expr
@@ -1587,7 +1587,7 @@ unsigned AsmParser::getBinOpPrecedence(AsmToken::TokenKind K,
: getGNUBinOpPrecedence(K, Kind, ShouldUseLogicalShr);
}
-/// \brief Parse all binary operators with precedence >= 'Precedence'.
+/// Parse all binary operators with precedence >= 'Precedence'.
/// Res contains the LHS of the expression on input.
bool AsmParser::parseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
SMLoc &EndLoc) {
@@ -2252,7 +2252,7 @@ bool AsmParser::parseCppHashLineFilenameComment(SMLoc L) {
return false;
}
-/// \brief will use the last parsed cpp hash line filename comment
+/// Use the last parsed cpp hash line filename comment
/// for the Filename and LineNo if any in the diagnostic.
void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
const AsmParser *Parser = static_cast<const AsmParser *>(Context);
@@ -3875,7 +3875,7 @@ bool AsmParser::parseDirectiveCFIEndProc() {
return false;
}
-/// \brief parse register name or number.
+/// Parse a register name or number.
bool AsmParser::parseRegisterOrRegisterNumber(int64_t &Register,
SMLoc DirectiveLoc) {
unsigned RegNo;
@@ -5844,7 +5844,7 @@ bool parseAssignmentExpression(StringRef Name, bool allow_redef,
} // end namespace MCParserUtils
} // end namespace llvm
-/// \brief Create an MCAsmParser instance.
+/// Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM, MCContext &C,
MCStreamer &Out, const MCAsmInfo &MAI,
unsigned CB) {
diff --git a/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
index 5bbf49290f1..82741f80128 100644
--- a/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
+++ b/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -40,7 +40,7 @@ using namespace llvm;
namespace {
-/// \brief Implementation of directive handling which is shared across all
+/// Implementation of directive handling which is shared across all
/// Darwin targets.
class DarwinAsmParser : public MCAsmParserExtension {
template<bool (DarwinAsmParser::*HandlerMethod)(StringRef, SMLoc)>
diff --git a/llvm/lib/ObjectYAML/DWARFEmitter.cpp b/llvm/lib/ObjectYAML/DWARFEmitter.cpp
index 1135127eddc..6d75897066a 100644
--- a/llvm/lib/ObjectYAML/DWARFEmitter.cpp
+++ b/llvm/lib/ObjectYAML/DWARFEmitter.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief The DWARF component of yaml2obj. Provided as library code for tests.
+/// The DWARF component of yaml2obj. Provided as library code for tests.
///
//===----------------------------------------------------------------------===//
@@ -132,7 +132,7 @@ void DWARFYAML::EmitPubSection(raw_ostream &OS,
}
namespace {
-/// \brief An extension of the DWARFYAML::ConstVisitor which writes compile
+/// An extension of the DWARFYAML::ConstVisitor which writes compile
/// units and DIEs to a stream.
class DumpVisitor : public DWARFYAML::ConstVisitor {
raw_ostream &OS;
diff --git a/llvm/lib/ObjectYAML/DWARFVisitor.h b/llvm/lib/ObjectYAML/DWARFVisitor.h
index 81ef412eb7e..5489031dc33 100644
--- a/llvm/lib/ObjectYAML/DWARFVisitor.h
+++ b/llvm/lib/ObjectYAML/DWARFVisitor.h
@@ -26,7 +26,7 @@ struct Entry;
struct FormValue;
struct AttributeAbbrev;
-/// \brief A class to visits DWARFYAML Compile Units and DIEs in preorder.
+/// A class that visits DWARFYAML Compile Units and DIEs in preorder.
///
/// Extensions of this class can either maintain const or non-const references
/// to the DWARFYAML::Data object.
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 38f98ae2a18..4787e70bbb1 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -202,7 +202,7 @@ static bool isOptimizingForSize(PassBuilder::OptimizationLevel Level) {
namespace {
-/// \brief No-op module pass which does nothing.
+/// No-op module pass which does nothing.
struct NoOpModulePass {
PreservedAnalyses run(Module &M, ModuleAnalysisManager &) {
return PreservedAnalyses::all();
@@ -210,7 +210,7 @@ struct NoOpModulePass {
static StringRef name() { return "NoOpModulePass"; }
};
-/// \brief No-op module analysis.
+/// No-op module analysis.
class NoOpModuleAnalysis : public AnalysisInfoMixin<NoOpModuleAnalysis> {
friend AnalysisInfoMixin<NoOpModuleAnalysis>;
static AnalysisKey Key;
@@ -221,7 +221,7 @@ public:
static StringRef name() { return "NoOpModuleAnalysis"; }
};
-/// \brief No-op CGSCC pass which does nothing.
+/// No-op CGSCC pass which does nothing.
struct NoOpCGSCCPass { PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &, LazyCallGraph &, CGSCCUpdateResult &UR) { @@ -230,7 +230,7 @@ struct NoOpCGSCCPass { static StringRef name() { return "NoOpCGSCCPass"; } }; -/// \brief No-op CGSCC analysis. +/// No-op CGSCC analysis. class NoOpCGSCCAnalysis : public AnalysisInfoMixin<NoOpCGSCCAnalysis> { friend AnalysisInfoMixin<NoOpCGSCCAnalysis>; static AnalysisKey Key; @@ -243,7 +243,7 @@ public: static StringRef name() { return "NoOpCGSCCAnalysis"; } }; -/// \brief No-op function pass which does nothing. +/// No-op function pass which does nothing. struct NoOpFunctionPass { PreservedAnalyses run(Function &F, FunctionAnalysisManager &) { return PreservedAnalyses::all(); @@ -251,7 +251,7 @@ struct NoOpFunctionPass { static StringRef name() { return "NoOpFunctionPass"; } }; -/// \brief No-op function analysis. +/// No-op function analysis. class NoOpFunctionAnalysis : public AnalysisInfoMixin<NoOpFunctionAnalysis> { friend AnalysisInfoMixin<NoOpFunctionAnalysis>; static AnalysisKey Key; @@ -262,7 +262,7 @@ public: static StringRef name() { return "NoOpFunctionAnalysis"; } }; -/// \brief No-op loop pass which does nothing. +/// No-op loop pass which does nothing. struct NoOpLoopPass { PreservedAnalyses run(Loop &L, LoopAnalysisManager &, LoopStandardAnalysisResults &, LPMUpdater &) { @@ -271,7 +271,7 @@ struct NoOpLoopPass { static StringRef name() { return "NoOpLoopPass"; } }; -/// \brief No-op loop analysis. +/// No-op loop analysis. class NoOpLoopAnalysis : public AnalysisInfoMixin<NoOpLoopAnalysis> { friend AnalysisInfoMixin<NoOpLoopAnalysis>; static AnalysisKey Key; diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp index 588a0807f0a..921372bd8a4 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp @@ -292,7 +292,7 @@ CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames, namespace { -/// \brief Distributes functions into instantiation sets. +/// Distributes functions into instantiation sets. /// /// An instantiation set is a collection of functions that have the same source /// code, ie, template functions specializations. diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp index 649cf507357..5ccb23f28f7 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp @@ -147,7 +147,7 @@ Error RawCoverageMappingReader::readCounter(Counter &C) { static const unsigned EncodingExpansionRegionBit = 1 << Counter::EncodingTagBits; -/// \brief Read the sub-array of regions for the given inferred file id. +/// Read the sub-array of regions for the given inferred file id. /// \param NumFileIDs the number of file ids that are defined for this /// function. Error RawCoverageMappingReader::readMappingRegionsSubArray( diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp index 49e82e48105..bb3f4f854e0 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp @@ -35,7 +35,7 @@ void CoverageFilenamesSectionWriter::write(raw_ostream &OS) { namespace { -/// \brief Gather only the expressions that are used by the mapping +/// Gather only the expressions that are used by the mapping /// regions in this function. 
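An instantiation set, as the CoverageMapping.cpp comment above defines it, groups function records that share a definition point, so all specializations of one template land together. A self-contained sketch of that grouping; FuncRecord and the key layout are illustrative, not the real coverage types.

  #include <map>
  #include <string>
  #include <tuple>
  #include <vector>

  struct FuncRecord {
    std::string Name;
    unsigned FileID, StartLine, StartCol; // where the function is defined
  };

  using SourceKey = std::tuple<unsigned, unsigned, unsigned>;

  // Bucket records by definition point: template specializations share one key.
  std::map<SourceKey, std::vector<FuncRecord>>
  buildInstantiationSets(const std::vector<FuncRecord> &Funcs) {
    std::map<SourceKey, std::vector<FuncRecord>> Sets;
    for (const FuncRecord &F : Funcs)
      Sets[SourceKey(F.FileID, F.StartLine, F.StartCol)].push_back(F);
    return Sets;
  }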
class CounterExpressionsMinimizer { ArrayRef<CounterExpression> Expressions; @@ -74,7 +74,7 @@ public: ArrayRef<CounterExpression> getExpressions() const { return UsedExpressions; } - /// \brief Adjust the given counter to correctly transition from the old + /// Adjust the given counter to correctly transition from the old /// expression ids to the new expression ids. Counter adjust(Counter C) const { if (C.isExpression()) @@ -85,7 +85,7 @@ public: } // end anonymous namespace -/// \brief Encode the counter. +/// Encode the counter. /// /// The encoding uses the following format: /// Low 2 bits - Tag: diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp index fd25728a8a8..bae293aa75f 100644 --- a/llvm/lib/ProfileData/InstrProf.cpp +++ b/llvm/lib/ProfileData/InstrProf.cpp @@ -603,7 +603,7 @@ void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site, #include "llvm/ProfileData/InstrProfData.inc" /*! - * \brief ValueProfRecordClosure Interface implementation for InstrProfRecord + * ValueProfRecordClosure Interface implementation for InstrProfRecord * class. These C wrappers are used as adaptors so that C++ code can be * invoked as callbacks. */ diff --git a/llvm/lib/ProfileData/SampleProf.cpp b/llvm/lib/ProfileData/SampleProf.cpp index d73f1c4760a..30438ba7962 100644 --- a/llvm/lib/ProfileData/SampleProf.cpp +++ b/llvm/lib/ProfileData/SampleProf.cpp @@ -88,7 +88,7 @@ raw_ostream &llvm::sampleprof::operator<<(raw_ostream &OS, LLVM_DUMP_METHOD void LineLocation::dump() const { print(dbgs()); } #endif -/// \brief Print the sample record to the stream \p OS indented by \p Indent. +/// Print the sample record to the stream \p OS indented by \p Indent. void SampleRecord::print(raw_ostream &OS, unsigned Indent) const { OS << NumSamples; if (hasCalls()) { @@ -109,7 +109,7 @@ raw_ostream &llvm::sampleprof::operator<<(raw_ostream &OS, return OS; } -/// \brief Print the samples collected for a function on stream \p OS. +/// Print the samples collected for a function on stream \p OS. void FunctionSamples::print(raw_ostream &OS, unsigned Indent) const { OS << TotalSamples << ", " << TotalHeadSamples << ", " << BodySamples.size() << " sampled lines\n"; diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp index 8048076a537..e192b58de9c 100644 --- a/llvm/lib/ProfileData/SampleProfReader.cpp +++ b/llvm/lib/ProfileData/SampleProfReader.cpp @@ -43,7 +43,7 @@ using namespace llvm; using namespace sampleprof; -/// \brief Dump the function profile for \p FName. +/// Dump the function profile for \p FName. /// /// \param FName Name of the function to print. /// \param OS Stream to emit the output to. @@ -52,13 +52,13 @@ void SampleProfileReader::dumpFunctionProfile(StringRef FName, OS << "Function: " << FName << ": " << Profiles[FName]; } -/// \brief Dump all the function profiles found on stream \p OS. +/// Dump all the function profiles found on stream \p OS. void SampleProfileReader::dump(raw_ostream &OS) { for (const auto &I : Profiles) dumpFunctionProfile(I.getKey(), OS); } -/// \brief Parse \p Input as function head. +/// Parse \p Input as function head. /// /// Parse one line of \p Input, and update function name in \p FName, /// function's total sample count in \p NumSamples, function's entry @@ -79,10 +79,10 @@ static bool ParseHead(const StringRef &Input, StringRef &FName, return true; } -/// \brief Returns true if line offset \p L is legal (only has 16 bits). 
+/// Returns true if line offset \p L is legal (only has 16 bits). static bool isOffsetLegal(unsigned L) { return (L & 0xffff) == L; } -/// \brief Parse \p Input as line sample. +/// Parse \p Input as line sample. /// /// \param Input input line. /// \param IsCallsite true if the line represents an inlined callsite. @@ -184,7 +184,7 @@ static bool ParseLine(const StringRef &Input, bool &IsCallsite, uint32_t &Depth, return true; } -/// \brief Load samples from a text file. +/// Load samples from a text file. /// /// See the documentation at the top of the file for an explanation of /// the expected format. @@ -750,7 +750,7 @@ std::error_code SampleProfileReaderGCC::readOneFunctionProfile( return sampleprof_error::success; } -/// \brief Read a GCC AutoFDO profile. +/// Read a GCC AutoFDO profile. /// /// This format is generated by the Linux Perf conversion tool at /// https://github.com/google/autofdo. @@ -771,7 +771,7 @@ bool SampleProfileReaderGCC::hasFormat(const MemoryBuffer &Buffer) { return Magic == "adcg*704"; } -/// \brief Prepare a memory buffer for the contents of \p Filename. +/// Prepare a memory buffer for the contents of \p Filename. /// /// \returns an error code indicating the status of the buffer. static ErrorOr<std::unique_ptr<MemoryBuffer>> @@ -788,7 +788,7 @@ setupMemoryBuffer(const Twine &Filename) { return std::move(Buffer); } -/// \brief Create a sample profile reader based on the format of the input file. +/// Create a sample profile reader based on the format of the input file. /// /// \param Filename The file to open. /// @@ -803,7 +803,7 @@ SampleProfileReader::create(const Twine &Filename, LLVMContext &C) { return create(BufferOrError.get(), C); } -/// \brief Create a sample profile reader based on the format of the input data. +/// Create a sample profile reader based on the format of the input data. /// /// \param B The memory buffer to create the reader from (assumes ownership). /// diff --git a/llvm/lib/ProfileData/SampleProfWriter.cpp b/llvm/lib/ProfileData/SampleProfWriter.cpp index 59c4885fcdb..45c81782a0c 100644 --- a/llvm/lib/ProfileData/SampleProfWriter.cpp +++ b/llvm/lib/ProfileData/SampleProfWriter.cpp @@ -63,7 +63,7 @@ SampleProfileWriter::write(const StringMap<FunctionSamples> &ProfileMap) { return sampleprof_error::success; } -/// \brief Write samples to a text file. +/// Write samples to a text file. /// /// Note: it may be tempting to implement this in terms of /// FunctionSamples::print(). Please don't. The dump functionality is intended @@ -239,7 +239,7 @@ std::error_code SampleProfileWriterBinary::writeBody(const FunctionSamples &S) { return sampleprof_error::success; } -/// \brief Write samples of a top-level function to a binary file. +/// Write samples of a top-level function to a binary file. /// /// \returns true if the samples were written successfully, false otherwise. std::error_code SampleProfileWriterBinary::write(const FunctionSamples &S) { @@ -247,7 +247,7 @@ std::error_code SampleProfileWriterBinary::write(const FunctionSamples &S) { return writeBody(S); } -/// \brief Create a sample profile file writer based on the specified format. +/// Create a sample profile file writer based on the specified format. /// /// \param Filename The file to create. /// @@ -268,7 +268,7 @@ SampleProfileWriter::create(StringRef Filename, SampleProfileFormat Format) { return create(OS, Format); } -/// \brief Create a sample profile stream writer based on the specified format. +/// Create a sample profile stream writer based on the specified format. 
+/// Create a sample profile stream writer based on the specified format.
/// /// \param OS The output stream to store the profile data to. /// diff --git a/llvm/lib/Support/AMDGPUMetadata.cpp b/llvm/lib/Support/AMDGPUMetadata.cpp index ddb25935e0e..a04bfc2ea29 100644 --- a/llvm/lib/Support/AMDGPUMetadata.cpp +++ b/llvm/lib/Support/AMDGPUMetadata.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU metadata definitions and in-memory representations. +/// AMDGPU metadata definitions and in-memory representations. /// // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Support/BinaryStreamRef.cpp b/llvm/lib/Support/BinaryStreamRef.cpp index 60a03fe9930..bdc0f54bf25 100644 --- a/llvm/lib/Support/BinaryStreamRef.cpp +++ b/llvm/lib/Support/BinaryStreamRef.cpp @@ -127,5 +127,5 @@ WritableBinaryStreamRef::operator BinaryStreamRef() const { return BinaryStreamRef(*BorrowedImpl, ViewOffset, Length); } -/// \brief For buffered streams, commits changes to the backing store. +/// For buffered streams, commits changes to the backing store. Error WritableBinaryStreamRef::commit() { return BorrowedImpl->commit(); } diff --git a/llvm/lib/Support/CommandLine.cpp b/llvm/lib/Support/CommandLine.cpp index 1354e3ec020..d2edc6368ed 100644 --- a/llvm/lib/Support/CommandLine.cpp +++ b/llvm/lib/Support/CommandLine.cpp @@ -974,7 +974,7 @@ static bool ExpandResponseFile(StringRef FName, StringSaver &Saver, return true; } -/// \brief Expand response files on a command line recursively using the given +/// Expand response files on a command line recursively using the given /// StringSaver and tokenization strategy. bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer, SmallVectorImpl<const char *> &Argv, diff --git a/llvm/lib/Support/CrashRecoveryContext.cpp b/llvm/lib/Support/CrashRecoveryContext.cpp index fab81384b55..fd5d097d2b7 100644 --- a/llvm/lib/Support/CrashRecoveryContext.cpp +++ b/llvm/lib/Support/CrashRecoveryContext.cpp @@ -47,7 +47,7 @@ public: CurrentContext->set(Next); } - /// \brief Called when the separate crash-recovery thread was finished, to + /// Called when the separate crash-recovery thread was finished, to /// indicate that we don't need to clear the thread-local CurrentContext. void setSwitchedThread() { #if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0 diff --git a/llvm/lib/Support/GraphWriter.cpp b/llvm/lib/Support/GraphWriter.cpp index 3718affa81d..794c6ba3a85 100644 --- a/llvm/lib/Support/GraphWriter.cpp +++ b/llvm/lib/Support/GraphWriter.cpp @@ -66,7 +66,7 @@ std::string llvm::DOT::EscapeString(const std::string &Label) { return Str; } -/// \brief Get a color string for this node number. Simply round-robin selects +/// Get a color string for this node number. Simply round-robin selects /// from a reasonable number of colors. StringRef llvm::DOT::getColorString(unsigned ColorNumber) { static const int NumColors = 20; diff --git a/llvm/lib/Support/LockFileManager.cpp b/llvm/lib/Support/LockFileManager.cpp index d97452d08c0..368b276f4b6 100644 --- a/llvm/lib/Support/LockFileManager.cpp +++ b/llvm/lib/Support/LockFileManager.cpp @@ -45,7 +45,7 @@ using namespace llvm; -/// \brief Attempt to read the lock file with the given name, if it exists. +/// Attempt to read the lock file with the given name, if it exists. /// /// \param LockFileName The name of the lock file to read. 
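getColorString's round-robin selection above reduces to a modulo lookup into a fixed palette. A sketch with a placeholder palette (the real 20-entry table lives in GraphWriter.cpp):

  #include <cstddef>

  const char *getColorString(unsigned ColorNumber) {
    static const char *const Colors[] = {"#e6194b", "#3cb44b", "#ffe119",
                                         "#4363d8"}; // placeholder palette
    const std::size_t NumColors = sizeof(Colors) / sizeof(Colors[0]);
    return Colors[ColorNumber % NumColors]; // wraps around, never out of range
  }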
/// diff --git a/llvm/lib/Support/MD5.cpp b/llvm/lib/Support/MD5.cpp index a5317227923..9b02f62912f 100644 --- a/llvm/lib/Support/MD5.cpp +++ b/llvm/lib/Support/MD5.cpp @@ -74,7 +74,7 @@ using namespace llvm; -/// \brief This processes one or more 64-byte data blocks, but does NOT update +/// This processes one or more 64-byte data blocks, but does NOT update ///the bit counters. There are no alignment requirements. const uint8_t *MD5::body(ArrayRef<uint8_t> Data) { const uint8_t *ptr; @@ -229,7 +229,7 @@ void MD5::update(StringRef Str) { update(SVal); } -/// \brief Finish the hash and place the resulting hash into \p result. +/// Finish the hash and place the resulting hash into \p result. /// \param Result is assumed to be a minimum of 16-bytes in size. void MD5::final(MD5Result &Result) { unsigned long used, free; diff --git a/llvm/lib/Support/MemoryBuffer.cpp b/llvm/lib/Support/MemoryBuffer.cpp index 9139ba4ead4..fe04bb6fb57 100644 --- a/llvm/lib/Support/MemoryBuffer.cpp +++ b/llvm/lib/Support/MemoryBuffer.cpp @@ -163,7 +163,7 @@ MemoryBuffer::getFileSlice(const Twine &FilePath, uint64_t MapSize, //===----------------------------------------------------------------------===// namespace { -/// \brief Memory maps a file descriptor using sys::fs::mapped_file_region. +/// Memory maps a file descriptor using sys::fs::mapped_file_region. /// /// This handles converting the offset into a legal offset on the platform. template<typename MB> diff --git a/llvm/lib/Support/Parallel.cpp b/llvm/lib/Support/Parallel.cpp index 010e42916f9..16111e18ae3 100644 --- a/llvm/lib/Support/Parallel.cpp +++ b/llvm/lib/Support/Parallel.cpp @@ -19,7 +19,7 @@ using namespace llvm; namespace { -/// \brief An abstract class that takes closures and runs them asynchronously. +/// An abstract class that takes closures and runs them asynchronously. class Executor { public: virtual ~Executor() = default; @@ -40,7 +40,7 @@ Executor *Executor::getDefaultExecutor() { } #elif defined(_MSC_VER) -/// \brief An Executor that runs tasks via ConcRT. +/// An Executor that runs tasks via ConcRT. class ConcRTExecutor : public Executor { struct Taskish { Taskish(std::function<void()> Task) : Task(Task) {} @@ -67,7 +67,7 @@ Executor *Executor::getDefaultExecutor() { } #else -/// \brief An implementation of an Executor that runs closures on a thread pool +/// An implementation of an Executor that runs closures on a thread pool /// in filo order. class ThreadPoolExecutor : public Executor { public: diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp index feeb10ad899..bf08eb3a2ee 100644 --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -682,7 +682,7 @@ static Triple::ObjectFormatType getDefaultFormat(const Triple &T) { llvm_unreachable("unknown architecture"); } -/// \brief Construct a triple from the string representation provided. +/// Construct a triple from the string representation provided. /// /// This stores the string representation and parses the various pieces into /// enum members. @@ -711,7 +711,7 @@ Triple::Triple(const Twine &Str) ObjectFormat = getDefaultFormat(*this); } -/// \brief Construct a triple from string representations of the architecture, +/// Construct a triple from string representations of the architecture, /// vendor, and OS. 
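The MD5 comments above describe a streaming interface: body() consumes whole 64-byte blocks, update() buffers partial ones, and final() closes the digest into a 16-byte result. A usage sketch, assuming the llvm/Support/MD5.h API of this era (update/final/stringifyResult):

  #include "llvm/ADT/SmallString.h"
  #include "llvm/ADT/StringRef.h"
  #include "llvm/Support/MD5.h"

  llvm::SmallString<32> hashTwoPieces(llvm::StringRef A, llvm::StringRef B) {
    llvm::MD5 Hash;
    Hash.update(A);               // input may arrive in arbitrary-size pieces
    Hash.update(B);
    llvm::MD5::MD5Result Result;  // the 16-byte digest (\p Result above)
    Hash.final(Result);
    llvm::SmallString<32> Hex;
    llvm::MD5::stringifyResult(Result, Hex); // hex-string form
    return Hex;
  }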
/// /// This joins each argument into a canonical string representation and parses @@ -727,7 +727,7 @@ Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr) ObjectFormat = getDefaultFormat(*this); } -/// \brief Construct a triple from string representations of the architecture, +/// Construct a triple from string representations of the architecture, /// vendor, OS, and environment. /// /// This joins each argument into a canonical string representation and parses diff --git a/llvm/lib/Support/Windows/Process.inc b/llvm/lib/Support/Windows/Process.inc index 2e9b1c7d051..612eca56413 100644 --- a/llvm/lib/Support/Windows/Process.inc +++ b/llvm/lib/Support/Windows/Process.inc @@ -158,7 +158,7 @@ static std::error_code ConvertAndPushArg(const wchar_t *Arg, return std::error_code(); } -/// \brief Perform wildcard expansion of Arg, or just push it into Args if it +/// Perform wildcard expansion of Arg, or just push it into Args if it /// doesn't have wildcards or doesn't match any files. static std::error_code WildcardExpand(const wchar_t *Arg, SmallVectorImpl<const char *> &Args, diff --git a/llvm/lib/Support/Windows/Signals.inc b/llvm/lib/Support/Windows/Signals.inc index e30522b4ebb..8a636d085b0 100644 --- a/llvm/lib/Support/Windows/Signals.inc +++ b/llvm/lib/Support/Windows/Signals.inc @@ -595,7 +595,7 @@ void llvm::sys::RunInterruptHandlers() { Cleanup(); } -/// \brief Find the Windows Registry Key for a given location. +/// Find the Windows Registry Key for a given location. /// /// \returns a valid HKEY if the location exists, else NULL. static HKEY FindWERKey(const llvm::Twine &RegistryLocation) { @@ -608,7 +608,7 @@ static HKEY FindWERKey(const llvm::Twine &RegistryLocation) { return Key; } -/// \brief Populate ResultDirectory with the value for "DumpFolder" for a given +/// Populate ResultDirectory with the value for "DumpFolder" for a given /// Windows Registry key. /// /// \returns true if a valid value for DumpFolder exists, false otherwise. @@ -649,7 +649,7 @@ static bool GetDumpFolder(HKEY Key, return true; } -/// \brief Populate ResultType with a valid MINIDUMP_TYPE based on the value of +/// Populate ResultType with a valid MINIDUMP_TYPE based on the value of /// "DumpType" for a given Windows Registry key. /// /// According to @@ -696,7 +696,7 @@ static bool GetDumpType(HKEY Key, MINIDUMP_TYPE &ResultType) { return true; } -/// \brief Write a Windows dump file containing process information that can be +/// Write a Windows dump file containing process information that can be /// used for post-mortem debugging. /// /// \returns zero error code if a mini dump created, actual error code diff --git a/llvm/lib/TableGen/Main.cpp b/llvm/lib/TableGen/Main.cpp index d2303009d61..bcd7dce95a8 100644 --- a/llvm/lib/TableGen/Main.cpp +++ b/llvm/lib/TableGen/Main.cpp @@ -52,7 +52,7 @@ static int reportError(const char *ProgName, Twine Msg) { return 1; } -/// \brief Create a dependency file for `-d` option. +/// Create a dependency file for `-d` option. /// /// This functionality is really only for the benefit of the build system. /// It is similar to GCC's `-M*` family of options. diff --git a/llvm/lib/TableGen/TGParser.cpp b/llvm/lib/TableGen/TGParser.cpp index 80006869315..7a0197e77a5 100644 --- a/llvm/lib/TableGen/TGParser.cpp +++ b/llvm/lib/TableGen/TGParser.cpp @@ -2387,7 +2387,7 @@ bool TGParser::ParseBody(Record *CurRec) { return false; } -/// \brief Apply the current let bindings to \a CurRec. +/// Apply the current let bindings to \a CurRec. 
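The two Triple constructors covered here differ only in how the string is assembled before parsing, and both finish by defaulting the object format. A usage sketch:

  #include "llvm/ADT/Triple.h"

  void tripleExample() {
    llvm::Triple Full("aarch64-unknown-linux-gnu");         // parse one string
    llvm::Triple Joined("aarch64", "unknown", "linux-gnu"); // join, then parse
    bool SameArch = Full.getArch() == Joined.getArch();     // both aarch64
    (void)SameArch;
  }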
/// \returns true on error, false otherwise. bool TGParser::ApplyLetStack(Record *CurRec) { for (SmallVectorImpl<LetRecord> &LetInfo : LetStack) diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp index 994b8436f94..7788fa5d3cc 100644 --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -71,7 +71,7 @@ public: StringRef getPassName() const override { return "AArch64 Assembly Printer"; } - /// \brief Wrapper for MCInstLowering.lowerOperand() for the + /// Wrapper for MCInstLowering.lowerOperand() for the /// tblgen'erated pseudo lowering. bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const { return MCInstLowering.lowerOperand(MO, MCOp); @@ -88,7 +88,7 @@ public: void EmitSled(const MachineInstr &MI, SledKind Kind); - /// \brief tblgen'erated driver function for lowering simple MI->MC + /// tblgen'erated driver function for lowering simple MI->MC /// pseudo instructions. bool emitPseudoExpansionLowering(MCStreamer &OutStreamer, const MachineInstr *MI); @@ -131,7 +131,7 @@ private: AArch64FunctionInfo *AArch64FI = nullptr; - /// \brief Emit the LOHs contained in AArch64FI. + /// Emit the LOHs contained in AArch64FI. void EmitLOHs(); /// Emit instruction to set float register to zero. diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 90039adcb20..7992c8793d5 100644 --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -83,7 +83,7 @@ char AArch64ExpandPseudo::ID = 0; INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo", AARCH64_EXPAND_PSEUDO_NAME, false, false) -/// \brief Transfer implicit operands on the pseudo instruction to the +/// Transfer implicit operands on the pseudo instruction to the /// instructions created from the expansion. static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI) { @@ -99,7 +99,7 @@ static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI, } } -/// \brief Helper function which extracts the specified 16-bit chunk from a +/// Helper function which extracts the specified 16-bit chunk from a /// 64-bit value. static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) { assert(ChunkIdx < 4 && "Out of range chunk index specified!"); @@ -107,7 +107,7 @@ static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) { return (Imm >> (ChunkIdx * 16)) & 0xFFFF; } -/// \brief Helper function which replicates a 16-bit chunk within a 64-bit +/// Helper function which replicates a 16-bit chunk within a 64-bit /// value. Indices correspond to element numbers in a v4i16. static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) { assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!"); @@ -121,7 +121,7 @@ static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) { return Imm | Chunk; } -/// \brief Helper function which tries to materialize a 64-bit value with an +/// Helper function which tries to materialize a 64-bit value with an /// ORR + MOVK instruction sequence. 
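The chunk helpers above treat a 64-bit immediate as four 16-bit pieces (hence the v4i16 wording); the fallback materialization is then one MOVZ plus a MOVK per remaining non-zero chunk. A self-contained sketch of that decomposition; the real expandMOVImm tries the ORR-based tricks first, so this is only the fallback shape.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
    assert(ChunkIdx < 4 && "out of range chunk index");
    return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
  }

  // E.g. 0x12340000ABCD prints: movz x0, #0xabcd / movk x0, #0x1234, lsl #32.
  void printMovSequence(uint64_t Imm) {
    if (Imm == 0) {
      std::printf("movz x0, #0\n");
      return;
    }
    bool First = true;
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      uint64_t Chunk = getChunk(Imm, Idx);
      if (Chunk == 0)
        continue;                 // MOVZ already zeroed this chunk
      if (Idx == 0)
        std::printf("%s x0, #0x%llx\n", First ? "movz" : "movk",
                    (unsigned long long)Chunk);
      else
        std::printf("%s x0, #0x%llx, lsl #%u\n", First ? "movz" : "movk",
                    (unsigned long long)Chunk, Idx * 16);
      First = false;
    }
  }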
static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI, MachineBasicBlock &MBB, @@ -158,7 +158,7 @@ static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI, return false; } -/// \brief Check whether the given 16-bit chunk replicated to full 64-bit width +/// Check whether the given 16-bit chunk replicated to full 64-bit width /// can be materialized with an ORR instruction. static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) { Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk; @@ -166,7 +166,7 @@ static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) { return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding); } -/// \brief Check for identical 16-bit chunks within the constant and if so +/// Check for identical 16-bit chunks within the constant and if so /// materialize them with a single ORR instruction. The remaining one or two /// 16-bit chunks will be materialized with MOVK instructions. /// @@ -260,7 +260,7 @@ static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI, return false; } -/// \brief Check whether this chunk matches the pattern '1...0...'. This pattern +/// Check whether this chunk matches the pattern '1...0...'. This pattern /// starts a contiguous sequence of ones if we look at the bits from the LSB /// towards the MSB. static bool isStartChunk(uint64_t Chunk) { @@ -270,7 +270,7 @@ static bool isStartChunk(uint64_t Chunk) { return isMask_64(~Chunk); } -/// \brief Check whether this chunk matches the pattern '0...1...' This pattern +/// Check whether this chunk matches the pattern '0...1...' This pattern /// ends a contiguous sequence of ones if we look at the bits from the LSB /// towards the MSB. static bool isEndChunk(uint64_t Chunk) { @@ -280,7 +280,7 @@ static bool isEndChunk(uint64_t Chunk) { return isMask_64(Chunk); } -/// \brief Clear or set all bits in the chunk at the given index. +/// Clear or set all bits in the chunk at the given index. static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) { const uint64_t Mask = 0xFFFF; @@ -294,7 +294,7 @@ static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) { return Imm; } -/// \brief Check whether the constant contains a sequence of contiguous ones, +/// Check whether the constant contains a sequence of contiguous ones, /// which might be interrupted by one or two chunks. If so, materialize the /// sequence of contiguous ones with an ORR instruction. /// Materialize the chunks which are either interrupting the sequence or outside @@ -423,7 +423,7 @@ static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI, return true; } -/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more +/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more /// real move-immediate instructions to synthesize the immediate. bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, @@ -778,7 +778,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128( return true; } -/// \brief If MBBI references a pseudo instruction that should be expanded here, +/// If MBBI references a pseudo instruction that should be expanded here, /// do the expansion and return true. Otherwise return false. 
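The '1...0...' and '0...1...' chunk tests above each reduce to a single isMask_64 query, which recognizes values of the form 0...01...1. For a 16-bit chunk the complement test only works once the chunk is widened so bit 15 replicates upward; the sign-extension step below is an assumption about how the surrounding expansion code feeds these predicates, stated here so the sketch is self-contained.

  #include <cstdint>

  bool isMask64(uint64_t V) { return V && ((V + 1) & V) == 0; } // 0...01...1

  // Assumed widening step: replicate bit 15 of the 16-bit chunk upward.
  uint64_t signExtendChunk16(uint64_t Chunk) {
    return (uint64_t)((int64_t)(Chunk << 48) >> 48);
  }

  bool isStartChunk(uint64_t Chunk) { // '1...0...': starts a run of ones
    if (Chunk == 0 || Chunk == ~0ULL)
      return false;                   // all-zero/all-one handled elsewhere
    return isMask64(~Chunk);
  }

  bool isEndChunk(uint64_t Chunk) {   // '0...1...': ends a run of ones
    if (Chunk == 0 || Chunk == ~0ULL)
      return false;
    return isMask64(Chunk);
  }

  // isStartChunk(signExtendChunk16(0xFF00)) is true: ~0xFF...F00 == 0xFF.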
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, @@ -990,7 +990,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, return false; } -/// \brief Iterate over the instructions in basic block MBB and expand any +/// Iterate over the instructions in basic block MBB and expand any /// pseudo instructions. Return true if anything was modified. bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) { bool Modified = false; @@ -1014,7 +1014,7 @@ bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) { return Modified; } -/// \brief Returns an instance of the pseudo instruction expansion pass. +/// Returns an instance of the pseudo instruction expansion pass. FunctionPass *llvm::createAArch64ExpandPseudoPass() { return new AArch64ExpandPseudo(); } diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp index b7738c3e33a..43a3ae77a17 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -307,7 +307,7 @@ public: #include "AArch64GenCallingConv.inc" -/// \brief Check if the sign-/zero-extend will be a noop. +/// Check if the sign-/zero-extend will be a noop. static bool isIntExtFree(const Instruction *I) { assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) && "Unexpected integer extend instruction."); @@ -326,7 +326,7 @@ static bool isIntExtFree(const Instruction *I) { return false; } -/// \brief Determine the implicit scale factor that is applied by a memory +/// Determine the implicit scale factor that is applied by a memory /// operation for a given value type. static unsigned getImplicitScaleFactor(MVT VT) { switch (VT.SimpleTy) { @@ -535,7 +535,7 @@ unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) { return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true); } -/// \brief Check if the multiply is by a power-of-2 constant. +/// Check if the multiply is by a power-of-2 constant. static bool isMulPowOf2(const Value *I) { if (const auto *MI = dyn_cast<MulOperator>(I)) { if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0))) @@ -964,7 +964,7 @@ bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) { return TLI.isTypeLegal(VT); } -/// \brief Determine if the value type is supported by FastISel. +/// Determine if the value type is supported by FastISel. /// /// FastISel for AArch64 can handle more value types than are legal. This adds /// simple value type such as i1, i8, and i16. @@ -1524,7 +1524,7 @@ unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS, IsZExt); } -/// \brief This method is a wrapper to simplify add emission. +/// This method is a wrapper to simplify add emission. /// /// First try to emit an add with an immediate operand using emitAddSub_ri. If /// that fails, then try to materialize the immediate into a register and use @@ -2254,7 +2254,7 @@ static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) { } } -/// \brief Try to emit a combined compare-and-branch instruction. +/// Try to emit a combined compare-and-branch instruction. 
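isMulPowOf2 above matters because FastISel can select a multiply by 2^k as a plain left shift. The strength reduction itself, written out self-contained:

  #include <cstdint>

  bool isPowOf2(uint64_t C) { return C != 0 && (C & (C - 1)) == 0; }

  uint64_t mulByConstant(uint64_t X, uint64_t C) {
    if (isPowOf2(C)) {
      unsigned K = 0;
      while ((1ULL << K) < C)
        ++K;              // K = log2(C)
      return X << K;      // one lsl instead of a mul
    }
    return X * C;         // general case still needs the multiply
  }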
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) { assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction"); const CmpInst *CI = cast<CmpInst>(BI->getCondition()); @@ -2607,7 +2607,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) { return true; } -/// \brief Optimize selects of i1 if one of the operands has a 'true' or 'false' +/// Optimize selects of i1 if one of the operands has a 'true' or 'false' /// value. bool AArch64FastISel::optimizeSelect(const SelectInst *SI) { if (!SI->getType()->isIntegerTy(1)) @@ -3322,7 +3322,7 @@ bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src, return true; } -/// \brief Check if it is possible to fold the condition from the XALU intrinsic +/// Check if it is possible to fold the condition from the XALU intrinsic /// into the user. The condition code will only be updated on success. bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC, const Instruction *I, diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h index 55a256867fa..104e52b5f1f 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -53,7 +53,7 @@ public: std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const override; - /// \brief Can this function use the red zone for local allocations. + /// Can this function use the red zone for local allocations. bool canUseRedZone(const MachineFunction &MF) const; bool hasFP(const MachineFunction &MF) const override; diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index d44eee051aa..3124204fc59 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -336,7 +336,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) { } } -/// \brief Determine whether it is worth it to fold SHL into the addressing +/// Determine whether it is worth it to fold SHL into the addressing /// mode. static bool isWorthFoldingSHL(SDValue V) { assert(V.getOpcode() == ISD::SHL && "invalid opcode"); @@ -360,7 +360,7 @@ static bool isWorthFoldingSHL(SDValue V) { return true; } -/// \brief Determine whether it is worth to fold V into an extended register. +/// Determine whether it is worth to fold V into an extended register. bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const { // Trivial if we are optimizing for code size or if there is only // one use of the value. @@ -826,7 +826,7 @@ static SDValue Widen(SelectionDAG *CurDAG, SDValue N) { return SDValue(Node, 0); } -/// \brief Check if the given SHL node (\p N), can be used to form an +/// Check if the given SHL node (\p N), can be used to form an /// extended register for an addressing mode. 
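What "folding SHL into the addressing mode" buys: an explicit shift feeding an address can disappear into the scaled-register form of the load, roughly add x3, x1, x2, lsl #3 followed by ldr x0, [x3] becoming ldr x0, [x1, x2, lsl #3]. That form only accepts a shift amount of zero or log2 of the access size, so a simplified legality check looks like the sketch below; the profitability heuristics about extra uses (isWorthFolding) are a separate question.

  bool shiftFitsAddrMode(unsigned ShiftAmt, unsigned AccessBytes) {
    // Only power-of-two access sizes (1, 2, 4, 8, 16 bytes) are considered.
    if (AccessBytes == 0 || (AccessBytes & (AccessBytes - 1)) != 0)
      return false;
    unsigned Log2Size = 0;
    while ((1u << Log2Size) < AccessBytes)
      ++Log2Size;
    return ShiftAmt == 0 || ShiftAmt == Log2Size;
  }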
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend, SDValue &Offset, diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index e12aeb46765..27dd4249770 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3778,7 +3778,7 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op, return Result; } -/// \brief Convert a TLS address reference into the correct sequence of loads +/// Convert a TLS address reference into the correct sequence of loads /// and calls to compute the variable's address (for Darwin, currently) and /// return an SDValue containing the final node. @@ -7863,7 +7863,7 @@ bool AArch64TargetLowering::isLegalInterleavedAccessType( return VecSize == 64 || VecSize % 128 == 0; } -/// \brief Lower an interleaved load into a ldN intrinsic. +/// Lower an interleaved load into a ldN intrinsic. /// /// E.g. Lower an interleaved load (Factor = 2): /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr @@ -7975,7 +7975,7 @@ bool AArch64TargetLowering::lowerInterleavedLoad( return true; } -/// \brief Lower an interleaved store into a stN intrinsic. +/// Lower an interleaved store into a stN intrinsic. /// /// E.g. Lower an interleaved store (Factor = 3): /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, @@ -9159,26 +9159,26 @@ static bool isEssentiallyExtractSubvector(SDValue N) { N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR; } -/// \brief Helper structure to keep track of ISD::SET_CC operands. +/// Helper structure to keep track of ISD::SET_CC operands. struct GenericSetCCInfo { const SDValue *Opnd0; const SDValue *Opnd1; ISD::CondCode CC; }; -/// \brief Helper structure to keep track of a SET_CC lowered into AArch64 code. +/// Helper structure to keep track of a SET_CC lowered into AArch64 code. struct AArch64SetCCInfo { const SDValue *Cmp; AArch64CC::CondCode CC; }; -/// \brief Helper structure to keep track of SetCC information. +/// Helper structure to keep track of SetCC information. union SetCCInfo { GenericSetCCInfo Generic; AArch64SetCCInfo AArch64; }; -/// \brief Helper structure to be able to read SetCC information. If set to +/// Helper structure to be able to read SetCC information. If set to /// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a /// GenericSetCCInfo. struct SetCCInfoAndKind { @@ -9186,7 +9186,7 @@ struct SetCCInfoAndKind { bool IsAArch64; }; -/// \brief Check whether or not \p Op is a SET_CC operation, either a generic or +/// Check whether or not \p Op is a SET_CC operation, either a generic or /// an /// AArch64 lowered one. /// \p SetCCInfo is filled accordingly. diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 5754ed97380..e48fa95224c 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -345,7 +345,7 @@ public: unsigned AS, Instruction *I = nullptr) const override; - /// \brief Return the cost of the scaling factor used in the addressing + /// Return the cost of the scaling factor used in the addressing /// mode represented by AM for this target, for a load/store /// of the specified type. /// If the AM is supported, the return value must be >= 0. @@ -360,10 +360,10 @@ public: const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override; - /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask. 
+ /// Returns false if N is a bit extraction pattern of (X >> C) & Mask. bool isDesirableToCommuteWithShift(const SDNode *N) const override; - /// \brief Returns true if it is beneficial to convert a load of a constant + /// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 53946ea873c..452bb13bf88 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1210,7 +1210,7 @@ static bool UpdateOperandRegClass(MachineInstr &Instr) { return true; } -/// \brief Return the opcode that does not set flags when possible - otherwise +/// Return the opcode that does not set flags when possible - otherwise /// return the original opcode. The caller is responsible to do the actual /// substitution and legality checking. static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) { @@ -4643,7 +4643,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence( DelInstrs.push_back(&Root); } -/// \brief Replace csincr-branch sequence by simple conditional branch +/// Replace csincr-branch sequence by simple conditional branch /// /// Examples: /// 1. \code diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h index c517f970adc..33abba1f29d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -70,13 +70,13 @@ public: /// value is non-zero. static bool hasExtendedReg(const MachineInstr &MI); - /// \brief Does this instruction set its full destination register to zero? + /// Does this instruction set its full destination register to zero? static bool isGPRZero(const MachineInstr &MI); - /// \brief Does this instruction rename a GPR without modifying bits? + /// Does this instruction rename a GPR without modifying bits? static bool isGPRCopy(const MachineInstr &MI); - /// \brief Does this instruction rename an FPR without modifying bits? + /// Does this instruction rename an FPR without modifying bits? static bool isFPRCopy(const MachineInstr &MI); /// Return true if this is load/store scales or extends its register offset. @@ -100,7 +100,7 @@ public: /// Return true if pairing the given load or store may be paired with another. static bool isPairableLdStInst(const MachineInstr &MI); - /// \brief Return the opcode that set flags when possible. The caller is + /// Return the opcode that set flags when possible. The caller is /// responsible for ensuring the opc has a flag setting equivalent. static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit); @@ -121,7 +121,7 @@ public: /// Return the immediate offset of the base register in a load/store \p LdSt. MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const; - /// \brief Returns true if opcode \p Opc is a memory operation. If it is, set + /// Returns true if opcode \p Opc is a memory operation. If it is, set /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly. /// /// For unscaled instructions, \p Scale is set to 1. @@ -269,7 +269,7 @@ public: bool isFalkorShiftExtFast(const MachineInstr &MI) const; private: - /// \brief Sets the offsets on outlined instructions in \p MBB which use SP + /// Sets the offsets on outlined instructions in \p MBB which use SP /// so that they will be valid post-outlining. 
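The size rule in the isLegalInterleavedAccessType hunk above is compact enough to state as one expression: the total vector size must be exactly 64 bits or a multiple of 128. Written out with its inputs:

  bool legalInterleavedAccess(unsigned NumElts, unsigned EltSizeInBits) {
    unsigned VecSize = NumElts * EltSizeInBits;
    return VecSize == 64 || VecSize % 128 == 0;
  }
  // <2 x i32> -> 64 bits -> legal; <8 x i32> -> 256 bits -> legal;
  // <3 x i32> -> 96 bits -> not legal for the ldN/stN lowering.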
/// /// \param MBB A \p MachineBasicBlock in an outlined function. @@ -299,14 +299,14 @@ bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, int &Offset, const AArch64InstrInfo *TII); -/// \brief Use to report the frame offset status in isAArch64FrameOffsetLegal. +/// Use to report the frame offset status in isAArch64FrameOffsetLegal. enum AArch64FrameOffsetStatus { AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply. AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal. AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly. }; -/// \brief Check if the @p Offset is a valid frame offset for @p MI. +/// Check if the @p Offset is a valid frame offset for @p MI. /// The returned value reports the validity of the frame offset for @p MI. /// It uses the values defined by AArch64FrameOffsetStatus for that. /// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h index e7feb021f52..798340f8fed 100644 --- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h +++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h @@ -49,33 +49,33 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// determineCalleeSaves(). bool HasStackFrame = false; - /// \brief Amount of stack frame size, not including callee-saved registers. + /// Amount of stack frame size, not including callee-saved registers. unsigned LocalStackSize; - /// \brief Amount of stack frame size used for saving callee-saved registers. + /// Amount of stack frame size used for saving callee-saved registers. unsigned CalleeSavedStackSize; - /// \brief Number of TLS accesses using the special (combinable) + /// Number of TLS accesses using the special (combinable) /// _TLS_MODULE_BASE_ symbol. unsigned NumLocalDynamicTLSAccesses = 0; - /// \brief FrameIndex for start of varargs area for arguments passed on the + /// FrameIndex for start of varargs area for arguments passed on the /// stack. int VarArgsStackIndex = 0; - /// \brief FrameIndex for start of varargs area for arguments passed in + /// FrameIndex for start of varargs area for arguments passed in /// general purpose registers. int VarArgsGPRIndex = 0; - /// \brief Size of the varargs area for arguments passed in general purpose + /// Size of the varargs area for arguments passed in general purpose /// registers. unsigned VarArgsGPRSize = 0; - /// \brief FrameIndex for start of varargs area for arguments passed in + /// FrameIndex for start of varargs area for arguments passed in /// floating-point registers. int VarArgsFPRIndex = 0; - /// \brief Size of the varargs area for arguments passed in floating-point + /// Size of the varargs area for arguments passed in floating-point /// registers. unsigned VarArgsFPRSize = 0; @@ -91,7 +91,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// other stack allocations. bool CalleeSaveStackHasFreeSpace = false; - /// \brief Has a value when it is known whether or not the function uses a + /// Has a value when it is known whether or not the function uses a /// redzone, and no value otherwise. /// Initialized during frame lowering, unless the function has the noredzone /// attribute, in which case it is set to false at construction. 
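AArch64FrameOffsetStatus above is a bitmask, which is why CannotUpdate is 0x0 while the other two states can combine. A sketch of how a caller is meant to decode it; the comments describe the intent, and the actual rewrite entry points live in the real API:

  enum AArch64FrameOffsetStatus {
    AArch64FrameOffsetCannotUpdate = 0x0, // offset cannot apply
    AArch64FrameOffsetIsLegal = 0x1,      // offset is legal as-is
    AArch64FrameOffsetCanUpdate = 0x2     // offset can apply, at least partly
  };

  bool needsScratchComputation(unsigned Status) {
    if (Status == AArch64FrameOffsetCannotUpdate)
      return true;  // MI cannot be rewritten; materialize the address separately
    // Legal as-is, or foldable at least in part (any remainder is reported
    // separately by isAArch64FrameOffsetLegal).
    return false;
  }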
diff --git a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp index b46509d1d65..bc0168e783b 100644 --- a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp +++ b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp @@ -255,7 +255,7 @@ static bool isCCSelectPair(const MachineInstr *FirstMI, return false; } -/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused +/// Check if the instr pair, FirstMI and SecondMI, should be fused /// together. Given SecondMI, when FirstMI is unspecified, then check if /// SecondMI may be part of a fused pair at all. static bool shouldScheduleAdjacent(const TargetInstrInfo &TII, diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h index d58f50dd8d7..356e084e856 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -243,7 +243,7 @@ public: bool hasFuseCCSelect() const { return HasFuseCCSelect; } bool hasFuseLiterals() const { return HasFuseLiterals; } - /// \brief Return true if the CPU supports any kind of instruction fusion. + /// Return true if the CPU supports any kind of instruction fusion. bool hasFusion() const { return hasArithmeticBccFusion() || hasArithmeticCbzFusion() || hasFuseAES() || hasFuseCCSelect() || hasFuseLiterals(); diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 337db546658..316ea048436 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -38,7 +38,7 @@ bool AArch64TTIImpl::areInlineCompatible(const Function *Caller, return (CallerBits & CalleeBits) == CalleeBits; } -/// \brief Calculate the cost of materializing a 64-bit value. This helper +/// Calculate the cost of materializing a 64-bit value. This helper /// method might only calculate a fraction of a larger immediate. Therefore it /// is valid to return a cost of ZERO. int AArch64TTIImpl::getIntImmCost(int64_t Val) { @@ -54,7 +54,7 @@ int AArch64TTIImpl::getIntImmCost(int64_t Val) { return (64 - LZ + 15) / 16; } -/// \brief Calculate the cost of materializing the given constant. +/// Calculate the cost of materializing the given constant. int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) { assert(Ty->isIntegerTy()); diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp index 4d1d3fd5735..7eed296a1b1 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp @@ -97,7 +97,7 @@ public: } // end anonymous namespace -/// \brief The number of bytes the fixup may change. +/// The number of bytes the fixup may change. static unsigned getFixupKindNumBytes(unsigned Kind) { switch (Kind) { default: @@ -381,20 +381,20 @@ namespace { namespace CU { -/// \brief Compact unwind encoding values. +/// Compact unwind encoding values. enum CompactUnwindEncodings { - /// \brief A "frameless" leaf function, where no non-volatile registers are + /// A "frameless" leaf function, where no non-volatile registers are /// saved. The return remains in LR throughout the function. UNWIND_ARM64_MODE_FRAMELESS = 0x02000000, - /// \brief No compact unwind encoding available. Instead the low 23-bits of + /// No compact unwind encoding available. 
Instead the low 23-bits of /// the compact unwind encoding is the offset of the DWARF FDE in the /// __eh_frame section. This mode is never used in object files. It is only /// generated by the linker in final linked images, which have only DWARF info /// for a function. UNWIND_ARM64_MODE_DWARF = 0x03000000, - /// \brief This is a standard arm64 prologue where FP/LR are immediately + /// This is a standard arm64 prologue where FP/LR are immediately /// pushed on the stack, then SP is copied to FP. If there are any /// non-volatile register saved, they are copied into the stack fame in pairs /// in a contiguous ranger right below the saved FP/LR pair. Any subset of the @@ -402,7 +402,7 @@ enum CompactUnwindEncodings { /// in register number order. UNWIND_ARM64_MODE_FRAME = 0x04000000, - /// \brief Frame register pair encodings. + /// Frame register pair encodings. UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001, UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002, UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004, @@ -420,7 +420,7 @@ enum CompactUnwindEncodings { class DarwinAArch64AsmBackend : public AArch64AsmBackend { const MCRegisterInfo &MRI; - /// \brief Encode compact unwind stack adjustment for frameless functions. + /// Encode compact unwind stack adjustment for frameless functions. /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h. /// The stack size always needs to be 16 byte aligned. uint32_t encodeStackAdjustment(uint32_t StackSize) const { @@ -438,7 +438,7 @@ public: MachO::CPU_SUBTYPE_ARM64_ALL); } - /// \brief Generate the compact unwind encoding from the CFI directives. + /// Generate the compact unwind encoding from the CFI directives. uint32_t generateCompactUnwindEncoding( ArrayRef<MCCFIInstruction> Instrs) const override { if (Instrs.empty()) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h index 51d48a0c732..3c7e8dd764c 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU Assembly printer class. +/// AMDGPU Assembly printer class. // //===----------------------------------------------------------------------===// @@ -135,7 +135,7 @@ private: const MachineFunction &MF, const SIProgramInfo &ProgramInfo) const; - /// \brief Emit register usage information so that the GPU driver + /// Emit register usage information so that the GPU driver /// can correctly setup the GPU state. void EmitProgramInfoR600(const MachineFunction &MF); void EmitProgramInfoSI(const MachineFunction &MF, @@ -160,16 +160,16 @@ public: bool doFinalization(Module &M) override; bool runOnMachineFunction(MachineFunction &MF) override; - /// \brief Wrapper for MCInstLowering.lowerOperand() for the tblgen'erated + /// Wrapper for MCInstLowering.lowerOperand() for the tblgen'erated /// pseudo lowering. bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const; - /// \brief Lower the specified LLVM Constant to an MCExpr. + /// Lower the specified LLVM Constant to an MCExpr. /// The AsmPrinter::lowerConstantof does not know how to lower /// addrspacecast, therefore they should be lowered by this function. const MCExpr *lowerConstant(const Constant *CV) override; - /// \brief tblgen'erated driver function for lowering simple MI->MC pseudo + /// tblgen'erated driver function for lowering simple MI->MC pseudo /// instructions. 
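Two of the arithmetic helpers in the hunks above, written out self-contained: the immediate cost from the AArch64TargetTransformInfo hunk counts one instruction per 16-bit chunk up to the most significant set bit, and the compact-unwind helper stores the 16-byte-aligned stack size divided by 16. The leading-zero count is a portable stand-in, and the real cost function also special-cases values reachable as logical immediates.

  #include <cassert>
  #include <cstdint>

  unsigned countLeadingZeros64(uint64_t V) { // portable stand-in
    unsigned N = 0;
    for (uint64_t Bit = 1ULL << 63; Bit != 0 && (V & Bit) == 0; Bit >>= 1)
      ++N;
    return N; // 64 when V == 0
  }

  // (64 - LZ + 15) / 16 = chunks up to the top set bit:
  // 0xFFFF -> cost 1, 0x100000000 -> cost 3.
  unsigned int64MaterializationCost(uint64_t Val) {
    if (Val == 0)
      return 1;
    return (64 - countLeadingZeros64(Val) + 15) / 16;
  }

  // Frameless stack sizes are 16-byte aligned, so only StackSize / 16 goes
  // into the compact-unwind size field.
  uint32_t encodeStackAdjustment(uint32_t StackSize) {
    assert(StackSize % 16 == 0 && "stack size must be 16-byte aligned");
    return StackSize / 16;
  }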
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer, const MachineInstr *MI); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index 0c30f051932..b1ae1e0cb08 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -56,7 +56,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, bool HasUnsafeFPMath = false; AMDGPUAS AMDGPUASI; - /// \brief Copies exact/nsw/nuw flags (if any) from binary operation \p I to + /// Copies exact/nsw/nuw flags (if any) from binary operation \p I to /// binary operation \p V. /// /// \returns Binary operation \p V. @@ -80,7 +80,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, /// false otherwise. bool needsPromotionToI32(const Type *T) const; - /// \brief Promotes uniform binary operation \p I to equivalent 32 bit binary + /// Promotes uniform binary operation \p I to equivalent 32 bit binary /// operation. /// /// \details \p I's base element bit width must be greater than 1 and less @@ -93,7 +93,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, /// false otherwise. bool promoteUniformOpToI32(BinaryOperator &I) const; - /// \brief Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation. + /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation. /// /// \details \p I's base element bit width must be greater than 1 and less /// than or equal 16. Promotion is done by sign or zero extending operands to @@ -102,7 +102,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, /// \returns True. bool promoteUniformOpToI32(ICmpInst &I) const; - /// \brief Promotes uniform 'select' operation \p I to 32 bit 'select' + /// Promotes uniform 'select' operation \p I to 32 bit 'select' /// operation. /// /// \details \p I's base element bit width must be greater than 1 and less @@ -113,7 +113,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, /// \returns True. bool promoteUniformOpToI32(SelectInst &I) const; - /// \brief Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse' + /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse' /// intrinsic. /// /// \details \p I's base element bit width must be greater than 1 and less @@ -125,7 +125,7 @@ class AMDGPUCodeGenPrepare : public FunctionPass, /// /// \returns True. bool promoteUniformBitreverseToI32(IntrinsicInst &I) const; - /// \brief Widen a scalar load. + /// Widen a scalar load. /// /// \details \p Widen scalar load for uniform, small type loads from constant // memory / to a full 32-bits and then truncate the input to allow a scalar diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.h index 91fe921bfee..ee836bf8a63 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUFrameLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface to describe a layout of a stack frame on an AMDGPU target. +/// Interface to describe a layout of a stack frame on an AMDGPU target. // //===----------------------------------------------------------------------===// @@ -19,7 +19,7 @@ namespace llvm { -/// \brief Information about the stack frame layout on the AMDGPU targets. +/// Information about the stack frame layout on the AMDGPU targets. 
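Every promoteUniform*ToI32 routine above follows one recipe: sign- or zero-extend the narrow operands to i32, perform the operation at 32 bits, and truncate back. A simplified IRBuilder sketch of that recipe; the real pass additionally checks uniformity, restricts the source width to more than 1 and at most 16 bits, and copies exact/nsw/nuw flags.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/InstrTypes.h"

  llvm::Value *promoteBinOpToI32(llvm::BinaryOperator &I, bool Signed,
                                 llvm::IRBuilder<> &B) {
    llvm::Type *I32Ty = B.getInt32Ty();
    llvm::Value *LHS = Signed ? B.CreateSExt(I.getOperand(0), I32Ty)
                              : B.CreateZExt(I.getOperand(0), I32Ty);
    llvm::Value *RHS = Signed ? B.CreateSExt(I.getOperand(1), I32Ty)
                              : B.CreateZExt(I.getOperand(1), I32Ty);
    llvm::Value *Wide = B.CreateBinOp(I.getOpcode(), LHS, RHS);
    return B.CreateTrunc(Wide, I.getType()); // back to the original width
  }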
/// /// It holds the direction of the stack growth, the known stack alignment on /// entry to each function, and the offset to the locals area. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 47321a76e5c..16fb438de01 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -8,7 +8,7 @@ //==-----------------------------------------------------------------------===// // /// \file -/// \brief Defines an instruction selector for the AMDGPU target. +/// Defines an instruction selector for the AMDGPU target. // //===----------------------------------------------------------------------===// @@ -244,14 +244,14 @@ INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo) INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "isel", "AMDGPU DAG->DAG Pattern Instruction Selection", false, false) -/// \brief This pass converts a legalized DAG into a AMDGPU-specific +/// This pass converts a legalized DAG into a AMDGPU-specific // DAG, ready for instruction scheduling. FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM, CodeGenOpt::Level OptLevel) { return new AMDGPUDAGToDAGISel(TM, OptLevel); } -/// \brief This pass converts a legalized DAG into a R600-specific +/// This pass converts a legalized DAG into a R600-specific // DAG, ready for instruction scheduling. FunctionPass *llvm::createR600ISelDag(TargetMachine *TM, CodeGenOpt::Level OptLevel) { @@ -287,7 +287,7 @@ bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N) const { return false; } -/// \brief Determine the register class for \p OpNo +/// Determine the register class for \p OpNo /// \returns The register class of the virtual register that will be used for /// the given operand number \OpNo or NULL if the register class cannot be /// determined. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index c60e25390c1..fffcb2fb566 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This is the parent TargetLowering class for hardware code gen +/// This is the parent TargetLowering class for hardware code gen /// targets. // //===----------------------------------------------------------------------===// @@ -1321,7 +1321,7 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args); } -/// \brief Generate Min/Max node +/// Generate Min/Max node SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h index 94b5332ff6a..857a69a1951 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface definition of the TargetLowering class that is common +/// Interface definition of the TargetLowering class that is common /// to all AMD GPUs. 
// //===----------------------------------------------------------------------===// @@ -44,7 +44,7 @@ protected: SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; - /// \brief Split a vector store into multiple scalar stores. + /// Split a vector store into multiple scalar stores. /// \returns The resulting chain. SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const; @@ -108,10 +108,10 @@ protected: SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const; SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const; - /// \brief Split a vector load into 2 loads of half the vector. + /// Split a vector load into 2 loads of half the vector. SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const; - /// \brief Split a vector store into 2 stores of half the vector. + /// Split a vector store into 2 stores of half the vector. SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; @@ -227,7 +227,7 @@ public: virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const = 0; - /// \brief Determine which of the bits specified in \p Mask are known to be + /// Determine which of the bits specified in \p Mask are known to be /// either zero or one and return them in the \p KnownZero and \p KnownOne /// bitsets. void computeKnownBitsForTargetNode(const SDValue Op, @@ -240,7 +240,7 @@ public: const SelectionDAG &DAG, unsigned Depth = 0) const override; - /// \brief Helper function that adds Reg to the LiveIn list of the DAG's + /// Helper function that adds Reg to the LiveIn list of the DAG's /// MachineFunction. /// /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise @@ -288,7 +288,7 @@ public: GRID_OFFSET, }; - /// \brief Helper function that returns the byte offset of the given + /// Helper function that returns the byte offset of the given /// type of implicit parameter. uint32_t getImplicitParameterOffset(const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp index ff9e7b50ed5..ca77795ce10 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This is AMDGPU specific replacement of the standard inliner. +/// This is an AMDGPU-specific replacement of the standard inliner. /// The main purpose is to account for the fact that calls not only expensive /// on the AMDGPU, but much more expensive if a private memory pointer is /// passed to a function as an argument. In this situation, we are unable to diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp index 32118df5382..0b173abf035 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Implementation of the TargetInstrInfo class that is common to all +/// Implementation of the TargetInstrInfo class that is common to all /// AMD GPUs.
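The SplitVectorLoad/SplitVectorStore hooks documented above turn one wide vector access into two accesses of half the vector. A standalone C++ model of the halving arithmetic only (memory stand-ins and names are invented; real code operates on SelectionDAG nodes, not pointers), assuming an even element count:

// Sketch: one store of NumElts elements becomes two stores of NumElts/2,
// with both source and destination advanced by half the element count.
#include <cstdio>
#include <cstring>

static void storeHalf(float *Dst, const float *Src, unsigned NumElts) {
  std::memcpy(Dst, Src, NumElts * sizeof(float));
}

static void splitVectorStore(float *Dst, const float *Vec, unsigned NumElts) {
  unsigned Half = NumElts / 2;             // assumes an even element count
  storeHalf(Dst, Vec, Half);               // low half at offset 0
  storeHalf(Dst + Half, Vec + Half, Half); // high half, advanced by Half
}

int main() {
  float V[4] = {0, 1, 2, 3}, M[4] = {};
  splitVectorStore(M, V, 4);
  std::printf("%g %g %g %g\n", M[0], M[1], M[2], M[3]); // prints 0 1 2 3
  return 0;
}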
// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h index 766ee3d6f1c..a1ea3ff2bf8 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Contains the definition of a TargetInstrInfo class that is common +/// Contains the definition of a TargetInstrInfo class that is common /// to all AMD GPUs. // //===----------------------------------------------------------------------===// @@ -46,7 +46,7 @@ public: int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override; - /// \brief Return a target-specific opcode if Opcode is a pseudo instruction. + /// Return a target-specific opcode if Opcode is a pseudo instruction. /// Return -1 if the target-specific opcode for the pseudo instruction does /// not exist. If Opcode is not a pseudo instruction, this is identity. int pseudoToMCOpcode(int Opcode) const; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp index 86dc9bd9ea7..84b6c5b91b7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp @@ -8,7 +8,7 @@ //==-----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU Implementation of the IntrinsicInfo class. +/// AMDGPU Implementation of the IntrinsicInfo class. // //===-----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h index 6cb8b964464..adbd7045ab9 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h @@ -8,7 +8,7 @@ //==-----------------------------------------------------------------------===// // /// \file -/// \brief Interface for the AMDGPU Implementation of the Intrinsic Info class. +/// Interface for the AMDGPU Implementation of the Intrinsic Info class. // //===-----------------------------------------------------------------------===// #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUINTRINSICINFO_H diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp index f594767c8ed..dd45ced6ecc 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This file does AMD library function optimizations. +/// This file does AMD library function optimizations. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp index 23fd8113932..9826579565f 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Code to lower AMDGPU MachineInstrs to their corresponding MCInst. +/// Code to lower AMDGPU MachineInstrs to their corresponding MCInst. 
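The pseudoToMCOpcode contract described in AMDGPUInstrInfo.h above has three cases: a pseudo maps to its real opcode, a pseudo with no hardware equivalent yields -1, and a non-pseudo opcode maps to itself. A standalone sketch of that contract with a made-up opcode table (the enum values and table are invented for the demo, not the target's real opcodes):

#include <cassert>
#include <map>

enum Opcode { REAL_MOV = 10, PSEUDO_MOV = 100, PSEUDO_ONLY = 101 };

static int pseudoToMCOpcode(int Opc) {
  static const std::map<int, int> Table = {{PSEUDO_MOV, REAL_MOV}};
  if (Opc < 100)                     // not a pseudo: identity
    return Opc;
  auto It = Table.find(Opc);         // pseudo: look up the MC opcode
  return It == Table.end() ? -1 : It->second;
}

int main() {
  assert(pseudoToMCOpcode(REAL_MOV) == REAL_MOV);   // identity
  assert(pseudoToMCOpcode(PSEUDO_MOV) == REAL_MOV); // mapped
  assert(pseudoToMCOpcode(PSEUDO_ONLY) == -1);      // no hardware opcode
  return 0;
}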
// //===----------------------------------------------------------------------===// // diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h index 57d2d85daec..ea70e636b30 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h @@ -36,7 +36,7 @@ public: bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const; - /// \brief Lower a MachineInstr to an MCInst + /// Lower a MachineInstr to an MCInst void lower(const MachineInstr *MI, MCInst &OutMI) const; }; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.cpp index 3164140abe2..7b9f673c418 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU Machine Module Info. +/// AMDGPU Machine Module Info. /// // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h index 1a728c6bd04..1219ab26fb6 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU Machine Module Info. +/// AMDGPU Machine Module Info. /// // //===----------------------------------------------------------------------===// @@ -30,14 +30,14 @@ private: // All supported memory/synchronization scopes can be found here: // http://llvm.org/docs/AMDGPUUsage.html#memory-scopes - /// \brief Agent synchronization scope ID. + /// Agent synchronization scope ID. SyncScope::ID AgentSSID; - /// \brief Workgroup synchronization scope ID. + /// Workgroup synchronization scope ID. SyncScope::ID WorkgroupSSID; - /// \brief Wavefront synchronization scope ID. + /// Wavefront synchronization scope ID. SyncScope::ID WavefrontSSID; - /// \brief In AMDGPU target synchronization scopes are inclusive, meaning a + /// In the AMDGPU target, synchronization scopes are inclusive, meaning a /// larger synchronization scope is inclusive of a smaller synchronization /// scope. /// @@ -74,7 +74,7 @@ public: return WavefrontSSID; } - /// \brief In AMDGPU target synchronization scopes are inclusive, meaning a + /// In the AMDGPU target, synchronization scopes are inclusive, meaning a /// larger synchronization scope is inclusive of a smaller synchronization /// scope. /// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp index 7263ba73d15..4c4e428008d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMacroFusion.cpp @@ -22,7 +22,7 @@ using namespace llvm; namespace { -/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused +/// Check if the instr pair, FirstMI and SecondMI, should be fused /// together. Given SecondMI, when FirstMI is unspecified, then check if /// SecondMI may be part of a fused pair at all.
static bool shouldScheduleAdjacent(const TargetInstrInfo &TII_, diff --git a/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp index 514670af2d0..265104a8643 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // \file -// \brief This post-linking pass replaces the function pointer of enqueued +// This post-linking pass replaces the function pointer of enqueued // block kernel with a global variable (runtime handle) and adds // "runtime-handle" attribute to the enqueued block kernel. // @@ -50,7 +50,7 @@ using namespace llvm; namespace { -/// \brief Lower enqueued blocks. +/// Lower enqueued blocks. class AMDGPUOpenCLEnqueuedBlockLowering : public ModulePass { public: static char ID; diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp index 242b97b1090..df9b4c2c145 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Parent TargetRegisterInfo class common to all hw codegen targets. +/// Parent TargetRegisterInfo class common to all hw codegen targets. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.h b/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.h index eb07ad69292..d8ef601371f 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief TargetRegisterInfo interface that is implemented by all hw codegen +/// TargetRegisterInfo interface that is implemented by all hw codegen /// targets. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp index b3b485e548b..40c583ba4f5 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Implements the AMDGPU specific subclass of TargetSubtarget. +/// Implements the AMDGPU specific subclass of TargetSubtarget. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h index 996ae9c2f0b..1347427da34 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h @@ -8,7 +8,7 @@ //==-----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU specific subclass of TargetSubtarget. +/// AMDGPU specific subclass of TargetSubtarget. // //===----------------------------------------------------------------------===// @@ -547,7 +547,7 @@ public: return HasDLInsts; } - /// \brief Returns the offset in bytes from the start of the input buffer + /// Returns the offset in bytes from the start of the input buffer /// of the first explicit kernel argument. 
unsigned getExplicitKernelArgOffset(const MachineFunction &MF) const { return isAmdCodeObjectV2(MF) ? 0 : 36; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index e4cd22c61ea..60e26fa72f7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief The AMDGPU target machine contains all of the hardware specific +/// The AMDGPU target machine contains all of the hardware specific /// information needed to emit code for R600 and SI GPUs. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h index 5f9b2a7fca2..56ed10e0625 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief The AMDGPU TargetMachine interface definition for hw codgen targets. +/// The AMDGPU TargetMachine interface definition for hw codegen targets. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h index ca6210f6929..dd9dc1a88fc 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the AMDGPU-specific subclass of +/// This file declares the AMDGPU-specific subclass of /// TargetLoweringObjectFile. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUnifyMetadata.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUnifyMetadata.cpp index b78568e89cf..1f6d9234c1e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUUnifyMetadata.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUUnifyMetadata.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // \file -// \brief This pass that unifies multiple OpenCL metadata due to linking. +// This pass unifies multiple OpenCL metadata due to linking. // //===----------------------------------------------------------------------===// @@ -37,7 +37,7 @@ namespace { } // end namespace kOCLMD - /// \brief Unify multiple OpenCL metadata due to linking. + /// Unify multiple OpenCL metadata due to linking. class AMDGPUUnifyMetadata : public ModulePass { public: static char ID; @@ -47,7 +47,7 @@ namespace { private: bool runOnModule(Module &M) override; - /// \brief Unify version metadata. + /// Unify version metadata. /// \return true if changes are made. /// Assume the named metadata has operands each of which is a pair of /// integer constant, e.g. @@ -82,7 +82,7 @@ namespace { return true; } - /// \brief Unify version metadata. + /// Unify version metadata. /// \return true if changes are made. /// Assume the named metadata has operands each of which is a list e.g.
/// !Name = {!n1, !n2} diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.cpp index 3b9561327ae..402324fe6a7 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU HSA Metadata Streamer. +/// AMDGPU HSA Metadata Streamer. /// // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.h index bd6515521a7..dfbb5d3ccee 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUHSAMetadataStreamer.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief AMDGPU HSA Metadata Streamer. +/// AMDGPU HSA Metadata Streamer. /// // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp index 521b3b39bba..cae7a7a6c7e 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief CodeEmitter interface for R600 and SI codegen. +/// CodeEmitter interface for R600 and SI codegen. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h index 1b062064ace..dcc10a032af 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief CodeEmitter interface for R600 and SI codegen. +/// CodeEmitter interface for R600 and SI codegen. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp index 7e581204be1..08ecb5f101a 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This file provides AMDGPU specific target descriptions. +/// This file provides AMDGPU specific target descriptions. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h index 1173dfd437c..316b119372c 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Provides AMDGPU specific target descriptions. 
+/// Provides AMDGPU specific target descriptions. // //===----------------------------------------------------------------------===// // diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp index eab90e1d344..0d471b1f5ce 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp @@ -9,7 +9,7 @@ // /// \file /// -/// \brief The R600 code emitter produces machine code that can be executed +/// The R600 code emitter produces machine code that can be executed /// directly on the GPU device. // //===----------------------------------------------------------------------===// @@ -45,7 +45,7 @@ public: R600MCCodeEmitter(const R600MCCodeEmitter &) = delete; R600MCCodeEmitter &operator=(const R600MCCodeEmitter &) = delete; - /// \brief Encode the instruction and write it to the OS. + /// Encode the instruction and write it to the OS. void encodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const override; diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp index 0d917a192fd..e3e23d73f41 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief The SI code emitter produces machine code that can be executed +/// The SI code emitter produces machine code that can be executed /// directly on the GPU device. // //===----------------------------------------------------------------------===// @@ -43,7 +43,7 @@ namespace { class SIMCCodeEmitter : public AMDGPUMCCodeEmitter { const MCRegisterInfo &MRI; - /// \brief Encode an fp or int literal + /// Encode an fp or int literal uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo, const MCSubtargetInfo &STI) const; @@ -54,7 +54,7 @@ public: SIMCCodeEmitter(const SIMCCodeEmitter &) = delete; SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete; - /// \brief Encode the instruction and write it to the OS. + /// Encode the instruction and write it to the OS. void encodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const override; @@ -64,7 +64,7 @@ public: SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const override; - /// \brief Use a fixup to encode the simm16 field for SOPP branch + /// Use a fixup to encode the simm16 field for SOPP branch /// instructions. unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups, diff --git a/llvm/lib/Target/AMDGPU/R600Defines.h b/llvm/lib/Target/AMDGPU/R600Defines.h index 534461adc59..0d33d82e8e0 100644 --- a/llvm/lib/Target/AMDGPU/R600Defines.h +++ b/llvm/lib/Target/AMDGPU/R600Defines.h @@ -23,7 +23,7 @@ #define MO_FLAG_LAST (1 << 6) #define NUM_MO_FLAGS 7 -/// \brief Helper for getting the operand index for the instruction flags +/// Helper for getting the operand index for the instruction flags /// operand. 
#define GET_FLAG_OPERAND_IDX(Flags) (((Flags) >> 7) & 0x3) @@ -52,7 +52,7 @@ namespace R600_InstFlag { #define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS) -/// \brief Defines for extracting register information from register encoding +/// Defines for extracting register information from register encoding #define HW_REG_MASK 0x1ff #define HW_CHAN_SHIFT 9 diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp index be37dbf57b1..11423a629e7 100644 --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Custom DAG lowering for R600 +/// Custom DAG lowering for R600 // //===----------------------------------------------------------------------===// @@ -2116,7 +2116,7 @@ bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx, } } -/// \brief Fold the instructions after selecting them +/// Fold the instructions after selecting them SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node, SelectionDAG &DAG) const { const R600InstrInfo *TII = getSubtarget()->getInstrInfo(); diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/llvm/lib/Target/AMDGPU/R600ISelLowering.h index 2a774693f02..cc55a414139 100644 --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.h +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief R600 DAG Lowering interface definition +/// R600 DAG Lowering interface definition // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp index 3e718f14c7f..9b8cca123b8 100644 --- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief R600 Implementation of TargetInstrInfo. +/// R600 Implementation of TargetInstrInfo. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.h b/llvm/lib/Target/AMDGPU/R600InstrInfo.h index 9999cc516f6..0af17d01c94 100644 --- a/llvm/lib/Target/AMDGPU/R600InstrInfo.h +++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface definition for R600InstrInfo +/// Interface definition for R600InstrInfo // //===----------------------------------------------------------------------===// @@ -150,7 +150,7 @@ public: /// Same but using const index set instead of MI set. bool fitsConstReadLimitations(const std::vector<unsigned>&) const; - /// \brief Vector instructions are instructions that must fill all + /// Vector instructions are instructions that must fill all /// instruction slots within an instruction group. bool isVector(const MachineInstr &MI) const; @@ -209,7 +209,7 @@ public: bool expandPostRAPseudo(MachineInstr &MI) const override; - /// \brief Reserve the registers that may be accesed using indirect addressing. + /// Reserve the registers that may be accessed using indirect addressing.
void reserveIndirectRegisters(BitVector &Reserved, const MachineFunction &MF, const R600RegisterInfo &TRI) const; @@ -236,7 +236,7 @@ public: /// read or write or -1 if indirect addressing is not used by this program. int getIndirectIndexEnd(const MachineFunction &MF) const; - /// \brief Build instruction(s) for an indirect register write. + /// Build instruction(s) for an indirect register write. /// /// \returns The instruction that performs the indirect register write MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB, @@ -244,7 +244,7 @@ public: unsigned ValueReg, unsigned Address, unsigned OffsetReg) const; - /// \brief Build instruction(s) for an indirect register read. + /// Build instruction(s) for an indirect register read. /// /// \returns The instruction that performs the indirect register read MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB, @@ -282,23 +282,23 @@ public: MachineBasicBlock::iterator I, unsigned DstReg, unsigned SrcReg) const; - /// \brief Get the index of Op in the MachineInstr. + /// Get the index of Op in the MachineInstr. /// /// \returns -1 if the Instruction does not contain the specified \p Op. int getOperandIdx(const MachineInstr &MI, unsigned Op) const; - /// \brief Get the index of \p Op for the given Opcode. + /// Get the index of \p Op for the given Opcode. /// /// \returns -1 if the Instruction does not contain the specified \p Op. int getOperandIdx(unsigned Opcode, unsigned Op) const; - /// \brief Helper function for setting instruction flag values. + /// Helper function for setting instruction flag values. void setImmOperand(MachineInstr &MI, unsigned Op, int64_t Imm) const; - ///\brief Add one of the MO_FLAG* flags to the specified \p Operand. + /// Add one of the MO_FLAG* flags to the specified \p Operand. void addFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const; - ///\brief Determine if the specified \p Flag is set on this \p Operand. + /// Determine if the specified \p Flag is set on this \p Operand. bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const; /// \param SrcIdx The register source to set the flag on (e.g src0, src1, src2) @@ -308,7 +308,7 @@ public: MachineOperand &getFlagOp(MachineInstr &MI, unsigned SrcIdx = 0, unsigned Flag = 0) const; - /// \brief Clear the specified flag on the instruction. + /// Clear the specified flag on the instruction.
void clearFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const; // Helper functions that check the opcode for status information diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp index a7e540f9d14..f8d062ef52d 100644 --- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp +++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief R600 Machine Scheduler interface +/// R600 Machine Scheduler interface // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.h b/llvm/lib/Target/AMDGPU/R600MachineScheduler.h index 9a677057047..8a9a8d3d1e2 100644 --- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.h +++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief R600 Machine Scheduler interface +/// R600 Machine Scheduler interface // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp index b622110690c..9544ee75089 100644 --- a/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/R600RegisterInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief R600 implementation of the TargetRegisterInfo class. +/// R600 implementation of the TargetRegisterInfo class. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/R600RegisterInfo.h b/llvm/lib/Target/AMDGPU/R600RegisterInfo.h index f0d9644b02f..305878522dd 100644 --- a/llvm/lib/Target/AMDGPU/R600RegisterInfo.h +++ b/llvm/lib/Target/AMDGPU/R600RegisterInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface definition for R600RegisterInfo +/// Interface definition for R600RegisterInfo // //===----------------------------------------------------------------------===// @@ -30,12 +30,12 @@ struct R600RegisterInfo final : public AMDGPURegisterInfo { const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override; unsigned getFrameRegister(const MachineFunction &MF) const override; - /// \brief get the HW encoding for a register's channel. + /// get the HW encoding for a register's channel. 
unsigned getHWRegChan(unsigned reg) const; unsigned getHWRegIndex(unsigned Reg) const; - /// \brief get the register class of the specified type to use in the + /// Get the register class of the specified type to use in the /// CFGStructurizer const TargetRegisterClass *getCFGStructurizerRegClass(MVT VT) const; diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp index 68561ac38cd..11fea5d6ee7 100644 --- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp +++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp @@ -133,7 +133,7 @@ INITIALIZE_PASS_END(SIAnnotateControlFlow, DEBUG_TYPE, char SIAnnotateControlFlow::ID = 0; -/// \brief Initialize all the types and constants used in the pass +/// Initialize all the types and constants used in the pass bool SIAnnotateControlFlow::doInitialization(Module &M) { LLVMContext &Context = M.getContext(); @@ -157,29 +157,29 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) { return false; } -/// \brief Is the branch condition uniform or did the StructurizeCFG pass +/// Is the branch condition uniform or did the StructurizeCFG pass /// consider it as such? bool SIAnnotateControlFlow::isUniform(BranchInst *T) { return DA->isUniform(T->getCondition()) || T->getMetadata("structurizecfg.uniform") != nullptr; } -/// \brief Is BB the last block saved on the stack ? +/// Is BB the last block saved on the stack? bool SIAnnotateControlFlow::isTopOfStack(BasicBlock *BB) { return !Stack.empty() && Stack.back().first == BB; } -/// \brief Pop the last saved value from the control flow stack +/// Pop the last saved value from the control flow stack Value *SIAnnotateControlFlow::popSaved() { return Stack.pop_back_val().second; } -/// \brief Push a BB and saved value to the control flow stack +/// Push a BB and saved value to the control flow stack void SIAnnotateControlFlow::push(BasicBlock *BB, Value *Saved) { Stack.push_back(std::make_pair(BB, Saved)); } -/// \brief Can the condition represented by this PHI node treated like +/// Can the condition represented by this PHI node be treated like /// an "Else" block?
bool SIAnnotateControlFlow::isElse(PHINode *Phi) { BasicBlock *IDom = DT->getNode(Phi->getParent())->getIDom()->getBlock(); @@ -198,14 +198,14 @@ bool SIAnnotateControlFlow::isElse(PHINode *Phi) { return true; } -// \brief Erase "Phi" if it is not used any more +// Erase "Phi" if it is not used any more void SIAnnotateControlFlow::eraseIfUnused(PHINode *Phi) { if (RecursivelyDeleteDeadPHINode(Phi)) { DEBUG(dbgs() << "Erased unused condition phi\n"); } } -/// \brief Open a new "If" block +/// Open a new "If" block void SIAnnotateControlFlow::openIf(BranchInst *Term) { if (isUniform(Term)) return; @@ -215,7 +215,7 @@ void SIAnnotateControlFlow::openIf(BranchInst *Term) { push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term)); } -/// \brief Close the last "If" block and open a new "Else" block +/// Close the last "If" block and open a new "Else" block void SIAnnotateControlFlow::insertElse(BranchInst *Term) { if (isUniform(Term)) { return; @@ -225,7 +225,7 @@ void SIAnnotateControlFlow::insertElse(BranchInst *Term) { push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term)); } -/// \brief Recursively handle the condition leading to a loop +/// Recursively handle the condition leading to a loop Value *SIAnnotateControlFlow::handleLoopCondition( Value *Cond, PHINode *Broken, llvm::Loop *L, BranchInst *Term, SmallVectorImpl<WeakTrackingVH> &LoopPhiConditions) { @@ -322,7 +322,7 @@ Value *SIAnnotateControlFlow::handleLoopCondition( llvm_unreachable("Unhandled loop condition!"); } -/// \brief Handle a back edge (loop) +/// Handle a back edge (loop) void SIAnnotateControlFlow::handleLoop(BranchInst *Term) { if (isUniform(Term)) return; @@ -353,7 +353,7 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) { push(Term->getSuccessor(0), Arg); } -/// \brief Close the last opened control flow +/// Close the last opened control flow void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) { llvm::Loop *L = LI->getLoopFor(BB); @@ -381,7 +381,7 @@ void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) { CallInst::Create(EndCf, Exec, "", FirstInsertionPt); } -/// \brief Annotate the control flow with intrinsics so the backend can +/// Annotate the control flow with intrinsics so the backend can /// recognize if/then/else and loops. bool SIAnnotateControlFlow::runOnFunction(Function &F) { DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); @@ -430,7 +430,7 @@ bool SIAnnotateControlFlow::runOnFunction(Function &F) { return true; } -/// \brief Create the annotation pass +/// Create the annotation pass FunctionPass *llvm::createSIAnnotateControlFlowPass() { return new SIAnnotateControlFlow(); } diff --git a/llvm/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp b/llvm/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp index b5c439b21b8..69ec3816772 100644 --- a/llvm/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp +++ b/llvm/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Inserts one nop instruction for each high level source statement for +/// Inserts one nop instruction for each high level source statement for /// debugger usage. /// /// Tools, such as a debugger, need to pause execution based on user input (i.e. 
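The push/popSaved/isTopOfStack helpers above maintain a stack of open control-flow regions: openIf pushes the merge block together with the saved mask value, and closeControlFlow pops it when that block is reached. A toy standalone C++ model of that bookkeeping (block names and mask values are invented for the demo; the real pass stores BasicBlock*/Value* pairs):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

using StackEntry = std::pair<std::string, int>; // (merge block, saved value)
static std::vector<StackEntry> Stack;

static void push(const std::string &BB, int Saved) {
  Stack.emplace_back(BB, Saved); // openIf / insertElse record the region
}
static bool isTopOfStack(const std::string &BB) {
  return !Stack.empty() && Stack.back().first == BB;
}
static int popSaved() {
  int Saved = Stack.back().second;
  Stack.pop_back();              // closeControlFlow ends the region
  return Saved;
}

int main() {
  push("if.end", /*saved mask*/ 0xF0);  // modeled openIf
  assert(isTopOfStack("if.end"));       // reached the merge block
  assert(popSaved() == 0xF0);           // modeled closeControlFlow
  assert(Stack.empty());
  return 0;
}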
diff --git a/llvm/lib/Target/AMDGPU/SIFixVGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixVGPRCopies.cpp index 3d3121788b5..7a3caf4db71 100644 --- a/llvm/lib/Target/AMDGPU/SIFixVGPRCopies.cpp +++ b/llvm/lib/Target/AMDGPU/SIFixVGPRCopies.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Add implicit use of exec to vector register copies. +/// Add implicit use of exec to vector register copies. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp b/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp index 3493c7775f0..666335531e7 100644 --- a/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp +++ b/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Computations in WWM can overwrite values in inactive channels for +/// Computations in WWM can overwrite values in inactive channels for /// variables that the register allocator thinks are dead. This pass adds fake /// uses of those variables to WWM instructions to make sure that they aren't /// overwritten. diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.h b/llvm/lib/Target/AMDGPU/SIFrameLowering.h index df6f1632a31..6be7f262208 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.h +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.h @@ -66,7 +66,7 @@ private: SIMachineFunctionInfo *MFI, MachineFunction &MF) const; - /// \brief Emits debugger prologue. + /// Emits debugger prologue. void emitDebuggerPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const; // Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set. diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index ebc686ca83d..4186e221ede 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Custom DAG lowering for SI +/// Custom DAG lowering for SI // //===----------------------------------------------------------------------===// @@ -3785,7 +3785,7 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N, } } -/// \brief Helper function for LowerBRCOND +/// Helper function for LowerBRCOND static SDNode *findUser(SDValue Value, unsigned Opcode) { SDNode *Parent = Value.getNode(); @@ -7129,7 +7129,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N, return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); } -/// \brief Helper function for adjustWritemask +/// Helper function for adjustWritemask static unsigned SubIdx2Lane(unsigned Idx) { switch (Idx) { default: return 0; @@ -7140,7 +7140,7 @@ static unsigned SubIdx2Lane(unsigned Idx) { } } -/// \brief Adjust the writemask of MIMG instructions +/// Adjust the writemask of MIMG instructions SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, SelectionDAG &DAG) const { SDNode *Users[4] = { nullptr }; @@ -7262,7 +7262,7 @@ static bool isFrameIndexOp(SDValue Op) { return isa<FrameIndexSDNode>(Op); } -/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG) +/// Legalize target independent instructions (e.g. INSERT_SUBREG) /// with frame index operands. /// LLVM assumes that inputs are to these instructions are registers. 
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, @@ -7309,7 +7309,7 @@ SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, return DAG.UpdateNodeOperands(Node, Ops); } -/// \brief Fold the instructions after selecting them. +/// Fold the instructions after selecting them. /// Returns null if users were already updated. SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, SelectionDAG &DAG) const { @@ -7383,7 +7383,7 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, return Node; } -/// \brief Assign the register class depending on the number of +/// Assign the register class depending on the number of /// bits set in the writemask void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const { @@ -7470,7 +7470,7 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); } -/// \brief Return a resource descriptor with the 'Add TID' bit enabled +/// Return a resource descriptor with the 'Add TID' bit enabled /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] /// of the resource descriptor) to create an offset, which is added to /// the resource pointer. diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h index 2c6e61d316d..fba383dbe4c 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief SI DAG Lowering interface definition +/// SI DAG Lowering interface definition // //===----------------------------------------------------------------------===// @@ -64,7 +64,7 @@ class SITargetLowering final : public AMDGPUTargetLowering { SelectionDAG &DAG) const; SDValue handleD16VData(SDValue VData, SelectionDAG &DAG) const; - /// \brief Converts \p Op, which must be of floating point type, to the + /// Converts \p Op, which must be of floating point type, to the /// floating point type \p VT, by either extending or truncating it. SDValue getFPExtOrFPTrunc(SelectionDAG &DAG, SDValue Op, @@ -75,7 +75,7 @@ class SITargetLowering final : public AMDGPUTargetLowering { SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val, bool Signed, const ISD::InputArg *Arg = nullptr) const; - /// \brief Custom lowering for ISD::FP_ROUND for MVT::f16. + /// Custom lowering for ISD::FP_ROUND for MVT::f16. SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const; SDValue getSegmentAperture(unsigned AS, const SDLoc &DL, diff --git a/llvm/lib/Target/AMDGPU/SIInsertSkips.cpp b/llvm/lib/Target/AMDGPU/SIInsertSkips.cpp index eb7277b7a5b..37903cdd6e8 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertSkips.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertSkips.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This pass inserts branches on the 0 exec mask over divergent +/// This pass inserts branches on the 0 exec mask over divergent /// branches when it's expected that jumping over the untaken control flow will /// be cheaper than having every workitem no-op through it.
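The SIInsertSkips rationale above is a cost trade-off: when the exec mask is zero, every instruction in the untaken region still occupies issue slots unless a branch skips it. A conceptual standalone C++ sketch of that decision; the threshold, cost numbers, and function names here are invented, not the pass's actual heuristic:

#include <cassert>
#include <cstdint>

static bool shouldInsertSkip(unsigned NumInstrsInRegion,
                             unsigned BranchCostInInstrs = 3) {
  // A skip branch pays for itself once the region it jumps over costs
  // more than the branch does.
  return NumInstrsInRegion > BranchCostInInstrs;
}

// Models the issue slots spent on a divergent region for a given exec mask.
static unsigned execute(uint64_t Exec, unsigned NumInstrs) {
  if (shouldInsertSkip(NumInstrs) && Exec == 0)
    return 0;         // branched over the whole region
  return NumInstrs;   // every lane no-ops through it
}

int main() {
  assert(execute(/*Exec=*/0, /*NumInstrs=*/100) == 0);      // skip pays off
  assert(execute(/*Exec=*/0, /*NumInstrs=*/2) == 2);        // too short to skip
  assert(execute(/*Exec=*/0xFF, /*NumInstrs=*/100) == 100); // active lanes run
  return 0;
}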
// diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 543d07347cc..2aa4297477b 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Insert wait instructions for memory reads and writes. +/// Insert wait instructions for memory reads and writes. /// /// Memory reads and writes are issued asynchronously, so we need to insert /// S_WAITCNT instructions when we want to access any of their results or @@ -843,7 +843,7 @@ static bool readsVCCZ(const MachineInstr &MI) { !MI.getOperand(1).isUndef(); } -/// \brief Given wait count encodings checks if LHS is stronger than RHS. +/// Given wait count encodings checks if LHS is stronger than RHS. bool SIInsertWaitcnts::isWaitcntStronger(unsigned LHS, unsigned RHS) { if (AMDGPU::decodeVmcnt(IV, LHS) > AMDGPU::decodeVmcnt(IV, RHS)) return false; @@ -854,7 +854,7 @@ bool SIInsertWaitcnts::isWaitcntStronger(unsigned LHS, unsigned RHS) { return true; } -/// \brief Given wait count encodings create a new encoding which is stronger +/// Given wait count encodings create a new encoding which is stronger /// or equal to both. unsigned SIInsertWaitcnts::combineWaitcnt(unsigned LHS, unsigned RHS) { unsigned VmCnt = std::min(AMDGPU::decodeVmcnt(IV, LHS), @@ -866,7 +866,7 @@ unsigned SIInsertWaitcnts::combineWaitcnt(unsigned LHS, unsigned RHS) { return AMDGPU::encodeWaitcnt(IV, VmCnt, ExpCnt, LgkmCnt); } -/// \brief Generate s_waitcnt instruction to be placed before cur_Inst. +/// Generate s_waitcnt instruction to be placed before cur_Inst. /// Instructions of a given type are returned in order, /// but instructions of different types can complete out of order. /// We rely on this in-order completion diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp index d06d96be542..65b22bde51d 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaits.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Insert wait instructions for memory reads and writes. +/// Insert wait instructions for memory reads and writes. /// /// Memory reads and writes are issued asynchronously, so we need to insert /// S_WAITCNT instructions when we want to access any of their results or @@ -49,7 +49,7 @@ using namespace llvm; namespace { -/// \brief One variable for each of the hardware counters +/// One variable for each of the hardware counters using Counters = union { struct { unsigned VM; @@ -76,32 +76,32 @@ private: const MachineRegisterInfo *MRI; AMDGPU::IsaInfo::IsaVersion ISA; - /// \brief Constant zero value + /// Constant zero value static const Counters ZeroCounts; - /// \brief Hardware limits + /// Hardware limits Counters HardwareLimits; - /// \brief Counter values we have already waited on. + /// Counter values we have already waited on. Counters WaitedOn; - /// \brief Counter values that we must wait on before the next counter + /// Counter values that we must wait on before the next counter /// increase. Counters DelayedWaitOn; - /// \brief Counter values for last instruction issued. + /// Counter values for last instruction issued. Counters LastIssued; - /// \brief Registers used by async instructions. + /// Registers used by async instructions. 
RegCounters UsedRegs; - /// \brief Registers defined by async instructions. + /// Registers defined by async instructions. RegCounters DefinedRegs; - /// \brief Different export instruction types seen since last wait. + /// Different export instruction types seen since last wait. unsigned ExpInstrTypesSeen = 0; - /// \brief Type of the last opcode. + /// Type of the last opcode. InstType LastOpcodeType; bool LastInstWritesM0; @@ -109,42 +109,42 @@ private: /// Whether or not we have flat operations outstanding. bool IsFlatOutstanding; - /// \brief Whether the machine function returns void + /// Whether the machine function returns void bool ReturnsVoid; /// Whether the VCCZ bit is possibly corrupt bool VCCZCorrupt = false; - /// \brief Get increment/decrement amount for this instruction. + /// Get increment/decrement amount for this instruction. Counters getHwCounts(MachineInstr &MI); - /// \brief Is operand relevant for async execution? + /// Is operand relevant for async execution? bool isOpRelevant(MachineOperand &Op); - /// \brief Get register interval an operand affects. + /// Get register interval an operand affects. RegInterval getRegInterval(const TargetRegisterClass *RC, const MachineOperand &Reg) const; - /// \brief Handle instructions async components + /// Handle instructions async components void pushInstruction(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const Counters& Increment); - /// \brief Insert the actual wait instruction + /// Insert the actual wait instruction bool insertWait(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const Counters &Counts); - /// \brief Handle existing wait instructions (from intrinsics) + /// Handle existing wait instructions (from intrinsics) void handleExistingWait(MachineBasicBlock::iterator I); - /// \brief Do we need def2def checks? + /// Do we need def2def checks? bool unorderedDefines(MachineInstr &MI); - /// \brief Resolve all operand dependencies to counter requirements + /// Resolve all operand dependencies to counter requirements Counters handleOperands(MachineInstr &MI); - /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG. + /// Insert S_NOP between an instruction writing M0 and S_SENDMSG. void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I); /// Return true if there are LGKM instrucitons that haven't been waited on @@ -435,13 +435,13 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB, return true; } -/// \brief helper function for handleOperands +/// helper function for handleOperands static void increaseCounters(Counters &Dst, const Counters &Src) { for (unsigned i = 0; i < 3; ++i) Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]); } -/// \brief check whether any of the counters is non-zero +/// check whether any of the counters is non-zero static bool countersNonZero(const Counters &Counter) { for (unsigned i = 0; i < 3; ++i) if (Counter.Array[i]) diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 897ffa948e2..553f13ecb84 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief SI Implementation of TargetInstrInfo. +/// SI Implementation of TargetInstrInfo. 
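The counter bookkeeping described by the members above follows a simple rule: each asynchronous operation bumps a counter and tags its destination register with the counter's value, and a later use of that register must wait until enough operations have drained. Since completion is in order per counter type, the wait immediate is "how many ops may still be outstanding". A simplified standalone C++ model of the VM-counter case (register names, the single-counter scope, and function names are invented for the demo):

#include <cassert>
#include <map>
#include <string>

static unsigned LastIssued = 0;                  // async ops issued so far
static unsigned Completed = 0;                   // ops known to be complete
static std::map<std::string, unsigned> DefCount; // reg -> count at its def

static void issueLoad(const std::string &DstReg) {
  DefCount[DstReg] = ++LastIssued; // the load's result arrives at this count
}

// Returns the wait immediate a use of Reg needs: the number of ops that may
// still be outstanding once the register's value is guaranteed to be ready.
static unsigned waitFor(const std::string &Reg) {
  unsigned Needed = LastIssued - DefCount[Reg];
  if (LastIssued - Completed > Needed)
    Completed = LastIssued - Needed; // the wait drains down to this point
  return Needed;
}

int main() {
  issueLoad("v0");
  issueLoad("v1");
  assert(waitFor("v0") == 1); // may leave the later v1 load outstanding
  assert(waitFor("v1") == 0); // must drain everything up to v1's load
  return 0;
}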
// //===----------------------------------------------------------------------===// @@ -89,7 +89,7 @@ static SDValue findChainOperand(SDNode *Load) { return LastOp; } -/// \brief Returns true if both nodes have the same value for the given +/// Returns true if both nodes have the same value for the given /// operand \p Op, or if both nodes do not have this operand. static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { unsigned Opc0 = N0->getMachineOpcode(); diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index 61639f5cf64..673f3f89578 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface definition for SIInstrInfo. +/// Interface definition for SIInstrInfo. // //===----------------------------------------------------------------------===// @@ -203,7 +203,7 @@ public: bool expandPostRAPseudo(MachineInstr &MI) const override; - // \brief Returns an opcode that can be used to move a value to a \p DstRC + // Returns an opcode that can be used to move a value to a \p DstRC // register. If there is no hardware instruction that can store to \p // DstRC, then AMDGPU::COPY is returned. unsigned getMovOpcode(const TargetRegisterClass *DstRC) const; @@ -682,16 +682,16 @@ public: bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, const MachineOperand &MO) const; - /// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding. + /// Return true if this 64-bit VALU instruction has a 32-bit encoding. /// This function will return false if you pass it a 32-bit instruction. bool hasVALU32BitEncoding(unsigned Opcode) const; - /// \brief Returns true if this operand uses the constant bus. + /// Returns true if this operand uses the constant bus. bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const; - /// \brief Return true if this instruction has any modifiers. + /// Return true if this instruction has any modifiers. /// e.g. src[012]_mod, omod, clamp. bool hasModifiers(unsigned Opcode) const; @@ -704,7 +704,7 @@ public: unsigned getVALUOp(const MachineInstr &MI) const; - /// \brief Return the correct register class for \p OpNo. For target-specific + /// Return the correct register class for \p OpNo. For target-specific /// instructions, this will return the register class that has been defined /// in tablegen. For generic instructions, like REG_SEQUENCE it will return /// the register class of its machine operand. @@ -712,7 +712,7 @@ public: const TargetRegisterClass *getOpRegClass(const MachineInstr &MI, unsigned OpNo) const; - /// \brief Return the size in bytes of the operand OpNo on the given + /// Return the size in bytes of the operand OpNo on the given // instruction opcode. unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const { const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo]; @@ -726,7 +726,7 @@ public: return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8; } - /// \brief This form should usually be preferred since it handles operands + /// This form should usually be preferred since it handles operands /// with unknown register classes. unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const { return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8; @@ -736,7 +736,7 @@ public: /// to read a VGPR. 
bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const; - /// \brief Legalize the \p OpIndex operand of this instruction by inserting + /// Legalize the \p OpIndex operand of this instruction by inserting /// a MOV. For example: /// ADD_I32_e32 VGPR0, 15 /// to @@ -747,29 +747,29 @@ public: /// instead of MOV. void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const; - /// \brief Check if \p MO is a legal operand if it was the \p OpIdx Operand + /// Check if \p MO is a legal operand if it was the \p OpIdx Operand /// for \p MI. bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO = nullptr) const; - /// \brief Check if \p MO would be a valid operand for the given operand + /// Check if \p MO would be a valid operand for the given operand /// definition \p OpInfo. Note this does not attempt to validate constant bus /// restrictions (e.g. literal constant usage). bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const; - /// \brief Check if \p MO (a register operand) is a legal register for the + /// Check if \p MO (a register operand) is a legal register for the /// given operand description. bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const; - /// \brief Legalize operands in \p MI by either commuting it or inserting a + /// Legalize operands in \p MI by either commuting it or inserting a /// copy of src1. void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const; - /// \brief Fix operands in \p MI to satisfy constant bus requirements. + /// Fix operands in \p MI to satisfy constant bus requirements. void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const; /// Copy a value from a VGPR (\p SrcReg) to SGPR. This function can only @@ -787,11 +787,11 @@ public: MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const; - /// \brief Legalize all operands in this instruction. This function may + /// Legalize all operands in this instruction. This function may /// create new instruction and insert them before \p MI. void legalizeOperands(MachineInstr &MI) const; - /// \brief Replace this instruction's opcode with the equivalent VALU + /// Replace this instruction's opcode with the equivalent VALU /// opcode. This function will also move the users of \p MI to the /// VALU if necessary. void moveToVALU(MachineInstr &MI) const; @@ -803,11 +803,11 @@ public: MachineBasicBlock::iterator MI) const override; void insertReturn(MachineBasicBlock &MBB) const; - /// \brief Return the number of wait states that result from executing this + /// Return the number of wait states that result from executing this /// instruction. unsigned getNumWaitStates(const MachineInstr &MI) const; - /// \brief Returns the operand named \p Op. If \p MI does not have an + /// Returns the operand named \p Op. If \p MI does not have an /// operand named \c Op, this function returns nullptr. LLVM_READONLY MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const; @@ -830,7 +830,7 @@ public: bool isLowLatencyInstruction(const MachineInstr &MI) const; bool isHighLatencyInstruction(const MachineInstr &MI) const; - /// \brief Return the descriptor of the target-specific machine instruction + /// Return the descriptor of the target-specific machine instruction /// that corresponds to the specified pseudo or native opcode. 
const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const { return get(pseudoToMCOpcode(Opcode)); @@ -875,7 +875,7 @@ public: bool isBasicBlockPrologue(const MachineInstr &MI) const override; - /// \brief Return a partially built integer add instruction without carry. + /// Return a partially built integer add instruction without carry. /// Caller must add source operands. /// For pre-GFX9 it will generate unused carry destination operand. /// TODO: After GFX9 it should return a no-carry operation. diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp index a9af8332397..96f1a1a5134 100644 --- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp +++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This pass lowers the pseudo control flow instructions to real +/// This pass lowers the pseudo control flow instructions to real /// machine instructions. /// /// All control flow is handled using predicated instructions and diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h index 8c38cdae5d9..8f85d077679 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h @@ -426,7 +426,7 @@ public: return ArgInfo.PrivateSegmentWaveByteOffset.getRegister(); } - /// \brief Returns the physical register reserved for use as the resource + /// Returns the physical register reserved for use as the resource /// descriptor for scratch accesses. unsigned getScratchRSrcReg() const { return ScratchRSrcReg; @@ -586,7 +586,7 @@ public: return DebuggerWorkGroupIDStackObjectIndices[Dim]; } - /// \brief Sets stack object index for \p Dim's work group ID to \p ObjectIdx. + /// Sets stack object index for \p Dim's work group ID to \p ObjectIdx. void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) { assert(Dim < 3); DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx; @@ -598,7 +598,7 @@ public: return DebuggerWorkItemIDStackObjectIndices[Dim]; } - /// \brief Sets stack object index for \p Dim's work item ID to \p ObjectIdx. + /// Sets stack object index for \p Dim's work item ID to \p ObjectIdx. 
void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) { assert(Dim < 3); DebuggerWorkItemIDStackObjectIndices[Dim] = ObjectIdx; diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp index 528ce52b453..04536cdfe72 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp +++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief SI Machine Scheduler interface +/// SI Machine Scheduler interface // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h index d824e38504e..0ce68ac6a89 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h +++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief SI Machine Scheduler interface +/// SI Machine Scheduler interface // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp index 6d618dfc919..574f21019bb 100644 --- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Memory legalizer - implements memory model. More information can be +/// Memory legalizer - implements memory model. More information can be /// found here: /// http://llvm.org/docs/AMDGPUUsage.html#memory-model // @@ -110,7 +110,7 @@ public: static Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo( const MachineBasicBlock::iterator &MI); - /// \brief Reports unknown synchronization scope used in \p MI to LLVM + /// Reports unknown synchronization scope used in \p MI to LLVM /// context. static void reportUnknownSyncScope( const MachineBasicBlock::iterator &MI); @@ -118,22 +118,22 @@ public: class SIMemoryLegalizer final : public MachineFunctionPass { private: - /// \brief Machine module info. + /// Machine module info. const AMDGPUMachineModuleInfo *MMI = nullptr; - /// \brief Instruction info. + /// Instruction info. const SIInstrInfo *TII = nullptr; - /// \brief Immediate for "vmcnt(0)". + /// Immediate for "vmcnt(0)". unsigned Vmcnt0Immediate = 0; - /// \brief Opcode for cache invalidation instruction (L1). + /// Opcode for cache invalidation instruction (L1). unsigned VmemSIMDCacheInvalidateOpc = 0; - /// \brief List of atomic pseudo instructions. + /// List of atomic pseudo instructions. std::list<MachineBasicBlock::iterator> AtomicPseudoMIs; - /// \brief Sets named bit (BitName) to "true" if present in \p MI. Returns + /// Sets named bit (BitName) to "true" if present in \p MI. Returns /// true if \p MI is modified, false otherwise. template <uint16_t BitName> bool enableNamedBit(const MachineBasicBlock::iterator &MI) const { @@ -149,44 +149,44 @@ private: return true; } - /// \brief Sets GLC bit to "true" if present in \p MI. Returns true if \p MI + /// Sets GLC bit to "true" if present in \p MI. Returns true if \p MI /// is modified, false otherwise. bool enableGLCBit(const MachineBasicBlock::iterator &MI) const { return enableNamedBit<AMDGPU::OpName::glc>(MI); } - /// \brief Sets SLC bit to "true" if present in \p MI. 
Returns true if \p MI + /// Sets SLC bit to "true" if present in \p MI. Returns true if \p MI /// is modified, false otherwise. bool enableSLCBit(const MachineBasicBlock::iterator &MI) const { return enableNamedBit<AMDGPU::OpName::slc>(MI); } - /// \brief Inserts "buffer_wbinvl1_vol" instruction \p Before or after \p MI. + /// Inserts "buffer_wbinvl1_vol" instruction \p Before or after \p MI. /// Always returns true. bool insertVmemSIMDCacheInvalidate(MachineBasicBlock::iterator &MI, bool Before = true) const; - /// \brief Inserts "s_waitcnt vmcnt(0)" instruction \p Before or after \p MI. + /// Inserts "s_waitcnt vmcnt(0)" instruction \p Before or after \p MI. /// Always returns true. bool insertWaitcntVmcnt0(MachineBasicBlock::iterator &MI, bool Before = true) const; - /// \brief Removes all processed atomic pseudo instructions from the current + /// Removes all processed atomic pseudo instructions from the current /// function. Returns true if current function is modified, false otherwise. bool removeAtomicPseudoMIs(); - /// \brief Expands load operation \p MI. Returns true if instructions are + /// Expands load operation \p MI. Returns true if instructions are /// added/deleted or \p MI is modified, false otherwise. bool expandLoad(const SIMemOpInfo &MOI, MachineBasicBlock::iterator &MI); - /// \brief Expands store operation \p MI. Returns true if instructions are + /// Expands store operation \p MI. Returns true if instructions are /// added/deleted or \p MI is modified, false otherwise. bool expandStore(const SIMemOpInfo &MOI, MachineBasicBlock::iterator &MI); - /// \brief Expands atomic fence operation \p MI. Returns true if + /// Expands atomic fence operation \p MI. Returns true if /// instructions are added/deleted or \p MI is modified, false otherwise. bool expandAtomicFence(const SIMemOpInfo &MOI, MachineBasicBlock::iterator &MI); - /// \brief Expands atomic cmpxchg or rmw operation \p MI. Returns true if + /// Expands atomic cmpxchg or rmw operation \p MI. Returns true if /// instructions are added/deleted or \p MI is modified, false otherwise. bool expandAtomicCmpxchgOrRmw(const SIMemOpInfo &MOI, MachineBasicBlock::iterator &MI); diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp index 83074773c49..51ac8146f3a 100644 --- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp +++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This pass removes redundant S_OR_B64 instructions enabling lanes in +/// This pass removes redundant S_OR_B64 instructions enabling lanes in /// the exec. If two SI_END_CF (lowered as S_OR_B64) come together without any /// vector instructions between them we can only keep outer SI_END_CF, given /// that CFG is structured and exec bits of the outer end statement are always diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 6a3f00f8d1e..f6e2fbc5884 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief SI implementation of the TargetRegisterInfo class. +/// SI implementation of the TargetRegisterInfo class. 
// //===----------------------------------------------------------------------===// @@ -1370,7 +1370,7 @@ bool SIRegisterInfo::shouldRewriteCopySrc( return getCommonSubClass(DefRC, SrcRC) != nullptr; } -/// \brief Returns a register that is not used at any point in the function. +/// Returns a register that is not used at any point in the function. /// If all registers are used, then this function will return // AMDGPU::NoRegister. unsigned diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h index 81515ec8a25..1775c94c292 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief Interface definition for SIRegisterInfo +/// Interface definition for SIRegisterInfo // //===----------------------------------------------------------------------===// @@ -125,7 +125,7 @@ public: return getEncodingValue(Reg) & 0xff; } - /// \brief Return the 'base' register class for this register. + /// Return the 'base' register class for this register. /// e.g. SGPR0 => SReg_32, VGPR => VGPR_32 SGPR0_SGPR1 -> SReg_32, etc. const TargetRegisterClass *getPhysRegClass(unsigned Reg) const; diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp index 61cbba4c8ae..33fd5a30791 100644 --- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -126,7 +126,7 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII, !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp); } -/// \brief This function checks \p MI for operands defined by a move immediate +/// This function checks \p MI for operands defined by a move immediate /// instruction and then folds the literal constant into the instruction if it /// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instructions. static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII, diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp index 53aefe82973..aeb1190e449 100644 --- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // /// \file -/// \brief This pass adds instructions to enable whole quad mode for pixel +/// This pass adds instructions to enable whole quad mode for pixel /// shaders, and whole wavefront mode for all programs. /// /// Whole quad mode is required for derivative computations, but it interferes diff --git a/llvm/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp b/llvm/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp index f61e2e413ad..e4c442db301 100644 --- a/llvm/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp +++ b/llvm/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp @@ -16,19 +16,19 @@ using namespace llvm; -/// \brief The target which supports all AMD GPUs. This will eventually +/// The target which supports all AMD GPUs. This will eventually /// be deprecated and there will be a R600 target and a GCN target. 
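[Editor's note] The findUnusedRegister routine documented in the SIRegisterInfo hunk above scans a register class for a physical register with no uses anywhere in the function. A simplified sketch of that scan, assuming a MachineRegisterInfo use-emptiness query and a reserved-register check (illustrative, not the exact implementation):

    // Return the first register in RC that the function never touches,
    // or AMDGPU::NoRegister if every register in RC is used.
    static unsigned findUnusedReg(const MachineRegisterInfo &MRI,
                                  const TargetRegisterClass &RC) {
      for (MCPhysReg Reg : RC)
        if (MRI.reg_nodbg_empty(Reg) && !MRI.isReserved(Reg))
          return Reg;
      return AMDGPU::NoRegister;
    }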
Target &llvm::getTheAMDGPUTarget() { static Target TheAMDGPUTarget; return TheAMDGPUTarget; } -/// \brief The target for GCN GPUs +/// The target for GCN GPUs Target &llvm::getTheGCNTarget() { static Target TheGCNTarget; return TheGCNTarget; } -/// \brief Extern function to initialize the targets for the AMDGPU backend +/// Extern function to initialize the targets for the AMDGPU backend extern "C" void LLVMInitializeAMDGPUTargetInfo() { RegisterTarget<Triple::r600, false> R600(getTheAMDGPUTarget(), "r600", "AMD GPUs HD2XXX-HD6XXX", "AMDGPU"); diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index 8f687fdc60a..74dac6561cf 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -53,7 +53,7 @@ unsigned getBitMask(unsigned Shift, unsigned Width) { return ((1 << Width) - 1) << Shift; } -/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width. +/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width. /// /// \returns Packed \p Dst. unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) { @@ -62,7 +62,7 @@ unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) { return Dst; } -/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width. +/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width. /// /// \returns Unpacked bits. unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) { diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index 1fb81533cb7..5459ddfc7ef 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -45,7 +45,7 @@ enum { FIXED_NUM_SGPRS_FOR_INIT_BUG = 96 }; -/// \brief Instruction set architecture version. +/// Instruction set architecture version. struct IsaVersion { unsigned Major; unsigned Minor; @@ -55,7 +55,7 @@ struct IsaVersion { /// \returns Isa version for given subtarget \p Features. IsaVersion getIsaVersion(const FeatureBitset &Features); -/// \brief Streams isa version string for given subtarget \p STI into \p Stream. +/// Streams isa version string for given subtarget \p STI into \p Stream. void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream); /// \returns True if given subtarget \p Features support code object version 3, @@ -221,7 +221,7 @@ unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt); /// \returns Decoded Lgkmcnt from given \p Waitcnt for given isa \p Version. unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt); -/// \brief Decodes Vmcnt, Expcnt and Lgkmcnt from given \p Waitcnt for given isa +/// Decodes Vmcnt, Expcnt and Lgkmcnt from given \p Waitcnt for given isa /// \p Version, and writes decoded values into \p Vmcnt, \p Expcnt and /// \p Lgkmcnt respectively. /// @@ -245,7 +245,7 @@ unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt, unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt, unsigned Lgkmcnt); -/// \brief Encodes \p Vmcnt, \p Expcnt and \p Lgkmcnt into Waitcnt for given isa +/// Encodes \p Vmcnt, \p Expcnt and \p Lgkmcnt into Waitcnt for given isa /// \p Version. 
/// /// \details \p Vmcnt, \p Expcnt and \p Lgkmcnt are encoded as follows: @@ -292,36 +292,36 @@ bool isCI(const MCSubtargetInfo &STI); bool isVI(const MCSubtargetInfo &STI); bool isGFX9(const MCSubtargetInfo &STI); -/// \brief Is Reg - scalar register +/// Is Reg - scalar register bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI); -/// \brief Is there any intersection between registers +/// Is there any intersection between registers bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI); /// If \p Reg is a pseudo reg, return the correct hardware register given /// \p STI otherwise return \p Reg. unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI); -/// \brief Convert hardware register \p Reg to a pseudo register +/// Convert hardware register \p Reg to a pseudo register LLVM_READNONE unsigned mc2PseudoReg(unsigned Reg); -/// \brief Can this operand also contain immediate values? +/// Can this operand also contain immediate values? bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo); -/// \brief Is this floating-point operand? +/// Is this floating-point operand? bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo); -/// \brief Does this opearnd support only inlinable literals? +/// Does this opearnd support only inlinable literals? bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo); -/// \brief Get the size in bits of a register from the register class \p RC. +/// Get the size in bits of a register from the register class \p RC. unsigned getRegBitWidth(unsigned RCID); -/// \brief Get the size in bits of a register from the register class \p RC. +/// Get the size in bits of a register from the register class \p RC. unsigned getRegBitWidth(const MCRegisterClass &RC); -/// \brief Get size of register operand +/// Get size of register operand unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo); @@ -358,7 +358,7 @@ inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) { return getOperandSize(Desc.OpInfo[OpNo]); } -/// \brief Is this literal inlinable +/// Is this literal inlinable LLVM_READNONE bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi); diff --git a/llvm/lib/Target/ARC/ARCMCInstLower.cpp b/llvm/lib/Target/ARC/ARCMCInstLower.cpp index 4658388924e..43b087a5720 100644 --- a/llvm/lib/Target/ARC/ARCMCInstLower.cpp +++ b/llvm/lib/Target/ARC/ARCMCInstLower.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains code to lower ARC MachineInstrs to their +/// This file contains code to lower ARC MachineInstrs to their /// corresponding MCInst records. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/ARC/ARCMCInstLower.h b/llvm/lib/Target/ARC/ARCMCInstLower.h index 22e15cdb351..9a698f26334 100644 --- a/llvm/lib/Target/ARC/ARCMCInstLower.h +++ b/llvm/lib/Target/ARC/ARCMCInstLower.h @@ -23,7 +23,7 @@ class MachineFunction; class Mangler; class AsmPrinter; -/// \brief This class is used to lower an MachineInstr into an MCInst. +/// This class is used to lower an MachineInstr into an MCInst. 
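[Editor's note] The packBits/unpackBits helpers and the waitcnt encode/decode routines in the AMDGPUBaseInfo hunks above are all built on the same mask-and-shift pattern as getBitMask. A self-contained sketch, assuming for concreteness the pre-GFX9 waitcnt layout of Vmcnt in bits [3:0], Expcnt in [6:4], and Lgkmcnt in [11:8] (the field positions are an assumption here; the real code derives them from the ISA version):

    // Mask/shift helpers in the shape shown above.
    unsigned getBitMask(unsigned Shift, unsigned Width) {
      return ((1u << Width) - 1) << Shift;
    }
    unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
      unsigned Mask = getBitMask(Shift, Width);
      return (Dst & ~Mask) | ((Src << Shift) & Mask);
    }
    unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
      return (Src & getBitMask(Shift, Width)) >> Shift;
    }

    // Waitcnt packing on top of them (pre-GFX9 field layout assumed).
    unsigned encodeWaitcnt(unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
      unsigned W = 0;
      W = packBits(Vmcnt, W, 0, 4);
      W = packBits(Expcnt, W, 4, 3);
      W = packBits(Lgkmcnt, W, 8, 4);
      return W;
    }
    unsigned decodeVmcnt(unsigned Waitcnt) { return unpackBits(Waitcnt, 0, 4); }

For example, encodeWaitcnt(0, 7, 15) yields 0xF70 under this layout, and decodeVmcnt of that value gives back 0.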
class LLVM_LIBRARY_VISIBILITY ARCMCInstLower { using MachineOperandType = MachineOperand::MachineOperandType; MCContext *Ctx; diff --git a/llvm/lib/Target/ARC/Disassembler/ARCDisassembler.cpp b/llvm/lib/Target/ARC/Disassembler/ARCDisassembler.cpp index dd181767d81..3280d5ee6cf 100644 --- a/llvm/lib/Target/ARC/Disassembler/ARCDisassembler.cpp +++ b/llvm/lib/Target/ARC/Disassembler/ARCDisassembler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file is part of the ARC Disassembler. +/// This file is part of the ARC Disassembler. /// //===----------------------------------------------------------------------===// @@ -31,7 +31,7 @@ using DecodeStatus = MCDisassembler::DecodeStatus; namespace { -/// \brief A disassembler class for ARC. +/// A disassembler class for ARC. class ARCDisassembler : public MCDisassembler { public: std::unique_ptr<MCInstrInfo const> const MCII; diff --git a/llvm/lib/Target/ARC/InstPrinter/ARCInstPrinter.h b/llvm/lib/Target/ARC/InstPrinter/ARCInstPrinter.h index e26c08104e2..bb3898a67ce 100644 --- a/llvm/lib/Target/ARC/InstPrinter/ARCInstPrinter.h +++ b/llvm/lib/Target/ARC/InstPrinter/ARCInstPrinter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the ARCInstPrinter class, +/// This file contains the declaration of the ARCInstPrinter class, /// which is used to print ARC MCInst to a .s file. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index cc44c70b1d3..89e22369810 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1365,7 +1365,7 @@ unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI, return MI.mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex); } -/// \brief Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMID_UPD +/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMID_UPD /// depending on whether the result is used. void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const { bool isThumb1 = Subtarget.isThumb1Only(); diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h index 0d1719c2d42..f755f66a0f3 100644 --- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -201,7 +201,7 @@ public: int SPAdj, unsigned FIOperandNum, RegScavenger *RS = nullptr) const override; - /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true + /// SrcRC and DstRC will be morphed into NewRC if this returns true bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp index cdbd3607544..7411caf4d12 100644 --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -480,7 +480,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) { return MadeChange; } -/// \brief Perform the initial placement of the regular constant pool entries. +/// Perform the initial placement of the regular constant pool entries. /// To start with, we put them all at the end of the function. 
void ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) { @@ -540,7 +540,7 @@ ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) DEBUG(BB->dump()); } -/// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH +/// Do initial placement of the jump tables. Because Thumb2's TBB and TBH /// instructions can be made more efficient if the jump table immediately /// follows the instruction, it's best to place them immediately next to their /// jumps to begin with. In almost all cases they'll never be moved from that @@ -1929,7 +1929,7 @@ static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg, return true; } -/// \brief While trying to form a TBB/TBH instruction, we may (if the table +/// While trying to form a TBB/TBH instruction, we may (if the table /// doesn't immediately follow the BR_JT) need access to the start of the /// jump-table. We know one instruction that produces such a register; this /// function works out whether that definition can be preserved to the BR_JT, @@ -2017,7 +2017,7 @@ bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI, return true; } -/// \brief Returns whether CPEMI is the first instruction in the block +/// Returns whether CPEMI is the first instruction in the block /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so, /// we can switch the first register to PC and usually remove the address /// calculation that preceded it. diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp index 023baaa0ed9..26d4aaa12ac 100644 --- a/llvm/lib/Target/ARM/ARMFastISel.cpp +++ b/llvm/lib/Target/ARM/ARMFastISel.cpp @@ -2913,7 +2913,7 @@ static const struct FoldableLoadExtendsStruct { { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 } }; -/// \brief The specified machine instr operand is a vreg, and that +/// The specified machine instr operand is a vreg, and that /// vreg is being provided by the specified load instruction. If possible, /// try to fold the load as an operand to the instruction, returning true if /// successful. diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp index db25603950e..1e8aa929027 100644 --- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -283,7 +283,7 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) { isInt32Immediate(N->getOperand(1).getNode(), Imm); } -/// \brief Check whether a particular node is a constant value representable as +/// Check whether a particular node is a constant value representable as /// (N * Scale) where (N in [\p RangeMin, \p RangeMax). /// /// \param ScaledConstant [out] - On success, the pre-scaled constant value. @@ -1496,7 +1496,7 @@ bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) { return false; } -/// \brief Form a GPRPair pseudo register from a pair of GPR regs. +/// Form a GPRPair pseudo register from a pair of GPR regs. SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) { SDLoc dl(V0.getNode()); SDValue RegClass = @@ -1507,7 +1507,7 @@ SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) { return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form a D register from a pair of S registers. +/// Form a D register from a pair of S registers. 
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDLoc dl(V0.getNode()); SDValue RegClass = @@ -1518,7 +1518,7 @@ SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) { return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form a quad register from a pair of D registers. +/// Form a quad register from a pair of D registers. SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl, @@ -1529,7 +1529,7 @@ SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) { return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form 4 consecutive D registers from a pair of Q registers. +/// Form 4 consecutive D registers from a pair of Q registers. SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) { SDLoc dl(V0.getNode()); SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl, @@ -1540,7 +1540,7 @@ SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) { return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form 4 consecutive S registers. +/// Form 4 consecutive S registers. SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { SDLoc dl(V0.getNode()); @@ -1555,7 +1555,7 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form 4 consecutive D registers. +/// Form 4 consecutive D registers. SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { SDLoc dl(V0.getNode()); @@ -1570,7 +1570,7 @@ SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops); } -/// \brief Form 4 consecutive Q registers. +/// Form 4 consecutive Q registers. SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3) { SDLoc dl(V0.getNode()); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 1e032640c55..a6f2f3340a3 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -2800,7 +2800,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); } -/// \brief Convert a TLS address reference into the correct sequence of loads +/// Convert a TLS address reference into the correct sequence of loads /// and calls to compute the variable's address for Darwin, and return an /// SDValue containing the final node. @@ -9553,7 +9553,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } } -/// \brief Attaches vregs to MEMCPY that it will use as scratch registers +/// Attaches vregs to MEMCPY that it will use as scratch registers /// when it is expanded into LDM/STM. This is done as a post-isel lowering /// instead of as a custom inserter because we need the use list from the SDNode. static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, @@ -11292,7 +11292,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N, return DAG.getNode(ISD::BITCAST, dl, VT, BV); } -/// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 
+/// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. @@ -14171,7 +14171,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, return false; } -/// \brief Returns true if it is beneficial to convert a load of a constant +/// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const { @@ -14467,7 +14467,7 @@ bool ARMTargetLowering::isLegalInterleavedAccessType( return VecSize == 64 || VecSize % 128 == 0; } -/// \brief Lower an interleaved load into a vldN intrinsic. +/// Lower an interleaved load into a vldN intrinsic. /// /// E.g. Lower an interleaved load (Factor = 2): /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 @@ -14585,7 +14585,7 @@ bool ARMTargetLowering::lowerInterleavedLoad( return true; } -/// \brief Lower an interleaved store into a vstN intrinsic. +/// Lower an interleaved store into a vstN intrinsic. /// /// E.g. Lower an interleaved store (Factor = 3): /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, @@ -14783,7 +14783,7 @@ static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, return (Members > 0 && Members <= 4); } -/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of +/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when /// passing according to AAPCS rules. bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index 20e323be245..3986884f1c3 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -354,7 +354,7 @@ class VectorType; bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const; - /// \brief Returns true if the addresing mode representing by AM is legal + /// Returns true if the addresing mode representing by AM is legal /// for the Thumb1 target, for a load/store of the specified type. bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const; @@ -482,7 +482,7 @@ class VectorType; MachineFunction &MF, unsigned Intrinsic) const override; - /// \brief Returns true if it is beneficial to convert a load of a constant + /// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; @@ -492,7 +492,7 @@ class VectorType; bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override; - /// \brief Returns true if an argument of type Ty needs to be passed in a + /// Returns true if an argument of type Ty needs to be passed in a /// contiguous block of registers in calling convention CallConv. 
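[Editor's note] isLegalInterleavedAccessType, quoted above with its final check "return VecSize == 64 || VecSize % 128 == 0;", appears to encode the NEON rule that a legal interleaved access must total exactly one D register (64 bits) or a whole number of Q registers (128 bits each). A tiny standalone restatement with examples:

    // Legal interleaved vector sizes on ARM NEON, per the check above:
    // exactly 64 bits, or any multiple of 128 bits.
    static bool isLegalInterleavedVecSize(unsigned VecSizeInBits) {
      return VecSizeInBits == 64 || VecSizeInBits % 128 == 0;
    }
    // e.g. <2 x i32> (64 bits) and <4 x i32> (128 bits) pass;
    // <3 x i32> (96 bits) does not.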
bool functionArgumentNeedsConsecutiveRegisters( Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override; diff --git a/llvm/lib/Target/ARM/ARMMacroFusion.cpp b/llvm/lib/Target/ARM/ARMMacroFusion.cpp index 5c9aad417ce..f2dc650a6f3 100644 --- a/llvm/lib/Target/ARM/ARMMacroFusion.cpp +++ b/llvm/lib/Target/ARM/ARMMacroFusion.cpp @@ -19,7 +19,7 @@ namespace llvm { -/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused +/// Check if the instr pair, FirstMI and SecondMI, should be fused /// together. Given SecondMI, when FirstMI is unspecified, then check if /// SecondMI may be part of a fused pair at all. static bool shouldScheduleAdjacent(const TargetInstrInfo &TII, diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h index eedb675a330..e23a5fe1e06 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/llvm/lib/Target/ARM/ARMSubtarget.h @@ -598,7 +598,7 @@ public: bool hasFullFP16() const { return HasFullFP16; } bool hasFuseAES() const { return HasFuseAES; } - /// \brief Return true if the CPU supports any kind of instruction fusion. + /// Return true if the CPU supports any kind of instruction fusion. bool hasFusion() const { return hasFuseAES(); } const Triple &getTargetTriple() const { return TargetTriple; } diff --git a/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/llvm/lib/Target/ARM/ARMTargetObjectFile.h index a5463a67763..0dc0882809c 100644 --- a/llvm/lib/Target/ARM/ARMTargetObjectFile.h +++ b/llvm/lib/Target/ARM/ARMTargetObjectFile.h @@ -30,7 +30,7 @@ public: MachineModuleInfo *MMI, MCStreamer &Streamer) const override; - /// \brief Describe a TLS variable address within debug info. + /// Describe a TLS variable address within debug info. const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override; MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind, diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 532fd7e7eda..ce6ca46437e 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -5538,7 +5538,7 @@ bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { return false; } -/// \brief Given a mnemonic, split out possible predication code and carry +/// Given a mnemonic, split out possible predication code and carry /// setting letters to form a canonical mnemonic and flags. // // FIXME: Would be nice to autogen this. @@ -5629,7 +5629,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, return Mnemonic; } -/// \brief Given a canonical mnemonic, determine if the instruction ever allows +/// Given a canonical mnemonic, determine if the instruction ever allows /// inclusion of carry set or predication code operands. // // FIXME: It would be nice to autogen this. @@ -5683,7 +5683,7 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, CanAcceptPredicationCode = true; } -// \brief Some Thumb instructions have two operand forms that are not +// Some Thumb instructions have two operand forms that are not // available as three operand, convert to two operand form if possible. // // FIXME: We would really like to be able to tablegen'erate this. 
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp index 4fb1db5a218..d659799ff4a 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -912,7 +912,7 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, namespace CU { -/// \brief Compact unwind encoding values. +/// Compact unwind encoding values. enum CompactUnwindEncodings { UNWIND_ARM_MODE_MASK = 0x0F000000, UNWIND_ARM_MODE_FRAME = 0x01000000, diff --git a/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp index 574a8d44616..1a619ebda84 100644 --- a/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp +++ b/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp @@ -40,7 +40,7 @@ using DecodeStatus = MCDisassembler::DecodeStatus; namespace { -/// \brief Hexagon disassembler for all Hexagon platforms. +/// Hexagon disassembler for all Hexagon platforms. class HexagonDisassembler : public MCDisassembler { public: std::unique_ptr<MCInstrInfo const> const MCII; diff --git a/llvm/lib/Target/Hexagon/Hexagon.h b/llvm/lib/Target/Hexagon/Hexagon.h index 66b387b62c6..6ec52d18cdc 100644 --- a/llvm/lib/Target/Hexagon/Hexagon.h +++ b/llvm/lib/Target/Hexagon/Hexagon.h @@ -49,7 +49,7 @@ namespace llvm { class HexagonTargetMachine; - /// \brief Creates a Hexagon-specific Target Transformation Info pass. + /// Creates a Hexagon-specific Target Transformation Info pass. ImmutablePass *createHexagonTargetTransformInfoPass(const HexagonTargetMachine *TM); } // end namespace llvm; diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp index a7404fbb18f..62b2e892b70 100644 --- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -713,7 +713,7 @@ void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) { } } -// \brief Starting at a given branch, visit remaining branches in the block. +// Starting at a given branch, visit remaining branches in the block. // Traverse over the subsequent branches for as long as the preceding one // can fall through. Add all the possible targets to the flow work queue, // including the potential fall-through to the layout-successor block. diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp index a6b9bcb405d..e9067e2285a 100644 --- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -60,12 +60,12 @@ namespace { } private: - /// \brief Check the offset between each loop instruction and + /// Check the offset between each loop instruction and /// the loop basic block to determine if we can use the LOOP instruction /// or if we need to set the LC/SA registers explicitly. bool fixupLoopInstrs(MachineFunction &MF); - /// \brief Replace loop instruction with the constant extended + /// Replace loop instruction with the constant extended /// version if the loop label is too far from the loop instruction. void useExtLoopInstr(MachineFunction &MF, MachineBasicBlock::iterator &MII); @@ -81,7 +81,7 @@ FunctionPass *llvm::createHexagonFixupHwLoops() { return new HexagonFixupHwLoops(); } -/// \brief Returns true if the instruction is a hardware loop instruction. +/// Returns true if the instruction is a hardware loop instruction. 
static bool isHardwareLoop(const MachineInstr &MI) { return MI.getOpcode() == Hexagon::J2_loop0r || MI.getOpcode() == Hexagon::J2_loop0i || @@ -95,7 +95,7 @@ bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) { return fixupLoopInstrs(MF); } -/// \brief For Hexagon, if the loop label is to far from the +/// For Hexagon, if the loop label is to far from the /// loop instruction then we need to set the LC0 and SA0 registers /// explicitly instead of using LOOP(start,count). This function /// checks the distance, and generates register assignments if needed. @@ -166,7 +166,7 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) { return Changed; } -/// \brief Replace loop instructions with the constant extended version. +/// Replace loop instructions with the constant extended version. void HexagonFixupHwLoops::useExtLoopInstr(MachineFunction &MF, MachineBasicBlock::iterator &MII) { const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 0703606b7a7..e3d5825b269 100644 --- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -168,7 +168,7 @@ namespace { } }; - /// \brief Find the register that contains the loop controlling + /// Find the register that contains the loop controlling /// induction variable. /// If successful, it will return true and set the \p Reg, \p IVBump /// and \p IVOp arguments. Otherwise it will return false. @@ -183,19 +183,19 @@ namespace { bool findInductionRegister(MachineLoop *L, unsigned &Reg, int64_t &IVBump, MachineInstr *&IVOp) const; - /// \brief Return the comparison kind for the specified opcode. + /// Return the comparison kind for the specified opcode. Comparison::Kind getComparisonKind(unsigned CondOpc, MachineOperand *InitialValue, const MachineOperand *Endvalue, int64_t IVBump) const; - /// \brief Analyze the statements in a loop to determine if the loop + /// Analyze the statements in a loop to determine if the loop /// has a computable trip count and, if so, return a value that represents /// the trip count expression. CountValue *getLoopTripCount(MachineLoop *L, SmallVectorImpl<MachineInstr *> &OldInsts); - /// \brief Return the expression that represents the number of times + /// Return the expression that represents the number of times /// a loop iterates. The function takes the operands that represent the /// loop start value, loop end value, and induction value. Based upon /// these operands, the function attempts to compute the trip count. @@ -206,64 +206,64 @@ namespace { const MachineOperand *End, unsigned IVReg, int64_t IVBump, Comparison::Kind Cmp) const; - /// \brief Return true if the instruction is not valid within a hardware + /// Return true if the instruction is not valid within a hardware /// loop. bool isInvalidLoopOperation(const MachineInstr *MI, bool IsInnerHWLoop) const; - /// \brief Return true if the loop contains an instruction that inhibits + /// Return true if the loop contains an instruction that inhibits /// using the hardware loop. bool containsInvalidInstruction(MachineLoop *L, bool IsInnerHWLoop) const; - /// \brief Given a loop, check if we can convert it to a hardware loop. + /// Given a loop, check if we can convert it to a hardware loop. /// If so, then perform the conversion and return true. 
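[Editor's note] getLoopTripCount and computeCount, described in the HexagonHardwareLoops hunks above, turn a counted loop's start value, end value, and induction bump into a trip-count expression. For the simplest fully-constant case the arithmetic reduces to a rounded-up division; a minimal sketch under those assumptions (forward loop, "i < End" comparison, positive bump; the real code also handles register operands, other comparison kinds, and overflow):

    #include <cstdint>

    // Trip count of "for (i = Start; i < End; i += Bump)" with Bump > 0.
    int64_t tripCount(int64_t Start, int64_t End, int64_t Bump) {
      if (End <= Start)
        return 0;                              // Loop body never runs.
      return (End - Start + Bump - 1) / Bump;  // Round up to whole iterations.
    }
    // e.g. tripCount(0, 10, 3) == 4: i takes the values 0, 3, 6, 9.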
bool convertToHardwareLoop(MachineLoop *L, bool &L0used, bool &L1used); - /// \brief Return true if the instruction is now dead. + /// Return true if the instruction is now dead. bool isDead(const MachineInstr *MI, SmallVectorImpl<MachineInstr *> &DeadPhis) const; - /// \brief Remove the instruction if it is now dead. + /// Remove the instruction if it is now dead. void removeIfDead(MachineInstr *MI); - /// \brief Make sure that the "bump" instruction executes before the + /// Make sure that the "bump" instruction executes before the /// compare. We need that for the IV fixup, so that the compare /// instruction would not use a bumped value that has not yet been /// defined. If the instructions are out of order, try to reorder them. bool orderBumpCompare(MachineInstr *BumpI, MachineInstr *CmpI); - /// \brief Return true if MO and MI pair is visited only once. If visited + /// Return true if MO and MI pair is visited only once. If visited /// more than once, this indicates there is recursion. In such a case, /// return false. bool isLoopFeeder(MachineLoop *L, MachineBasicBlock *A, MachineInstr *MI, const MachineOperand *MO, LoopFeederMap &LoopFeederPhi) const; - /// \brief Return true if the Phi may generate a value that may underflow, + /// Return true if the Phi may generate a value that may underflow, /// or may wrap. bool phiMayWrapOrUnderflow(MachineInstr *Phi, const MachineOperand *EndVal, MachineBasicBlock *MBB, MachineLoop *L, LoopFeederMap &LoopFeederPhi) const; - /// \brief Return true if the induction variable may underflow an unsigned + /// Return true if the induction variable may underflow an unsigned /// value in the first iteration. bool loopCountMayWrapOrUnderFlow(const MachineOperand *InitVal, const MachineOperand *EndVal, MachineBasicBlock *MBB, MachineLoop *L, LoopFeederMap &LoopFeederPhi) const; - /// \brief Check if the given operand has a compile-time known constant + /// Check if the given operand has a compile-time known constant /// value. Return true if yes, and false otherwise. When returning true, set /// Val to the corresponding constant value. bool checkForImmediate(const MachineOperand &MO, int64_t &Val) const; - /// \brief Check if the operand has a compile-time known constant value. + /// Check if the operand has a compile-time known constant value. bool isImmediate(const MachineOperand &MO) const { int64_t V; return checkForImmediate(MO, V); } - /// \brief Return the immediate for the specified operand. + /// Return the immediate for the specified operand. int64_t getImmediate(const MachineOperand &MO) const { int64_t V; if (!checkForImmediate(MO, V)) @@ -271,12 +271,12 @@ namespace { return V; } - /// \brief Reset the given machine operand to now refer to a new immediate + /// Reset the given machine operand to now refer to a new immediate /// value. Assumes that the operand was already referencing an immediate /// value, either directly, or via a register. void setImmediate(MachineOperand &MO, int64_t Val); - /// \brief Fix the data flow of the induction variable. + /// Fix the data flow of the induction variable. /// The desired flow is: phi ---> bump -+-> comparison-in-latch. /// | /// +-> back to phi @@ -297,7 +297,7 @@ namespace { /// cannot be adjusted to reflect the post-bump value. bool fixupInductionVariable(MachineLoop *L); - /// \brief Given a loop, if it does not have a preheader, create one. + /// Given a loop, if it does not have a preheader, create one. /// Return the block that is the preheader. 
MachineBasicBlock *createPreheaderForLoop(MachineLoop *L); }; @@ -307,7 +307,7 @@ namespace { int HexagonHardwareLoops::Counter = 0; #endif - /// \brief Abstraction for a trip count of a loop. A smaller version + /// Abstraction for a trip count of a loop. A smaller version /// of the MachineOperand class without the concerns of changing the /// operand representation. class CountValue { @@ -556,7 +556,7 @@ HexagonHardwareLoops::getComparisonKind(unsigned CondOpc, return Cmp; } -/// \brief Analyze the statements in a loop to determine if the loop has +/// Analyze the statements in a loop to determine if the loop has /// a computable trip count and, if so, return a value that represents /// the trip count expression. /// @@ -718,7 +718,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L, return computeCount(L, InitialValue, EndValue, IVReg, IVBump, Cmp); } -/// \brief Helper function that returns the expression that represents the +/// Helper function that returns the expression that represents the /// number of times a loop iterates. The function takes the operands that /// represent the loop start value, loop end value, and induction value. /// Based upon these operands, the function attempts to compute the trip count. @@ -985,7 +985,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, return new CountValue(CountValue::CV_Register, CountR, CountSR); } -/// \brief Return true if the operation is invalid within hardware loop. +/// Return true if the operation is invalid within hardware loop. bool HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI, bool IsInnerHWLoop) const { // Call is not allowed because the callee may use a hardware loop except for @@ -1007,7 +1007,7 @@ bool HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI, return false; } -/// \brief Return true if the loop contains an instruction that inhibits +/// Return true if the loop contains an instruction that inhibits /// the use of the hardware loop instruction. bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L, bool IsInnerHWLoop) const { @@ -1027,7 +1027,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L, return false; } -/// \brief Returns true if the instruction is dead. This was essentially +/// Returns true if the instruction is dead. This was essentially /// copied from DeadMachineInstructionElim::isDead, but with special cases /// for inline asm, physical registers and instructions with side effects /// removed. @@ -1113,7 +1113,7 @@ void HexagonHardwareLoops::removeIfDead(MachineInstr *MI) { } } -/// \brief Check if the loop is a candidate for converting to a hardware +/// Check if the loop is a candidate for converting to a hardware /// loop. If so, then perform the transformation. /// /// This function works on innermost loops first. 
A loop can be converted diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 3f3585ca31a..e639d13bd2b 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1593,7 +1593,7 @@ static bool isOpcodeHandled(const SDNode *N) { } } -/// \brief Return the weight of an SDNode +/// Return the weight of an SDNode int HexagonDAGToDAGISel::getWeight(SDNode *N) { if (!isOpcodeHandled(N)) return 1; diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp index 5f6fa7e7b7d..78e82579c47 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1694,7 +1694,7 @@ HexagonInstrInfo::CreateTargetPostRAHazardRecognizer( return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG); } -/// \brief For a comparison instruction, return the source registers in +/// For a comparison instruction, return the source registers in /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. @@ -2871,7 +2871,7 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1, return false; } -/// \brief Get the base register and byte offset of a load/store instr. +/// Get the base register and byte offset of a load/store instr. bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI) const { @@ -2882,7 +2882,7 @@ bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, return BaseReg != 0; } -/// \brief Can these instructions execute at the same time in a bundle. +/// Can these instructions execute at the same time in a bundle. bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First, const MachineInstr &Second) const { if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) { diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h index 02d7ad92351..96b4ffaba02 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h @@ -215,7 +215,7 @@ public: /// anything was changed. bool expandPostRAPseudo(MachineInstr &MI) const override; - /// \brief Get the base register and byte offset of a load/store instr. + /// Get the base register and byte offset of a load/store instr. bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI) const override; diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp index f240a59dfe9..625e2e93510 100644 --- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -2295,7 +2295,7 @@ CleanupAndExit: return true; } -// \brief Check if the instructions in Insts, together with their dependencies +// Check if the instructions in Insts, together with their dependencies // cover the loop in the sense that the loop could be safely eliminated once // the instructions in Insts are removed. 
bool HexagonLoopIdiomRecognize::coverLoop(Loop *L, diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp index 1abf27fbd61..a3686c89d61 100644 --- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -96,7 +96,7 @@ namespace { const HexagonInstrInfo *QII; const HexagonRegisterInfo *QRI; - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. const MachineBranchProbabilityInfo *MBPI; bool isNewValueJumpCandidate(const MachineInstr &MI) const; diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp index 1c5c7a97b81..781d3887959 100644 --- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -322,7 +322,7 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) { } } -/// \brief Enable use of alias analysis during code generation (during MI +/// Enable use of alias analysis during code generation (during MI /// scheduling, DAGCombine, etc.). bool HexagonSubtarget::useAA() const { if (OptLevel != CodeGenOpt::None) @@ -330,7 +330,7 @@ bool HexagonSubtarget::useAA() const { return false; } -/// \brief Perform target specific adjustments to the latency of a schedule +/// Perform target specific adjustments to the latency of a schedule /// dependency. void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst, SDep &Dep) const { diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h index 9076b1d7fd6..fd564f23067 100644 --- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h +++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h @@ -195,11 +195,11 @@ public: std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const override; - /// \brief Enable use of alias analysis during code generation (during MI + /// Enable use of alias analysis during code generation (during MI /// scheduling, DAGCombine, etc.). bool useAA() const override; - /// \brief Perform target specific adjustments to the latency of a schedule + /// Perform target specific adjustments to the latency of a schedule /// dependency. void adjustSchedDependency(SUnit *def, SUnit *use, SDep& dep) const override; diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h index 764d9ae9059..40dcee3441a 100644 --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h @@ -59,7 +59,7 @@ class HexagonPacketizerList : public VLIWPacketizerList { bool PacketStalls = false; protected: - /// \brief A handle to the branch probability pass. + /// A handle to the branch probability pass. 
const MachineBranchProbabilityInfo *MBPI; const MachineLoopInfo *MLI; diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h index 14cabf1534a..be682e571f2 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Definition for classes that emit Hexagon machine code from MCInsts +/// Definition for classes that emit Hexagon machine code from MCInsts /// //===----------------------------------------------------------------------===// @@ -64,13 +64,13 @@ public: const MCSubtargetInfo &STI, uint32_t Parse) const; - // \brief TableGen'erated function for getting the + // TableGen'erated function for getting the // binary encoding for an instruction. uint64_t getBinaryCodeForInstr(MCInst const &MI, SmallVectorImpl<MCFixup> &Fixups, MCSubtargetInfo const &STI) const; - /// \brief Return binary encoding of operand. + /// Return binary encoding of operand. unsigned getMachineOpValue(MCInst const &MI, MCOperand const &MO, SmallVectorImpl<MCFixup> &Fixups, MCSubtargetInfo const &STI) const; diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp index 870614ac59b..7685555a1e7 100644 --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the MIPS-specific support for the FastISel class. +/// This file defines the MIPS-specific support for the FastISel class. /// Some of the target-specific code is generated by tablegen in the file /// MipsGenFastISel.inc, which is #included here. /// diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h index 20bdd4aa8f5..5a871b5930a 100644 --- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h +++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h @@ -93,34 +93,34 @@ private: virtual bool selectAddr16(SDValue Addr, SDValue &Base, SDValue &Offset); virtual bool selectAddr16SP(SDValue Addr, SDValue &Base, SDValue &Offset); - /// \brief Select constant vector splats. + /// Select constant vector splats. virtual bool selectVSplat(SDNode *N, APInt &Imm, unsigned MinSizeInBits) const; - /// \brief Select constant vector splats whose value fits in a uimm1. + /// Select constant vector splats whose value fits in a uimm1. virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm2. + /// Select constant vector splats whose value fits in a uimm2. virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm3. + /// Select constant vector splats whose value fits in a uimm3. virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm4. + /// Select constant vector splats whose value fits in a uimm4. virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm5. + /// Select constant vector splats whose value fits in a uimm5. virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm6. 
+ /// Select constant vector splats whose value fits in a uimm6. virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a uimm8. + /// Select constant vector splats whose value fits in a uimm8. virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value fits in a simm5. + /// Select constant vector splats whose value fits in a simm5. virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value is a power of 2. + /// Select constant vector splats whose value is a power of 2. virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value is the inverse of a + /// Select constant vector splats whose value is the inverse of a /// power of 2. virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value is a run of set bits + /// Select constant vector splats whose value is a run of set bits /// ending at the most significant bit. virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const; - /// \brief Select constant vector splats whose value is a run of set bits + /// Select constant vector splats whose value is a run of set bits /// starting at bit zero. virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const; diff --git a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp index 72fc50a0359..27bc4843f41 100644 --- a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp +++ b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp @@ -90,10 +90,10 @@ public: } private: - /// \brief Visit MBB. + /// Visit MBB. bool visitNode(MBBInfo &MBBI); - /// \brief Test if MI jumps to a function via a register. + /// Test if MI jumps to a function via a register. /// /// Also, return the virtual register containing the target function's address /// and the underlying object in Reg and Val respectively, if the function's @@ -101,15 +101,15 @@ private: bool isCallViaRegister(MachineInstr &MI, unsigned &Reg, ValueType &Val) const; - /// \brief Return the number of instructions that dominate the current + /// Return the number of instructions that dominate the current /// instruction and load the function address from object Entry. unsigned getCount(ValueType Entry); - /// \brief Return the destination virtual register of the last instruction + /// Return the destination virtual register of the last instruction /// that loads from object Entry. unsigned getReg(ValueType Entry); - /// \brief Update ScopedHT. + /// Update ScopedHT. void incCntAndSetReg(ValueType Entry, unsigned Reg); ScopedHTType ScopedHT; diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.h b/llvm/lib/Target/Mips/MipsRegisterInfo.h index 53c42bccaf2..4cc50fb981b 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.h +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.h @@ -74,7 +74,7 @@ public: /// Debug information queries. unsigned getFrameRegister(const MachineFunction &MF) const override; - /// \brief Return GPR register class. + /// Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0; private: diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h index 6f38289c5a4..eb3657aae05 100644 --- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h +++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.h @@ -93,37 +93,37 @@ private: bool selectIntAddrSImm10Lsl3(SDValue Addr, SDValue &Base, SDValue &Offset) const override; - /// \brief Select constant vector splats. + /// Select constant vector splats. bool selectVSplat(SDNode *N, APInt &Imm, unsigned MinSizeInBits) const override; - /// \brief Select constant vector splats whose value fits in a given integer. + /// Select constant vector splats whose value fits in a given integer. bool selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed, unsigned ImmBitSize) const; - /// \brief Select constant vector splats whose value fits in a uimm1. + /// Select constant vector splats whose value fits in a uimm1. bool selectVSplatUimm1(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm2. + /// Select constant vector splats whose value fits in a uimm2. bool selectVSplatUimm2(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm3. + /// Select constant vector splats whose value fits in a uimm3. bool selectVSplatUimm3(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm4. + /// Select constant vector splats whose value fits in a uimm4. bool selectVSplatUimm4(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm5. + /// Select constant vector splats whose value fits in a uimm5. bool selectVSplatUimm5(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm6. + /// Select constant vector splats whose value fits in a uimm6. bool selectVSplatUimm6(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a uimm8. + /// Select constant vector splats whose value fits in a uimm8. bool selectVSplatUimm8(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value fits in a simm5. + /// Select constant vector splats whose value fits in a simm5. bool selectVSplatSimm5(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value is a power of 2. + /// Select constant vector splats whose value is a power of 2. bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value is the inverse of a + /// Select constant vector splats whose value is the inverse of a /// power of 2. bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value is a run of set bits + /// Select constant vector splats whose value is a run of set bits /// ending at the most significant bit. bool selectVSplatMaskL(SDValue N, SDValue &Imm) const override; - /// \brief Select constant vector splats whose value is a run of set bits + /// Select constant vector splats whose value is a run of set bits /// starting at bit zero.
bool selectVSplatMaskR(SDValue N, SDValue &Imm) const override; diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index 885c24fb35e..84367b240a6 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -2348,7 +2348,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op, } } -/// \brief Check if the given BuildVectorSDNode is a splat. +/// Check if the given BuildVectorSDNode is a splat. /// This method currently relies on DAG nodes being reused when equivalent, /// so it's possible for this to return false even when isConstantSplat returns /// true. diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.h b/llvm/lib/Target/Mips/MipsSEISelLowering.h index be0b9f5354e..761ff3b1fa4 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.h +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.h @@ -32,11 +32,11 @@ class TargetRegisterClass; explicit MipsSETargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI); - /// \brief Enable MSA support for the given integer type and Register + /// Enable MSA support for the given integer type and Register /// class. void addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC); - /// \brief Enable MSA support for the given floating-point type and + /// Enable MSA support for the given floating-point type and /// Register class. void addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC); @@ -82,7 +82,7 @@ class TargetRegisterClass; SDValue lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const; SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; - /// \brief Lower VECTOR_SHUFFLE into one of a number of instructions + /// Lower VECTOR_SHUFFLE into one of a number of instructions /// depending on the indices in the shuffle. SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const; @@ -92,46 +92,46 @@ class TargetRegisterClass; MachineBasicBlock *emitMSACBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, unsigned BranchOp) const; - /// \brief Emit the COPY_FW pseudo instruction + /// Emit the COPY_FW pseudo instruction MachineBasicBlock *emitCOPY_FW(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the COPY_FD pseudo instruction + /// Emit the COPY_FD pseudo instruction MachineBasicBlock *emitCOPY_FD(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the INSERT_FW pseudo instruction + /// Emit the INSERT_FW pseudo instruction MachineBasicBlock *emitINSERT_FW(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the INSERT_FD pseudo instruction + /// Emit the INSERT_FD pseudo instruction MachineBasicBlock *emitINSERT_FD(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction + /// Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction MachineBasicBlock *emitINSERT_DF_VIDX(MachineInstr &MI, MachineBasicBlock *BB, unsigned EltSizeInBytes, bool IsFP) const; - /// \brief Emit the FILL_FW pseudo instruction + /// Emit the FILL_FW pseudo instruction MachineBasicBlock *emitFILL_FW(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FILL_FD pseudo instruction + /// Emit the FILL_FD pseudo instruction MachineBasicBlock *emitFILL_FD(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FEXP2_W_1 pseudo instructions. 
+ /// Emit the FEXP2_W_1 pseudo instructions. MachineBasicBlock *emitFEXP2_W_1(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FEXP2_D_1 pseudo instructions. + /// Emit the FEXP2_D_1 pseudo instructions. MachineBasicBlock *emitFEXP2_D_1(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FILL_FW pseudo instruction + /// Emit the LD_F16 pseudo instruction MachineBasicBlock *emitLD_F16_PSEUDO(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FILL_FD pseudo instruction + /// Emit the ST_F16 pseudo instruction MachineBasicBlock *emitST_F16_PSEUDO(MachineInstr &MI, MachineBasicBlock *BB) const; - /// \brief Emit the FEXP2_W_1 pseudo instructions. + /// Emit the FPEXTEND pseudo instructions. MachineBasicBlock *emitFPEXTEND_PSEUDO(MachineInstr &MI, MachineBasicBlock *BB, bool IsFGR64) const; - /// \brief Emit the FEXP2_D_1 pseudo instructions. + /// Emit the FPROUND pseudo instructions. MachineBasicBlock *emitFPROUND_PSEUDO(MachineInstr &MI, MachineBasicBlock *BBi, bool IsFGR64) const; diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.h b/llvm/lib/Target/Mips/MipsTargetMachine.h index 56e6e5d8daa..d9b73d15111 100644 --- a/llvm/lib/Target/Mips/MipsTargetMachine.h +++ b/llvm/lib/Target/Mips/MipsTargetMachine.h @@ -54,7 +54,7 @@ public: const MipsSubtarget *getSubtargetImpl(const Function &F) const override; - /// \brief Reset the subtarget for the Mips target. + /// Reset the subtarget for the Mips target. void resetSubtarget(MachineFunction *MF); // Pass Pipeline Configuration diff --git a/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp b/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp index f02c33f9249..41e9ae82718 100644 --- a/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp @@ -28,7 +28,7 @@ using namespace llvm; namespace { -/// \brief NVPTXAssignValidGlobalNames +/// NVPTXAssignValidGlobalNames class NVPTXAssignValidGlobalNames : public ModulePass { public: static char ID; @@ -36,7 +36,7 @@ public: bool runOnModule(Module &M) override; - /// \brief Clean up the name to remove symbols invalid in PTX. + /// Clean up the name to remove symbols invalid in PTX.
std::string cleanUpName(StringRef Name); }; } diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.h b/llvm/lib/Target/PowerPC/PPCFrameLowering.h index f845d5a9ac6..01c155594c4 100644 --- a/llvm/lib/Target/PowerPC/PPCFrameLowering.h +++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.h @@ -30,7 +30,7 @@ class PPCFrameLowering: public TargetFrameLowering { const unsigned BasePointerSaveOffset; /** - * \brief Find register[s] that can be used in function prologue and epilogue + * Find register[s] that can be used in function prologue and epilogue * * Find register[s] that can be used as scratch register[s] in function * prologue and epilogue to save various registers (Link Register, Base @@ -67,7 +67,7 @@ class PPCFrameLowering: public TargetFrameLowering { bool twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const; /** - * \brief Create branch instruction for PPC::TCRETURN* (tail call return) + * Create branch instruction for PPC::TCRETURN* (tail call return) * * \param[in] MBB that is terminated by PPC::TCRETURN* */ diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index f7948ccb723..83ed349b46f 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1477,7 +1477,7 @@ bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, } /** - * \brief Common function used to match vmrgew and vmrgow shuffles + * Common function used to match vmrgew and vmrgow shuffles * * The indexOffset determines whether to look for even or odd words in * the shuffle mask. This is based on the endianness of the target @@ -1534,7 +1534,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, } /** - * \brief Determine if the specified shuffle mask is suitable for the vmrgew or + * Determine if the specified shuffle mask is suitable for the vmrgew or * vmrgow instructions. * * \param[in] N The shuffle vector SD Node to analyze @@ -6887,7 +6887,7 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, RLI.MPI = MPI; } -/// \brief Custom lowers floating point to integer conversions to use +/// Custom lowers floating point to integer conversions to use /// the direct move instructions available in ISA 2.07 to avoid the /// need for load/store combinations. SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, @@ -7045,7 +7045,7 @@ void PPCTargetLowering::spliceIntoChain(SDValue ResChain, DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain); } -/// \brief Analyze profitability of direct move +/// Analyze profitability of direct move /// prefer float load to int load plus direct move /// when there is no integer use of int load bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { @@ -7075,7 +7075,7 @@ bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const { return false; } -/// \brief Custom lowers integer to floating point conversions to use +/// Custom lowers integer to floating point conversions to use /// the direct move instructions available in ISA 2.07 to avoid the /// need for load/store combinations. SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, @@ -11611,7 +11611,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, ShiftCst); } -/// \brief Reduces the number of fp-to-int conversion when building a vector.
+/// Reduces the number of fp-to-int conversions when building a vector. /// /// If this vector is built out of floating to integer conversions, /// transform it to a vector built out of floating point values followed by a @@ -11691,7 +11691,7 @@ combineElementTruncationToVectorTruncation(SDNode *N, return SDValue(); } -/// \brief Reduce the number of loads when building a vector. +/// Reduce the number of loads when building a vector. /// /// Building a vector out of multiple loads can be converted to a load /// of the vector type if the loads are consecutive. If the loads are @@ -13643,7 +13643,7 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, return MVT::i32; } -/// \brief Returns true if it is beneficial to convert a load of a constant +/// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const { diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h index 76f8d72136c..b4373c1dfea 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -765,7 +765,7 @@ namespace llvm { bool isFPExtFree(EVT DestVT, EVT SrcVT) const override; - /// \brief Returns true if it is beneficial to convert a load of a constant + /// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; @@ -822,7 +822,7 @@ namespace llvm { FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override; - /// \brief Returns true if an argument of type Ty needs to be passed in a + /// Returns true if an argument of type Ty needs to be passed in a /// contiguous block of registers in calling convention CallConv. bool functionArgumentNeedsConsecutiveRegisters( Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override { diff --git a/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h b/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h index c8b9b2e9790..417b8ed0d61 100644 --- a/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h +++ b/llvm/lib/Target/PowerPC/PPCTargetObjectFile.h @@ -25,7 +25,7 @@ namespace llvm { MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override; - /// \brief Describe a TLS variable address within debug info. + /// Describe a TLS variable address within debug info. const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override; }; diff --git a/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp b/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp index d02db9a617a..67c80899d49 100644 --- a/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp @@ -55,7 +55,7 @@ char SystemZExpandPseudo::ID = 0; INITIALIZE_PASS(SystemZExpandPseudo, "systemz-expand-pseudo", SYSTEMZ_EXPAND_PSEUDO_NAME, false, false) -/// \brief Returns an instance of the pseudo instruction expansion pass. +/// Returns an instance of the pseudo instruction expansion pass. FunctionPass *llvm::createSystemZExpandPseudoPass(SystemZTargetMachine &TM) { return new SystemZExpandPseudo(); } @@ -112,7 +112,7 @@ bool SystemZExpandPseudo::expandLOCRMux(MachineBasicBlock &MBB, return true; } -/// \brief If MBBI references a pseudo instruction that should be expanded here, +/// If MBBI references a pseudo instruction that should be expanded here, /// do the expansion and return true. Otherwise return false.
bool SystemZExpandPseudo::expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, @@ -127,7 +127,7 @@ bool SystemZExpandPseudo::expandMI(MachineBasicBlock &MBB, return false; } -/// \brief Iterate over the instructions in basic block MBB and expand any +/// Iterate over the instructions in basic block MBB and expand any /// pseudo instructions. Return true if anything was modified. bool SystemZExpandPseudo::expandMBB(MachineBasicBlock &MBB) { bool Modified = false; diff --git a/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h index d64f7766201..94781659a50 100644 --- a/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h +++ b/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h @@ -77,7 +77,7 @@ public: int SPAdj, unsigned FIOperandNum, RegScavenger *RS) const override; - /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true. + /// SrcRC and DstRC will be morphed into NewRC if this returns true. bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp index 08b3fa5cbf3..092f5ea4104 100644 --- a/llvm/lib/Target/TargetMachine.cpp +++ b/llvm/lib/Target/TargetMachine.cpp @@ -51,7 +51,7 @@ bool TargetMachine::isPositionIndependent() const { return getRelocationModel() == Reloc::PIC_; } -/// \brief Reset the target options based on the function's attributes. +/// Reset the target options based on the function's attributes. // FIXME: This function needs to go away for a number of reasons: // a) global state on the TargetMachine is terrible in general, // b) these target options should be passed only on the function diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp index 9d193f09d5a..2d92b93ca70 100644 --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file is part of the WebAssembly Assembler. +/// This file is part of the WebAssembly Assembler. /// /// It contains code to translate a parsed .s file into MCInsts. /// diff --git a/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp index 9be11da9afa..f75832fdd9e 100644 --- a/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp +++ b/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file is part of the WebAssembly Disassembler. +/// This file is part of the WebAssembly Disassembler. /// /// It contains code to translate the data produced by the decoder into /// MCInsts. diff --git a/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp index 74a166a4cc3..6629ce6cda0 100644 --- a/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp +++ b/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Print MCInst instructions to wasm format. +/// Print MCInst instructions to wasm format. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h index 533e37fb418..f5b890a7615 100644 --- a/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h +++ b/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This class prints an WebAssembly MCInst to wasm file syntax. +/// This class prints a WebAssembly MCInst to wasm file syntax. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp index 226a3b35f2c..9c3a72a1681 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyAsmBackend class. +/// This file implements the WebAssemblyAsmBackend class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp index b67ecfa455b..3bbc8493afc 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file handles ELF-specific object emission, converting LLVM's +/// This file handles ELF-specific object emission, converting LLVM's /// internal fixups into the appropriate relocations. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp index c00dc19019c..ee0d3704d75 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declarations of the WebAssemblyMCAsmInfo +/// This file contains the declarations of the WebAssemblyMCAsmInfo /// properties. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h index d9547096190..b22946aa4a1 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the WebAssemblyMCAsmInfo class. +/// This file contains the declaration of the WebAssemblyMCAsmInfo class.
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp index 440b9a5ad10..2efac06e5be 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyMCCodeEmitter class. +/// This file implements the WebAssemblyMCCodeEmitter class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp index a51fb9282fe..bc5734e0eb8 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides WebAssembly-specific target descriptions. +/// This file provides WebAssembly-specific target descriptions. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h index 364aac3974b..dd633a76282 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides WebAssembly-specific target descriptions. +/// This file provides WebAssembly-specific target descriptions. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp index cab14e9b47b..128394f2632 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines WebAssembly-specific target streamer classes. +/// This file defines WebAssembly-specific target streamer classes. /// These are for implementing support for target-specific assembly directives. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h index 165e71ad99f..8382baed667 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares WebAssembly-specific target streamer classes. +/// This file declares WebAssembly-specific target streamer classes. /// These are for implementing support for target-specific assembly directives. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp index ab0125e7c28..9a01792991b 100644 --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file handles Wasm-specific object emission, converting LLVM's +/// This file handles Wasm-specific object emission, converting LLVM's /// internal fixups into the appropriate relocations. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp b/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp index a2c03b1a040..f7a417c0ed4 100644 --- a/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp +++ b/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file registers the WebAssembly target. +/// This file registers the WebAssembly target. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.h b/llvm/lib/Target/WebAssembly/WebAssembly.h index 4a3ef59dbc6..10a0a606d2b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssembly.h +++ b/llvm/lib/Target/WebAssembly/WebAssembly.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the entry points for global functions defined in +/// This file contains the entry points for global functions defined in /// the LLVM WebAssembly back-end. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.td b/llvm/lib/Target/WebAssembly/WebAssembly.td index 33a4cd33f8d..ad1549f8d9d 100644 --- a/llvm/lib/Target/WebAssembly/WebAssembly.td +++ b/llvm/lib/Target/WebAssembly/WebAssembly.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This is a target description file for the WebAssembly architecture, +/// This is a target description file for the WebAssembly architecture, /// which is also known as "wasm". /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp index aaa1e4eee3f..3c17b2ec0f6 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file moves ARGUMENT instructions after ScheduleDAG scheduling. +/// This file moves ARGUMENT instructions after ScheduleDAG scheduling. 
/// /// Arguments are really live-in registers, however, since we use virtual /// registers and LLVM doesn't support live-in virtual registers, we're diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp index e8144add3f7..a224ae7316a 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains a printer that converts from our internal +/// This file contains a printer that converts from our internal /// representation of machine-dependent LLVM code to the WebAssembly assembly /// language. /// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp index 88c3cf63b62..b2607f27142 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a CFG sorting pass. +/// This file implements a CFG sorting pass. /// /// This pass reorders the blocks in a function to put them into topological /// order, ignoring loop backedges, and without any loop being interrupted diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp index 9e0a945194b..cf1bbfa5e18 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a CFG stacking pass. +/// This file implements a CFG stacking pass. /// /// This pass inserts BLOCK and LOOP markers to mark the start of scopes, since /// scope boundaries serve as the labels for WebAssembly's control transfers. diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp index 8c45e862536..03bfe24e30b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file converts pseudo call_indirect instructions into real +/// This file converts pseudo call_indirect instructions into real /// call_indirects. /// /// The order of arguments for a call_indirect is the arguments to the function diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp index bafc34f3753..cf14643627a 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file converts any remaining registers into WebAssembly locals. +/// This file converts any remaining registers into WebAssembly locals. 
/// /// After register stackification and register coloring, convert non-stackified /// registers into locals, inserting explicit get_local and set_local diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index c13dd7a48a7..fe821ced672 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific support for the FastISel +/// This file defines the WebAssembly-specific support for the FastISel /// class. Some of the target-specific code is generated by tablegen in the file /// WebAssemblyGenFastISel.inc, which is #included here. /// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp index 444eb3179f0..d5e47ee8251 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Fix bitcasted functions. +/// Fix bitcasted functions. /// /// WebAssembly requires caller and callee signatures to match; however, in LLVM, /// some amount of slop is vaguely permitted. Detect mismatch by looking for diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp index c6d374c1a5a..c710c6972a6 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass that transforms irreducible control flow +/// This file implements a pass that transforms irreducible control flow /// into reducible control flow. Irreducible control flow means multiple-entry /// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo /// due to being unnatural. diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp index 84246052f60..22d7aa1107e 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of +/// This file contains the WebAssembly implementation of the /// TargetFrameLowering class. /// /// On WebAssembly, there aren't a lot of things to do here. There are no diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h index 4cc7f5ae058..fe23e418a3f 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This class implements WebAssembly-specific bits of the +/// This class implements WebAssembly-specific bits of the /// TargetFrameLowering class.
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/llvm/lib/Target/WebAssembly/WebAssemblyISD.def index 2f0f106ef5b..c12550feabb 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISD.def +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISD.def @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file describes the various WebAssembly ISD node types. +/// This file describes the various WebAssembly ISD node types. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp index 45263ccedf6..d22dda7546e 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines an instruction selector for the WebAssembly target. +/// This file defines an instruction selector for the WebAssembly target. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index d0b3ad37119..d7d49e039c3 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyTargetLowering class. +/// This file implements the WebAssemblyTargetLowering class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h index 7bb8e71ab97..3e9759eece8 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the interfaces that WebAssembly uses to lower LLVM +/// This file defines the interfaces that WebAssembly uses to lower LLVM /// code into a selection DAG. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td index a49172df158..9f6cfa37356 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Atomic operand code-gen constructs. +/// WebAssembly Atomic operand code-gen constructs. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td index 8874fe5e8b1..eb529e36385 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Call operand code-gen constructs. +/// WebAssembly Call operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td index 18ffba7fe82..c667f931721 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly control-flow code-gen constructs. +/// WebAssembly control-flow code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td index bf1282b5edf..90e278ba7a0 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly datatype conversions, truncations, reinterpretations, +/// WebAssembly datatype conversions, truncations, reinterpretations, /// promotions, and demotions operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td index 921e7ab04fe..80f7c7aaa59 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly except_ref operand code-gen constructs. +/// WebAssembly except_ref operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td index 03c9c1f8d5c..aff96da0919 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Floating-point operand code-gen constructs. +/// WebAssembly Floating-point operand code-gen constructs. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td index 4f41fcc232e..68d44b8e885 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly instruction format definitions. +/// WebAssembly instruction format definitions. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp index 8846952e5af..739d6cf6253 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetInstrInfo class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h index eb74106336e..4a3763c345b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetInstrInfo class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td index 64695fb1e50..1336565cfe0 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Instruction definitions. +/// WebAssembly Instruction definitions. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td index e872dc21984..2b5b0795087 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Integer operand code-gen constructs. +/// WebAssembly Integer operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td index aa13e41f92e..275c0ff57c5 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Memory operand code-gen constructs. 
+/// WebAssembly Memory operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index e403534d580..7d1edccdeb3 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly SIMD operand code-gen constructs. +/// WebAssembly SIMD operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp index f951404a0dc..adb8f6c7c73 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file lowers br_unless into br_if with an inverted condition. +/// This file lowers br_unless into br_if with an inverted condition. /// /// br_unless is not currently in the spec, but it's very convenient for LLVM /// to use. This pass allows LLVM to use it, for now. diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index f0b6a3e35db..06c6dc69d0c 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file lowers exception-related instructions and setjmp/longjmp +/// This file lowers exception-related instructions and setjmp/longjmp /// function calls in order to use Emscripten's JavaScript try and catch /// mechanism. /// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp index bcd648a71b4..ee708d637b2 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Lower @llvm.global_dtors. +/// Lower @llvm.global_dtors. /// /// WebAssembly doesn't have a builtin way to invoke static destructors. /// Implement @llvm.global_dtors by creating wrapper functions that are diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp index ce79bf34b0c..22bb788fbfc 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains code to lower WebAssembly MachineInstrs to their +/// This file contains code to lower WebAssembly MachineInstrs to their /// corresponding MCInst records. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h index d1d2794c3b8..41b4313bb38 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the class to lower WebAssembly MachineInstrs to +/// This file declares the class to lower WebAssembly MachineInstrs to /// their corresponding MCInst records. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp index ccf6a18b32e..e511e574050 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements WebAssembly-specific per-machine-function +/// This file implements WebAssembly-specific per-machine-function /// information. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h index 1fcbb7791d4..3c268366016 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares WebAssembly-specific per-machine-function +/// This file declares WebAssembly-specific per-machine-function /// information. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp index d97d8b75385..53e7688e265 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Optimize LiveIntervals for use in a post-RA context. +/// Optimize LiveIntervals for use in a post-RA context. // /// LiveIntervals normally runs before register allocation when the code is /// only recently lowered out of SSA form, so it's uncommon for registers to diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp index 804785c27c7..113ee2532bc 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Optimize calls with "returned" attributes for WebAssembly. +/// Optimize calls with "returned" attributes for WebAssembly. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp index 56487caf14f..aa70c91cb4b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Late peephole optimizations for WebAssembly. +/// Late peephole optimizations for WebAssembly. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp index 12b9a6f3555..f61d65b6530 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Fix up code to meet LiveInterval's requirements. +/// Fix up code to meet LiveInterval's requirements. /// /// Some CodeGen passes don't preserve LiveInterval's requirements, because /// they run after register allocation and it isn't important. However, diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp index f845b8f8102..494259b17b0 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a virtual register coloring pass. +/// This file implements a virtual register coloring pass. /// /// WebAssembly doesn't have a fixed number of registers, but it is still /// desirable to minimize the total number of registers used in each function. diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp index e29fe5ef4ae..7f518ac9868 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass which assigns WebAssembly register +/// This file implements a pass which assigns WebAssembly register /// numbers for CodeGen virtual registers. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index 2ccd7343d00..6780f252902 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a register stacking pass. +/// This file implements a register stacking pass. /// /// This pass reorders instructions to put register uses and defs in an order /// such that they form single-use expression trees. 
Registers fitting this form diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp index 5e7ebd19fac..b6481ac2d4a 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetRegisterInfo class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h index ad1d71eebf2..4be8d40593c 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// WebAssemblyRegisterInfo class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td index 67682c9ff24..29f42b96b24 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file describes the WebAssembly register classes and some nominal +/// This file describes the WebAssembly register classes and some nominal /// physical registers. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp index 85e20c74821..f66081174e9 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass that replaces physical registers with +/// This file implements a pass that replaces physical registers with /// virtual registers. /// /// LLVM expects certain physical registers, such as a stack pointer. However, diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp index e2a0d9d89a7..283cecd4b9d 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains signature information for runtime libcalls. +/// This file contains signature information for runtime libcalls. /// /// CodeGen uses external symbols, which it refers to by name. The WebAssembly /// target needs type information for all functions. 
This file contains a big diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h index 12906760478..2ba65ff5b71 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides signature information for runtime libcalls. +/// This file provides signature information for runtime libcalls. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp index fae9c610051..bec72049258 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblySelectionDAGInfo class. +/// This file implements the WebAssemblySelectionDAGInfo class. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h index 533c66b7a22..31d150eded6 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly subclass for +/// This file defines the WebAssembly subclass for /// SelectionDAGTargetInfo. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp index 429f94ee6cf..ac53151047f 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file sets the p2align operands on load and store instructions. +/// This file sets the p2align operands on load and store instructions. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp index 9c73f7aad73..f1c700f9e8f 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements an optimization pass using store result values. +/// This file implements an optimization pass using store result values. /// /// WebAssembly's store instructions return the stored value. 
This is to enable /// an optimization wherein uses of the stored value can be replaced by uses of diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp index 6addaa39d71..d6af0fb219d 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssembly-specific subclass of +/// This file implements the WebAssembly-specific subclass of /// TargetSubtarget. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h index c2ced236dbd..b170dbff3b3 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetSubtarget. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp index 8151f2562d4..3349b6f8cd0 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific subclass of TargetMachine. +/// This file defines the WebAssembly-specific subclass of TargetMachine. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h index dd826befd11..41001e7a0cc 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetMachine. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp index b1fd108bc24..cdd572c27b5 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the functions of the WebAssembly-specific subclass +/// This file defines the functions of the WebAssembly-specific subclass /// of TargetLoweringObjectFile. 
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h index ace87c9e442..24257e731df 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetLoweringObjectFile. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp index 2e002781f43..4a2777cc3a9 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific TargetTransformInfo +/// This file defines the WebAssembly-specific TargetTransformInfo /// implementation. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h index 7b35fc91613..4300ca3defb 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file a TargetTransformInfo::Concept conforming object specific +/// This file defines a TargetTransformInfo::Concept conforming object specific /// to the WebAssembly target machine. /// /// It uses the target's detailed information to provide more precise answers to diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp index e32772d491c..0a811541b38 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements several utility functions for WebAssembly. +/// This file implements several utility functions for WebAssembly. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h index 595491f1bf5..305f406bbff 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the WebAssembly-specific +/// This file contains the declaration of the WebAssembly-specific /// utility functions.
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index 44422a95f16..0fe1fab7ba5 100644 --- a/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -400,7 +400,7 @@ namespace X86Disassembler { REGS_BOUND \ ENTRY(RIP) -/// \brief All possible values of the base field for effective-address +/// All possible values of the base field for effective-address /// computations, a.k.a. the Mod and R/M fields of the ModR/M byte. /// We distinguish between bases (EA_BASE_*) and registers that just happen /// to be referred to when Mod == 0b11 (EA_REG_*). @@ -415,7 +415,7 @@ enum EABase { EA_max }; -/// \brief All possible values of the SIB index field. +/// All possible values of the SIB index field. /// borrows entries from ALL_EA_BASES with the special case that /// sib is synonymous with NONE. /// Vector SIB: index can be XMM or YMM. @@ -430,7 +430,7 @@ enum SIBIndex { SIB_INDEX_max }; -/// \brief All possible values of the SIB base field. +/// All possible values of the SIB base field. enum SIBBase { SIB_BASE_NONE, #define ENTRY(x) SIB_BASE_##x, @@ -439,7 +439,7 @@ enum SIBBase { SIB_BASE_max }; -/// \brief Possible displacement types for effective-address computations. +/// Possible displacement types for effective-address computations. typedef enum { EA_DISP_NONE, EA_DISP_8, @@ -447,7 +447,7 @@ typedef enum { EA_DISP_32 } EADisplacement; -/// \brief All possible values of the reg field in the ModR/M byte. +/// All possible values of the reg field in the ModR/M byte. enum Reg { #define ENTRY(x) MODRM_REG_##x, ALL_REGS @@ -455,7 +455,7 @@ enum Reg { MODRM_REG_max }; -/// \brief All possible segment overrides. +/// All possible segment overrides. enum SegmentOverride { SEG_OVERRIDE_NONE, SEG_OVERRIDE_CS, @@ -467,7 +467,7 @@ enum SegmentOverride { SEG_OVERRIDE_max }; -/// \brief Possible values for the VEX.m-mmmm field +/// Possible values for the VEX.m-mmmm field enum VEXLeadingOpcodeByte { VEX_LOB_0F = 0x1, VEX_LOB_0F38 = 0x2, @@ -480,7 +480,7 @@ enum XOPMapSelect { XOP_MAP_SELECT_A = 0xA }; -/// \brief Possible values for the VEX.pp/EVEX.pp field +/// Possible values for the VEX.pp/EVEX.pp field enum VEXPrefixCode { VEX_PREFIX_NONE = 0x0, VEX_PREFIX_66 = 0x1, @@ -496,7 +496,7 @@ enum VectorExtensionType { TYPE_XOP = 0x4 }; -/// \brief Type for the byte reader that the consumer must provide to +/// Type for the byte reader that the consumer must provide to /// the decoder. Reads a single byte from the instruction's address space. /// \param arg A baton that the consumer can associate with any internal /// state that it needs. @@ -507,7 +507,7 @@ enum VectorExtensionType { /// \return -1 if the byte cannot be read for any reason; 0 otherwise. typedef int (*byteReader_t)(const void *arg, uint8_t *byte, uint64_t address); -/// \brief Type for the logging function that the consumer can provide to +/// Type for the logging function that the consumer can provide to /// get debugging output from the decoder. /// \param arg A baton that the consumer can associate with any internal /// state that it needs. @@ -650,7 +650,7 @@ struct InternalInstruction { ArrayRef<OperandSpecifier> operands; }; -/// \brief Decode one instruction and store the decoding results in +/// Decode one instruction and store the decoding results in /// a buffer provided by the consumer. 
/// \param insn The buffer to store the instruction in. Allocated by the /// consumer. @@ -674,7 +674,7 @@ int decodeInstruction(InternalInstruction *insn, uint64_t startLoc, DisassemblerMode mode); -/// \brief Print a message to debugs() +/// Print a message to dbgs() /// \param file The name of the file printing the debug message. /// \param line The line number that printed the debug message. /// \param s The message to print. diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp index a469f0bf8df..de2138cab87 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -312,7 +312,7 @@ void X86AsmBackend::relaxInstruction(const MCInst &Inst, Res.setOpcode(RelaxedOp); } -/// \brief Write a sequence of optimal nops to the output, covering \p Count +/// Write a sequence of optimal nops to the output, covering \p Count /// bytes. /// \return - true on success, false on failure bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { @@ -487,7 +487,7 @@ namespace CU { class DarwinX86AsmBackend : public X86AsmBackend { const MCRegisterInfo &MRI; - /// \brief Number of registers that can be saved in a compact unwind encoding. + /// Number of registers that can be saved in a compact unwind encoding. enum { CU_NUM_SAVED_REGS = 6 }; mutable unsigned SavedRegs[CU_NUM_SAVED_REGS]; @@ -497,7 +497,7 @@ class DarwinX86AsmBackend : public X86AsmBackend { unsigned MoveInstrSize; ///< Size of a "move" instruction. unsigned StackDivide; ///< Amount to adjust stack size by. protected: - /// \brief Size of a "push" instruction for the given register. + /// Size of a "push" instruction for the given register. unsigned PushInstrSize(unsigned Reg) const { switch (Reg) { case X86::EBX: @@ -518,7 +518,7 @@ protected: return 1; } - /// \brief Implementation of algorithm to generate the compact unwind encoding + /// Implementation of algorithm to generate the compact unwind encoding /// for the CFI instructions. uint32_t generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const { @@ -685,7 +685,7 @@ protected: } private: - /// \brief Get the compact unwind number for a given register. The number + /// Get the compact unwind number for a given register. The number /// corresponds to the enum lists in compact_unwind_encoding.h. int getCompactUnwindRegNum(unsigned Reg) const { static const MCPhysReg CU32BitRegs[7] = { @@ -702,7 +702,7 @@ private: return -1; } - /// \brief Return the registers encoded for a compact encoding with a frame + /// Return the registers encoded for a compact encoding with a frame /// pointer. uint32_t encodeCompactUnwindRegistersWithFrame() const { // Encode the registers in the order they were saved --- 3-bits per @@ -726,7 +726,7 @@ private: return RegEnc; } - /// \brief Create the permutation encoding used with frameless stacks. It is + /// Create the permutation encoding used with frameless stacks. It is /// passed the number of registers to be saved and an array of the registers /// saved. uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const { @@ -820,7 +820,7 @@ public: MachO::CPU_SUBTYPE_I386_ALL); } - /// \brief Generate the compact unwind encoding for the CFI instructions. + /// Generate the compact unwind encoding for the CFI instructions.
uint32_t generateCompactUnwindEncoding( ArrayRef<MCCFIInstruction> Instrs) const override { return generateCompactUnwindEncodingImpl(Instrs); @@ -840,7 +840,7 @@ public: MachO::CPU_TYPE_X86_64, Subtype); } - /// \brief Generate the compact unwind encoding for the CFI instructions. + /// Generate the compact unwind encoding for the CFI instructions. uint32_t generateCompactUnwindEncoding( ArrayRef<MCCFIInstruction> Instrs) const override { return generateCompactUnwindEncodingImpl(Instrs); diff --git a/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp index 007df73d5a5..8ac1762a30c 100644 --- a/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp +++ b/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp @@ -273,7 +273,7 @@ void DecodeSubVectorBroadcast(unsigned DstNumElts, unsigned SrcNumElts, ShuffleMask.push_back(j); } -/// \brief Decode a shuffle packed values at 128-bit granularity +/// Decode a shuffle of packed values at 128-bit granularity /// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) /// immediate mask into a shuffle mask. void decodeVSHUF64x2FamilyMask(unsigned NumElts, unsigned ScalarSize, diff --git a/llvm/lib/Target/X86/X86AsmPrinter.h b/llvm/lib/Target/X86/X86AsmPrinter.h index e600d93293a..3a20a9362de 100644 --- a/llvm/lib/Target/X86/X86AsmPrinter.h +++ b/llvm/lib/Target/X86/X86AsmPrinter.h @@ -130,7 +130,7 @@ public: unsigned AsmVariant, const char *ExtraCode, raw_ostream &OS) override; - /// \brief Return the symbol for the specified constant pool entry. + /// Return the symbol for the specified constant pool entry. MCSymbol *GetCPISymbol(unsigned CPID) const override; bool doInitialization(Module &M) override { diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp index bf6cc6728cb..e89dd497259 100644 --- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp +++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp @@ -97,15 +97,15 @@ private: SmallVector<MachineInstr *, 2> ForRemoval; AliasAnalysis *AA; - /// \brief Returns couples of Load then Store to memory which look + /// Returns pairs of Load then Store to memory which look /// like a memcpy. void findPotentiallylBlockedCopies(MachineFunction &MF); - /// \brief Break the memcpy's load and store into smaller copies + /// Break the memcpy's load and store into smaller copies /// such that each memory load that was blocked by a smaller store /// would now be copied separately. void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst, const DisplacementSizeMap &BlockingStoresDispSizeMap); - /// \brief Break a copy of size Size to smaller copies. + /// Break a copy of size Size into smaller copies. void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm, MachineInstr *StoreInst, int64_t StDispImm, int64_t LMMOffset, int64_t SMMOffset); diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp index 3806d2dad3d..b37b2835ac1 100644 --- a/llvm/lib/Target/X86/X86FastISel.cpp +++ b/llvm/lib/Target/X86/X86FastISel.cpp @@ -68,7 +68,7 @@ public: bool fastSelectInstruction(const Instruction *I) override; - /// \brief The specified machine instr operand is a vreg, and that + /// The specified machine instr operand is a vreg, and that /// vreg is being provided by the specified load instruction. If possible, /// try to fold the load as an operand to the instruction, returning true if /// possible.
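The DarwinX86AsmBackend hunks above describe an encoding in which up to CU_NUM_SAVED_REGS (six) saved registers are packed three bits apiece into the compact unwind word. A minimal, self-contained sketch of that 3-bits-per-register packing follows; packSavedRegs is a hypothetical name, and a real encoder first maps each physical register to its compact-unwind number (see getCompactUnwindRegNum) and handles ordering details this sketch ignores:

#include <cassert>
#include <cstdint>

// Pack up to six compact-unwind register numbers, in the order the
// registers were saved, into a 32-bit field at 3 bits per register.
static uint32_t packSavedRegs(const int Regs[], unsigned Count) {
  assert(Count <= 6 && "compact unwind encodes at most six saved registers");
  uint32_t RegEnc = 0;
  for (unsigned I = 0; I != Count; ++I) {
    assert(Regs[I] >= 0 && Regs[I] < 8 && "each number must fit in 3 bits");
    RegEnc |= uint32_t(Regs[I]) << (I * 3);
  }
  return RegEnc;
}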
@@ -217,7 +217,7 @@ getX86SSEConditionCode(CmpInst::Predicate Predicate) { return std::make_pair(CC, NeedSwap); } -/// \brief Adds a complex addressing mode to the given machine instr builder. +/// Adds a complex addressing mode to the given machine instr builder. /// Note, this will constrain the index register. If its not possible to /// constrain the given index register, then a new one will be created. The /// IndexReg field of the addressing mode will be updated to match in this case. @@ -231,7 +231,7 @@ X86FastISel::addFullAddress(const MachineInstrBuilder &MIB, return ::addFullAddress(MIB, AM); } -/// \brief Check if it is possible to fold the condition from the XALU intrinsic +/// Check if it is possible to fold the condition from the XALU intrinsic /// into the user. The condition code will only be updated on success. bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, const Value *Cond) { @@ -2019,7 +2019,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { return true; } -/// \brief Emit a conditional move instruction (if the are supported) to lower +/// Emit a conditional move instruction (if they are supported) to lower /// the select. bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { // Check if the subtarget supports these instructions. @@ -2148,7 +2148,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { return true; } -/// \brief Emit SSE or AVX instructions to lower the select. +/// Emit SSE or AVX instructions to lower the select. /// /// Try to use SSE1/SSE2 instructions to simulate a select without branches. /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp index 9a2f172aade..46f13821bae 100644 --- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp +++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp @@ -166,7 +166,7 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) { return true; } -/// \brief Check if after \p OrigMI the only portion of super register +/// Check if after \p OrigMI the only portion of super register /// of the destination register of \p OrigMI that is alive is that /// destination register. /// diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp index d635c1e8574..df8c8340a61 100644 --- a/llvm/lib/Target/X86/X86FixupLEAs.cpp +++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp @@ -40,13 +40,13 @@ namespace { class FixupLEAPass : public MachineFunctionPass { enum RegUsageState { RU_NotUsed, RU_Write, RU_Read }; - /// \brief Loop over all of the instructions in the basic block + /// Loop over all of the instructions in the basic block /// replacing applicable instructions with LEA instructions, /// where appropriate. bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI); - /// \brief Given a machine register, look for the instruction + /// Given a machine register, look for the instruction /// which writes it in the current basic block. If found, /// try to replace it with an equivalent LEA instruction.
/// If replacement succeeds, then also process the newly created @@ -54,20 +54,20 @@ class FixupLEAPass : public MachineFunctionPass { void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I, MachineFunction::iterator MFI); - /// \brief Given a memory access or LEA instruction + /// Given a memory access or LEA instruction /// whose address mode uses a base and/or index register, look for /// an opportunity to replace the instruction which sets the base or index /// register with an equivalent LEA instruction. void processInstruction(MachineBasicBlock::iterator &I, MachineFunction::iterator MFI); - /// \brief Given a LEA instruction which is unprofitable + /// Given a LEA instruction which is unprofitable /// on Silvermont try to replace it with an equivalent ADD instruction void processInstructionForSLM(MachineBasicBlock::iterator &I, MachineFunction::iterator MFI); - /// \brief Given a LEA instruction which is unprofitable + /// Given a LEA instruction which is unprofitable /// on SNB+ try to replace it with other instructions. /// According to Intel's Optimization Reference Manual: /// " For LEA instructions with three source operands and some specific /// @@ -82,23 +82,23 @@ class FixupLEAPass : public MachineFunctionPass { MachineInstr *processInstrForSlow3OpLEA(MachineInstr &MI, MachineFunction::iterator MFI); - /// \brief Look for LEAs that add 1 to reg or subtract 1 from reg + /// Look for LEAs that add 1 to reg or subtract 1 from reg /// and convert them to INC or DEC respectively. bool fixupIncDec(MachineBasicBlock::iterator &I, MachineFunction::iterator MFI) const; - /// \brief Determine if an instruction references a machine register + /// Determine if an instruction references a machine register /// and, if so, whether it reads or writes the register. RegUsageState usesRegister(MachineOperand &p, MachineBasicBlock::iterator I); - /// \brief Step backwards through a basic block, looking + /// Step backwards through a basic block, looking /// for an instruction which writes a register within /// a maximum of INSTR_DISTANCE_THRESHOLD instruction latency cycles. MachineBasicBlock::iterator searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I, MachineFunction::iterator MFI); - /// \brief if an instruction can be converted to an + /// If an instruction can be converted to an /// equivalent LEA, insert the new instruction into the basic block /// and return a pointer to it. Otherwise, return zero. MachineInstr *postRAConvertToLEA(MachineFunction::iterator &MFI, @@ -113,7 +113,7 @@ public: initializeFixupLEAPassPass(*PassRegistry::getPassRegistry()); } - /// \brief Loop over all of the basic blocks, + /// Loop over all of the basic blocks, /// replacing instructions by equivalent LEA instructions /// if needed and when possible. bool runOnMachineFunction(MachineFunction &MF) override; diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 11b814ea859..73455912d2c 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -414,7 +414,7 @@ namespace { return Subtarget->getInstrInfo(); } - /// \brief Address-mode matching performs shift-of-and to and-of-shift + /// Address-mode matching performs shift-of-and to and-of-shift /// reassociation in order to expose more scaled addressing /// opportunities.
bool ComplexPatternFuncMutatesDAG() const override { diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 1ba801edf79..91ef663844c 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -4457,7 +4457,7 @@ bool X86::isCalleePop(CallingConv::ID CallingConv, } } -/// \brief Return true if the condition is an unsigned comparison operation. +/// Return true if the condition is an unsigned comparison operation. static bool isX86CCUnsigned(unsigned X86CC) { switch (X86CC) { default: @@ -4666,7 +4666,7 @@ bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load, return true; } -/// \brief Returns true if it is beneficial to convert a load of a constant +/// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself. bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const { @@ -4856,7 +4856,7 @@ static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos, return true; } -/// \brief Helper function to test whether a shuffle mask could be +/// Helper function to test whether a shuffle mask could be /// simplified by widening the elements being shuffled. /// /// Appends the mask for wider elements in WidenedMask if valid. Otherwise @@ -7151,7 +7151,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp, return SDValue(); } -/// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real +/// For an EXTRACT_VECTOR_ELT with a constant index return the real /// underlying vector and index. /// /// Modifies \p ExtractedFromVec to the real vector and returns the real @@ -7364,7 +7364,7 @@ static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG, return DstVec; } -/// \brief Return true if \p N implements a horizontal binop and return the +/// Return true if \p N implements a horizontal binop and return the /// operands for the horizontal binop into V0 and V1. /// /// This is a helper function of LowerToHorizontalOp(). @@ -7461,7 +7461,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode, return CanFold; } -/// \brief Emit a sequence of two 128-bit horizontal add/sub followed by +/// Emit a sequence of two 128-bit horizontal add/sub followed by /// a concat_vector. /// /// This is a helper function of LowerToHorizontalOp(). @@ -8822,7 +8822,7 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, // patterns. //===----------------------------------------------------------------------===// -/// \brief Tiny helper function to identify a no-op mask. +/// Tiny helper function to identify a no-op mask. /// /// This is a somewhat boring predicate function. It checks whether the mask /// array input, which is assumed to be a single-input shuffle mask of the kind @@ -8838,7 +8838,7 @@ static bool isNoopShuffleMask(ArrayRef<int> Mask) { return true; } -/// \brief Test whether there are elements crossing 128-bit lanes in this +/// Test whether there are elements crossing 128-bit lanes in this /// shuffle mask. /// /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations @@ -8852,7 +8852,7 @@ static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) { return false; } -/// \brief Test whether a shuffle mask is equivalent within each sub-lane. +/// Test whether a shuffle mask is equivalent within each sub-lane. /// /// This checks a shuffle mask to see if it is performing the same /// lane-relative shuffle in each sub-lane. 
This trivially implies @@ -8941,7 +8941,7 @@ static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT, return true; } -/// \brief Checks whether a shuffle mask is equivalent to an explicit list of +/// Checks whether a shuffle mask is equivalent to an explicit list of /// arguments. /// /// This is a fast way to test a shuffle mask against a fixed pattern: @@ -9038,7 +9038,7 @@ static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) { return IsUnpackwdMask; } -/// \brief Get a 4-lane 8-bit shuffle immediate for a mask. +/// Get a 4-lane 8-bit shuffle immediate for a mask. /// /// This helper function produces an 8-bit shuffle immediate corresponding to /// the ubiquitous shuffle encoding scheme used in x86 instructions for @@ -9066,7 +9066,7 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL, return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8); } -/// \brief Compute whether each element of a shuffle is zeroable. +/// Compute whether each element of a shuffle is zeroable. /// /// A "zeroable" vector shuffle element is one which can be lowered to zero. /// Either it is an undef element in the shuffle mask, the element of the input @@ -9443,7 +9443,7 @@ static SDValue lowerVectorShuffleWithPACK(const SDLoc &DL, MVT VT, return SDValue(); } -/// \brief Try to emit a bitmask instruction for a shuffle. +/// Try to emit a bitmask instruction for a shuffle. /// /// This handles cases where we can model a blend exactly as a bitmask due to /// one of the inputs being zeroable. @@ -9476,7 +9476,7 @@ static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1, return DAG.getNode(ISD::AND, DL, VT, V, VMask); } -/// \brief Try to emit a blend instruction for a shuffle using bit math. +/// Try to emit a blend instruction for a shuffle using bit math. /// /// This is used as a fallback approach when first class blend instructions are /// unavailable. Currently it is only suitable for integer vectors, but could @@ -9563,7 +9563,7 @@ static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size, return ScaledMask; } -/// \brief Try to emit a blend instruction for a shuffle. +/// Try to emit a blend instruction for a shuffle. /// /// This doesn't do any checks for the availability of instructions for blending /// these values. It relies on the availability of the X86ISD::BLENDI pattern to @@ -9709,7 +9709,7 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1, } } -/// \brief Try to lower as a blend of elements from two inputs followed by +/// Try to lower as a blend of elements from two inputs followed by /// a single-input permutation. /// /// This matches the pattern where we can blend elements from two inputs and @@ -9741,7 +9741,7 @@ static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT, return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask); } -/// \brief Generic routine to decompose a shuffle and blend into independent +/// Generic routine to decompose a shuffle and blend into independent /// blends and permutes. /// /// This matches the extremely common pattern for handling combined @@ -9782,7 +9782,7 @@ static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL, return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask); } -/// \brief Try to lower a vector shuffle as a rotation. +/// Try to lower a vector shuffle as a rotation. /// /// This is used for support PALIGNR for SSSE3 or VALIGND/Q for AVX512. 
static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2, @@ -9854,7 +9854,7 @@ static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2, return Rotation; } -/// \brief Try to lower a vector shuffle as a byte rotation. +/// Try to lower a vector shuffle as a byte rotation. /// /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use @@ -9938,7 +9938,7 @@ static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT, DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift)); } -/// \brief Try to lower a vector shuffle as a dword/qword rotation. +/// Try to lower a vector shuffle as a dword/qword rotation. /// /// AVX512 has a VALIGND/VALIGNQ instructions that will do an arbitrary /// rotation of the concatenation of two vectors; This routine will @@ -9969,7 +9969,7 @@ static SDValue lowerVectorShuffleAsRotate(const SDLoc &DL, MVT VT, DAG.getConstant(Rotation, DL, MVT::i8)); } -/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros). +/// Try to lower a vector shuffle as a bit shift (shifts in zeros). /// /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function @@ -10213,7 +10213,7 @@ static bool matchVectorShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2, return false; } -/// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ. +/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ. static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, const APInt &Zeroable, @@ -10233,7 +10233,7 @@ static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1, return SDValue(); } -/// \brief Lower a vector shuffle as a zero or any extension. +/// Lower a vector shuffle as a zero or any extension. /// /// Given a specific number of elements, element bit width, and extension /// stride, produce either a zero or any extension based on the available @@ -10388,7 +10388,7 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend( return DAG.getBitcast(VT, InputV); } -/// \brief Try to lower a vector shuffle as a zero extension on any microarch. +/// Try to lower a vector shuffle as a zero extension on any microarch. /// /// This routine will try to do everything in its power to cleverly lower /// a shuffle which happens to match the pattern of a zero extend. It doesn't @@ -10516,7 +10516,7 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend( return SDValue(); } -/// \brief Try to get a scalar value for a specific element of a vector. +/// Try to get a scalar value for a specific element of a vector. /// /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar. static SDValue getScalarValueForVectorElement(SDValue V, int Idx, @@ -10543,7 +10543,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx, return SDValue(); } -/// \brief Helper to test for a load that can be folded with x86 shuffles. +/// Helper to test for a load that can be folded with x86 shuffles. /// /// This is particularly important because the set of instructions varies /// significantly based on whether the operand is a load or not. @@ -10552,7 +10552,7 @@ static bool isShuffleFoldableLoad(SDValue V) { return ISD::isNON_EXTLoad(V.getNode()); } -/// \brief Try to lower insertion of a single element into a zero vector. +/// Try to lower insertion of a single element into a zero vector. 
/// /// This is a common pattern that we have especially efficient patterns to lower /// across all subtarget feature sets. @@ -10705,7 +10705,7 @@ static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar)); } -/// \brief Try to lower broadcast of a single element. +/// Try to lower broadcast of a single element. /// /// For convenience, this code also bundles all of the subtarget feature set /// filtering. While a little annoying to re-dispatch on type here, there isn't @@ -11030,7 +11030,7 @@ static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1, DAG.getConstant(InsertPSMask, DL, MVT::i8)); } -/// \brief Try to lower a shuffle as a permute of the inputs followed by an +/// Try to lower a shuffle as a permute of the inputs followed by an /// UNPCK instruction. /// /// This specifically targets cases where we end up with alternating between @@ -11142,7 +11142,7 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT, return SDValue(); } -/// \brief Handle lowering of 2-lane 64-bit floating point shuffles. +/// Handle lowering of 2-lane 64-bit floating point shuffles. /// /// This is the basis function for the 2-lane 64-bit shuffles as we have full /// support for floating point shuffles but not integer shuffles. These @@ -11225,7 +11225,7 @@ static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, DAG.getConstant(SHUFPDMask, DL, MVT::i8)); } -/// \brief Handle lowering of 2-lane 64-bit integer shuffles. +/// Handle lowering of 2-lane 64-bit integer shuffles. /// /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by /// the integer unit to minimize domain crossing penalties. However, for blends @@ -11322,7 +11322,7 @@ static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask)); } -/// \brief Test whether this can be lowered with a single SHUFPS instruction. +/// Test whether this can be lowered with a single SHUFPS instruction. /// /// This is used to disable more specialized lowerings when the shufps lowering /// will happen to be efficient. @@ -11344,7 +11344,7 @@ static bool isSingleSHUFPSMask(ArrayRef<int> Mask) { return true; } -/// \brief Lower a vector shuffle using the SHUFPS instruction. +/// Lower a vector shuffle using the SHUFPS instruction. /// /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS. /// It makes no assumptions about whether this is the *best* lowering, it simply @@ -11431,7 +11431,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT, getV4X86ShuffleImm8ForMask(NewMask, DL, DAG)); } -/// \brief Lower 4-lane 32-bit floating point shuffles. +/// Lower 4-lane 32-bit floating point shuffles. /// /// Uses instructions exclusively from the floating point unit to minimize /// domain crossing penalties, as these are sufficient to implement all v4f32 @@ -11527,7 +11527,7 @@ static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG); } -/// \brief Lower 4-lane i32 vector shuffles. +/// Lower 4-lane i32 vector shuffles. /// /// We try to handle these with integer-domain shuffles where we can, but for /// blends we use the floating point domain blend instructions. 
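Several helpers above (getV4X86ShuffleImm8ForMask and the SHUFPS/v4 lowerings that use it) build the classic x86 shuffle immediate, which packs a 4-element mask at two bits per lane. A standalone sketch of that packing, written here for illustration rather than as the LLVM helper itself:

#include <cassert>
#include <cstdint>

// Element I of the mask lands in bits [2*I+1 : 2*I] of the immediate.
static uint8_t getV4ShuffleImm(const int Mask[4]) {
  uint8_t Imm = 0;
  for (int I = 0; I != 4; ++I) {
    assert(Mask[I] >= 0 && Mask[I] < 4 && "mask element out of range");
    Imm |= uint8_t(Mask[I]) << (2 * I);
  }
  return Imm;
}

For example, the reversal mask {3, 2, 1, 0} encodes as 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, the familiar immediate for reversing a v4i32 with PSHUFD.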
@@ -11639,7 +11639,7 @@ static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return DAG.getBitcast(MVT::v4i32, ShufPS); } -/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2 +/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2 /// shuffle lowering, and the most complex part. /// /// The lowering strategy is to try to form pairs of input lanes which are @@ -12204,7 +12204,7 @@ static SDValue lowerVectorShuffleAsBlendOfPSHUFBs( return DAG.getBitcast(VT, V); } -/// \brief Generic lowering of 8-lane i16 shuffles. +/// Generic lowering of 8-lane i16 shuffles. /// /// This handles both single-input shuffles and combined shuffle/blends with /// two inputs. The single input shuffles are immediately delegated to @@ -12337,7 +12337,7 @@ static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, Mask, DAG); } -/// \brief Check whether a compaction lowering can be done by dropping even +/// Check whether a compaction lowering can be done by dropping even /// elements and compute how many times even elements must be dropped. /// /// This handles shuffles which take every Nth element where N is a power of @@ -12416,7 +12416,7 @@ static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT, return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2); } -/// \brief Generic lowering of v16i8 shuffles. +/// Generic lowering of v16i8 shuffles. /// /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to /// detect any complexity reducing interleaving. If that doesn't help, it uses @@ -12716,7 +12716,7 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV); } -/// \brief Dispatching routine to lower various 128-bit x86 vector shuffles. +/// Dispatching routine to lower various 128-bit x86 vector shuffles. /// /// This routine breaks down the specific type of 128-bit shuffle and /// dispatches to the lowering routines accordingly. @@ -12744,7 +12744,7 @@ static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, } } -/// \brief Generic routine to split vector shuffle into half-sized shuffles. +/// Generic routine to split vector shuffle into half-sized shuffles. /// /// This routine just extracts two subvectors, shuffles them independently, and /// then concatenates them back together. This should work effectively with all @@ -12867,7 +12867,7 @@ static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); } -/// \brief Either split a vector in halves or decompose the shuffles and the +/// Either split a vector in halves or decompose the shuffles and the /// blend. /// /// This is provided as a good fallback for many lowerings of non-single-input @@ -12925,7 +12925,7 @@ static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG); } -/// \brief Lower a vector shuffle crossing multiple 128-bit lanes as +/// Lower a vector shuffle crossing multiple 128-bit lanes as /// a permutation and blend of those lanes. /// /// This essentially blends the out-of-lane inputs to each lane into the lane @@ -12983,7 +12983,7 @@ static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT, return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask); } -/// \brief Handle lowering 2-lane 128-bit shuffles. 
+/// Handle lowering 2-lane 128-bit shuffles. static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, const APInt &Zeroable, @@ -13079,7 +13079,7 @@ static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, DAG.getConstant(PermMask, DL, MVT::i8)); } -/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then +/// Lower a vector shuffle by first fixing the 128-bit lanes and then /// shuffling each lane. /// /// This will only succeed when the result of fixing the 128-bit lanes results @@ -13282,7 +13282,7 @@ static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT, DAG.getIntPtrConstant(Offset, DL)); } -/// \brief Test whether the specified input (0 or 1) is in-place blended by the +/// Test whether the specified input (0 or 1) is in-place blended by the /// given mask. /// /// This returns true if the elements from a particular input are already in the @@ -13518,7 +13518,7 @@ static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT, DAG.getConstant(Immediate, DL, MVT::i8)); } -/// \brief Handle lowering of 4-lane 64-bit floating point shuffles. +/// Handle lowering of 4-lane 64-bit floating point shuffles. /// /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2 /// isn't available. @@ -13615,7 +13615,7 @@ static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG); } -/// \brief Handle lowering of 4-lane 64-bit integer shuffles. +/// Handle lowering of 4-lane 64-bit integer shuffles. /// /// This routine is only called when we have AVX2 and thus a reasonable /// instruction set for v4i64 shuffling.. @@ -13709,7 +13709,7 @@ static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, Mask, DAG); } -/// \brief Handle lowering of 8-lane 32-bit floating point shuffles. +/// Handle lowering of 8-lane 32-bit floating point shuffles. /// /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2 /// isn't available. @@ -13808,7 +13808,7 @@ static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG); } -/// \brief Handle lowering of 8-lane 32-bit integer shuffles. +/// Handle lowering of 8-lane 32-bit integer shuffles. /// /// This routine is only called when we have AVX2 and thus a reasonable /// instruction set for v8i32 shuffling.. @@ -13921,7 +13921,7 @@ static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, Mask, DAG); } -/// \brief Handle lowering of 16-lane 16-bit integer shuffles. +/// Handle lowering of 16-lane 16-bit integer shuffles. /// /// This routine is only called when we have AVX2 and thus a reasonable /// instruction set for v16i16 shuffling.. @@ -14012,7 +14012,7 @@ static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG); } -/// \brief Handle lowering of 32-lane 8-bit integer shuffles. +/// Handle lowering of 32-lane 8-bit integer shuffles. /// /// This routine is only called when we have AVX2 and thus a reasonable /// instruction set for v32i8 shuffling.. @@ -14092,7 +14092,7 @@ static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG); } -/// \brief High-level routine to lower various 256-bit x86 vector shuffles. 
+/// High-level routine to lower various 256-bit x86 vector shuffles. /// /// This routine either breaks down the specific type of a 256-bit x86 vector /// shuffle or splits it into two 128-bit shuffles and fuses the results back @@ -14162,7 +14162,7 @@ static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, } } -/// \brief Try to lower a vector shuffle as a 128-bit shuffles. +/// Try to lower a vector shuffle as a 128-bit shuffles. static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask, const APInt &Zeroable, @@ -14263,7 +14263,7 @@ static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT, DAG.getConstant(PermMask, DL, MVT::i8)); } -/// \brief Handle lowering of 8-lane 64-bit floating point shuffles. +/// Handle lowering of 8-lane 64-bit floating point shuffles. static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14320,7 +14320,7 @@ static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG); } -/// \brief Handle lowering of 16-lane 32-bit floating point shuffles. +/// Handle lowering of 16-lane 32-bit floating point shuffles. static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14375,7 +14375,7 @@ static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG); } -/// \brief Handle lowering of 8-lane 64-bit integer shuffles. +/// Handle lowering of 8-lane 64-bit integer shuffles. static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14441,7 +14441,7 @@ static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG); } -/// \brief Handle lowering of 16-lane 32-bit integer shuffles. +/// Handle lowering of 16-lane 32-bit integer shuffles. static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14512,7 +14512,7 @@ static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG); } -/// \brief Handle lowering of 32-lane 16-bit integer shuffles. +/// Handle lowering of 32-lane 16-bit integer shuffles. static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14567,7 +14567,7 @@ static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG); } -/// \brief Handle lowering of 64-lane 8-bit integer shuffles. +/// Handle lowering of 64-lane 8-bit integer shuffles. static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, const APInt &Zeroable, SDValue V1, SDValue V2, @@ -14622,7 +14622,7 @@ static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG); } -/// \brief High-level routine to lower various 512-bit x86 vector shuffles. +/// High-level routine to lower various 512-bit x86 vector shuffles. 
/// /// This routine either breaks down the specific type of a 512-bit x86 vector /// shuffle or splits it into two 256-bit shuffles and fuses the results back @@ -14825,7 +14825,7 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) { return false; } -/// \brief Top-level lowering for x86 vector shuffles. +/// Top-level lowering for x86 vector shuffles. /// /// This handles decomposition, canonicalization, and lowering of all x86 /// vector shuffles. Most of the specific lowering strategies are encapsulated @@ -14928,7 +14928,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget, llvm_unreachable("Unimplemented!"); } -/// \brief Try to lower a VSELECT instruction to a vector shuffle. +/// Try to lower a VSELECT instruction to a vector shuffle. static SDValue lowerVSELECTtoVectorShuffle(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) { @@ -17430,7 +17430,7 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC, return getSETCC(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, Res, DL, DAG); } -/// \brief return true if \c Op has a use that doesn't just read flags. +/// Return true if \c Op has a use that doesn't just read flags. static bool hasNonFlagsUse(SDValue Op) { for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE; ++UI) { @@ -18070,7 +18070,7 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) { DAG.getConstant(SSECC, dl, MVT::i8)); } -/// \brief Try to turn a VSETULT into a VSETULE by modifying its second +/// Try to turn a VSETULT into a VSETULE by modifying its second /// operand \p Op1. If non-trivial (for example because it's not constant) /// return an empty value. static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1, @@ -20059,7 +20059,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT, return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); } -/// \brief Return Mask with the necessary casting or extending +/// Return Mask with the necessary casting or extending /// for \p Mask according to \p MaskVT when lowering masking intrinsics static SDValue getMaskNode(SDValue Mask, MVT MaskVT, const X86Subtarget &Subtarget, SelectionDAG &DAG, @@ -20101,7 +20101,7 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT, } } -/// \brief Return (and \p Op, \p Mask) for compare instructions or +/// Return (and \p Op, \p Mask) for compare instructions or /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the /// necessary casting or extending for \p Mask when lowering masking intrinsics static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, @@ -20142,7 +20142,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc); } -/// \brief Creates an SDNode for a predicated scalar operation. +/// Creates an SDNode for a predicated scalar operation. /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc). /// The mask is coming as MVT::i8 and it should be transformed /// to MVT::v1i1 while lowering masking intrinsics. @@ -22086,7 +22086,7 @@ static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) { return LowerVectorIntUnary(Op, DAG); } -/// \brief Lower a vector CTLZ using native supported vector CTLZ instruction. +/// Lower a vector CTLZ using natively supported vector CTLZ instruction. // // i8/i16 vector implemented using dword LZCNT vector instruction // ( sub(trunc(lzcnt(zext32(x)))) ).
In case zext32(x) is illegal, @@ -28972,7 +28972,7 @@ static bool matchBinaryPermuteVectorShuffle( return false; } -/// \brief Combine an arbitrary chain of shuffles into a single instruction if +/// Combine an arbitrary chain of shuffles into a single instruction if /// possible. /// /// This is the leaf of the recursive combine below. When we have found some @@ -29498,7 +29498,7 @@ static SDValue combineX86ShufflesConstants(const SmallVectorImpl<SDValue> &Ops, return DAG.getBitcast(VT, CstOp); } -/// \brief Fully generic combining of x86 shuffle instructions. +/// Fully generic combining of x86 shuffle instructions. /// /// This should be the last combine run over the x86 shuffle instructions. Once /// they have been fully optimized, this will recursively consider all chains @@ -29730,7 +29730,7 @@ static SDValue combineX86ShufflesRecursively( Subtarget); } -/// \brief Get the PSHUF-style mask from PSHUF node. +/// Get the PSHUF-style mask from a PSHUF node. /// /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4 /// PSHUF-style masks that can be reused with such instructions. @@ -29773,7 +29773,7 @@ static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) { } } -/// \brief Search for a combinable shuffle across a chain ending in pshufd. +/// Search for a combinable shuffle across a chain ending in pshufd. /// /// We walk up the chain and look for a combinable shuffle, skipping over /// shuffles that we could hoist this shuffle's transformation past without @@ -29906,7 +29906,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask, return V; } -/// \brief Search for a combinable shuffle across a chain ending in pshuflw or +/// Search for a combinable shuffle across a chain ending in pshuflw or /// pshufhw. /// /// We walk up the chain, skipping shuffles of the other half and looking @@ -29974,7 +29974,7 @@ static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask, return true; } -/// \brief Try to combine x86 target specific shuffles. +/// Try to combine x86 target specific shuffles. static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) { @@ -30382,7 +30382,7 @@ static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget, return true; } -/// \brief Try to combine a shuffle into a target-specific add-sub or +/// Try to combine a shuffle into a target-specific add-sub or /// mul-add-sub node. static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N, const X86Subtarget &Subtarget, diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 6af7b8da01a..e12585ab67e 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -933,7 +933,7 @@ namespace llvm { /// the immediate into a register. bool isLegalAddImmediate(int64_t Imm) const override; - /// \brief Return the cost of the scaling factor used in the addressing + /// Return the cost of the scaling factor used in the addressing /// mode represented by AM for this target, for a load/store /// of the specified type. /// If the AM is supported, the return value must be >= 0. @@ -1027,7 +1027,7 @@ namespace llvm { (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 } - /// \brief Returns true if it is beneficial to convert a load of a constant + /// Returns true if it is beneficial to convert a load of a constant /// to just the constant itself.
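The CTLZ lowering described in the comment a few hunks above, sub(trunc(lzcnt(zext32(x)))), is easy to check with a scalar model. The sketch below is purely illustrative and does not use the DAG APIs; countLeadingZeros32 and ctlz8ViaLzcnt32 are invented names standing in for the vector VPLZCNTD-based sequence:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative scalar model of the vector trick: an i8 CTLZ is computed by
// zero-extending to i32, taking a 32-bit LZCNT, and subtracting the 24
// leading zero bits contributed by the extension.
static int countLeadingZeros32(uint32_t X) {
  int N = 0;
  for (uint32_t Bit = 1u << 31; Bit != 0 && (X & Bit) == 0; Bit >>= 1)
    ++N;
  return N; // 32 when X == 0.
}

static int ctlz8ViaLzcnt32(uint8_t X) {
  // sub(trunc(lzcnt(zext32(x)))): the trunc is implicit in scalar code.
  return countLeadingZeros32(static_cast<uint32_t>(X)) - 24;
}

int main() {
  assert(ctlz8ViaLzcnt32(0x80) == 0);
  assert(ctlz8ViaLzcnt32(0x01) == 7);
  assert(ctlz8ViaLzcnt32(0x00) == 8);
  return 0;
}
```

For i16 the same identity holds with 16 subtracted instead of 24, which is why a single dword LZCNT can serve both element widths.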
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; @@ -1096,7 +1096,7 @@ namespace llvm { bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override; - /// \brief Customize the preferred legalization strategy for certain types. + /// Customize the preferred legalization strategy for certain types. LegalizeTypeAction getPreferredVectorAction(EVT VT) const override; MVT getRegisterTypeForCallingConv(MVT VT) const override; @@ -1117,14 +1117,14 @@ namespace llvm { unsigned getMaxSupportedInterleaveFactor() const override { return 4; } - /// \brief Lower interleaved load(s) into target specific + /// Lower interleaved load(s) into target specific /// instructions/intrinsics. bool lowerInterleavedLoad(LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, ArrayRef<unsigned> Indices, unsigned Factor) const override; - /// \brief Lower interleaved store(s) into target specific + /// Lower interleaved store(s) into target specific /// instructions/intrinsics. bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override; diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 728cf111542..5c85c7e5282 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -6154,7 +6154,7 @@ unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes, } } -/// \brief Get the VPCMP immediate if the opcodes are swapped. +/// Get the VPCMP immediate if the opcodes are swapped. unsigned X86::getSwappedVPCMPImm(unsigned Imm) { switch (Imm) { default: llvm_unreachable("Unreachable!"); @@ -6172,7 +6172,7 @@ unsigned X86::getSwappedVPCMPImm(unsigned Imm) { return Imm; } -/// \brief Get the VPCOM immediate if the opcodes are swapped. +/// Get the VPCOM immediate if the opcodes are swapped. unsigned X86::getSwappedVPCOMImm(unsigned Imm) { switch (Imm) { default: llvm_unreachable("Unreachable!"); diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h index 3abc0ad1458..fab919e6889 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.h +++ b/llvm/lib/Target/X86/X86InstrInfo.h @@ -70,15 +70,15 @@ enum CondCode { // Turn condition code into conditional branch opcode. unsigned GetCondBranchFromCond(CondCode CC); -/// \brief Return a pair of condition code for the given predicate and whether +/// Return a pair of condition code for the given predicate and whether /// the instruction operands should be swapped to match the condition code. std::pair<CondCode, bool> getX86ConditionCode(CmpInst::Predicate Predicate); -/// \brief Return a set opcode for the given condition and whether it has +/// Return a set opcode for the given condition and whether it has /// a memory operand. unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false); -/// \brief Return a cmov opcode for the given condition, register size in +/// Return a cmov opcode for the given condition, register size in /// bytes, and operand type. unsigned getCMovFromCond(CondCode CC, unsigned RegBytes, bool HasMemoryOperand = false); @@ -96,10 +96,10 @@ CondCode getCondFromCMovOpc(unsigned Opc); /// e.g. turning COND_E to COND_NE. CondCode GetOppositeBranchCondition(CondCode CC); -/// \brief Get the VPCMP immediate if the opcodes are swapped. +/// Get the VPCMP immediate if the opcodes are swapped. unsigned getSwappedVPCMPImm(unsigned Imm); -/// \brief Get the VPCOM immediate if the opcodes are swapped. +/// Get the VPCOM immediate if the opcodes are swapped.
unsigned getSwappedVPCOMImm(unsigned Imm); } // namespace X86 diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp index c39f9d6cdf9..6c7fb9c339a 100644 --- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp +++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp @@ -39,7 +39,7 @@ using namespace llvm; namespace { -/// \brief This class holds necessary information to represent an interleaved +/// This class holds necessary information to represent an interleaved /// access group and supports utilities to lower the group into /// X86-specific instructions/intrinsics. /// E.g. A group of interleaving access loads (Factor = 2; accessing every /// other element) /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr /// %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6> /// %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7> class X86InterleavedAccessGroup { - /// \brief Reference to the wide-load instruction of an interleaved access + /// Reference to the wide-load instruction of an interleaved access /// group. Instruction *const Inst; - /// \brief Reference to the shuffle(s), consumer(s) of the (load) 'Inst'. + /// Reference to the shuffle(s), consumer(s) of the (load) 'Inst'. ArrayRef<ShuffleVectorInst *> Shuffles; - /// \brief Reference to the starting index of each user-shuffle. + /// Reference to the starting index of each user-shuffle. ArrayRef<unsigned> Indices; - /// \brief Reference to the interleaving stride in terms of elements. + /// Reference to the interleaving stride in terms of elements. const unsigned Factor; - /// \brief Reference to the underlying target. + /// Reference to the underlying target. const X86Subtarget &Subtarget; const DataLayout &DL; IRBuilder<> &Builder; - /// \brief Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors + /// Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors /// sub vectors of type \p T. Returns the sub-vectors in \p DecomposedVectors. void decompose(Instruction *Inst, unsigned NumSubVectors, VectorType *T, SmallVectorImpl<Instruction *> &DecomposedVectors); - /// \brief Performs matrix transposition on a 4x4 matrix \p InputVectors and + /// Performs matrix transposition on a 4x4 matrix \p InputVectors and /// returns the transposed-vectors in \p TransposedVectors. /// E.g. /// InputVectors: @@ -115,11 +115,11 @@ public: : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget), DL(Inst->getModule()->getDataLayout()), Builder(B) {} - /// \brief Returns true if this interleaved access group can be lowered into + /// Returns true if this interleaved access group can be lowered into /// x86-specific instructions/intrinsics, false otherwise. bool isSupported() const; - /// \brief Lowers this interleaved access group into X86-specific + /// Lowers this interleaved access group into X86-specific /// instructions/intrinsics. bool lowerIntoOptimizedSequence(); }; diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp index 9b0f2a64cf7..ae4b1e79c54 100644 --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -275,7 +275,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, return MCOperand::createExpr(Expr); } -/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with +/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with /// a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { unsigned ImmOp = Inst.getNumOperands() - 1; @@ -298,7 +298,7 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { Inst.addOperand(Saved); } -/// \brief If a movsx instruction has a shorter encoding for the used register +/// If a movsx instruction has a shorter encoding for the used register /// simplify the instruction to use it instead. static void SimplifyMOVSX(MCInst &Inst) { unsigned NewOpcode = 0; @@ -326,7 +326,7 @@ static void SimplifyMOVSX(MCInst &Inst) { } } -/// \brief Simplify things like MOV32rm to MOV32o32a. +/// Simplify things like MOV32rm to MOV32o32a. static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, unsigned Opcode) { // Don't make these simplifications in 64-bit mode; other assemblers don't @@ -1061,7 +1061,7 @@ void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering, .addExpr(tlsRef)); } -/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes +/// Emit the largest nop instruction smaller than or equal to \p NumBytes /// bytes. Return the size of nop emitted. static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) { @@ -1163,7 +1163,7 @@ static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, return NopSize; } -/// \brief Emit the optimal amount of multi-byte nops on X86. +/// Emit the optimal amount of multi-byte nops on X86. static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) { unsigned NopsToEmit = NumBytes; diff --git a/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/llvm/lib/Target/X86/X86MachineFunctionInfo.h index 1d1821a74cb..e1183bd1479 100644 --- a/llvm/lib/Target/X86/X86MachineFunctionInfo.h +++ b/llvm/lib/Target/X86/X86MachineFunctionInfo.h @@ -49,7 +49,7 @@ class X86MachineFunctionInfo : public MachineFunctionInfo { /// ReturnAddrIndex - FrameIndex for return slot. int ReturnAddrIndex = 0; - /// \brief FrameIndex for return slot. + /// FrameIndex for return slot. int FrameAddrIndex = 0; /// TailCallReturnAddrDelta - The number of bytes by which return address diff --git a/llvm/lib/Target/X86/X86MacroFusion.cpp b/llvm/lib/Target/X86/X86MacroFusion.cpp index 4e11397dec4..df3abb17014 100644 --- a/llvm/lib/Target/X86/X86MacroFusion.cpp +++ b/llvm/lib/Target/X86/X86MacroFusion.cpp @@ -19,7 +19,7 @@ using namespace llvm; -/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused +/// Check if the instr pair, FirstMI and SecondMI, should be fused /// together. Given SecondMI, when FirstMI is unspecified, then check if /// SecondMI may be part of a fused pair at all. static bool shouldScheduleAdjacent(const TargetInstrInfo &TII, diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp index 1fc6f07b79f..6329375720b 100644 --- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp +++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp @@ -60,17 +60,17 @@ static cl::opt<bool> STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions"); STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed"); -/// \brief Returns true if two machine operands are identical and they are not +/// Returns true if two machine operands are identical and they are not /// physical registers. 
static inline bool isIdenticalOp(const MachineOperand &MO1, const MachineOperand &MO2); -/// \brief Returns true if two address displacement operands are of the same +/// Returns true if two address displacement operands are of the same /// type and use the same symbol/index/address regardless of the offset. static bool isSimilarDispOp(const MachineOperand &MO1, const MachineOperand &MO2); -/// \brief Returns true if the instruction is LEA. +/// Returns true if the instruction is LEA. static inline bool isLEA(const MachineInstr &MI); namespace { @@ -184,7 +184,7 @@ template <> struct DenseMapInfo<MemOpKey> { } // end namespace llvm -/// \brief Returns a hash table key based on memory operands of \p MI. The +/// Returns a hash table key based on memory operands of \p MI. The /// number of the first memory operand of \p MI is specified through \p N. static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) { assert((isLEA(MI) || MI.mayLoadOrStore()) && @@ -242,7 +242,7 @@ public: StringRef getPassName() const override { return "X86 LEA Optimize"; } - /// \brief Loop over all of the basic blocks, replacing address + /// Loop over all of the basic blocks, replacing address /// calculations in load and store instructions, if it's already /// been calculated by LEA. Also, remove redundant LEAs. bool runOnMachineFunction(MachineFunction &MF) override; @@ -250,11 +250,11 @@ public: private: using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>; - /// \brief Returns a distance between two instructions inside one basic block. + /// Returns a distance between two instructions inside one basic block. /// Negative result means that instructions occur in reverse order. int calcInstrDist(const MachineInstr &First, const MachineInstr &Last); - /// \brief Choose the best \p LEA instruction from the \p List to replace + /// Choose the best \p LEA instruction from the \p List to replace /// address calculation in \p MI instruction. Return the address displacement /// and the distance between \p MI and the chosen \p BestLEA in /// \p AddrDispShift and \p Dist. @@ -262,25 +262,25 @@ private: const MachineInstr &MI, MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist); - /// \brief Returns the difference between addresses' displacements of \p MI1 + /// Returns the difference between addresses' displacements of \p MI1 /// and \p MI2. The numbers of the first memory operands for the instructions /// are specified through \p N1 and \p N2. int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1, const MachineInstr &MI2, unsigned N2) const; - /// \brief Returns true if the \p Last LEA instruction can be replaced by the + /// Returns true if the \p Last LEA instruction can be replaced by the /// \p First. The difference between displacements of the addresses calculated /// by these LEAs is returned in \p AddrDispShift. It'll be used for proper /// replacement of the \p Last LEA's uses with the \p First's def register. bool isReplaceable(const MachineInstr &First, const MachineInstr &Last, int64_t &AddrDispShift) const; - /// \brief Find all LEA instructions in the basic block. Also, assign position + /// Find all LEA instructions in the basic block. Also, assign position /// numbers to all instructions in the basic block to speed up calculation of /// distance between them. void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs); - /// \brief Removes redundant address calculations. + /// Removes redundant address calculations.
bool removeRedundantAddrCalc(MemOpMap &LEAs); /// Replace debug value MI with a new debug value instruction using register @@ -289,7 +289,7 @@ private: MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned VReg, int64_t AddrDispShift); - /// \brief Removes LEAs which calculate similar addresses. + /// Removes LEAs which calculate similar addresses. bool removeRedundantLEAs(MemOpMap &LEAs); DenseMap<const MachineInstr *, unsigned> InstrPos; diff --git a/llvm/lib/Target/X86/X86TargetObjectFile.h b/llvm/lib/Target/X86/X86TargetObjectFile.h index f6aa570b633..19078618d62 100644 --- a/llvm/lib/Target/X86/X86TargetObjectFile.h +++ b/llvm/lib/Target/X86/X86TargetObjectFile.h @@ -37,7 +37,7 @@ namespace llvm { MCStreamer &Streamer) const override; }; - /// \brief This implemenatation is used for X86 ELF targets that don't + /// This implementation is used for X86 ELF targets that don't /// have a further specialization. class X86ELFTargetObjectFile : public TargetLoweringObjectFileELF { public: @@ -45,7 +45,7 @@ namespace llvm { PLTRelativeVariantKind = MCSymbolRefExpr::VK_PLT; } - /// \brief Describe a TLS variable address within debug info. + /// Describe a TLS variable address within debug info. const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override; }; @@ -55,7 +55,7 @@ namespace llvm { void Initialize(MCContext &Ctx, const TargetMachine &TM) override; }; - /// \brief This implementation is used for Fuchsia on x86-64. + /// This implementation is used for Fuchsia on x86-64. class X86FuchsiaTargetObjectFile : public X86ELFTargetObjectFile { void Initialize(MCContext &Ctx, const TargetMachine &TM) override; }; @@ -66,18 +66,18 @@ namespace llvm { void Initialize(MCContext &Ctx, const TargetMachine &TM) override; }; - /// \brief This implementation is used for Solaris on x86/x86-64. + /// This implementation is used for Solaris on x86/x86-64. class X86SolarisTargetObjectFile : public X86ELFTargetObjectFile { void Initialize(MCContext &Ctx, const TargetMachine &TM) override; }; - /// \brief This implementation is used for Windows targets on x86 and x86-64. + /// This implementation is used for Windows targets on x86 and x86-64. class X86WindowsTargetObjectFile : public TargetLoweringObjectFileCOFF { const MCExpr * lowerRelativeReference(const GlobalValue *LHS, const GlobalValue *RHS, const TargetMachine &TM) const override; - /// \brief Given a mergeable constant with the specified size and relocation + /// Given a mergeable constant with the specified size and relocation /// information, return a section that it should be placed in. MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind, const Constant *C, diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp index 27190d5dc13..27717517e40 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -2199,7 +2199,7 @@ int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy, return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned); } -/// \brief Calculate the cost of materializing a 64-bit value. This helper +/// Calculate the cost of materializing a 64-bit value. This helper /// method might only calculate a fraction of a larger immediate. Therefore it /// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) { diff --git a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp index 059b75ef482..faf66e5944a 100644 --- a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp +++ b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file is part of the XCore Disassembler. +/// This file is part of the XCore Disassembler. /// //===----------------------------------------------------------------------===// @@ -29,7 +29,7 @@ typedef MCDisassembler::DecodeStatus DecodeStatus; namespace { -/// \brief A disassembler class for XCore. +/// A disassembler class for XCore. class XCoreDisassembler : public MCDisassembler { public: XCoreDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) : diff --git a/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h b/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h index 8a7efe2e39c..a0b48002646 100644 --- a/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h +++ b/llvm/lib/Target/XCore/InstPrinter/XCoreInstPrinter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the XCoreInstPrinter class, +/// This file contains the declaration of the XCoreInstPrinter class, /// which is used to print XCore MCInst to a .s file. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp index 666b3870fb8..1c93ba8fa14 100644 --- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp +++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains a pass that lowers thread local variables on the +/// This file contains a pass that lowers thread local variables on the /// XCore. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/XCore/XCoreMCInstLower.cpp b/llvm/lib/Target/XCore/XCoreMCInstLower.cpp index 7763ccc8f4a..21270192b23 100644 --- a/llvm/lib/Target/XCore/XCoreMCInstLower.cpp +++ b/llvm/lib/Target/XCore/XCoreMCInstLower.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains code to lower XCore MachineInstrs to their +/// This file contains code to lower XCore MachineInstrs to their /// corresponding MCInst records. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/XCore/XCoreMCInstLower.h b/llvm/lib/Target/XCore/XCoreMCInstLower.h index 8fb1593cc6e..abcb80fcf76 100644 --- a/llvm/lib/Target/XCore/XCoreMCInstLower.h +++ b/llvm/lib/Target/XCore/XCoreMCInstLower.h @@ -21,7 +21,7 @@ namespace llvm { class Mangler; class AsmPrinter; -/// \brief This class is used to lower an MachineInstr into an MCInst. +/// This class is used to lower an MachineInstr into an MCInst. 
+/// This class is used to lower a MachineInstr into an MCInst.
class LLVM_LIBRARY_VISIBILITY XCoreMCInstLower { typedef MachineOperand::MachineOperandType MachineOperandType; MCContext *Ctx; diff --git a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp index 5be728b3855..3b735ddd192 100644 --- a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp +++ b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp @@ -130,7 +130,7 @@ Pass *llvm::createAlwaysInlinerLegacyPass(bool InsertLifetime) { return new AlwaysInlinerLegacyPass(InsertLifetime); } -/// \brief Get the inline cost for the always-inliner. +/// Get the inline cost for the always-inliner. /// /// The always inliner *only* handles functions which are marked with the /// attribute to force inlining. As such, it is dramatically simpler and avoids diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp index d27adca86f2..e159920116c 100644 --- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -738,7 +738,7 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca, return true; } -/// \brief Checks if a type could have padding bytes. +/// Checks if a type could have padding bytes. static bool isDenselyPacked(Type *type, const DataLayout &DL) { // There is no size information, so be conservative. if (!type->isSized()) @@ -772,7 +772,7 @@ static bool isDenselyPacked(Type *type, const DataLayout &DL) { return true; } -/// \brief Checks if the padding bytes of an argument could be accessed. +/// Checks if the padding bytes of an argument could be accessed. static bool canPaddingBeAccessed(Argument *arg) { assert(arg->hasByValAttr()); diff --git a/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp b/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp index 6af10436259..05fc3dd6950 100644 --- a/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp +++ b/llvm/lib/Transforms/IPO/BarrierNoopPass.cpp @@ -23,7 +23,7 @@ using namespace llvm; namespace { -/// \brief A nonce module pass used to place a barrier in a pass manager. +/// A nonce module pass used to place a barrier in a pass manager. /// /// There is no mechanism for ending a CGSCC pass manager once one is started. /// This prevents extension points from having clear deterministic ordering diff --git a/llvm/lib/Transforms/IPO/InlineSimple.cpp b/llvm/lib/Transforms/IPO/InlineSimple.cpp index b259a0abd63..82bba1e5c93 100644 --- a/llvm/lib/Transforms/IPO/InlineSimple.cpp +++ b/llvm/lib/Transforms/IPO/InlineSimple.cpp @@ -31,7 +31,7 @@ using namespace llvm; namespace { -/// \brief Actual inliner pass implementation. +/// Actual inliner pass implementation. /// /// The common implementation of the inlining logic is shared between this /// inliner pass and the always inliner pass. The two passes use different cost diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 6444a3185f6..87a8b855780 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -170,7 +170,7 @@ private: uint64_t TotalUsedSamples = 0; }; -/// \brief Sample profile pass. +/// Sample profile pass. /// /// This pass reads profile data from the file specified by /// -sample-profile-file and annotates every affected function with the @@ -219,25 +219,25 @@ protected: void computeDominanceAndLoopInfo(Function &F); void clearFunctionData(); - /// \brief Map basic blocks to their computed weights. + /// Map basic blocks to their computed weights. 
/// /// The weight of a basic block is defined to be the maximum /// of all the instruction weights in that block. BlockWeightMap BlockWeights; - /// \brief Map edges to their computed weights. + /// Map edges to their computed weights. /// /// Edge weights are computed by propagating basic block weights in /// SampleProfile::propagateWeights. EdgeWeightMap EdgeWeights; - /// \brief Set of visited blocks during propagation. + /// Set of visited blocks during propagation. SmallPtrSet<const BasicBlock *, 32> VisitedBlocks; - /// \brief Set of visited edges during propagation. + /// Set of visited edges during propagation. SmallSet<Edge, 32> VisitedEdges; - /// \brief Equivalence classes for block weights. + /// Equivalence classes for block weights. /// /// Two blocks BB1 and BB2 are in the same equivalence class if they /// dominate and post-dominate each other, and they are in the same loop @@ -251,7 +251,7 @@ protected: /// is one-to-one mapping. StringMap<Function *> SymbolMap; - /// \brief Dominance, post-dominance and loop information. + /// Dominance, post-dominance and loop information. std::unique_ptr<DominatorTree> DT; std::unique_ptr<PostDomTreeBase<BasicBlock>> PDT; std::unique_ptr<LoopInfo> LI; @@ -259,39 +259,39 @@ protected: std::function<AssumptionCache &(Function &)> GetAC; std::function<TargetTransformInfo &(Function &)> GetTTI; - /// \brief Predecessors for each basic block in the CFG. + /// Predecessors for each basic block in the CFG. BlockEdgeMap Predecessors; - /// \brief Successors for each basic block in the CFG. + /// Successors for each basic block in the CFG. BlockEdgeMap Successors; SampleCoverageTracker CoverageTracker; - /// \brief Profile reader object. + /// Profile reader object. std::unique_ptr<SampleProfileReader> Reader; - /// \brief Samples collected for the body of this function. + /// Samples collected for the body of this function. FunctionSamples *Samples = nullptr; - /// \brief Name of the profile file to load. + /// Name of the profile file to load. std::string Filename; - /// \brief Flag indicating whether the profile input loaded successfully. + /// Flag indicating whether the profile input loaded successfully. bool ProfileIsValid = false; - /// \brief Flag indicating if the pass is invoked in ThinLTO compile phase. + /// Flag indicating if the pass is invoked in ThinLTO compile phase. /// /// In this phase, in annotation, we should not promote indirect calls. /// Instead, we will mark GUIDs that need to be annotated to the function. bool IsThinLTOPreLink; - /// \brief Total number of samples collected in this profile. + /// Total number of samples collected in this profile. /// /// This is the sum of all the samples collected in all the functions executed /// at runtime. uint64_t TotalCollectedSamples = 0; - /// \brief Optimization Remark Emitter used to emit diagnostic remarks. + /// Optimization Remark Emitter used to emit diagnostic remarks. OptimizationRemarkEmitter *ORE = nullptr; }; @@ -473,7 +473,7 @@ void SampleProfileLoader::clearFunctionData() { } #ifndef NDEBUG -/// \brief Print the weight of edge \p E on stream \p OS. +/// Print the weight of edge \p E on stream \p OS. /// /// \param OS Stream to emit the output to. /// \param E Edge to print. @@ -482,7 +482,7 @@ void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) { << "]: " << EdgeWeights[E] << "\n"; } -/// \brief Print the equivalence class of block \p BB on stream \p OS. +/// Print the equivalence class of block \p BB on stream \p OS.
/// /// \param OS Stream to emit the output to. /// \param BB Block to print. @@ -493,7 +493,7 @@ void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS, << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n"; } -/// \brief Print the weight of block \p BB on stream \p OS. +/// Print the weight of block \p BB on stream \p OS. /// /// \param OS Stream to emit the output to. /// \param BB Block to print. @@ -505,7 +505,7 @@ void SampleProfileLoader::printBlockWeight(raw_ostream &OS, } #endif -/// \brief Get the weight for an instruction. +/// Get the weight for an instruction. /// /// The "weight" of an instruction \p Inst is the number of samples /// collected on that instruction at runtime. To retrieve it, we @@ -570,7 +570,7 @@ ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) { return R; } -/// \brief Compute the weight of a basic block. +/// Compute the weight of a basic block. /// /// The weight of basic block \p BB is the maximum weight of all the /// instructions in BB. @@ -591,7 +591,7 @@ ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) { return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code(); } -/// \brief Compute and store the weights of every basic block. +/// Compute and store the weights of every basic block. /// /// This populates the BlockWeights map by computing /// the weights of every basic block in the CFG. @@ -613,7 +613,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) { return Changed; } -/// \brief Get the FunctionSamples for a call instruction. +/// Get the FunctionSamples for a call instruction. /// /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined /// instance that the call instruction is calling into. It contains @@ -687,7 +687,7 @@ SampleProfileLoader::findIndirectCallFunctionSamples( return R; } -/// \brief Get the FunctionSamples for an instruction. +/// Get the FunctionSamples for an instruction. /// /// The FunctionSamples of an instruction \p Inst is the inlined instance /// that the instruction is coming from. We traverse the inline stack @@ -739,7 +739,7 @@ bool SampleProfileLoader::inlineCallInstruction(Instruction *I) { return false; } -/// \brief Iteratively inline hot callsites of a function. +/// Iteratively inline hot callsites of a function. /// /// Iteratively traverse all callsites of the function \p F, and find if /// the corresponding inlined instance exists and is hot in profile. If @@ -840,7 +840,7 @@ bool SampleProfileLoader::inlineHotFunctions( return Changed; } -/// \brief Find equivalence classes for the given block. +/// Find equivalence classes for the given block. /// /// This finds all the blocks that are guaranteed to execute the same /// number of times as \p BB1. To do this, it traverses all the @@ -897,7 +897,7 @@ void SampleProfileLoader::findEquivalencesFor( } } -/// \brief Find equivalence classes. +/// Find equivalence classes. /// /// Since samples may be missing from blocks, we can fill in the gaps by setting /// the weights of all the blocks in the same equivalence class to the same @@ -955,7 +955,7 @@ void SampleProfileLoader::findEquivalenceClasses(Function &F) { } } -/// \brief Visit the given edge to decide if it has a valid weight. +/// Visit the given edge to decide if it has a valid weight. /// /// If \p E has not been visited before, we copy it to \p UnknownEdge /// and increment the count of unknown edges.
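The propagation machinery introduced in these hunks rests on a simple constraint rule: once a block's weight is known and all but one of its incident edges have known weights, the remaining edge weight is forced. A minimal sketch of that rule, using invented names (EdgeInfo, solveUnknownEdge) rather than the pass's real data structures:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// One incident edge of a block; Weight stays empty until propagation
// assigns it.
struct EdgeInfo {
  std::optional<uint64_t> Weight;
};

// If exactly one incident edge is unknown, derive its weight from the
// block weight; returns true if an edge weight was filled in.
static bool solveUnknownEdge(uint64_t BlockWeight,
                             std::vector<EdgeInfo> &Edges) {
  uint64_t KnownSum = 0;
  EdgeInfo *Unknown = nullptr;
  for (EdgeInfo &E : Edges) {
    if (E.Weight) {
      KnownSum += *E.Weight;
    } else if (Unknown) {
      return false; // Two or more unknowns; this block cannot decide yet.
    } else {
      Unknown = &E;
    }
  }
  if (!Unknown)
    return false; // Everything already known.
  Unknown->Weight = BlockWeight > KnownSum ? BlockWeight - KnownSum : 0;
  return true;
}
```

Iterating this rule over all blocks until nothing changes is, in spirit, what the propagateThroughEdges loop in the hunks just below does.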
@@ -976,7 +976,7 @@ uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges, return EdgeWeights[E]; } -/// \brief Propagate weights through incoming/outgoing edges. +/// Propagate weights through incoming/outgoing edges. /// /// If the weight of a basic block is known, and there is only one edge /// with an unknown weight, we can calculate the weight of that edge. @@ -1134,7 +1134,7 @@ bool SampleProfileLoader::propagateThroughEdges(Function &F, return Changed; } -/// \brief Build in/out edge lists for each basic block in the CFG. +/// Build in/out edge lists for each basic block in the CFG. /// /// We are interested in unique edges. If a block B1 has multiple /// edges to another block B2, we only add a single B1->B2 edge. @@ -1180,7 +1180,7 @@ static SmallVector<InstrProfValueData, 2> SortCallTargets( return R; } -/// \brief Propagate weights into edges +/// Propagate weights into edges. /// /// The following rules are applied to every block BB in the CFG: /// @@ -1342,7 +1342,7 @@ void SampleProfileLoader::propagateWeights(Function &F) { } } -/// \brief Get the line number for the function header. +/// Get the line number for the function header. /// /// This looks up function \p F in the current compilation unit and /// retrieves the line number where the function is defined. This is @@ -1377,7 +1377,7 @@ void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) { LI->analyze(*DT); } -/// \brief Generate branch weight metadata for all branches in \p F. +/// Generate branch weight metadata for all branches in \p F. /// /// Branch weights are computed out of instruction samples using a /// propagation heuristic. Propagation proceeds in 3 phases: diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 274bde0da89..d818441d09c 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -856,7 +856,7 @@ Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) { return createFMul(OpndVal, Coeff.getValue(Instr->getType())); } -/// \brief Return true if we can prove that: +/// Return true if we can prove that: /// (sub LHS, RHS) === (sub nsw LHS, RHS) /// This basically requires proving that the add in the original type would not /// overflow to change the sign bit or have a carry out. @@ -884,7 +884,7 @@ bool InstCombiner::willNotOverflowSignedSub(const Value *LHS, return false; } -/// \brief Return true if we can prove that: +/// Return true if we can prove that: /// (sub LHS, RHS) === (sub nuw LHS, RHS) bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS, diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 9a5183cf28f..368402b57cb 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -75,7 +75,7 @@ static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS, return Builder.CreateFCmp(Pred, LHS, RHS); } -/// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or +/// Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or /// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)) /// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index 246a335199c..1ba46693f3e 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -3668,7 +3668,7 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, return false; } -/// \brief Recognize and process idiom involving test for multiplication +/// Recognize and process idiom involving test for multiplication /// overflow. /// /// The caller has matched a pattern of the form: @@ -3966,7 +3966,7 @@ static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) { return GoodToSwap > 0; } -/// \brief Check that one use is in the same block as the definition and all +/// Check that one use is in the same block as the definition and all /// other uses are in blocks dominated by a given block. /// /// \param DI Definition @@ -4011,7 +4011,7 @@ static bool isChainSelectCmpBranch(const SelectInst *SI) { return true; } -/// \brief True when a select result is replaced by one of its operands +/// True when a select result is replaced by one of its operands /// in a select-icmp sequence. This will eventually result in the elimination /// of the select. /// diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index 6cbe5035229..56fb5e013d3 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -122,17 +122,17 @@ static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) { return V; } -/// \brief Add one to a Constant +/// Add one to a Constant static inline Constant *AddOne(Constant *C) { return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1)); } -/// \brief Subtract one from a Constant +/// Subtract one from a Constant static inline Constant *SubOne(Constant *C) { return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1)); } -/// \brief Return true if the specified value is free to invert (apply ~ to). +/// Return true if the specified value is free to invert (apply ~ to). /// This happens in cases where the ~ can be eliminated. If WillInvertAllUses /// is true, work under the assumption that the caller intends to remove all /// uses of V and only keep uses of ~V. @@ -178,7 +178,7 @@ static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) { return false; } -/// \brief Specific patterns of overflow check idioms that we match. +/// Specific patterns of overflow check idioms that we match. enum OverflowCheckFlavor { OCF_UNSIGNED_ADD, OCF_SIGNED_ADD, @@ -190,7 +190,7 @@ enum OverflowCheckFlavor { OCF_INVALID }; -/// \brief Returns the OverflowCheckFlavor corresponding to a overflow_with_op +/// Returns the OverflowCheckFlavor corresponding to an overflow_with_op /// intrinsic. static inline OverflowCheckFlavor IntrinsicIDToOverflowCheckFlavor(unsigned ID) { @@ -212,7 +212,7 @@ IntrinsicIDToOverflowCheckFlavor(unsigned ID) { } } -/// \brief The core instruction combiner logic. +/// The core instruction combiner logic. /// /// This class provides both the logic to recursively visit instructions and /// combine them. @@ -220,10 +220,10 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner : public InstVisitor<InstCombiner, Instruction *> { // FIXME: These members shouldn't be public.
public: - /// \brief A worklist of the instructions that need to be simplified. + /// A worklist of the instructions that need to be simplified. InstCombineWorklist &Worklist; - /// \brief An IRBuilder that automatically inserts new instructions into the + /// An IRBuilder that automatically inserts new instructions into the /// worklist. using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>; BuilderTy &Builder; @@ -261,7 +261,7 @@ public: ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), LI(LI) {} - /// \brief Run the combiner over the entire worklist until it is empty. + /// Run the combiner over the entire worklist until it is empty. /// /// \returns true if the IR is changed. bool run(); @@ -390,7 +390,7 @@ private: /// if it cannot already be eliminated by some other transformation. bool shouldOptimizeCast(CastInst *CI); - /// \brief Try to optimize a sequence of instructions checking if an operation + /// Try to optimize a sequence of instructions checking if an operation /// on LHS and RHS overflows. /// /// If this overflow check is done via one of the overflow check intrinsics, @@ -488,7 +488,7 @@ private: Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS, bool JoinedByAnd, Instruction &CxtI); public: - /// \brief Inserts an instruction \p New before instruction \p Old + /// Inserts an instruction \p New before instruction \p Old /// /// Also adds the new instruction to the worklist and returns \p New so that /// it is suitable for use as the return from the visitation patterns. @@ -501,13 +501,13 @@ public: return New; } - /// \brief Same as InsertNewInstBefore, but also sets the debug loc. + /// Same as InsertNewInstBefore, but also sets the debug loc. Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) { New->setDebugLoc(Old.getDebugLoc()); return InsertNewInstBefore(New, Old); } - /// \brief A combiner-aware RAUW-like routine. + /// A combiner-aware RAUW-like routine. /// /// This method is to be used when an instruction is found to be dead, /// replaceable with another preexisting expression. Here we add all uses of @@ -542,7 +542,7 @@ public: return InsertValueInst::Create(Struct, Result, 0); } - /// \brief Combiner aware instruction erasure. + /// Combiner aware instruction erasure. /// /// When dealing with an instruction that has side effects or produces a void /// value, we can't rely on DCE to delete the instruction. Instead, visit @@ -613,11 +613,11 @@ public: uint64_t MaxArraySizeForCombine; private: - /// \brief Performs a few simplifications for operators which are associative + /// Performs a few simplifications for operators which are associative /// or commutative. bool SimplifyAssociativeOrCommutative(BinaryOperator &I); - /// \brief Tries to simplify binary operations which some other binary + /// Tries to simplify binary operations which some other binary /// operation distributes over. /// /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)" @@ -652,7 +652,7 @@ private: ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater); - /// \brief Attempts to replace V with a simpler value based on the demanded + /// Attempts to replace V with a simpler value based on the demanded /// bits.
Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known, unsigned Depth, Instruction *CxtI); @@ -674,7 +674,7 @@ private: Instruction *Shr, const APInt &ShrOp1, Instruction *Shl, const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known); - /// \brief Tries to simplify operands to an integer instruction based on its + /// Tries to simplify operands to an integer instruction based on its /// demanded bits. bool SimplifyDemandedInstructionBits(Instruction &Inst); @@ -700,7 +700,7 @@ private: Instruction *foldAddWithConstant(BinaryOperator &Add); - /// \brief Try to rotate an operation below a PHI node, using PHI nodes for + /// Try to rotate an operation below a PHI node, using PHI nodes for /// its operands. Instruction *FoldPHIArgOpIntoPHI(PHINode &PN); Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN); @@ -802,7 +802,7 @@ private: Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned); - /// \brief Returns a value X such that Val = X * Scale, or null if none. + /// Returns a value X such that Val = X * Scale, or null if none. /// /// If the multiplication is known not to overflow then NoSignedWrap is set. Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index 04119a980d1..b78de0fa691 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -440,7 +440,7 @@ static bool isSupportedAtomicType(Type *Ty) { return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy(); } -/// \brief Helper to combine a load to a new type. +/// Helper to combine a load to a new type. /// /// This just does the work of combining a load to a new type. It handles /// metadata, etc., and returns the new instruction. The \c NewTy should be the @@ -507,7 +507,7 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT return NewLoad; } -/// \brief Combine a store to a new type. +/// Combine a store to a new type. /// /// Returns the newly created store instruction. static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) { @@ -584,7 +584,7 @@ static bool isMinMaxWithLoads(Value *V) { match(L2, m_Load(m_Specific(LHS)))); } -/// \brief Combine loads to match the type of their uses' value after looking +/// Combine loads to match the type of their uses' value after looking /// through intervening bitcasts. /// /// The core idea here is that if the result of a load is used in an operation, @@ -1087,7 +1087,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { return nullptr; } -/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast. +/// Look for extractelement/insertvalue sequence that acts like a bitcast. /// /// \returns underlying value that was "cast", or nullptr otherwise. /// @@ -1142,7 +1142,7 @@ static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) { return U; } -/// \brief Combine stores to match the type of value being stored. +/// Combine stores to match the type of value being stored. 
/// /// The core idea here is that the memory does not have any intrinsic type and /// where we can we should match the type of a store to the type of value being diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp index 5a4e12d142c..2885591b537 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -95,7 +95,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC, return MadeChange ? V : nullptr; } -/// \brief A helper routine of InstCombiner::visitMul(). +/// A helper routine of InstCombiner::visitMul(). /// /// If C is a scalar/vector of known powers of 2, then this function returns /// a new scalar/vector obtained from logBase2 of C. @@ -125,7 +125,7 @@ static Constant *getLogBase2(Type *Ty, Constant *C) { return ConstantVector::get(Elts); } -/// \brief Return true if we can prove that: +/// Return true if we can prove that: /// (mul LHS, RHS) === (mul nsw LHS, RHS) bool InstCombiner::willNotOverflowSignedMul(const Value *LHS, const Value *RHS, @@ -830,7 +830,7 @@ using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1, const BinaryOperator &I, InstCombiner &IC); -/// \brief Used to maintain state for visitUDivOperand(). +/// Used to maintain state for visitUDivOperand(). struct UDivFoldAction { /// Informs visitUDiv() how to fold this operand. This can be zero if this /// action joins two actions together. @@ -899,7 +899,7 @@ static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I, return LShr; } -// \brief Recursively visits the possible right hand operands of a udiv +// Recursively visits the possible right hand operands of a udiv // instruction, seeing through select instructions, to determine if we can // replace the udiv with something simpler. If we find that an operand is not // able to simplify the udiv, we abort the entire transformation. diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index 4e95423c989..ac4e568d529 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -1351,7 +1351,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) { } while (true); } -/// \brief Creates node of binary operation with the same attributes as the +/// Creates node of binary operation with the same attributes as the /// specified one but with other operands. static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS, InstCombiner::BuilderTy &B) { @@ -1362,7 +1362,7 @@ static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS, return BO; } -/// \brief Makes transformation of binary operation specific for vector types. +/// Makes transformation of binary operation specific for vector types. /// \param Inst Binary operator to transform. /// \return Pointer to node that must replace the original binary operator, or /// null pointer if no transformation was made. @@ -2207,7 +2207,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) { return nullptr; } -/// \brief Move the call to free before a NULL test. +/// Move the call to free before a NULL test. /// /// Check if this free is accessed after its argument has been tested /// against NULL (property 0).
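The getLogBase2 helper touched in the InstCombineMulDivRem hunks above feeds the classic strength reduction in which a multiply by a constant power of two becomes a shift. A minimal scalar illustration, with logBase2 as a hypothetical stand-in for the constant-folding version in the pass:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Return log2(C) when C is a nonzero power of two, and nothing otherwise;
// the real helper performs this per element on constant vectors as well.
static std::optional<unsigned> logBase2(uint64_t C) {
  if (C == 0 || (C & (C - 1)) != 0)
    return std::nullopt;
  unsigned Log = 0;
  while (C >>= 1)
    ++Log;
  return Log;
}

int main() {
  uint64_t X = 5;
  auto Log = logBase2(8);           // mul X, 8 can become shl X, 3.
  assert(Log && (X << *Log) == X * 8);
  assert(!logBase2(12));            // 12 is not a power of two; no rewrite.
  return 0;
}
```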
@@ -3211,7 +3211,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL, return MadeIRChange; } -/// \brief Populate the IC worklist from a function, and prune any dead basic +/// Populate the IC worklist from a function, and prune any dead basic /// blocks discovered in the process. /// /// This also does basic constant propagation and other forward fixing to make diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 810a20e10f1..42f0c60c1d7 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -893,13 +893,13 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { void createDynamicAllocasInitStorage(); // ----------------------- Visitors. - /// \brief Collect all Ret instructions. + /// Collect all Ret instructions. void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); } - /// \brief Collect all Resume instructions. + /// Collect all Resume instructions. void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); } - /// \brief Collect all CatchReturnInst instructions. + /// Collect all CleanupReturnInst instructions. void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); } void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore, @@ -947,7 +947,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { // requested memory, but also left, partial and right redzones. void handleDynamicAllocaCall(AllocaInst *AI); - /// \brief Collect Alloca instructions we want (and can) handle. + /// Collect Alloca instructions we want (and can) handle. void visitAllocaInst(AllocaInst &AI) { if (!ASan.isInterestingAlloca(AI)) { if (AI.isStaticAlloca()) { @@ -968,7 +968,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { AllocaVec.push_back(&AI); } - /// \brief Collect lifetime intrinsic calls to check for use-after-scope + /// Collect lifetime intrinsic calls to check for use-after-scope /// errors. void visitIntrinsicInst(IntrinsicInst &II) { Intrinsic::ID ID = II.getIntrinsicID(); @@ -1086,7 +1086,7 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { return Res; } -// \brief Create a constant for Str so that we can pass it to the run-time lib. +// Create a constant for Str so that we can pass it to the run-time lib. static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); @@ -1100,7 +1100,7 @@ static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str, return GV; } -/// \brief Create a global describing a source location. +/// Create a global describing a source location. static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, LocationMetadata MD) { Constant *LocData[] = { @@ -1116,7 +1116,7 @@ static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, return GV; } -/// \brief Check if \p G has been created by a trusted compiler pass. +/// Check if \p G has been created by a trusted compiler pass. static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) { // Do not instrument asan globals.
if (G->getName().startswith(kAsanGenPrefix) || diff --git a/llvm/lib/Transforms/Instrumentation/CFGMST.h b/llvm/lib/Transforms/Instrumentation/CFGMST.h index 075e5672cff..54a36eb716a 100644 --- a/llvm/lib/Transforms/Instrumentation/CFGMST.h +++ b/llvm/lib/Transforms/Instrumentation/CFGMST.h @@ -31,7 +31,7 @@ namespace llvm { -/// \brief An union-find based Minimum Spanning Tree for CFG +/// A union-find based Minimum Spanning Tree for CFG /// /// Implements a Union-find algorithm to compute Minimum Spanning Tree /// for a given CFG. diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp index 55bdda3eb1a..75061749fbb 100644 --- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp @@ -121,7 +121,7 @@ static cl::opt<unsigned long long> ClMappingOffset( namespace { -/// \brief An instrumentation pass implementing detection of addressability bugs +/// An instrumentation pass implementing detection of addressability bugs /// using tagged pointers. class HWAddressSanitizer : public FunctionPass { public: @@ -223,7 +223,7 @@ FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel, return new HWAddressSanitizer(CompileKernel, Recover); } -/// \brief Module-level initialization. +/// Module-level initialization. /// /// Inserts a call to __hwasan_init to the module's constructor list. bool HWAddressSanitizer::doInitialization(Module &M) { diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 6437c739f5a..a2316881233 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -163,7 +163,7 @@ static const unsigned kRetvalTLSSize = 800; // Access sizes are powers of two: 1, 2, 4, 8. static const size_t kNumberOfAccessSizes = 4; -/// \brief Track origins of uninitialized values. +/// Track origins of uninitialized values. /// /// Adds a section to MemorySanitizer report that points to the allocation /// (stack or heap) the uninitialized bits came from originally. @@ -390,7 +390,7 @@ static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = { namespace { -/// \brief An instrumentation pass implementing detection of uninitialized +/// An instrumentation pass implementing detection of uninitialized /// reads. /// /// MemorySanitizer: instrument the code in module to find @@ -423,7 +423,7 @@ private: void initializeCallbacks(Module &M); - /// \brief Track origins (allocation points) of uninitialized values. + /// Track origins (allocation points) of uninitialized values. int TrackOrigins; bool Recover; @@ -431,64 +431,64 @@ private: Type *IntptrTy; Type *OriginTy; - /// \brief Thread-local shadow storage for function parameters. + /// Thread-local shadow storage for function parameters. GlobalVariable *ParamTLS; - /// \brief Thread-local origin storage for function parameters. + /// Thread-local origin storage for function parameters. GlobalVariable *ParamOriginTLS; - /// \brief Thread-local shadow storage for function return value. + /// Thread-local shadow storage for function return value. GlobalVariable *RetvalTLS; - /// \brief Thread-local origin storage for function return value. + /// Thread-local origin storage for function return value.
GlobalVariable *RetvalOriginTLS; - /// \brief Thread-local shadow storage for in-register va_arg function + /// Thread-local shadow storage for in-register va_arg function /// parameters (x86_64-specific). GlobalVariable *VAArgTLS; - /// \brief Thread-local shadow storage for va_arg overflow area + /// Thread-local shadow storage for va_arg overflow area /// (x86_64-specific). GlobalVariable *VAArgOverflowSizeTLS; - /// \brief Thread-local space used to pass origin value to the UMR reporting + /// Thread-local space used to pass origin value to the UMR reporting /// function. GlobalVariable *OriginTLS; - /// \brief The run-time callback to print a warning. + /// The run-time callback to print a warning. Value *WarningFn = nullptr; // These arrays are indexed by log2(AccessSize). Value *MaybeWarningFn[kNumberOfAccessSizes]; Value *MaybeStoreOriginFn[kNumberOfAccessSizes]; - /// \brief Run-time helper that generates a new origin value for a stack + /// Run-time helper that generates a new origin value for a stack /// allocation. Value *MsanSetAllocaOrigin4Fn; - /// \brief Run-time helper that poisons stack on function entry. + /// Run-time helper that poisons stack on function entry. Value *MsanPoisonStackFn; - /// \brief Run-time helper that records a store (or any event) of an + /// Run-time helper that records a store (or any event) of an /// uninitialized value and returns an updated origin id encoding this info. Value *MsanChainOriginFn; - /// \brief MSan runtime replacements for memmove, memcpy and memset. + /// MSan runtime replacements for memmove, memcpy and memset. Value *MemmoveFn, *MemcpyFn, *MemsetFn; - /// \brief Memory map parameters used in application-to-shadow calculation. + /// Memory map parameters used in application-to-shadow calculation. const MemoryMapParams *MapParams; - /// \brief Custom memory map parameters used when -msan-shadow-base or + /// Custom memory map parameters used when -msan-shadow-base or // -msan-origin-base is provided. MemoryMapParams CustomMapParams; MDNode *ColdCallWeights; - /// \brief Branch weights for origin store. + /// Branch weights for origin store. MDNode *OriginStoreWeights; - /// \brief An empty volatile inline asm that prevents callback merge. + /// An empty volatile inline asm that prevents callback merge. InlineAsm *EmptyAsm; Function *MsanCtorFunction; @@ -510,7 +510,7 @@ FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) { return new MemorySanitizer(TrackOrigins, Recover); } -/// \brief Create a non-const global initialized with the given string. +/// Create a non-const global initialized with the given string. /// /// Creates a writable global for Str so that we can pass it to the /// run-time lib. Runtime uses first 4 bytes of the string to store the @@ -522,7 +522,7 @@ static GlobalVariable *createPrivateNonConstGlobalForString(Module &M, GlobalValue::PrivateLinkage, StrConst, ""); } -/// \brief Insert extern declaration of runtime-provided functions and globals. +/// Insert extern declaration of runtime-provided functions and globals. void MemorySanitizer::initializeCallbacks(Module &M) { // Only do this once. if (WarningFn) @@ -604,7 +604,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) { /*hasSideEffects=*/true); } -/// \brief Module-level initialization. +/// Module-level initialization. /// /// inserts a call to __msan_init to the module's constructor list. 
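The module-constructor mechanism that comment refers to is the same one ASan and HWASan use above. As a rough, self-contained sketch of the pattern (the helper name registerInitCtor is invented for illustration; Function::Create, getOrInsertFunction and appendToGlobalCtors are the LLVM APIs of this vintage):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

// Build an internal void() function whose body calls the runtime init
// routine, then register it in llvm.global_ctors so it runs before main().
static void registerInitCtor(Module &M, StringRef InitName) {
  LLVMContext &C = M.getContext();
  Function *Ctor = Function::Create(
      FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false),
      GlobalValue::InternalLinkage, "sanitizer.module_ctor", &M);
  IRBuilder<> IRB(BasicBlock::Create(C, "entry", Ctor));
  // Declare (or reuse) the runtime entry point, e.g. __msan_init.
  Value *InitFn = M.getOrInsertFunction(InitName, Type::getVoidTy(C));
  IRB.CreateCall(InitFn);
  IRB.CreateRetVoid();
  appendToGlobalCtors(M, Ctor, /*Priority=*/0);
}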
 bool MemorySanitizer::doInitialization(Module &M) {
@@ -706,7 +706,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
 
 namespace {
 
-/// \brief A helper class that handles instrumentation of VarArg
+/// A helper class that handles instrumentation of VarArg
 /// functions on a particular platform.
 ///
 /// Implementations are expected to insert the instrumentation
@@ -717,16 +717,16 @@ namespace {
 struct VarArgHelper {
   virtual ~VarArgHelper() = default;
 
-  /// \brief Visit a CallSite.
+  /// Visit a CallSite.
   virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
 
-  /// \brief Visit a va_start call.
+  /// Visit a va_start call.
   virtual void visitVAStartInst(VAStartInst &I) = 0;
 
-  /// \brief Visit a va_copy call.
+  /// Visit a va_copy call.
   virtual void visitVACopyInst(VACopyInst &I) = 0;
 
-  /// \brief Finalize function instrumentation.
+  /// Finalize function instrumentation.
   ///
   /// This method is called after visiting all interesting (see above)
   /// instructions in a function.
@@ -815,7 +815,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
   }
 
-  /// \brief Fill memory range with the given origin value.
+  /// Fill memory range with the given origin value.
   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                    unsigned Size, unsigned Alignment) {
     const DataLayout &DL = F.getParent()->getDataLayout();
@@ -915,7 +915,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  /// \brief Helper function to insert a warning at IRB's current insert point.
+  /// Helper function to insert a warning at IRB's current insert point.
   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
     if (!Origin)
       Origin = (Value *)IRB.getInt32(0);
@@ -978,7 +978,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     DEBUG(dbgs() << "DONE:\n" << F);
   }
 
-  /// \brief Add MemorySanitizer instrumentation to a function.
+  /// Add MemorySanitizer instrumentation to a function.
   bool runOnFunction() {
     // In the presence of unreachable blocks, we may see Phi nodes with
     // incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -1019,12 +1019,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// \brief Compute the shadow type that corresponds to a given Value.
+  /// Compute the shadow type that corresponds to a given Value.
   Type *getShadowTy(Value *V) {
     return getShadowTy(V->getType());
   }
 
-  /// \brief Compute the shadow type that corresponds to a given Type.
+  /// Compute the shadow type that corresponds to a given Type.
   Type *getShadowTy(Type *OrigTy) {
     if (!OrigTy->isSized()) {
       return nullptr;
@@ -1055,14 +1055,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IntegerType::get(*MS.C, TypeSize);
   }
 
-  /// \brief Flatten a vector type.
+  /// Flatten a vector type.
   Type *getShadowTyNoVec(Type *ty) {
     if (VectorType *vt = dyn_cast<VectorType>(ty))
       return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }
 
-  /// \brief Convert a shadow value to it's flattened variant.
+  /// Convert a shadow value to its flattened variant.
   Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
     Type *Ty = V->getType();
     Type *NoVecTy = getShadowTyNoVec(Ty);
@@ -1070,7 +1070,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateBitCast(V, NoVecTy);
   }
 
-  /// \brief Compute the integer shadow offset that corresponds to a given
+  /// Compute the integer shadow offset that corresponds to a given
   /// application address.
   ///
   /// Offset = (Addr & ~AndMask) ^ XorMask
@@ -1089,7 +1089,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return OffsetLong;
   }
 
-  /// \brief Compute the shadow and origin addresses corresponding to a given
+  /// Compute the shadow and origin addresses corresponding to a given
   /// application address.
   ///
   /// Shadow = ShadowBase + Offset
@@ -1136,7 +1136,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return ret;
   }
 
-  /// \brief Compute the shadow address for a given function argument.
+  /// Compute the shadow address for a given function argument.
   ///
   /// Shadow = ParamTLS+ArgOffset.
   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
@@ -1148,7 +1148,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                               "_msarg");
   }
 
-  /// \brief Compute the origin address for a given function argument.
+  /// Compute the origin address for a given function argument.
   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                  int ArgOffset) {
     if (!MS.TrackOrigins) return nullptr;
@@ -1159,26 +1159,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                               "_msarg_o");
   }
 
-  /// \brief Compute the shadow address for a retval.
+  /// Compute the shadow address for a retval.
   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
     return IRB.CreatePointerCast(MS.RetvalTLS,
                                  PointerType::get(getShadowTy(A), 0),
                                  "_msret");
   }
 
-  /// \brief Compute the origin address for a retval.
+  /// Compute the origin address for a retval.
   Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
     // We keep a single origin for the entire retval. Might be too optimistic.
     return MS.RetvalOriginTLS;
   }
 
-  /// \brief Set SV to be the shadow value for V.
+  /// Set SV to be the shadow value for V.
   void setShadow(Value *V, Value *SV) {
     assert(!ShadowMap.count(V) && "Values may only have one shadow");
     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
   }
 
-  /// \brief Set Origin to be the origin value for V.
+  /// Set Origin to be the origin value for V.
   void setOrigin(Value *V, Value *Origin) {
     if (!MS.TrackOrigins) return;
     assert(!OriginMap.count(V) && "Values may only have one origin");
@@ -1193,7 +1193,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return Constant::getNullValue(ShadowTy);
   }
 
-  /// \brief Create a clean shadow value for a given value.
+  /// Create a clean shadow value for a given value.
   ///
   /// Clean shadow (all zeroes) means all bits of the value are defined
   /// (initialized).
@@ -1201,7 +1201,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return getCleanShadow(V->getType());
   }
 
-  /// \brief Create a dirty shadow of a given shadow type.
+  /// Create a dirty shadow of a given shadow type.
   Constant *getPoisonedShadow(Type *ShadowTy) {
     assert(ShadowTy);
     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
@@ -1220,7 +1220,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     llvm_unreachable("Unexpected shadow type");
   }
 
-  /// \brief Create a dirty shadow for a given value.
+  /// Create a dirty shadow for a given value.
   Constant *getPoisonedShadow(Value *V) {
     Type *ShadowTy = getShadowTy(V);
     if (!ShadowTy)
@@ -1228,12 +1228,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return getPoisonedShadow(ShadowTy);
   }
 
-  /// \brief Create a clean (zero) origin.
+  /// Create a clean (zero) origin.
   Value *getCleanOrigin() {
     return Constant::getNullValue(MS.OriginTy);
  }
 
-  /// \brief Get the shadow value for a given Value.
+  /// Get the shadow value for a given Value.
   ///
   /// This function either returns the value set earlier with setShadow,
   /// or extracts it from ParamTLS (for function arguments).
@@ -1332,12 +1332,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return getCleanShadow(V);
   }
 
-  /// \brief Get the shadow for i-th argument of the instruction I.
+  /// Get the shadow for i-th argument of the instruction I.
   Value *getShadow(Instruction *I, int i) {
     return getShadow(I->getOperand(i));
   }
 
-  /// \brief Get the origin for a value.
+  /// Get the origin for a value.
   Value *getOrigin(Value *V) {
     if (!MS.TrackOrigins) return nullptr;
     if (!PropagateShadow) return getCleanOrigin();
@@ -1353,12 +1353,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return Origin;
   }
 
-  /// \brief Get the origin for i-th argument of the instruction I.
+  /// Get the origin for i-th argument of the instruction I.
   Value *getOrigin(Instruction *I, int i) {
     return getOrigin(I->getOperand(i));
   }
 
-  /// \brief Remember the place where a shadow check should be inserted.
+  /// Remember the place where a shadow check should be inserted.
   ///
   /// This location will be later instrumented with a check that will print a
   /// UMR warning at runtime if the shadow value is not 0.
@@ -1374,7 +1374,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
   }
 
-  /// \brief Remember the place where a shadow check should be inserted.
+  /// Remember the place where a shadow check should be inserted.
   ///
   /// This location will be later instrumented with a check that will print a
   /// UMR warning at runtime if the value is not fully defined.
@@ -1434,7 +1434,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     InstVisitor<MemorySanitizerVisitor>::visit(I);
   }
 
-  /// \brief Instrument LoadInst
+  /// Instrument LoadInst
   ///
   /// Loads the corresponding shadow and (optionally) origin.
   /// Optionally, checks that the load address is fully defined.
@@ -1470,7 +1470,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  /// \brief Instrument StoreInst
+  /// Instrument StoreInst
   ///
   /// Stores the corresponding shadow and (optionally) origin.
   /// Optionally, checks that the store address is fully defined.
@@ -1589,7 +1589,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
   void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
 
-  /// \brief Propagate shadow for bitwise AND.
+  /// Propagate shadow for bitwise AND.
   ///
   /// This code is exact, i.e. if, for example, a bit in the left argument
   /// is defined and 0, then neither the value nor definedness of the
@@ -1638,7 +1638,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  /// \brief Default propagation of shadow and/or origin.
+  /// Default propagation of shadow and/or origin.
   ///
   /// This class implements the general case of shadow propagation, used in all
   /// cases where we don't know and/or don't care about what the operation
@@ -1664,7 +1664,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
         : IRB(IRB), MSV(MSV) {}
 
-    /// \brief Add a pair of shadow and origin values to the mix.
+    /// Add a pair of shadow and origin values to the mix.
     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
       if (CombineShadow) {
         assert(OpShadow);
@@ -1694,14 +1694,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       return *this;
     }
 
-    /// \brief Add an application value to the mix.
+    /// Add an application value to the mix.
     Combiner &Add(Value *V) {
       Value *OpShadow = MSV->getShadow(V);
       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
       return Add(OpShadow, OpOrigin);
     }
 
-    /// \brief Set the current combined values as the given instruction's shadow
+    /// Set the current combined values as the given instruction's shadow
     /// and origin.
     void Done(Instruction *I) {
       if (CombineShadow) {
@@ -1719,7 +1719,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using ShadowAndOriginCombiner = Combiner<true>;
   using OriginCombiner = Combiner<false>;
 
-  /// \brief Propagate origin for arbitrary operation.
+  /// Propagate origin for arbitrary operation.
   void setOriginForNaryOp(Instruction &I) {
     if (!MS.TrackOrigins) return;
     IRBuilder<> IRB(&I);
@@ -1737,7 +1737,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
            Ty->getPrimitiveSizeInBits();
   }
 
-  /// \brief Cast between two shadow types, extending or truncating as
+  /// Cast between two shadow types, extending or truncating as
   /// necessary.
   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                           bool Signed = false) {
@@ -1759,7 +1759,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // TODO: handle struct types.
   }
 
-  /// \brief Cast an application value to the type of its own shadow.
+  /// Cast an application value to the type of its own shadow.
   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
     Type *ShadowTy = getShadowTy(V);
     if (V->getType() == ShadowTy)
@@ -1770,7 +1770,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateBitCast(V, ShadowTy);
   }
 
-  /// \brief Propagate shadow for arbitrary operation.
+  /// Propagate shadow for arbitrary operation.
   void handleShadowOr(Instruction &I) {
     IRBuilder<> IRB(&I);
     ShadowAndOriginCombiner SC(this, IRB);
@@ -1779,7 +1779,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     SC.Done(&I);
   }
 
-  // \brief Handle multiplication by constant.
+  // Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes the corresponding number of lower bits
@@ -1856,7 +1856,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visitSRem(BinaryOperator &I) { handleDiv(I); }
   void visitFRem(BinaryOperator &I) { handleDiv(I); }
 
-  /// \brief Instrument == and != comparisons.
+  /// Instrument == and != comparisons.
   ///
   /// Sometimes the comparison result is known even if some of the bits of the
   /// arguments are not.
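The observation above — an equality compare can have a fully defined result even when some input bits are poisoned — is easiest to see outside the pass. A minimal plain-C++ illustration (names invented here; a 1-bit in Sa/Sb marks an uninitialized bit of A/B):

#include <cassert>
#include <cstdint>

// True when the outcome of (A == B) is fully defined under shadows Sa/Sb.
static bool eqResultIsDefined(uint8_t A, uint8_t Sa, uint8_t B, uint8_t Sb) {
  uint8_t Sc = Sa | Sb;                // bits poisoned in either operand
  uint8_t DefinedDiff = (A ^ B) & ~Sc; // defined bits that differ
  // Either everything is defined, or some defined bit already proves A != B.
  return Sc == 0 || DefinedDiff != 0;
}

int main() {
  // Low bit of B poisoned, all defined bits agree: the result is unknown.
  assert(!eqResultIsDefined(0x01, 0x00, 0x00, 0x01));
  // Bit 7 is defined and differs: A != B no matter what the poisoned bit is.
  assert(eqResultIsDefined(0x80, 0x00, 0x00, 0x01));
}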
@@ -1894,7 +1894,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  /// \brief Build the lowest possible value of V, taking into account V's
+  /// Build the lowest possible value of V, taking into account V's
   /// uninitialized bits.
   Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
@@ -1911,7 +1911,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  /// \brief Build the highest possible value of V, taking into account V's
+  /// Build the highest possible value of V, taking into account V's
   /// uninitialized bits.
   Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                  bool isSigned) {
@@ -1928,7 +1928,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  /// \brief Instrument relational comparisons.
+  /// Instrument relational comparisons.
   ///
   /// This function does exact shadow propagation for all relational
   /// comparisons of integers, pointers and vectors of those.
@@ -1961,7 +1961,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  /// \brief Instrument signed relational comparisons.
+  /// Instrument signed relational comparisons.
   ///
   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
   /// bit of the shadow. Everything else is delegated to handleShadowOr().
@@ -2045,7 +2045,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visitAShr(BinaryOperator &I) { handleShift(I); }
   void visitLShr(BinaryOperator &I) { handleShift(I); }
 
-  /// \brief Instrument llvm.memmove
+  /// Instrument llvm.memmove
   ///
   /// At this point we don't know if llvm.memmove will be inlined or not.
   /// If we don't instrument it and it gets inlined,
@@ -2098,7 +2098,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     VAHelper->visitVACopyInst(I);
   }
 
-  /// \brief Handle vector store-like intrinsics.
+  /// Handle vector store-like intrinsics.
   ///
   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
   /// has 1 pointer argument and 1 vector argument, returns void.
@@ -2122,7 +2122,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// \brief Handle vector load-like intrinsics.
+  /// Handle vector load-like intrinsics.
   ///
   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
   /// has 1 pointer argument, returns a vector.
@@ -2155,7 +2155,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// \brief Handle (SIMD arithmetic)-like intrinsics.
+  /// Handle (SIMD arithmetic)-like intrinsics.
   ///
   /// Instrument intrinsics with any number of arguments of the same type,
   /// equal to the return type. The type should be simple (no aggregates or
@@ -2185,7 +2185,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// \brief Heuristically instrument unknown intrinsics.
+  /// Heuristically instrument unknown intrinsics.
   ///
   /// The main purpose of this code is to do something reasonable with all
   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -2235,7 +2235,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOrigin(&I, getOrigin(Op));
   }
 
-  // \brief Instrument vector convert instrinsic.
+  // Instrument vector convert intrinsic.
   //
   // This function instruments intrinsics like cvtsi2ss:
   // %Out = int_xxx_cvtyyy(%ConvertOp)
@@ -2338,7 +2338,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateSExt(S2, T);
   }
 
-  // \brief Instrument vector shift instrinsic.
+  // Instrument vector shift intrinsic.
   //
   // This function instruments intrinsics like int_x86_avx2_psll_w.
   // Intrinsic shifts %In by %ShiftSize bits.
@@ -2363,14 +2363,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  // \brief Get an X86_MMX-sized vector type.
+  // Get an X86_MMX-sized vector type.
   Type *getMMXVectorTy(unsigned EltSizeInBits) {
     const unsigned X86_MMXSizeInBits = 64;
     return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                            X86_MMXSizeInBits / EltSizeInBits);
   }
 
-  // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
+  // Returns a signed counterpart for an (un)signed-saturate-and-pack
   // intrinsic.
   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
     switch (id) {
@@ -2401,7 +2401,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  // \brief Instrument vector pack instrinsic.
+  // Instrument vector pack intrinsic.
   //
   // This function instruments intrinsics like x86_mmx_packsswb, that
   // packs elements of 2 input vectors into half as many bits with saturation.
@@ -2444,7 +2444,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  // \brief Instrument sum-of-absolute-differencies intrinsic.
+  // Instrument sum-of-absolute-differences intrinsic.
   void handleVectorSadIntrinsic(IntrinsicInst &I) {
     const unsigned SignificantBitsPerResultElement = 16;
     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
@@ -2463,7 +2463,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  // \brief Instrument multiply-add intrinsic.
+  // Instrument multiply-add intrinsic.
   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
                                   unsigned EltSizeInBits = 0) {
     bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
@@ -2478,7 +2478,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  // \brief Instrument compare-packed intrinsic.
+  // Instrument compare-packed intrinsic.
   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
   // all-ones shadow.
   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
@@ -2491,7 +2491,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
-  // \brief Instrument compare-scalar intrinsic.
+  // Instrument compare-scalar intrinsic.
   // This handles both cmp* intrinsics which return the result in the first
   // element of a vector, and comi* which return the result as i32.
   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
@@ -3146,7 +3146,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 };
 
-/// \brief AMD64-specific implementation of VarArgHelper.
+/// AMD64-specific implementation of VarArgHelper.
 struct VarArgAMD64Helper : public VarArgHelper {
   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
   // See a comment in visitCallSite for more details.
@@ -3253,7 +3253,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
   }
 
-  /// \brief Compute the shadow address for a given va_arg.
+  /// Compute the shadow address for a given va_arg.
   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                    int ArgOffset) {
     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
@@ -3342,7 +3342,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
   }
 };
 
-/// \brief MIPS64-specific implementation of VarArgHelper.
+/// MIPS64-specific implementation of VarArgHelper.
 struct VarArgMIPS64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
@@ -3383,7 +3383,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
   }
 
-  /// \brief Compute the shadow address for a given va_arg.
+  /// Compute the shadow address for a given va_arg.
   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                    int ArgOffset) {
     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
@@ -3452,7 +3452,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   }
 };
 
-/// \brief AArch64-specific implementation of VarArgHelper.
+/// AArch64-specific implementation of VarArgHelper.
 struct VarArgAArch64Helper : public VarArgHelper {
   static const unsigned kAArch64GrArgSize = 64;
   static const unsigned kAArch64VrArgSize = 128;
@@ -3704,7 +3704,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
   }
 };
 
-/// \brief PowerPC64-specific implementation of VarArgHelper.
+/// PowerPC64-specific implementation of VarArgHelper.
 struct VarArgPowerPC64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
@@ -3803,7 +3803,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
   }
 
-  /// \brief Compute the shadow address for a given va_arg.
+  /// Compute the shadow address for a given va_arg.
   Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                    int ArgOffset) {
     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
@@ -3873,7 +3873,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   }
 };
 
-/// \brief A no-op implementation of VarArgHelper.
+/// A no-op implementation of VarArgHelper.
 struct VarArgNoOpHelper : public VarArgHelper {
   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV) {}
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 16e95b5f688..3121d102c6d 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -449,7 +449,7 @@ ModulePass *llvm::createPGOInstrumentationUseLegacyPass(StringRef Filename) {
 
 namespace {
 
-/// \brief An MST based instrumentation for PGO
+/// An MST based instrumentation for PGO
 ///
 /// Implements a Minimum Spanning Tree (MST) based instrumentation for PGO
 /// in the function level.
diff --git a/llvm/lib/Transforms/ObjCARC/BlotMapVector.h b/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
index 5518b49c409..9ade14c1177 100644
--- a/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
+++ b/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
@@ -18,7 +18,7 @@ namespace llvm {
 
-/// \brief An associative container with fast insertion-order (deterministic)
+/// An associative container with fast insertion-order (deterministic)
 /// iteration over its elements. Plus the special blot operation.
 template <class KeyT, class ValueT> class BlotMapVector {
   /// Map keys to indices in Vector.
diff --git a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
index 8cc1232b18c..0f13b02c806 100644
--- a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
+++ b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
@@ -38,7 +38,7 @@ namespace objcarc {
 class ProvenanceAnalysis;
 
 /// \enum DependenceKind
-/// \brief Defines different dependence kinds among various ARC constructs.
+/// Defines different dependence kinds among various ARC constructs.
 ///
 /// There are several kinds of dependence-like concepts in use here.
 ///
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARC.h b/llvm/lib/Transforms/ObjCARC/ObjCARC.h
index 326c06c9293..62b38e8e62b 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARC.h
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARC.h
@@ -43,7 +43,7 @@ class raw_ostream;
 namespace llvm {
 namespace objcarc {
 
-/// \brief Erase the given instruction.
+/// Erase the given instruction.
 ///
 /// Many ObjC calls return their argument verbatim,
 /// so if it's such a call and the return value has users, replace them with the
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index b2c62a0e8ee..fb4eef523ba 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -36,7 +36,7 @@ using namespace llvm::objcarc;
 #define DEBUG_TYPE "objc-arc-ap-elim"
 
 namespace {
 
-  /// \brief Autorelease pool elimination.
+  /// Autorelease pool elimination.
   class ObjCARCAPElim : public ModulePass {
     void getAnalysisUsage(AnalysisUsage &AU) const override;
     bool runOnModule(Module &M) override;
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index e6dd69d6b58..602bfa1c020 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -51,7 +51,7 @@ STATISTIC(NumStoreStrongs, "Number objc_storeStrong calls formed");
 //===----------------------------------------------------------------------===//
 
 namespace {
 
-  /// \brief Late ARC optimizations
+  /// Late ARC optimizations
   ///
   /// These change the IR in a way that makes it difficult to be analyzed by
   /// ObjCARCOpt, so it's run late.
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
index bb6a0a0e73d..fab9845facc 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -47,7 +47,7 @@ using namespace llvm;
 using namespace llvm::objcarc;
 
 namespace {
 
-  /// \brief Early ARC transformations.
+  /// Early ARC transformations.
   class ObjCARCExpand : public FunctionPass {
     void getAnalysisUsage(AnalysisUsage &AU) const override;
     bool doInitialization(Module &M) override;
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index b28cbe29588..7df2fe52cae 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -77,7 +77,7 @@ using namespace llvm::objcarc;
 /// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
 /// @{
 
-/// \brief This is similar to GetRCIdentityRoot but it stops as soon
+/// This is similar to GetRCIdentityRoot but it stops as soon
 /// as it finds a value with multiple uses.
 static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
   // ConstantData (like ConstantPointerNull and UndefValue) is used across
@@ -175,7 +175,7 @@ STATISTIC(NumReleasesAfterOpt,
 
 namespace {
 
-  /// \brief Per-BasicBlock state.
+  /// Per-BasicBlock state.
   class BBState {
     /// The number of unique control paths from the entry which can reach this
     /// block.
@@ -466,7 +466,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
 
 namespace {
 
-  /// \brief The main ARC optimization pass.
+  /// The main ARC optimization pass.
   class ObjCARCOpt : public FunctionPass {
     bool Changed;
     ProvenanceAnalysis PA;
diff --git a/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h b/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
index f21ea3666b1..8a2e16e65fb 100644
--- a/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
+++ b/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
@@ -39,7 +39,7 @@ class Value;
 
 namespace objcarc {
 
-/// \brief This is similar to BasicAliasAnalysis, and it uses many of the same
+/// This is similar to BasicAliasAnalysis, and it uses many of the same
 /// techniques, except it uses special ObjC-specific reasoning about pointer
 /// relationships.
 ///
diff --git a/llvm/lib/Transforms/ObjCARC/PtrState.h b/llvm/lib/Transforms/ObjCARC/PtrState.h
index e1e95afcf76..f5b9b853d8e 100644
--- a/llvm/lib/Transforms/ObjCARC/PtrState.h
+++ b/llvm/lib/Transforms/ObjCARC/PtrState.h
@@ -36,7 +36,7 @@ class ProvenanceAnalysis;
 
 /// \enum Sequence
 ///
-/// \brief A sequence of states that a pointer may go through in which an
+/// A sequence of states that a pointer may go through in which an
 /// objc_retain and objc_release are actually needed.
 enum Sequence {
   S_None,
@@ -51,7 +51,7 @@ enum Sequence {
 raw_ostream &operator<<(raw_ostream &OS,
                         const Sequence S) LLVM_ATTRIBUTE_UNUSED;
 
-/// \brief Unidirectional information about either a
+/// Unidirectional information about either a
 /// retain-decrement-use-release sequence or release-use-decrement-retain
 /// reverse sequence.
 struct RRInfo {
@@ -97,7 +97,7 @@ struct RRInfo {
   bool Merge(const RRInfo &Other);
 };
 
-/// \brief This class summarizes several per-pointer runtime properties which
+/// This class summarizes several per-pointer runtime properties which
 /// are propagated through the flow graph.
 class PtrState {
 protected:
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 4b53628e442..470e687a722 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -84,7 +84,7 @@ static cl::opt<bool> ConstHoistWithBlockFrequency(
 
 namespace {
 
-/// \brief The constant hoisting pass.
+/// The constant hoisting pass.
 class ConstantHoistingLegacyPass : public FunctionPass {
 public:
   static char ID; // Pass identification, replacement for typeid
@@ -127,7 +127,7 @@ FunctionPass *llvm::createConstantHoistingPass() {
   return new ConstantHoistingLegacyPass();
 }
 
-/// \brief Perform the constant hoisting optimization for the given function.
+/// Perform the constant hoisting optimization for the given function.
 bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
   if (skipFunction(Fn))
     return false;
@@ -153,7 +153,7 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
   return MadeChange;
 }
 
-/// \brief Find the constant materialization insertion point.
+/// Find the constant materialization insertion point.
 Instruction *ConstantHoistingPass::findMatInsertPt(Instruction *Inst,
                                                    unsigned Idx) const {
   // If the operand is a cast instruction, then we have to materialize the
@@ -187,7 +187,7 @@ Instruction *ConstantHoistingPass::findMatInsertPt(Instruction *Inst,
   return IDom->getBlock()->getTerminator();
 }
 
-/// \brief Given \p BBs as input, find another set of BBs which collectively
+/// Given \p BBs as input, find another set of BBs which collectively
 /// dominates \p BBs and has the minimal sum of frequencies. Return the BB
 /// set found in \p BBs.
 static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
@@ -289,7 +289,7 @@ static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
   }
 }
 
-/// \brief Find an insertion point that dominates all uses.
+/// Find an insertion point that dominates all uses.
 SmallPtrSet<Instruction *, 8> ConstantHoistingPass::findConstantInsertionPoint(
     const ConstantInfo &ConstInfo) const {
   assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");
@@ -335,7 +335,7 @@ SmallPtrSet<Instruction *, 8> ConstantHoistingPass::findConstantInsertionPoint(
   return InsertPts;
 }
 
-/// \brief Record constant integer ConstInt for instruction Inst at operand
+/// Record constant integer ConstInt for instruction Inst at operand
 /// index Idx.
 ///
 /// The operand at index Idx is not necessarily the constant integer itself. It
@@ -375,7 +375,7 @@ void ConstantHoistingPass::collectConstantCandidates(
   }
 }
 
-/// \brief Check the operand for instruction Inst at index Idx.
+/// Check the operand for instruction Inst at index Idx.
 void ConstantHoistingPass::collectConstantCandidates(
     ConstCandMapType &ConstCandMap, Instruction *Inst, unsigned Idx) {
   Value *Opnd = Inst->getOperand(Idx);
@@ -416,7 +416,7 @@ void ConstantHoistingPass::collectConstantCandidates(
   }
 }
 
-/// \brief Scan the instruction for expensive integer constants and record them
+/// Scan the instruction for expensive integer constants and record them
 /// in the constant candidate vector.
 void ConstantHoistingPass::collectConstantCandidates(
     ConstCandMapType &ConstCandMap, Instruction *Inst) {
@@ -436,7 +436,7 @@ void ConstantHoistingPass::collectConstantCandidates(
   } // end of for all operands
 }
 
-/// \brief Collect all integer constants in the function that cannot be folded
+/// Collect all integer constants in the function that cannot be folded
 /// into an instruction itself.
 void ConstantHoistingPass::collectConstantCandidates(Function &Fn) {
   ConstCandMapType ConstCandMap;
@@ -541,7 +541,7 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
   return NumUses;
 }
 
-/// \brief Find the base constant within the given range and rebase all other
+/// Find the base constant within the given range and rebase all other
 /// constants with respect to the base constant.
 void ConstantHoistingPass::findAndMakeBaseConstant(
     ConstCandVecType::iterator S, ConstCandVecType::iterator E) {
@@ -567,7 +567,7 @@ void ConstantHoistingPass::findAndMakeBaseConstant(
   ConstantVec.push_back(std::move(ConstInfo));
 }
 
-/// \brief Finds and combines constant candidates that can be easily
+/// Finds and combines constant candidates that can be easily
 /// rematerialized with an add from a common base constant.
 void ConstantHoistingPass::findBaseConstants() {
   // Sort the constants by value and type. This invalidates the mapping!
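The rebasing step these comments describe is easy to demonstrate in isolation: one expensive constant becomes the base, and every other candidate in the range is rewritten as base plus a small additive offset. A toy sketch (illustrative only, not the pass's real data structures):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Three expensive-to-materialize constants that are close together.
  std::vector<uint64_t> Candidates = {0x12345678, 0x12345679, 0x1234567c};
  uint64_t Base = Candidates.front(); // hoisted once, e.g. behind a bitcast
  for (uint64_t C : Candidates)       // each user becomes Base + Offset
    std::printf("0x%llx = Base + %llu\n", (unsigned long long)C,
                (unsigned long long)(C - Base));
}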
@@ -601,7 +601,7 @@ void ConstantHoistingPass::findBaseConstants() {
   findAndMakeBaseConstant(MinValItr, ConstCandVec.end());
 }
 
-/// \brief Updates the operand at Idx in instruction Inst with the result of
+/// Updates the operand at Idx in instruction Inst with the result of
 /// instruction Mat. If the instruction is a PHI node then special
 /// handling for duplicate values from the same incoming basic block is
 /// required.
@@ -629,7 +629,7 @@ static bool updateOperand(Instruction *Inst, unsigned Idx, Instruction *Mat) {
   return true;
 }
 
-/// \brief Emit materialization code for all rebased constants and update their
+/// Emit materialization code for all rebased constants and update their
 /// users.
 void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
                                              Constant *Offset,
@@ -702,7 +702,7 @@ void ConstantHoistingPass::emitBaseConstants(
   }
 }
 
-/// \brief Hoist and hide the base constant behind a bitcast and emit
+/// Hoist and hide the base constant behind a bitcast and emit
 /// materialization code for derived constants.
 bool ConstantHoistingPass::emitBaseConstants() {
   bool MadeChange = false;
@@ -765,7 +765,7 @@ bool ConstantHoistingPass::emitBaseConstants() {
   return MadeChange;
 }
 
-/// \brief Check all cast instructions we made a copy of and remove them if they
+/// Check all cast instructions we made a copy of and remove them if they
 /// have no more users.
 void ConstantHoistingPass::deleteDeadCastInst() const {
   for (auto const &I : ClonedCastMap)
@@ -773,7 +773,7 @@ void ConstantHoistingPass::deleteDeadCastInst() const {
     I.first->eraseFromParent();
 }
 
-/// \brief Optimize expensive integer constants in the given function.
+/// Optimize expensive integer constants in the given function.
 bool ConstantHoistingPass::runImpl(Function &Fn, TargetTransformInfo &TTI,
                                    DominatorTree &DT, BlockFrequencyInfo *BFI,
                                    BasicBlock &Entry) {
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 7f320d5f95a..4380812968a 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -80,7 +80,7 @@ DEBUG_COUNTER(CSECounter, "early-cse",
 
 namespace {
 
-/// \brief Struct representing the available values in the scoped hash table.
+/// Struct representing the available values in the scoped hash table.
 struct SimpleValue {
   Instruction *Inst;
@@ -243,7 +243,7 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
 
 namespace {
 
-/// \brief Struct representing the available call values in the scoped hash
+/// Struct representing the available call values in the scoped hash
 /// table.
 struct CallValue {
   Instruction *Inst;
@@ -309,7 +309,7 @@ bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
 
 namespace {
 
-/// \brief A simple and fast domtree-based CSE pass.
+/// A simple and fast domtree-based CSE pass.
 ///
 /// This pass does a simple depth-first walk over the dominator tree,
 /// eliminating trivially redundant instructions and using instsimplify to
@@ -333,7 +333,7 @@ public:
       ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                       AllocatorTy>;
 
-  /// \brief A scoped hash table of the current values of all of our simple
+  /// A scoped hash table of the current values of all of our simple
   /// scalar expressions.
   ///
   /// As we walk down the domtree, we look to see if instructions are in this:
@@ -388,7 +388,7 @@ public:
                       InvariantMapAllocator>;
   InvariantHTType AvailableInvariants;
 
-  /// \brief A scoped hash table of the current values of read-only call
+  /// A scoped hash table of the current values of read-only call
   /// values.
   ///
   /// It uses the same generation count as loads.
@@ -396,10 +396,10 @@ public:
       ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
   CallHTType AvailableCalls;
 
-  /// \brief This is the current generation of the memory value.
+  /// This is the current generation of the memory value.
   unsigned CurrentGeneration = 0;
 
-  /// \brief Set up the EarlyCSE runner for a particular function.
+  /// Set up the EarlyCSE runner for a particular function.
   EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
@@ -473,7 +473,7 @@ private:
     bool Processed = false;
   };
 
-  /// \brief Wrapper class to handle memory instructions, including loads,
+  /// Wrapper class to handle memory instructions, including loads,
   /// stores and intrinsic loads and stores defined by the target.
   class ParseMemoryInst {
   public:
@@ -1193,7 +1193,7 @@ PreservedAnalyses EarlyCSEPass::run(Function &F,
 
 namespace {
 
-/// \brief A simple and fast domtree-based CSE pass.
+/// A simple and fast domtree-based CSE pass.
 ///
 /// This pass does a simple depth-first walk over the dominator tree,
 /// eliminating trivially redundant instructions and using instsimplify to
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 878b91fa1e2..59b87e9a77d 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -826,7 +826,7 @@ static bool isLifetimeStart(const Instruction *Inst) {
   return false;
 }
 
-/// \brief Try to locate the three instruction involved in a missed
+/// Try to locate the three instructions involved in a missed
 /// load-elimination case that is due to an intervening store.
 static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                    DominatorTree *DT,
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 841a9a31483..454ea254b88 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -140,7 +140,7 @@ namespace {
 
 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
 
-/// \brief InferAddressSpaces
+/// InferAddressSpaces
 class InferAddressSpaces : public FunctionPass {
   /// Target-specific address space, uses of which should be replaced if
   /// possible.
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index 24150b1e471..c804115f415 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -71,7 +71,7 @@ public:
 private:
   bool runOnLoop(Loop *L);
 
-  /// \brief Check if the stride of the accesses is large enough to
+  /// Check if the stride of the accesses is large enough to
   /// warrant a prefetch.
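As a rough sketch of the test that comment describes (the threshold name is invented here; the real pass derives the stride from the SCEVAddRecExpr and compares it against a target-provided cache line size):

#include <cstdint>

// Prefetching only pays off when one iteration advances the access by at
// least a cache-line-sized stride; smaller strides hit the same line anyway.
static bool strideLargeEnough(int64_t StrideBytes, int64_t CacheLineSize) {
  int64_t Mag = StrideBytes < 0 ? -StrideBytes : StrideBytes; // |stride|
  return Mag >= CacheLineSize;
}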
   bool isStrideLargeEnough(const SCEVAddRecExpr *AR);
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index 2f7b4923b33..a4da0940e33 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -111,7 +111,7 @@ STATISTIC(NumLoopsDistributed, "Number of loops distributed");
 
 namespace {
 
-/// \brief Maintains the set of instructions of the loop for a partition before
+/// Maintains the set of instructions of the loop for a partition before
 /// cloning. After cloning, it hosts the new loop.
 class InstPartition {
   using InstructionSet = SmallPtrSet<Instruction *, 8>;
@@ -122,20 +122,20 @@ public:
     Set.insert(I);
   }
 
-  /// \brief Returns whether this partition contains a dependence cycle.
+  /// Returns whether this partition contains a dependence cycle.
   bool hasDepCycle() const { return DepCycle; }
 
-  /// \brief Adds an instruction to this partition.
+  /// Adds an instruction to this partition.
   void add(Instruction *I) { Set.insert(I); }
 
-  /// \brief Collection accessors.
+  /// Collection accessors.
   InstructionSet::iterator begin() { return Set.begin(); }
   InstructionSet::iterator end() { return Set.end(); }
   InstructionSet::const_iterator begin() const { return Set.begin(); }
   InstructionSet::const_iterator end() const { return Set.end(); }
   bool empty() const { return Set.empty(); }
 
-  /// \brief Moves this partition into \p Other. This partition becomes empty
+  /// Moves this partition into \p Other. This partition becomes empty
   /// after this.
   void moveTo(InstPartition &Other) {
     Other.Set.insert(Set.begin(), Set.end());
@@ -143,7 +143,7 @@ public:
     Other.DepCycle |= DepCycle;
   }
 
-  /// \brief Populates the partition with a transitive closure of all the
+  /// Populates the partition with a transitive closure of all the
   /// instructions that the seeded instructions depend on.
   void populateUsedSet() {
     // FIXME: We currently don't use control-dependence but simply include all
@@ -166,7 +166,7 @@ public:
     }
   }
 
-  /// \brief Clones the original loop.
+  /// Clones the original loop.
   ///
   /// Updates LoopInfo and DominatorTree using the information that block \p
   /// LoopDomBB dominates the loop.
@@ -179,27 +179,27 @@ public:
     return ClonedLoop;
   }
 
-  /// \brief The cloned loop. If this partition is mapped to the original loop,
+  /// The cloned loop. If this partition is mapped to the original loop,
   /// this is null.
   const Loop *getClonedLoop() const { return ClonedLoop; }
 
-  /// \brief Returns the loop where this partition ends up after distribution.
+  /// Returns the loop where this partition ends up after distribution.
   /// If this partition is mapped to the original loop then use the block from
   /// the loop.
   const Loop *getDistributedLoop() const {
     return ClonedLoop ? ClonedLoop : OrigLoop;
   }
 
-  /// \brief The VMap that is populated by cloning and then used in
+  /// The VMap that is populated by cloning and then used in
   /// remapInstructions to remap the cloned instructions.
   ValueToValueMapTy &getVMap() { return VMap; }
 
-  /// \brief Remaps the cloned instructions using VMap.
+  /// Remaps the cloned instructions using VMap.
   void remapInstructions() {
     remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
   }
 
-  /// \brief Based on the set of instructions selected for this partition,
+  /// Based on the set of instructions selected for this partition,
   /// removes the unnecessary ones.
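The closure that populateUsedSet computes is a standard worklist walk over use-def chains. A minimal sketch under that assumption (illustrative, not the pass's exact code):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Grow Set until it contains every instruction the seeds transitively use.
static void closeOverOperands(SmallPtrSetImpl<Instruction *> &Set) {
  SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Value *Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        if (Set.insert(OpI).second) // newly added: expand through it too
          Worklist.push_back(OpI);
  }
}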
   void removeUnusedInsts() {
     SmallVector<Instruction *, 8> Unused;
@@ -239,30 +239,30 @@ public:
   }
 
 private:
-  /// \brief Instructions from OrigLoop selected for this partition.
+  /// Instructions from OrigLoop selected for this partition.
   InstructionSet Set;
 
-  /// \brief Whether this partition contains a dependence cycle.
+  /// Whether this partition contains a dependence cycle.
   bool DepCycle;
 
-  /// \brief The original loop.
+  /// The original loop.
   Loop *OrigLoop;
 
-  /// \brief The cloned loop. If this partition is mapped to the original loop,
+  /// The cloned loop. If this partition is mapped to the original loop,
   /// this is null.
   Loop *ClonedLoop = nullptr;
 
-  /// \brief The blocks of ClonedLoop including the preheader. If this
+  /// The blocks of ClonedLoop including the preheader. If this
   /// partition is mapped to the original loop, this is empty.
   SmallVector<BasicBlock *, 8> ClonedLoopBlocks;
 
-  /// \brief These gets populated once the set of instructions have been
+  /// These get populated once the set of instructions has been
   /// finalized. If this partition is mapped to the original loop, these are not
   /// set.
   ValueToValueMapTy VMap;
 };
 
-/// \brief Holds the set of Partitions. It populates them, merges them and then
+/// Holds the set of Partitions. It populates them, merges them and then
 /// clones the loops.
 class InstPartitionContainer {
   using InstToPartitionIdT = DenseMap<Instruction *, int>;
@@ -271,10 +271,10 @@ public:
   InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
       : L(L), LI(LI), DT(DT) {}
 
-  /// \brief Returns the number of partitions.
+  /// Returns the number of partitions.
   unsigned getSize() const { return PartitionContainer.size(); }
 
-  /// \brief Adds \p Inst into the current partition if that is marked to
+  /// Adds \p Inst into the current partition if that is marked to
   /// contain cycles. Otherwise start a new partition for it.
   void addToCyclicPartition(Instruction *Inst) {
     // If the current partition is non-cyclic. Start a new one.
@@ -284,7 +284,7 @@ public:
       PartitionContainer.back().add(Inst);
   }
 
-  /// \brief Adds \p Inst into a partition that is not marked to contain
+  /// Adds \p Inst into a partition that is not marked to contain
   /// dependence cycles.
   ///
   // Initially we isolate memory instructions into as many partitions as
@@ -293,7 +293,7 @@ public:
     PartitionContainer.emplace_back(Inst, L);
   }
 
-  /// \brief Merges adjacent non-cyclic partitions.
+  /// Merges adjacent non-cyclic partitions.
   ///
   /// The idea is that we currently only want to isolate the non-vectorizable
   /// partition. We could later allow more distribution among these partition
@@ -303,7 +303,7 @@ public:
         [](const InstPartition *P) { return !P->hasDepCycle(); });
   }
 
-  /// \brief If a partition contains only conditional stores, we won't vectorize
+  /// If a partition contains only conditional stores, we won't vectorize
   /// it. Try to merge it with a previous cyclic partition.
   void mergeNonIfConvertible() {
     mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
@@ -323,14 +323,14 @@ public:
     });
   }
 
-  /// \brief Merges the partitions according to various heuristics.
+  /// Merges the partitions according to various heuristics.
   void mergeBeforePopulating() {
     mergeAdjacentNonCyclic();
     if (!DistributeNonIfConvertible)
       mergeNonIfConvertible();
   }
 
-  /// \brief Merges partitions in order to ensure that no loads are duplicated.
+  /// Merges partitions in order to ensure that no loads are duplicated.
   ///
   /// We can't duplicate loads because that could potentially reorder them.
   /// LoopAccessAnalysis provides dependency information with the context that
@@ -398,7 +398,7 @@ public:
     return true;
   }
 
-  /// \brief Sets up the mapping between instructions to partitions. If the
+  /// Sets up the mapping from instructions to partitions. If the
   /// instruction is duplicated across multiple partitions, set the entry to -1.
   void setupPartitionIdOnInstructions() {
     int PartitionID = 0;
@@ -416,14 +416,14 @@ public:
     }
   }
 
-  /// \brief Populates the partition with everything that the seeding
+  /// Populates the partition with everything that the seeding
   /// instructions require.
   void populateUsedSet() {
     for (auto &P : PartitionContainer)
       P.populateUsedSet();
   }
 
-  /// \brief This performs the main chunk of the work of cloning the loops for
+  /// This performs the main chunk of the work of cloning the loops for
   /// the partitions.
   void cloneLoops() {
     BasicBlock *OrigPH = L->getLoopPreheader();
@@ -470,13 +470,13 @@ public:
                            Curr->getDistributedLoop()->getExitingBlock());
   }
 
-  /// \brief Removes the dead instructions from the cloned loops.
+  /// Removes the dead instructions from the cloned loops.
   void removeUnusedInsts() {
     for (auto &Partition : PartitionContainer)
       Partition.removeUnusedInsts();
   }
 
-  /// \brief For each memory pointer, it computes the partitionId the pointer is
+  /// For each memory pointer, it computes the partitionId the pointer is
   /// used in.
   ///
   /// This returns an array of int where the I-th entry corresponds to I-th
@@ -543,10 +543,10 @@ public:
 private:
   using PartitionContainerT = std::list<InstPartition>;
 
-  /// \brief List of partitions.
+  /// List of partitions.
   PartitionContainerT PartitionContainer;
 
-  /// \brief Mapping from Instruction to partition Id. If the instruction
+  /// Mapping from Instruction to partition Id. If the instruction
   /// belongs to multiple partitions the entry contains -1.
   InstToPartitionIdT InstToPartitionId;
@@ -554,7 +554,7 @@ private:
   LoopInfo *LI;
   DominatorTree *DT;
 
-  /// \brief The control structure to merge adjacent partitions if both satisfy
+  /// The control structure to merge adjacent partitions if both satisfy
   /// the \p Predicate.
   template <class UnaryPredicate>
   void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
@@ -575,7 +575,7 @@ private:
   }
 };
 
-/// \brief For each memory instruction, this class maintains difference of the
+/// For each memory instruction, this class maintains the difference of the
 /// number of unsafe dependences that start out from this instruction minus
 /// those that end here.
 ///
@@ -619,7 +619,7 @@ private:
   AccessesType Accesses;
 };
 
-/// \brief The actual class performing the per-loop work.
+/// The actual class performing the per-loop work.
 class LoopDistributeForLoop {
 public:
   LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
@@ -628,7 +628,7 @@ public:
     setForced();
   }
 
-  /// \brief Try to distribute an inner-most loop.
+  /// Try to distribute an inner-most loop.
   bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
     assert(L->empty() && "Only process inner loops.");
@@ -793,7 +793,7 @@ public:
     return true;
   }
 
-  /// \brief Provide diagnostics then \return with false.
+  /// Provide diagnostics then \return with false.
   bool fail(StringRef RemarkName, StringRef Message) {
     LLVMContext &Ctx = F->getContext();
     bool Forced = isForced().getValueOr(false);
@@ -826,7 +826,7 @@ public:
     return false;
   }
 
-  /// \brief Return if distribution forced to be enabled/disabled for the loop.
+  /// Return whether distribution is forced to be enabled/disabled for the loop.
   ///
   /// If the optional has a value, it indicates whether distribution was forced
   /// to be enabled (true) or disabled (false). If the optional has no value
@@ -834,7 +834,7 @@ public:
   const Optional<bool> &isForced() const { return IsForced; }
 
 private:
-  /// \brief Filter out checks between pointers from the same partition.
+  /// Filter out checks between pointers from the same partition.
   ///
   /// \p PtrToPartition contains the partition number for pointers. Partition
   /// number -1 means that the pointer is used in multiple partitions. In this
@@ -873,7 +873,7 @@ private:
     return Checks;
   }
 
-  /// \brief Check whether the loop metadata is forcing distribution to be
+  /// Check whether the loop metadata is forcing distribution to be
   /// enabled/disabled.
   void setForced() {
     Optional<const MDOperand *> Value =
@@ -896,7 +896,7 @@ private:
   ScalarEvolution *SE;
   OptimizationRemarkEmitter *ORE;
 
-  /// \brief Indicates whether distribution is forced to be enabled/disabled for
+  /// Indicates whether distribution is forced to be enabled/disabled for
   /// the loop.
   ///
   /// If the optional has a value, it indicates whether distribution was forced
@@ -939,7 +939,7 @@ static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
 
 namespace {
 
-/// \brief The pass class.
+/// The pass class.
 class LoopDistributeLegacy : public FunctionPass {
 public:
   static char ID;
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 97894726637..272dcaff2bc 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -1330,7 +1330,7 @@ void LoopInterchangeTransform::splitInnerLoopHeader() {
                "InnerLoopHeader\n");
 }
 
-/// \brief Move all instructions except the terminator from FromBB right before
+/// Move all instructions except the terminator from FromBB right before
 /// InsertBefore
 static void moveBBContents(BasicBlock *FromBB, Instruction *InsertBefore) {
   auto &ToList = InsertBefore->getParent()->getInstList();
@@ -1353,7 +1353,7 @@ void LoopInterchangeTransform::updateIncomingBlock(BasicBlock *CurrBlock,
     }
  }
 
-/// \brief Update BI to jump to NewBB instead of OldBB. Records updates to
+/// Update BI to jump to NewBB instead of OldBB. Records updates to
 /// the dominator tree in DTUpdates, if DT should be preserved.
 static void updateSuccessor(BranchInst *BI, BasicBlock *OldBB,
                             BasicBlock *NewBB,
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 46b81355c07..a7c27662aa0 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -80,7 +80,7 @@ STATISTIC(NumLoopLoadEliminted, "Number of loads eliminated by LLE");
 
 namespace {
 
-/// \brief Represent a store-to-forwarding candidate.
+/// Represent a store-to-forwarding candidate.
 struct StoreToLoadForwardingCandidate {
   LoadInst *Load;
   StoreInst *Store;
@@ -88,7 +88,7 @@ struct StoreToLoadForwardingCandidate {
   StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
       : Load(Load), Store(Store) {}
 
-  /// \brief Return true if the dependence from the store to the load has a
+  /// Return true if the dependence from the store to the load has a
   /// distance of one. E.g. A[i+1] = A[i]
A[i+1] = A[i] bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE, Loop *L) const { @@ -137,7 +137,7 @@ struct StoreToLoadForwardingCandidate { } // end anonymous namespace -/// \brief Check if the store dominates all latches, so as long as there is no +/// Check if the store dominates all latches, so as long as there is no /// intervening store this value will be loaded in the next iteration. static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L, DominatorTree *DT) { @@ -148,21 +148,21 @@ static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L, }); } -/// \brief Return true if the load is not executed on all paths in the loop. +/// Return true if the load is not executed on all paths in the loop. static bool isLoadConditional(LoadInst *Load, Loop *L) { return Load->getParent() != L->getHeader(); } namespace { -/// \brief The per-loop class that does most of the work. +/// The per-loop class that does most of the work. class LoadEliminationForLoop { public: LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI, DominatorTree *DT) : L(L), LI(LI), LAI(LAI), DT(DT), PSE(LAI.getPSE()) {} - /// \brief Look through the loop-carried and loop-independent dependences in + /// Look through the loop-carried and loop-independent dependences in /// this loop and find store->load dependences. /// /// Note that no candidate is returned if LAA has failed to analyze the loop @@ -223,14 +223,14 @@ public: return Candidates; } - /// \brief Return the index of the instruction according to program order. + /// Return the index of the instruction according to program order. unsigned getInstrIndex(Instruction *Inst) { auto I = InstOrder.find(Inst); assert(I != InstOrder.end() && "No index for instruction"); return I->second; } - /// \brief If a load has multiple candidates associated (i.e. different + /// If a load has multiple candidates associated (i.e. different /// stores), it means that it could be forwarding from multiple stores /// depending on control flow. Remove these candidates. /// @@ -294,7 +294,7 @@ public: }); } - /// \brief Given two pointers operations by their RuntimePointerChecking + /// Given two pointer operations by their RuntimePointerChecking /// indices, return true if they require an alias check. /// /// We need a check if one is a pointer for a candidate load and the other is @@ -310,7 +310,7 @@ public: (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1))); } - /// \brief Return pointers that are possibly written to on the path from a + /// Return pointers that are possibly written to on the path from a /// forwarding store to a load. /// /// These pointers need to be alias-checked against the forwarding candidates. @@ -367,7 +367,7 @@ public: return PtrsWrittenOnFwdingPath; } - /// \brief Determine the pointer alias checks to prove that there are no + /// Determine the pointer alias checks to prove that there are no /// intervening stores. SmallVector<RuntimePointerChecking::PointerCheck, 4> collectMemchecks( const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) { @@ -401,7 +401,7 @@ public: return Checks; } - /// \brief Perform the transformation for a candidate. + /// Perform the transformation for a candidate. 
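// A C++ sketch (hypothetical, not part of this patch) of the store->load
// forwarding rewrite performed below: with a dependence distance of one, the
// value stored to A[I+1] is exactly what the next iteration loads from A[I],
// so it can be carried in a scalar, which is what the PHI node inserted by
// the code models. The real pass only fires when the initial load is safe
// and run-time checks rule out intervening stores.
void beforeLLE(int *A, int N) {
  for (int I = 0; I < N; ++I)
    A[I + 1] = A[I] + 1; // A[I] was stored by the previous iteration
}
void afterLLE(int *A, int N) {
  int Stored = A[0]; // initial load hoisted into the preheader
  for (int I = 0; I < N; ++I) {
    int V = Stored + 1;
    A[I + 1] = V;
    Stored = V; // forwarded to the next iteration; the load of A[I] is gone
  }
}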
void propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand, SCEVExpander &SEE) { @@ -437,7 +437,7 @@ public: Cand.Load->replaceAllUsesWith(PHI); } - /// \brief Top-level driver for each loop: find store->load forwarding + /// Top-level driver for each loop: find store->load forwarding /// candidates, add run-time checks and perform transformation. bool processLoop() { DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName() @@ -559,7 +559,7 @@ public: private: Loop *L; - /// \brief Maps the load/store instructions to their index according to + /// Maps the load/store instructions to their index according to /// program order. DenseMap<Instruction *, unsigned> InstOrder; @@ -600,7 +600,7 @@ eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT, namespace { -/// \brief The pass. Most of the work is delegated to the per-loop +/// The pass. Most of the work is delegated to the per-loop /// LoadEliminationForLoop class. class LoopLoadElimination : public FunctionPass { public: diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 2d8b5469faf..4c0b3cc808c 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -446,7 +446,7 @@ void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { canonicalize(*L); } -/// \brief Check whether or not this formula satisfies the canonical +/// Check whether or not this formula satisfies the canonical /// representation. /// \see Formula::BaseRegs. bool Formula::isCanonical(const Loop &L) const { @@ -474,7 +474,7 @@ bool Formula::isCanonical(const Loop &L) const { return I == BaseRegs.end(); } -/// \brief Helper method to morph a formula into its canonical representation. +/// Helper method to morph a formula into its canonical representation. /// \see Formula::BaseRegs. /// Every formula having more than one base register, must use the ScaledReg /// field. Otherwise, we would have to do special cases everywhere in LSR @@ -509,7 +509,7 @@ void Formula::canonicalize(const Loop &L) { } } -/// \brief Get rid of the scale in the formula. +/// Get rid of the scale in the formula. /// In other words, this method morphes reg1 + 1*reg2 into reg1 + reg2. /// \return true if it was possible to get rid of the scale, false otherwise. /// \note After this operation the formula may not be in the canonical form. @@ -974,7 +974,7 @@ class LSRUse; } // end anonymous namespace -/// \brief Check if the addressing mode defined by \p F is completely +/// Check if the addressing mode defined by \p F is completely /// folded in \p LU at isel time. /// This includes address-mode folding and special icmp tricks. /// This function returns true if \p LU can accommodate what \p F @@ -3515,7 +3515,7 @@ static bool mayUsePostIncMode(const TargetTransformInfo &TTI, return false; } -/// \brief Helper function for LSRInstance::GenerateReassociations. +/// Helper function for LSRInstance::GenerateReassociations. void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, unsigned Depth, size_t Idx, @@ -3653,7 +3653,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, } } -/// \brief Helper function for LSRInstance::GenerateSymbolicOffsets. +/// Helper function for LSRInstance::GenerateSymbolicOffsets. 
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, size_t Idx, bool IsScaledReg) { @@ -3685,7 +3685,7 @@ void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, /* IsScaledReg */ true); } -/// \brief Helper function for LSRInstance::GenerateConstantOffsets. +/// Helper function for LSRInstance::GenerateConstantOffsets. void LSRInstance::GenerateConstantOffsetsImpl( LSRUse &LU, unsigned LUIdx, const Formula &Base, const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) { diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp index a1b25a22a14..822f880f222 100644 --- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -286,17 +286,17 @@ struct UnrolledInstStateKeyInfo { }; struct EstimatedUnrollCost { - /// \brief The estimated cost after unrolling. + /// The estimated cost after unrolling. unsigned UnrolledCost; - /// \brief The estimated dynamic cost of executing the instructions in the + /// The estimated dynamic cost of executing the instructions in the /// rolled form. unsigned RolledDynamicCost; }; } // end anonymous namespace -/// \brief Figure out if the loop is worth full unrolling. +/// Figure out if the loop is worth full unrolling. /// /// Complete loop unrolling can make some loads constant, and we need to know /// if that would expose any further optimization opportunities. This routine diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp index ba75b8c705e..e0e2c1938aa 100644 --- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp +++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp @@ -113,7 +113,7 @@ static cl::opt<unsigned> LVLoopDepthThreshold( "LoopVersioningLICM's threshold for maximum allowed loop nest/depth"), cl::init(2), cl::Hidden); -/// \brief Create MDNode for input string. +/// Create MDNode for input string. static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) { LLVMContext &Context = TheLoop->getHeader()->getContext(); Metadata *MDs[] = { @@ -122,7 +122,7 @@ static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) { return MDNode::get(Context, MDs); } -/// \brief Set input string into loop metadata by keeping other values intact. +/// Set input string into loop metadata by keeping other values intact. void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *MDString, unsigned V) { SmallVector<Metadata *, 4> MDs(1); @@ -242,7 +242,7 @@ private: } // end anonymous namespace -/// \brief Check loop structure and confirms it's good for LoopVersioningLICM. +/// Check loop structure and confirm it's good for LoopVersioningLICM. bool LoopVersioningLICM::legalLoopStructure() { // Loop must be in loop simplify form. if (!CurLoop->isLoopSimplifyForm()) { @@ -293,7 +293,7 @@ bool LoopVersioningLICM::legalLoopStructure() { return true; } -/// \brief Check memory accesses in loop and confirms it's good for +/// Check memory accesses in the loop and confirm it's good for /// LoopVersioningLICM. bool LoopVersioningLICM::legalLoopMemoryAccesses() { bool HasMayAlias = false; @@ -352,7 +352,7 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() { return true; } -/// \brief Check loop instructions safe for Loop versioning. +/// Check that loop instructions are safe for loop versioning. /// It returns true if it's safe else returns false. 
/// Consider following: /// 1) Check all load store in loop body are non atomic & non volatile. @@ -403,7 +403,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) { return true; } -/// \brief Check loop instructions and confirms it's good for +/// Check loop instructions and confirm it's good for /// LoopVersioningLICM. bool LoopVersioningLICM::legalLoopInstructions() { // Resetting counters. @@ -480,7 +480,7 @@ bool LoopVersioningLICM::legalLoopInstructions() { return true; } -/// \brief It checks loop is already visited or not. +/// Check whether the loop has already been visited. /// check loop meta data, if loop revisited return true /// else false. bool LoopVersioningLICM::isLoopAlreadyVisited() { @@ -491,7 +491,7 @@ bool LoopVersioningLICM::isLoopAlreadyVisited() { return false; } -/// \brief Checks legality for LoopVersioningLICM by considering following: +/// Check legality for LoopVersioningLICM by considering the following: /// a) loop structure legality b) loop instruction legality /// c) loop memory access legality. /// Return true if legal else returns false. @@ -546,7 +546,7 @@ bool LoopVersioningLICM::isLegalForVersioning() { return true; } -/// \brief Update loop with aggressive aliasing assumptions. +/// Update loop with aggressive aliasing assumptions. /// It marks no-alias to any pairs of memory operations by assuming /// loop should not have any must-alias memory accesses pairs. /// During LoopVersioningLICM legality we ignore loops having must diff --git a/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp index 46f8a356426..68bfa003039 100644 --- a/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp +++ b/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp @@ -357,7 +357,7 @@ PreservedAnalyses LowerExpectIntrinsicPass::run(Function &F, } namespace { -/// \brief Legacy pass for lowering expect intrinsics out of the IR. +/// Legacy pass for lowering expect intrinsics out of the IR. /// /// When this pass is run over a function it uses expect intrinsics which feed /// branches and switches to provide branch weight metadata for those diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp index 058da52dd84..cbed9a97c56 100644 --- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp +++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // //! \file -//! \brief This pass performs merges of loads and stores on both sides of a +//! This pass performs merges of loads and stores on both sides of a // diamond (hammock). It hoists the loads and sinks the stores. // // The algorithm iteratively hoists two loads to the same address out of a @@ -121,7 +121,7 @@ private: } // end anonymous namespace /// -/// \brief Return tail block of a diamond. +/// Return tail block of a diamond. 
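// A C++ sketch (hypothetical, not part of this patch) of the store sinking
// this pass performs on a diamond: two stores to the same address, one on
// each side of the branch, become a single store in the tail block fed by a
// PHI, modeled here with the conditional expression.
int beforeSink(bool C, int *P, int A, int B) {
  if (C)
    *P = A; // store on the "then" side of the diamond
  else
    *P = B; // store on the "else" side
  return *P;
}
int afterSink(bool C, int *P, int A, int B) {
  int V = C ? A : B; // PHI of the two stored operands in the footer
  *P = V;            // single sunk store
  return V;
}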
/// BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) { assert(isDiamondHead(BB) && "Basic block is not head of a diamond"); @@ -129,7 +129,7 @@ BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) { } /// -/// \brief True when BB is the head of a diamond (hammock) +/// True when BB is the head of a diamond (hammock) /// bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) { if (!BB) @@ -156,7 +156,7 @@ bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) { /// -/// \brief True when instruction is a sink barrier for a store +/// True when instruction is a sink barrier for a store /// located in Loc /// /// Whenever an instruction could possibly read or modify the @@ -174,7 +174,7 @@ bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start, } /// -/// \brief Check if \p BB contains a store to the same address as \p SI +/// Check if \p BB contains a store to the same address as \p SI /// /// \return The store in \p when it is safe to sink. Otherwise return Null. /// @@ -199,7 +199,7 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1, } /// -/// \brief Create a PHI node in BB for the operands of S0 and S1 +/// Create a PHI node in BB for the operands of S0 and S1 /// PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1) { @@ -217,7 +217,7 @@ PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0, } /// -/// \brief Merge two stores to same address and sink into \p BB +/// Merge two stores to same address and sink into \p BB /// /// Also sinks GEP instruction computing the store address /// @@ -262,7 +262,7 @@ bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0, } /// -/// \brief True when two stores are equivalent and can sink into the footer +/// True when two stores are equivalent and can sink into the footer /// /// Starting from a diamond tail block, iterate over the instructions in one /// predecessor block and try to match a store in the second predecessor. @@ -349,7 +349,7 @@ public: } /// - /// \brief Run the transformation for each function + /// Run the transformation for each function /// bool runOnFunction(Function &F) override { if (skipFunction(F)) @@ -370,7 +370,7 @@ char MergedLoadStoreMotionLegacyPass::ID = 0; } // anonymous namespace /// -/// \brief createMergedLoadStoreMotionPass - The public interface to this file. +/// createMergedLoadStoreMotionPass - The public interface to this file. /// FunctionPass *llvm::createMergedLoadStoreMotionPass() { return new MergedLoadStoreMotionLegacyPass(); diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp index 36f16618a75..b51e84238b4 100644 --- a/llvm/lib/Transforms/Scalar/Reassociate.cpp +++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp @@ -1634,7 +1634,7 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I, return nullptr; } -/// \brief Build up a vector of value/power pairs factoring a product. +/// Build up a vector of value/power pairs factoring a product. /// /// Given a series of multiplication operands, build a vector of factors and /// the powers each is raised to when forming the final product. Sort them in @@ -1699,7 +1699,7 @@ static bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops, return true; } -/// \brief Build a tree of multiplies, computing the product of Ops. +/// Build a tree of multiplies, computing the product of Ops. 
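// A small arithmetic illustration (hypothetical, not part of this patch) of
// the factoring these routines aim for. For a^3 * b^2, with powers sorted in
// decreasing order, the naive product takes four multiplies; folding b into
// a while their remaining powers match yields three.
long long naivePowers(long long A, long long B) {
  return A * A * A * B * B; // 4 multiplies
}
long long factoredPowers(long long A, long long B) {
  long long T = A * B;      // combine bases whose remaining powers match
  return T * T * A;         // (A*B)^2 * A == A^3 * B^2, 3 multiplies
}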
static Value *buildMultiplyTree(IRBuilder<> &Builder, SmallVectorImpl<Value*> &Ops) { if (Ops.size() == 1) @@ -1716,7 +1716,7 @@ static Value *buildMultiplyTree(IRBuilder<> &Builder, return LHS; } -/// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*... +/// Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*... /// /// Given a vector of values raised to various powers, where no two values are /// equal and the powers are sorted in decreasing order, compute the minimal diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp index 255c5b959ad..b4200da99cc 100644 --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -127,7 +127,7 @@ static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), namespace { -/// \brief A custom IRBuilder inserter which prefixes all names, but only in +/// A custom IRBuilder inserter which prefixes all names, but only in /// Assert builds. class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter { std::string Prefix; @@ -147,23 +147,23 @@ protected: } }; -/// \brief Provide a type for IRBuilder that drops names in release builds. +/// Provide a type for IRBuilder that drops names in release builds. using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>; -/// \brief A used slice of an alloca. +/// A used slice of an alloca. /// /// This structure represents a slice of an alloca used by some instruction. It /// stores both the begin and end offsets of this use, a pointer to the use /// itself, and a flag indicating whether we can classify the use as splittable /// or not when forming partitions of the alloca. class Slice { - /// \brief The beginning offset of the range. + /// The beginning offset of the range. uint64_t BeginOffset = 0; - /// \brief The ending offset, not included in the range. + /// The ending offset, not included in the range. uint64_t EndOffset = 0; - /// \brief Storage for both the use of this slice and whether it can be + /// Storage for both the use of this slice and whether it can be /// split. PointerIntPair<Use *, 1, bool> UseAndIsSplittable; @@ -185,7 +185,7 @@ public: bool isDead() const { return getUse() == nullptr; } void kill() { UseAndIsSplittable.setPointer(nullptr); } - /// \brief Support for ordering ranges. + /// Support for ordering ranges. /// /// This provides an ordering over ranges such that start offsets are /// always increasing, and within equal start offsets, the end offsets are @@ -203,7 +203,7 @@ public: return false; } - /// \brief Support comparison with a single offset to allow binary searches. + /// Support comparison with a single offset to allow binary searches. friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, uint64_t RHSOffset) { return LHS.beginOffset() < RHSOffset; @@ -229,7 +229,7 @@ template <> struct isPodLike<Slice> { static const bool value = true; }; } // end namespace llvm -/// \brief Representation of the alloca slices. +/// Representation of the alloca slices. /// /// This class represents the slices of an alloca which are formed by its /// various uses. If a pointer escapes, we can't fully build a representation @@ -238,16 +238,16 @@ template <> struct isPodLike<Slice> { static const bool value = true; }; /// starting at a particular offset before splittable slices. class llvm::sroa::AllocaSlices { public: - /// \brief Construct the slices of a particular alloca. + /// Construct the slices of a particular alloca. 
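// A minimal C++ analogy (hypothetical, not part of this patch) for what the
// slices computed here enable: an aggregate alloca whose fields are used
// independently is rewritten into separate scalars, which can then be
// promoted to SSA values. Offsets assume a 4-byte int/float ABI.
struct PairXY { int X; float Y; };
int beforeSROA(int A, float B) {
  PairXY P;    // one alloca; the uses form slices [0,4) and [4,8)
  P.X = A;     // slice [0,4)
  P.Y = B;     // slice [4,8)
  return P.X;  // only slice [0,4) is ever reloaded
}
int afterSROA(int A, float B) {
  int X = A;   // slice [0,4) as its own scalar, promotable to a register
  (void)B;     // slice [4,8) turned out to be dead and was deleted
  return X;
}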
AllocaSlices(const DataLayout &DL, AllocaInst &AI); - /// \brief Test whether a pointer to the allocation escapes our analysis. + /// Test whether a pointer to the allocation escapes our analysis. /// /// If this is true, the slices are never fully built and should be /// ignored. bool isEscaped() const { return PointerEscapingInstr; } - /// \brief Support for iterating over the slices. + /// Support for iterating over the slices. /// @{ using iterator = SmallVectorImpl<Slice>::iterator; using range = iterator_range<iterator>; @@ -262,10 +262,10 @@ public: const_iterator end() const { return Slices.end(); } /// @} - /// \brief Erase a range of slices. + /// Erase a range of slices. void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } - /// \brief Insert new slices for this alloca. + /// Insert new slices for this alloca. /// /// This moves the slices into the alloca's slices collection, and re-sorts /// everything so that the usual ordering properties of the alloca's slices @@ -283,10 +283,10 @@ public: class partition_iterator; iterator_range<partition_iterator> partitions(); - /// \brief Access the dead users for this alloca. + /// Access the dead users for this alloca. ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; } - /// \brief Access the dead operands referring to this alloca. + /// Access the dead operands referring to this alloca. /// /// These are operands which have cannot actually be used to refer to the /// alloca as they are outside its range and the user doesn't correct for @@ -312,11 +312,11 @@ private: friend class AllocaSlices::SliceBuilder; #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) - /// \brief Handle to alloca instruction to simplify method interfaces. + /// Handle to alloca instruction to simplify method interfaces. AllocaInst &AI; #endif - /// \brief The instruction responsible for this alloca not having a known set + /// The instruction responsible for this alloca not having a known set /// of slices. /// /// When an instruction (potentially) escapes the pointer to the alloca, we @@ -324,7 +324,7 @@ private: /// alloca. This will be null if the alloca slices are analyzed successfully. Instruction *PointerEscapingInstr; - /// \brief The slices of the alloca. + /// The slices of the alloca. /// /// We store a vector of the slices formed by uses of the alloca here. This /// vector is sorted by increasing begin offset, and then the unsplittable @@ -332,7 +332,7 @@ private: /// details. SmallVector<Slice, 8> Slices; - /// \brief Instructions which will become dead if we rewrite the alloca. + /// Instructions which will become dead if we rewrite the alloca. /// /// Note that these are not separated by slice. This is because we expect an /// alloca to be completely rewritten or not rewritten at all. If rewritten, @@ -340,7 +340,7 @@ private: /// they come from outside of the allocated space. SmallVector<Instruction *, 8> DeadUsers; - /// \brief Operands which will become dead if we rewrite the alloca. + /// Operands which will become dead if we rewrite the alloca. /// /// These are operands that in their particular use can be replaced with /// undef when we rewrite the alloca. These show up in out-of-bounds inputs @@ -351,7 +351,7 @@ private: SmallVector<Use *, 8> DeadOperands; }; -/// \brief A partition of the slices. +/// A partition of the slices. /// /// An ephemeral representation for a range of slices which can be viewed as /// a partition of the alloca. 
This range represents a span of the alloca's @@ -367,32 +367,32 @@ private: using iterator = AllocaSlices::iterator; - /// \brief The beginning and ending offsets of the alloca for this + /// The beginning and ending offsets of the alloca for this /// partition. uint64_t BeginOffset, EndOffset; - /// \brief The start and end iterators of this partition. + /// The start and end iterators of this partition. iterator SI, SJ; - /// \brief A collection of split slice tails overlapping the partition. + /// A collection of split slice tails overlapping the partition. SmallVector<Slice *, 4> SplitTails; - /// \brief Raw constructor builds an empty partition starting and ending at + /// Raw constructor builds an empty partition starting and ending at /// the given iterator. Partition(iterator SI) : SI(SI), SJ(SI) {} public: - /// \brief The start offset of this partition. + /// The start offset of this partition. /// /// All of the contained slices start at or after this offset. uint64_t beginOffset() const { return BeginOffset; } - /// \brief The end offset of this partition. + /// The end offset of this partition. /// /// All of the contained slices end at or before this offset. uint64_t endOffset() const { return EndOffset; } - /// \brief The size of the partition. + /// The size of the partition. /// /// Note that this can never be zero. uint64_t size() const { @@ -400,7 +400,7 @@ public: return EndOffset - BeginOffset; } - /// \brief Test whether this partition contains no slices, and merely spans + /// Test whether this partition contains no slices, and merely spans /// a region occupied by split slices. bool empty() const { return SI == SJ; } @@ -417,7 +417,7 @@ public: iterator end() const { return SJ; } /// @} - /// \brief Get the sequence of split slice tails. + /// Get the sequence of split slice tails. /// /// These tails are of slices which start before this partition but are /// split and overlap into the partition. We accumulate these while forming @@ -425,7 +425,7 @@ public: ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } }; -/// \brief An iterator over partitions of the alloca's slices. +/// An iterator over partitions of the alloca's slices. /// /// This iterator implements the core algorithm for partitioning the alloca's /// slices. It is a forward iterator as we don't support backtracking for @@ -439,18 +439,18 @@ class AllocaSlices::partition_iterator Partition> { friend class AllocaSlices; - /// \brief Most of the state for walking the partitions is held in a class + /// Most of the state for walking the partitions is held in a class /// with a nice interface for examining them. Partition P; - /// \brief We need to keep the end of the slices to know when to stop. + /// We need to keep the end of the slices to know when to stop. AllocaSlices::iterator SE; - /// \brief We also need to keep track of the maximum split end offset seen. + /// We also need to keep track of the maximum split end offset seen. /// FIXME: Do we really? uint64_t MaxSplitSliceEndOffset = 0; - /// \brief Sets the partition to be empty at given iterator, and sets the + /// Sets the partition to be empty at given iterator, and sets the /// end iterator. partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) : P(SI), SE(SE) { @@ -460,7 +460,7 @@ class AllocaSlices::partition_iterator advance(); } - /// \brief Advance the iterator to the next partition. + /// Advance the iterator to the next partition. /// /// Requires that the iterator not be at the end of the slices. 
void advance() { @@ -615,7 +615,7 @@ public: Partition &operator*() { return P; } }; -/// \brief A forward range over the partitions of the alloca's slices. +/// A forward range over the partitions of the alloca's slices. /// /// This accesses an iterator range over the partitions of the alloca's /// slices. It computes these partitions on the fly based on the overlapping @@ -639,7 +639,7 @@ static Value *foldSelectInst(SelectInst &SI) { return nullptr; } -/// \brief A helper that folds a PHI node or a select. +/// A helper that folds a PHI node or a select. static Value *foldPHINodeOrSelectInst(Instruction &I) { if (PHINode *PN = dyn_cast<PHINode>(&I)) { // If PN merges together the same value, return that value. @@ -648,7 +648,7 @@ static Value *foldPHINodeOrSelectInst(Instruction &I) { return foldSelectInst(cast<SelectInst>(I)); } -/// \brief Builder for the alloca slices. +/// Builder for the alloca slices. /// /// This class builds a set of alloca slices by recursively visiting the uses /// of an alloca and making a slice for each load and store at each offset. @@ -664,7 +664,7 @@ class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; - /// \brief Set to de-duplicate dead instructions found in the use walk. + /// Set to de-duplicate dead instructions found in the use walk. SmallPtrSet<Instruction *, 4> VisitedDeadInsts; public: @@ -1023,7 +1023,7 @@ private: void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } - /// \brief Disable SROA entirely if there are unhandled users of the alloca. + /// Disable SROA entirely if there are unhandled users of the alloca. void visitInstruction(Instruction &I) { PI.setAborted(&I); } }; @@ -1352,7 +1352,7 @@ static void speculateSelectInstLoads(SelectInst &SI) { SI.eraseFromParent(); } -/// \brief Build a GEP out of a base pointer and indices. +/// Build a GEP out of a base pointer and indices. /// /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. @@ -1370,7 +1370,7 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, NamePrefix + "sroa_idx"); } -/// \brief Get a natural GEP off of the BasePtr walking through Ty toward +/// Get a natural GEP off of the BasePtr walking through Ty toward /// TargetTy without changing the offset of the pointer. /// /// This routine assumes we've already established a properly offset GEP with @@ -1419,7 +1419,7 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, return buildGEP(IRB, BasePtr, Indices, NamePrefix); } -/// \brief Recursively compute indices for a natural GEP. +/// Recursively compute indices for a natural GEP. /// /// This is the recursive step for getNaturalGEPWithOffset that walks down the /// element types adding appropriate indices for the GEP. @@ -1487,7 +1487,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, Indices, NamePrefix); } -/// \brief Get a natural GEP from a base pointer to a particular offset and +/// Get a natural GEP from a base pointer to a particular offset and /// resulting in a particular type. 
/// /// The goal is to produce a "natural" looking GEP that works with the existing @@ -1522,7 +1522,7 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, Indices, NamePrefix); } -/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the +/// Compute an adjusted pointer from Ptr by Offset bytes where the /// resulting pointer has PointerTy. /// /// This tries very hard to compute a "natural" GEP which arrives at the offset @@ -1631,7 +1631,7 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, return Ptr; } -/// \brief Compute the adjusted alignment for a load or store from an offset. +/// Compute the adjusted alignment for a load or store from an offset. static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset, const DataLayout &DL) { unsigned Alignment; @@ -1652,7 +1652,7 @@ static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset, return MinAlign(Alignment, Offset); } -/// \brief Test whether we can convert a value from the old to the new type. +/// Test whether we can convert a value from the old to the new type. /// /// This predicate should be used to guard calls to convertValue in order to /// ensure that we only try to convert viable values. The strategy is that we @@ -1703,7 +1703,7 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { return true; } -/// \brief Generic routine to convert an SSA value to a value of a different +/// Generic routine to convert an SSA value to a value of a different /// type. /// /// This will try various different casting techniques, such as bitcasts, @@ -1755,7 +1755,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, return IRB.CreateBitCast(V, NewTy); } -/// \brief Test whether the given slice use can be promoted to a vector. +/// Test whether the given slice use can be promoted to a vector. /// /// This function is called to test each entry in a partition which is slated /// for a single slice. @@ -1826,7 +1826,7 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S, return true; } -/// \brief Test whether the given alloca partitioning and range of slices can be +/// Test whether the given alloca partitioning and range of slices can be /// promoted to a vector. /// /// This is a quick test to check whether we can rewrite a particular alloca @@ -1939,7 +1939,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { return nullptr; } -/// \brief Test whether a slice of an alloca is valid for integer widening. +/// Test whether a slice of an alloca is valid for integer widening. /// /// This implements the necessary checking for the \c isIntegerWideningViable /// test below on a single slice of the alloca. @@ -2017,7 +2017,7 @@ static bool isIntegerWideningViableForSlice(const Slice &S, return true; } -/// \brief Test whether the given alloca partition's integer operations can be +/// Test whether the given alloca partition's integer operations can be /// widened to promotable ones. /// /// This is a quick test to check whether we can rewrite the integer loads and @@ -2192,7 +2192,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, return V; } -/// \brief Visitor to rewrite instructions using p particular slice of an alloca +/// Visitor to rewrite instructions using a particular slice of an alloca /// to use a new alloca. 
/// /// Also implements the rewriting to vector-based accesses when the partition @@ -2365,7 +2365,7 @@ private: ); } - /// \brief Compute suitable alignment to access this slice of the *new* + /// Compute suitable alignment to access this slice of the *new* /// alloca. /// /// You can optionally pass a type to this routine and if that type's ABI @@ -2652,7 +2652,7 @@ private: return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); } - /// \brief Compute an integer value from splatting an i8 across the given + /// Compute an integer value from splatting an i8 across the given /// number of bytes. /// /// Note that this routine assumes an i8 is a byte. If that isn't true, don't @@ -2679,7 +2679,7 @@ private: return V; } - /// \brief Compute a vector splat for a given element value. + /// Compute a vector splat for a given element value. Value *getVectorSplat(Value *V, unsigned NumElements) { V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); DEBUG(dbgs() << " splat: " << *V << "\n"); @@ -3081,7 +3081,7 @@ private: namespace { -/// \brief Visitor to rewrite aggregate loads and stores as scalar. +/// Visitor to rewrite aggregate loads and stores as scalar. /// /// This pass aggressively rewrites all aggregate loads and stores on /// a particular pointer (or any pointer derived from it which we can identify) @@ -3126,7 +3126,7 @@ private: // Conservative default is to not rewrite anything. bool visitInstruction(Instruction &I) { return false; } - /// \brief Generic recursive split emission class. + /// Generic recursive split emission class. template <typename Derived> class OpSplitter { protected: /// The builder used to form new instructions. @@ -3150,7 +3150,7 @@ private: : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} public: - /// \brief Generic recursive split emission routine. + /// Generic recursive split emission routine. /// /// This method recursively splits an aggregate op (load or store) into /// scalar or vector ops. It splits recursively until it hits a single value @@ -3303,7 +3303,7 @@ private: } // end anonymous namespace -/// \brief Strip aggregate type wrapping. +/// Strip aggregate type wrapping. /// /// This removes no-op aggregate types wrapping an underlying type. It will /// strip as many layers of types as it can without changing either the type @@ -3333,7 +3333,7 @@ static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { return stripAggregateTypeWrapping(DL, InnerTy); } -/// \brief Try to find a partition of the aggregate type passed in for a given +/// Try to find a partition of the aggregate type passed in for a given /// offset and size. /// /// This recurses through the aggregate type and tries to compute a subtype @@ -3439,7 +3439,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, return SubTy; } -/// \brief Pre-split loads and stores to simplify rewriting. +/// Pre-split loads and stores to simplify rewriting. /// /// We want to break up the splittable load+store pairs as much as /// possible. This is important to do as a preprocessing step, as once we @@ -3938,7 +3938,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { return true; } -/// \brief Rewrite an alloca partition's users. +/// Rewrite an alloca partition's users. /// /// This routine drives both of the rewriting goals of the SROA pass. 
It tries /// to rewrite uses of an alloca partition to be conducive for SSA value @@ -4087,7 +4087,7 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS, return NewAI; } -/// \brief Walks the slices of an alloca and form partitions based on them, +/// Walk the slices of an alloca and form partitions based on them, /// rewriting each of their uses. bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { if (AS.begin() == AS.end()) return false; @@ -4248,7 +4248,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { return Changed; } -/// \brief Clobber a use with undef, deleting the used value if it becomes dead. +/// Clobber a use with undef, deleting the used value if it becomes dead. void SROA::clobberUse(Use &U) { Value *OldV = U; // Replace the use with an undef value. @@ -4263,7 +4263,7 @@ void SROA::clobberUse(Use &U) { } } -/// \brief Analyze an alloca for SROA. +/// Analyze an alloca for SROA. /// /// This analyzes the alloca to ensure we can reason about it, builds /// the slices of the alloca, and then hands it off to be split and @@ -4332,7 +4332,7 @@ bool SROA::runOnAlloca(AllocaInst &AI) { return Changed; } -/// \brief Delete the dead instructions accumulated in this run. +/// Delete the dead instructions accumulated in this run. /// /// Recursively deletes the dead instructions we've accumulated. This is done /// at the very end to maximize locality of the recursive delete and to @@ -4374,7 +4374,7 @@ bool SROA::deleteDeadInstructions( return Changed; } -/// \brief Promote the allocas, using the best available technique. +/// Promote the allocas, using the best available technique. /// /// This attempts to promote whatever allocas have been identified as viable in /// the PromotableAllocas list. If that list is empty, there is nothing to do. diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp index ca9f82c9aa4..e8a8328d24c 100644 --- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp +++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp @@ -212,7 +212,7 @@ static cl::opt<bool> namespace { -/// \brief A helper class for separating a constant offset from a GEP index. +/// A helper class for separating a constant offset from a GEP index. /// /// In real programs, a GEP index may be more complicated than a simple addition /// of something and a constant integer which can be trivially splitted. For @@ -339,7 +339,7 @@ private: const DominatorTree *DT; }; -/// \brief A pass that tries to split every GEP in the function into a variadic +/// A pass that tries to split every GEP in the function into a variadic /// base and a constant offset. It is a FunctionPass because searching for the /// constant offset may inspect other basic blocks. 
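// A C++ sketch (hypothetical, not part of this patch) of the splitting
// described above: address computations that differ only in a constant
// offset get a shared variadic base, letting later passes fold the constants
// into addressing modes instead of recomputing the whole index.
int beforeSplit(int *A, int I) {
  return A[I + 1] + A[I + 2]; // each access recomputes I plus a constant
}
int afterSplit(int *A, int I) {
  int *Base = &A[I];          // variadic base computed once
  return Base[1] + Base[2];   // constant offsets split out of the indices
}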
class SeparateConstOffsetFromGEP : public FunctionPass { diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp index b54a92325c4..6f5c32cd1bc 100644 --- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp +++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp @@ -289,7 +289,7 @@ INITIALIZE_PASS_DEPENDENCY(RegionInfoPass) INITIALIZE_PASS_END(StructurizeCFG, "structurizecfg", "Structurize the CFG", false, false) -/// \brief Initialize the types and constants used in the pass +/// Initialize the types and constants used in the pass bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) { LLVMContext &Context = R->getEntry()->getContext(); @@ -301,7 +301,7 @@ bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) { return false; } -/// \brief Build up the general order of nodes +/// Build up the general order of nodes void StructurizeCFG::orderNodes() { ReversePostOrderTraversal<Region*> RPOT(ParentRegion); SmallDenseMap<Loop*, unsigned, 8> LoopBlocks; @@ -354,7 +354,7 @@ void StructurizeCFG::orderNodes() { std::reverse(Order.begin(), Order.end()); } -/// \brief Determine the end of the loops +/// Determine the end of the loops void StructurizeCFG::analyzeLoops(RegionNode *N) { if (N->isSubRegion()) { // Test for exit as back edge @@ -373,7 +373,7 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) { } } -/// \brief Invert the given condition +/// Invert the given condition Value *StructurizeCFG::invert(Value *Condition) { // First: Check if it's a constant if (Constant *C = dyn_cast<Constant>(Condition)) @@ -405,7 +405,7 @@ Value *StructurizeCFG::invert(Value *Condition) { llvm_unreachable("Unhandled condition to invert"); } -/// \brief Build the condition for one edge +/// Build the condition for one edge Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx, bool Invert) { Value *Cond = Invert ? BoolFalse : BoolTrue; @@ -418,7 +418,7 @@ Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx, return Cond; } -/// \brief Analyze the predecessors of each block and build up predicates +/// Analyze the predecessors of each block and build up predicates void StructurizeCFG::gatherPredicates(RegionNode *N) { RegionInfo *RI = ParentRegion->getRegionInfo(); BasicBlock *BB = N->getEntry(); @@ -476,7 +476,7 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) { } } -/// \brief Collect various loop and predicate infos +/// Collect various loop and predicate infos void StructurizeCFG::collectInfos() { // Reset predicate Predicates.clear(); @@ -505,7 +505,7 @@ void StructurizeCFG::collectInfos() { } } -/// \brief Insert the missing branch conditions +/// Insert the missing branch conditions void StructurizeCFG::insertConditions(bool Loops) { BranchVector &Conds = Loops ? LoopConds : Conditions; Value *Default = Loops ? 
BoolTrue : BoolFalse; @@ -551,7 +551,7 @@ void StructurizeCFG::insertConditions(bool Loops) { } } -/// \brief Remove all PHI values coming from "From" into "To" and remember +/// Remove all PHI values coming from "From" into "To" and remember /// them in DeletedPhis void StructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) { PhiMap &Map = DeletedPhis[To]; @@ -563,7 +563,7 @@ void StructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) { } } -/// \brief Add a dummy PHI value as soon as we knew the new predecessor +/// Add a dummy PHI value as soon as we know the new predecessor void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) { for (PHINode &Phi : To->phis()) { Value *Undef = UndefValue::get(Phi.getType()); @@ -572,7 +572,7 @@ void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) { AddedPhis[To].push_back(From); } -/// \brief Add the real PHI value as soon as everything is set up +/// Add the real PHI value as soon as everything is set up void StructurizeCFG::setPhiValues() { SSAUpdater Updater; for (const auto &AddedPhi : AddedPhis) { @@ -612,7 +612,7 @@ void StructurizeCFG::setPhiValues() { assert(DeletedPhis.empty()); } -/// \brief Remove phi values from all successors and then remove the terminator. +/// Remove phi values from all successors and then remove the terminator. void StructurizeCFG::killTerminator(BasicBlock *BB) { TerminatorInst *Term = BB->getTerminator(); if (!Term) @@ -627,7 +627,7 @@ void StructurizeCFG::killTerminator(BasicBlock *BB) { Term->eraseFromParent(); } -/// \brief Let node exit(s) point to NewExit +/// Let node exit(s) point to NewExit void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit, bool IncludeDominator) { if (Node->isSubRegion()) { @@ -673,7 +673,7 @@ void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit, } } -/// \brief Create a new flow node and update dominator tree and region info +/// Create a new flow node and update dominator tree and region info BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) { LLVMContext &Context = Func->getContext(); BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() : @@ -685,7 +685,7 @@ BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) { return Flow; } -/// \brief Create a new or reuse the previous node as flow node +/// Create a new flow node or reuse the previous one BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) { BasicBlock *Entry = PrevNode->getEntry(); @@ -704,7 +704,7 @@ BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) { return Flow; } -/// \brief Returns the region exit if possible, otherwise just a new flow node +/// Returns the region exit if possible, otherwise just a new flow node BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow, bool ExitUseAllowed) { if (!Order.empty() || !ExitUseAllowed) @@ -716,13 +716,13 @@ BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow, return Exit; } -/// \brief Set the previous node +/// Set the previous node void StructurizeCFG::setPrevNode(BasicBlock *BB) { PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : nullptr; } -/// \brief Does BB dominate all the predicates of Node? +/// Does BB dominate all the predicates of Node? 
bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) { BBPredicates &Preds = Predicates[Node->getEntry()]; return llvm::all_of(Preds, [&](std::pair<BasicBlock *, Value *> Pred) { @@ -730,7 +730,7 @@ bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) { }); } -/// \brief Can we predict that this node will always be called? +/// Can we predict that this node will always be called? bool StructurizeCFG::isPredictableTrue(RegionNode *Node) { BBPredicates &Preds = Predicates[Node->getEntry()]; bool Dominated = false; @@ -926,7 +926,7 @@ static bool hasOnlyUniformBranches(Region *R, unsigned UniformMDKindID, return true; } -/// \brief Run the transformation for each region found +/// Run the transformation for each region found bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) { if (R->isTopLevelRegion()) return false; diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp index 2a1106b41de..37ea4375a4c 100644 --- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -87,7 +87,7 @@ STATISTIC(NumEliminated, "Number of tail calls removed"); STATISTIC(NumRetDuped, "Number of return duplicated"); STATISTIC(NumAccumAdded, "Number of accumulators introduced"); -/// \brief Scan the specified function for alloca instructions. +/// Scan the specified function for alloca instructions. /// If it contains any dynamic allocas, returns false. static bool canTRE(Function &F) { // Because of PR962, we don't TRE dynamic allocas. diff --git a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp index e687b386155..9a4996e5475 100644 --- a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp +++ b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp @@ -114,7 +114,7 @@ static bool shouldHaveDiscriminator(const Instruction *I) { return !isa<IntrinsicInst>(I) || isa<MemIntrinsic>(I); } -/// \brief Assign DWARF discriminators. +/// Assign DWARF discriminators. /// /// To assign discriminators, we examine the boundaries of every /// basic block and its successors. Suppose there is a basic block B1 diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp index 09ef84aafa8..08ccfbcff15 100644 --- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp +++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp @@ -173,7 +173,7 @@ Value *FastDivInsertionTask::getReplacement(DivCacheTy &Cache) { return isDivisionOp() ? Value.Quotient : Value.Remainder; } -/// \brief Check if a value looks like a hash. +/// Check if a value looks like a hash. /// /// The routine is expected to detect values computed using the most common hash /// algorithms. Typically, hash computations end with one of the following diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp index efef34008ca..5fce77ece25 100644 --- a/llvm/lib/Transforms/Utils/CloneFunction.cpp +++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp @@ -710,7 +710,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ModuleLevelChanges, Returns, NameSuffix, CodeInfo); } -/// \brief Remaps instructions in \p Blocks using the mapping in \p VMap. +/// Remaps instructions in \p Blocks using the mapping in \p VMap. 
void llvm::remapInstructionsInBlocks( const SmallVectorImpl<BasicBlock *> &Blocks, ValueToValueMapTy &VMap) { // Rewrite the code to refer to itself. @@ -720,7 +720,7 @@ void llvm::remapInstructionsInBlocks( RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); } -/// \brief Clones a loop \p OrigLoop. Returns the loop and the blocks in \p +/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p /// Blocks. /// /// Updates LoopInfo and DominatorTree assuming the loop is dominated by block @@ -784,7 +784,7 @@ Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB, return NewLoop; } -/// \brief Duplicate non-Phi instructions from the beginning of block up to +/// Duplicate non-Phi instructions from the beginning of block up to /// StopAt instruction into a split block between BB and its predecessor. BasicBlock * llvm::DuplicateInstructionsInSplitBetween(BasicBlock *BB, BasicBlock *PredBB, diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp index 573ccc3cf6a..fe82e0ab768 100644 --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -78,7 +78,7 @@ static cl::opt<bool> AggregateArgsOpt("aggregate-extracted-args", cl::Hidden, cl::desc("Aggregate arguments to code-extracted functions")); -/// \brief Test whether a block is valid for extraction. +/// Test whether a block is valid for extraction. bool CodeExtractor::isBlockValidForExtraction(const BasicBlock &BB, bool AllowVarArgs) { // Landing pads must be in the function where they were inserted for cleanup. @@ -130,7 +130,7 @@ bool CodeExtractor::isBlockValidForExtraction(const BasicBlock &BB, return true; } -/// \brief Build a set of blocks to extract if the input blocks are viable. +/// Build a set of blocks to extract if the input blocks are viable. static SetVector<BasicBlock *> buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT, bool AllowVarArgs) { diff --git a/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/llvm/lib/Transforms/Utils/FlattenCFG.cpp index 921e366ef7b..a1adc31e499 100644 --- a/llvm/lib/Transforms/Utils/FlattenCFG.cpp +++ b/llvm/lib/Transforms/Utils/FlattenCFG.cpp @@ -36,16 +36,16 @@ namespace { class FlattenCFGOpt { AliasAnalysis *AA; - /// \brief Use parallel-and or parallel-or to generate conditions for + /// Use parallel-and or parallel-or to generate conditions for /// conditional branches. bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder); - /// \brief If \param BB is the merge block of an if-region, attempt to merge + /// If \param BB is the merge block of an if-region, attempt to merge /// the if-region with an adjacent if-region upstream if two if-regions /// contain identical instructions. bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder); - /// \brief Compare a pair of blocks: \p Block1 and \p Block2, which + /// Compare a pair of blocks: \p Block1 and \p Block2, which /// are from two if-regions whose entry blocks are \p Head1 and \p /// Head2. \returns true if \p Block1 and \p Block2 contain identical /// instructions, and have no memory reference alias with \p Head2. 
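// A C++ sketch (hypothetical, not part of this patch) of the parallel-and
// flattening FlattenParallelAndOr performs: two nested branches guarding the
// same block become one branch on a combined condition. The non-short-circuit
// '&' mirrors that both conditions are now evaluated unconditionally, which
// is only legal when neither may trap or has side effects.
int beforeFlatten(bool A, bool B) {
  if (A)
    if (B)
      return 1;
  return 0;
}
int afterFlatten(bool A, bool B) {
  if (A & B) // parallel-and of the two conditions
    return 1;
  return 0;
}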
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 2f7d414e4dc..99fd2906754 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -791,7 +791,7 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) { using PredBlockVector = SmallVector<BasicBlock *, 16>; using IncomingValueMap = DenseMap<BasicBlock *, Value *>; -/// \brief Determines the value to use as the phi node input for a block. +/// Determines the value to use as the phi node input for a block. /// /// Select between \p OldVal any value that we know flows from \p BB /// to a particular phi on the basis of which one (if either) is not @@ -820,7 +820,7 @@ static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, return OldVal; } -/// \brief Create a map from block to value for the operands of a +/// Create a map from block to value for the operands of a /// given phi. /// /// Create a map from block to value for each non-undef value flowing @@ -839,7 +839,7 @@ static void gatherIncomingValuesToPhi(PHINode *PN, } } -/// \brief Replace the incoming undef values to a phi with the values +/// Replace the incoming undef values to a phi with the values /// from a block-to-value map. /// /// \param PN The phi we are replacing the undefs in. @@ -859,7 +859,7 @@ static void replaceUndefValuesInPhi(PHINode *PN, } } -/// \brief Replace a value flowing from a block to a phi with +/// Replace a value flowing from a block to a phi with /// potentially multiple instances of that value flowing from the /// block's predecessors to the phi. /// diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp index bc5d6a9b54e..d70fc4ac028 100644 --- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp +++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp @@ -170,7 +170,7 @@ static void addBlockAndPredsToSet(BasicBlock *InputBB, BasicBlock *StopBlock, } while (!Worklist.empty()); } -/// \brief The first part of loop-nestification is to find a PHI node that tells +/// The first part of loop-nestification is to find a PHI node that tells /// us how to partition the loops. static PHINode *findPHIToPartitionLoops(Loop *L, DominatorTree *DT, AssumptionCache *AC) { @@ -195,7 +195,7 @@ static PHINode *findPHIToPartitionLoops(Loop *L, DominatorTree *DT, return nullptr; } -/// \brief If this loop has multiple backedges, try to pull one of them out into +/// If this loop has multiple backedges, try to pull one of them out into /// a nested loop. /// /// This is important for code that looks like @@ -332,7 +332,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader, return NewOuter; } -/// \brief This method is called when the specified loop has more than one +/// This method is called when the specified loop has more than one /// backedge in it. /// /// If this occurs, revector all of these backedges to target a new basic block @@ -457,7 +457,7 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader, return BEBlock; } -/// \brief Simplify one loop and queue further loops for simplification. +/// Simplify one loop and queue further loops for simplification. 
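// A control-flow sketch (hypothetical, not part of this patch) of
// insertUniqueBackedgeBlock's effect, written with gotos: a loop with two
// backedges has both revectored through one new block, so the loop ends up
// with the single backedge that loop-simplify form requires.
int beforeSimplify(int N) {
  int I = 0;
Header:
  ++I;
  if (I < N / 2) goto Header; // backedge #1
  if (I < N) goto Header;     // backedge #2
  return I;
}
int afterSimplify(int N) {
  int I = 0;
Header:
  ++I;
  if (I < N / 2) goto Backedge;
  if (I < N) goto Backedge;
  return I;
Backedge: // the new block; now the only backedge into Header
  goto Header;
}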
static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE, AssumptionCache *AC, diff --git a/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp b/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp index 555e328099d..96d9acddb1a 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp @@ -338,7 +338,7 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, } } -/// \brief Update the branch weights of the latch of a peeled-off loop +/// Update the branch weights of the latch of a peeled-off loop /// iteration. /// This sets the branch weights for the latch of the recently peeled off loop /// iteration correctly. @@ -379,7 +379,7 @@ static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR, } } -/// \brief Clones the body of the loop L, putting it between \p InsertTop and \p +/// Clones the body of the loop L, putting it between \p InsertTop and \p /// InsertBot. /// \param IterNumber The serial number of the iteration currently being /// peeled off. @@ -488,7 +488,7 @@ static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop, LVMap[KV.first] = KV.second; } -/// \brief Peel off the first \p PeelCount iterations of loop \p L. +/// Peel off the first \p PeelCount iterations of loop \p L. /// /// Note that this does not peel them off as a single straight-line block. /// Rather, each iteration is peeled off separately, and needs to check the diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp index 805a003f18f..cec34b09f20 100644 --- a/llvm/lib/Transforms/Utils/LoopUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp @@ -1201,7 +1201,7 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI, return Changed; } -/// \brief Returns the instructions that use values defined in the loop. +/// Returns the instructions that use values defined in the loop. SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) { SmallVector<Instruction *, 8> UsedOutside; @@ -1278,7 +1278,7 @@ void llvm::initializeLoopPassPass(PassRegistry &Registry) { INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) } -/// \brief Find string metadata for loop +/// Find string metadata for loop /// /// If it has a value (e.g. {"llvm.distribute", 1} return the value as an /// operand or null otherwise. If the string metadata is not found return @@ -1516,7 +1516,7 @@ Optional<unsigned> llvm::getLoopEstimatedTripCount(Loop *L) { return (FalseVal + (TrueVal / 2)) / TrueVal; } -/// \brief Adds a 'fast' flag to floating point operations. +/// Adds a 'fast' flag to floating point operations. static Value *addFastMathFlag(Value *V) { if (isa<FPMathOperator>(V)) { FastMathFlags Flags; diff --git a/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/llvm/lib/Transforms/Utils/LoopVersioning.cpp index 29756d9dab7..95e9a186c10 100644 --- a/llvm/lib/Transforms/Utils/LoopVersioning.cpp +++ b/llvm/lib/Transforms/Utils/LoopVersioning.cpp @@ -248,7 +248,7 @@ void LoopVersioning::annotateInstWithNoAlias(Instruction *VersionedInst, } namespace { -/// \brief Also expose this is a pass. Currently this is only used for +/// Also expose this as a pass. Currently this is only used for /// unit-testing. It adds all memchecks necessary to remove all may-aliasing /// array accesses from the loop. 
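// A C++ sketch (hypothetical, not part of this patch) of the versioned shape
// this utility produces: a runtime overlap test selects between a clone in
// which the pointers are known not to alias and the unmodified original loop.
void versionedLoop(int *A, int *B, int N) {
  if (A + N <= B || B + N <= A) {
    // Versioned loop: the memcheck proved A and B disjoint, so clients such
    // as LoopVersioningLICM may annotate accesses no-alias and hoist freely.
    for (int I = 0; I < N; ++I)
      A[I] = B[I] + 1;
  } else {
    // Fallback: the original loop with no aliasing assumptions.
    for (int I = 0; I < N; ++I)
      A[I] = B[I] + 1;
  }
}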
diff --git a/llvm/lib/Transforms/Utils/LoopVersioning.cpp b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
index 29756d9dab7..95e9a186c10 100644
--- a/llvm/lib/Transforms/Utils/LoopVersioning.cpp
+++ b/llvm/lib/Transforms/Utils/LoopVersioning.cpp
@@ -248,7 +248,7 @@ void LoopVersioning::annotateInstWithNoAlias(Instruction *VersionedInst,
 }
 
 namespace {
-/// \brief Also expose this is a pass. Currently this is only used for
+/// Also expose this as a pass. Currently this is only used for
 /// unit-testing. It adds all memchecks necessary to remove all may-aliasing
 /// array accesses from the loop.
 class LoopVersioningPass : public FunctionPass {
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index f18bd2539a7..efdc04a8d7d 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -172,7 +172,7 @@ static raw_ostream& operator<<(raw_ostream &O,
   return O << "]";
 }
 
-/// \brief Update the first occurrence of the "switch statement" BB in the PHI
+/// Update the first occurrence of the "switch statement" BB in the PHI
 /// node with the "new" BB. The other occurrences will:
 ///
 /// 1) Be updated by subsequent calls to this function. Switch statements may
diff --git a/llvm/lib/Transforms/Utils/PredicateInfo.cpp b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
index 2676f6673ac..62235895a1a 100644
--- a/llvm/lib/Transforms/Utils/PredicateInfo.cpp
+++ b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
@@ -740,7 +740,7 @@ PreservedAnalyses PredicateInfoPrinterPass::run(Function &F,
   return PreservedAnalyses::all();
 }
 
-/// \brief An assembly annotator class to print PredicateInfo information in
+/// An assembly annotator class to print PredicateInfo information in
 /// comments.
 class PredicateInfoAnnotatedWriter : public AssemblyAnnotationWriter {
   friend class PredicateInfo;
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 562242e08ea..d90db0322a5 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -178,13 +178,13 @@ struct RenamePassData {
   LocationVector Locations;
 };
 
-/// \brief This assigns and keeps a per-bb relative ordering of load/store
+/// This assigns and keeps a per-bb relative ordering of load/store
 /// instructions in the block that directly load or store an alloca.
 ///
 /// This functionality is important because it avoids scanning large basic
 /// blocks multiple times when promoting many allocas in the same block.
 class LargeBlockInfo {
-  /// \brief For each instruction that we track, keep the index of the
+  /// For each instruction that we track, keep the index of the
   /// instruction.
   ///
   /// The index starts out as the number of the instruction from the start of
@@ -243,7 +243,7 @@ struct PromoteMem2Reg {
   /// Reverse mapping of Allocas.
   DenseMap<AllocaInst *, unsigned> AllocaLookup;
 
-  /// \brief The PhiNodes we're adding.
+  /// The PhiNodes we're adding.
   ///
   /// That map is used to simplify some Phi nodes as we iterate over it, so
   /// it should have deterministic iterators. We could use a MapVector, but
@@ -347,7 +347,7 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
   }
 }
 
-/// \brief Rewrite as many loads as possible given a single store.
+/// Rewrite as many loads as possible given a single store.
 ///
 /// When there is only a single store, we can use the domtree to trivially
 /// replace all of the dominated loads with the stored value. Do so, and return
@@ -779,7 +779,7 @@ void PromoteMem2Reg::run() {
   NewPhiNodes.clear();
 }
 
-/// \brief Determine which blocks the value is live in.
+/// Determine which blocks the value is live in.
 ///
 /// These are blocks which lead to uses. Knowing this allows us to avoid
 /// inserting PHI nodes into blocks which don't lead to uses (thus, the
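PromoteMemoryToRegister.cpp is the engine behind mem2reg; callers reach it through a small entry point. A minimal driver looks roughly like the sketch below (illustrative only — it assumes the PromoteMemToReg and isAllocaPromotable declarations from llvm/Transforms/Utils/PromoteMemToReg.h and a DominatorTree supplied by the caller; promoteAllocas is an invented name):

#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <vector>

static bool promoteAllocas(llvm::Function &F, llvm::DominatorTree &DT) {
  using namespace llvm;
  std::vector<AllocaInst *> Allocas;
  // Promotable allocas live in the entry block.
  for (Instruction &I : F.getEntryBlock())
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      if (isAllocaPromotable(AI)) // no escaping or volatile uses
        Allocas.push_back(AI);
  if (Allocas.empty())
    return false;
  // Inserts PHI nodes per the live-in analysis documented above.
  PromoteMemToReg(Allocas, DT);
  return true;
}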
@@ -853,7 +853,7 @@ void PromoteMem2Reg::ComputeLiveInBlocks(
   }
 }
 
-/// \brief Queue a phi-node to be added to a basic-block for a specific Alloca.
+/// Queue a phi-node to be added to a basic-block for a specific Alloca.
 ///
 /// Returns true if there wasn't already a phi-node for that variable
 bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
@@ -885,7 +885,7 @@ static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
     PN->setDebugLoc(DL);
 }
 
-/// \brief Recursively traverse the CFG of the function, renaming loads and
+/// Recursively traverse the CFG of the function, renaming loads and
 /// stores to the allocas which we are promoting.
 ///
 /// IncomingVals indicates what value each Alloca contains on exit from the
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 1be16c572e9..2cf2d27725c 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1824,7 +1824,7 @@ static bool SinkCommonCodeFromPredecessors(BasicBlock *BB) {
   return Changed;
 }
 
-/// \brief Determine if we can hoist sink a sole store instruction out of a
+/// Determine if we can hoist or sink a sole store instruction out of a
 /// conditional block.
 ///
 /// We are looking for code like the following:
@@ -1885,7 +1885,7 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
   return nullptr;
 }
 
-/// \brief Speculate a conditional basic block flattening the CFG.
+/// Speculate a conditional basic block, flattening the CFG.
 ///
 /// Note that this is a very risky transform currently. Speculating
 /// instructions like this is most often not desirable. Instead, there is an MI
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index e6b7328417e..d1fd2eb68a8 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -395,7 +395,7 @@ static bool isUniformLoopNest(Loop *Lp, Loop *OuterLp) {
   return true;
 }
 
-/// \brief Check whether it is safe to if-convert this phi node.
+/// Check whether it is safe to if-convert this phi node.
 ///
 /// Phi nodes with constant expressions that can trap are not safe to if
 /// convert.
@@ -429,7 +429,7 @@ static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
   return Ty1;
 }
 
-/// \brief Check that the instruction has outside loop users and is not an
+/// Check that the instruction has outside loop users and is not an
 /// identified reduction variable.
 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                                SmallPtrSetImpl<Value *> &AllowedExit) {
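isSafeToSpeculateStore above looks for a conditional block whose sole store writes a location that is also stored to unconditionally; the store can then be speculated and the branch flattened into a select. The shape of the transform in source form (illustrative only; names invented):

// Before: the conditional block contains a sole store, and the same
// address is unconditionally stored to beforehand.
void before(int *P, bool C, int V) {
  *P = 0; // dominating store to the same location
  if (C)
    *P = V; // sole store in the conditional block
}

// After speculation, the CFG is flat and the branch becomes a select.
void after(int *P, bool C, int V) {
  *P = C ? V : 0;
}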
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index c8d2d0fdce5..304bc7ab57b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -48,7 +48,7 @@ private:
 public:
   VPBuilder() {}
 
-  /// \brief This specifies that created VPInstructions should be appended to
+  /// This specifies that created VPInstructions should be appended to
   /// the end of the specified block.
   void setInsertPoint(VPBasicBlock *TheBB) {
     assert(TheBB && "Attempting to set a null insert point");
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 4a80dc08a46..1b1c16bacae 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -432,7 +432,7 @@ public:
   void vectorizeMemoryInstruction(Instruction *Instr,
                                   VectorParts *BlockInMask = nullptr);
 
-  /// \brief Set the debug location in the builder using the debug location in
+  /// Set the debug location in the builder using the debug location in
   /// the instruction.
   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
 
@@ -468,7 +468,7 @@ protected:
   /// vectorizing this phi node.
   void fixReduction(PHINode *Phi);
 
-  /// \brief The Loop exit block may have single value PHI nodes with some
+  /// The Loop exit block may have single value PHI nodes with some
   /// incoming value. While vectorizing we only handled real values
   /// that were defined inside the loop and we should have one value for
   /// each predecessor of its parent basic block. See PR14725.
@@ -586,7 +586,7 @@ protected:
   /// loop.
   void addMetadata(Instruction *To, Instruction *From);
 
-  /// \brief Similar to the previous function but it adds the metadata to a
+  /// Similar to the previous function but it adds the metadata to a
   /// vector of instructions.
   void addMetadata(ArrayRef<Value *> To, Instruction *From);
 
@@ -619,7 +619,7 @@ protected:
   /// Interface to emit optimization remarks.
   OptimizationRemarkEmitter *ORE;
 
-  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
+  /// LoopVersioning. It's only set up (non-null) if memchecks were
   /// used.
   ///
   /// This is currently only used to add no-alias metadata based on the
@@ -717,7 +717,7 @@ private:
 
 } // end namespace llvm
 
-/// \brief Look for a meaningful debug location on the instruction or it's
+/// Look for a meaningful debug location on the instruction or its
 /// operands.
 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
   if (!I)
@@ -789,7 +789,7 @@ void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
 
 namespace llvm {
 
-/// \brief The group of interleaved loads/stores sharing the same stride and
+/// The group of interleaved loads/stores sharing the same stride and
 /// close to each other.
 ///
 /// Each member in this group has an index starting from 0, and the largest
@@ -833,7 +833,7 @@ public:
   unsigned getAlignment() const { return Align; }
   unsigned getNumMembers() const { return Members.size(); }
 
-  /// \brief Try to insert a new member \p Instr with index \p Index and
+  /// Try to insert a new member \p Instr with index \p Index and
   /// alignment \p NewAlign. The index is related to the leader and it could be
   /// negative if it is the new leader.
   ///
@@ -867,7 +867,7 @@ public:
     return true;
   }
 
-  /// \brief Get the member with the given index \p Index
+  /// Get the member with the given index \p Index
   ///
   /// \returns nullptr if it contains no such member.
   Instruction *getMember(unsigned Index) const {
@@ -878,7 +878,7 @@ public:
     return Members.find(Key)->second;
   }
 
-  /// \brief Get the index for the given member. Unlike the key in the member
+  /// Get the index for the given member. Unlike the key in the member
   /// map, the index starts from 0.
   unsigned getIndex(Instruction *Instr) const {
     for (auto I : Members)
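An InterleaveGroup, as documented above, is keyed by a shared stride, with member indices relative to the leader. A stride-2 group with factor 2 in source form (illustrative only; names invented) — instead of two strided gathers, the vectorizer can emit one wide load plus shuffles:

void deinterleave(const float *A, float *Re, float *Im, int N) {
  for (int I = 0; I < N; ++I) {
    Re[I] = A[2 * I];     // group member with index 0 (the leader)
    Im[I] = A[2 * I + 1]; // group member with index 1
  }
}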
@@ -929,7 +929,7 @@ private:
 
 namespace {
 
-/// \brief Drive the analysis of interleaved memory accesses in the loop.
+/// Drive the analysis of interleaved memory accesses in the loop.
 ///
 /// Use this class to analyze interleaved accesses only when we can vectorize
 /// a loop. Otherwise it's meaningless to do analysis as the vectorization
@@ -953,16 +953,16 @@ public:
       delete Ptr;
   }
 
-  /// \brief Analyze the interleaved accesses and collect them in interleave
+  /// Analyze the interleaved accesses and collect them in interleave
   /// groups. Substitute symbolic strides using \p Strides.
   void analyzeInterleaving();
 
-  /// \brief Check if \p Instr belongs to any interleave group.
+  /// Check if \p Instr belongs to any interleave group.
   bool isInterleaved(Instruction *Instr) const {
     return InterleaveGroupMap.count(Instr);
   }
 
-  /// \brief Get the interleave group that \p Instr belongs to.
+  /// Get the interleave group that \p Instr belongs to.
   ///
   /// \returns nullptr if it doesn't have such a group.
   InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
@@ -971,7 +971,7 @@ public:
     return nullptr;
   }
 
-  /// \brief Returns true if an interleaved group that may access memory
+  /// Returns true if an interleaved group that may access memory
   /// out-of-bounds requires a scalar epilogue iteration for correctness.
   bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
 
@@ -999,7 +999,7 @@ private:
   /// access to a set of dependent sink accesses.
   DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
 
-  /// \brief The descriptor for a strided memory access.
+  /// The descriptor for a strided memory access.
   struct StrideDescriptor {
     StrideDescriptor() = default;
     StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
@@ -1019,10 +1019,10 @@ private:
     unsigned Align = 0;
   };
 
-  /// \brief A type for holding instructions and their stride descriptors.
+  /// A type for holding instructions and their stride descriptors.
   using StrideEntry = std::pair<Instruction *, StrideDescriptor>;
 
-  /// \brief Create a new interleave group with the given instruction \p Instr,
+  /// Create a new interleave group with the given instruction \p Instr,
   /// stride \p Stride and alignment \p Align.
   ///
  /// \returns the newly created interleave group.
@@ -1034,7 +1034,7 @@ private:
     return InterleaveGroupMap[Instr];
   }
 
-  /// \brief Release the group and remove all the relationships.
+  /// Release the group and remove all the relationships.
   void releaseGroup(InterleaveGroup *Group) {
     for (unsigned i = 0; i < Group->getFactor(); i++)
       if (Instruction *Member = Group->getMember(i))
@@ -1043,28 +1043,28 @@ private:
     delete Group;
   }
 
-  /// \brief Collect all the accesses with a constant stride in program order.
+  /// Collect all the accesses with a constant stride in program order.
   void collectConstStrideAccesses(
       MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);
 
-  /// \brief Returns true if \p Stride is allowed in an interleaved group.
+  /// Returns true if \p Stride is allowed in an interleaved group.
   static bool isStrided(int Stride) {
     unsigned Factor = std::abs(Stride);
     return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
   }
 
-  /// \brief Returns true if \p BB is a predicated block.
+  /// Returns true if \p BB is a predicated block.
   bool isPredicated(BasicBlock *BB) const {
     return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
   }
 
-  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
+  /// Returns true if LoopAccessInfo can be used for dependence queries.
   bool areDependencesValid() const {
     return LAI && LAI->getDepChecker().getDependences();
   }
 
-  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
+  /// Returns true if memory accesses \p A and \p B can be reordered, if
   /// necessary, when constructing interleaved groups.
   ///
   /// \p A must precede \p B in program order. We return false if reordering is
@@ -1112,7 +1112,7 @@ private:
     return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
   }
 
-  /// \brief Collect the dependences from LoopAccessInfo.
+  /// Collect the dependences from LoopAccessInfo.
   ///
   /// We process the dependences once during the interleaved access analysis to
   /// enable constant-time dependence queries.
@@ -1207,7 +1207,7 @@ public:
   /// avoid redundant calculations.
   void setCostBasedWideningDecision(unsigned VF);
 
-  /// \brief A struct that represents some properties of the register usage
+  /// A struct that represents some properties of the register usage
   /// of a loop.
   struct RegisterUsage {
     /// Holds the number of loop invariant values that are used in the loop.
@@ -1408,17 +1408,17 @@ public:
   /// access that can be widened.
   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
 
-  /// \brief Check if \p Instr belongs to any interleaved access group.
+  /// Check if \p Instr belongs to any interleaved access group.
   bool isAccessInterleaved(Instruction *Instr) {
     return InterleaveInfo.isInterleaved(Instr);
   }
 
-  /// \brief Get the interleaved access group that \p Instr belongs to.
+  /// Get the interleaved access group that \p Instr belongs to.
   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
     return InterleaveInfo.getInterleaveGroup(Instr);
   }
 
-  /// \brief Returns true if an interleaved group requires a scalar iteration
+  /// Returns true if an interleaved group requires a scalar iteration
   /// to handle accesses with gaps.
   bool requiresScalarEpilogue() const {
     return InterleaveInfo.requiresScalarEpilogue();
@@ -3052,7 +3052,7 @@ struct CSEDenseMapInfo {
 
 } // end anonymous namespace
 
-///\brief Perform cse of induction variable instructions.
+/// Perform CSE of induction variable instructions.
 static void cse(BasicBlock *BB) {
   // Perform simple cse.
   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
@@ -3074,7 +3074,7 @@ static void cse(BasicBlock *BB) {
   }
 }
 
-/// \brief Estimate the overhead of scalarizing an instruction. This is a
+/// Estimate the overhead of scalarizing an instruction. This is a
 /// convenience wrapper for the type-based getScalarizationOverhead API.
 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                          const TargetTransformInfo &TTI) {
@@ -5605,7 +5605,7 @@ LoopVectorizationCostModel::expectedCost(unsigned VF) {
   return Cost;
 }
 
-/// \brief Gets Address Access SCEV after verifying that the access pattern
+/// Gets the address access SCEV after verifying that the access pattern
 /// is loop invariant except the induction variable dependence.
 ///
 /// This SCEV can be sent to the Target in order to estimate the address
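The cse() routine above removes duplicate instructions within one basic block using a small hash map keyed on instruction equivalence (CSEDenseMapInfo). The same idea as a self-contained sketch, with a plain string key standing in for LLVM's hashing (illustrative only; all names invented):

#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

struct Inst {
  std::string Key; // stands in for opcode + operand identity
  int Id;
};

// For each instruction, return the id of the instruction that survives CSE.
std::vector<int> cseBlock(const std::vector<Inst> &BB) {
  std::unordered_map<std::string, int> Seen;
  std::vector<int> Repl(BB.size());
  for (std::size_t I = 0; I < BB.size(); ++I) {
    auto It = Seen.find(BB[I].Key);
    if (It != Seen.end())
      Repl[I] = It->second; // duplicate: reuse the earlier instruction
    else
      Repl[I] = Seen[BB[I].Key] = BB[I].Id; // first occurrence survives
  }
  return Repl;
}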
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 6835ed9986f..3f87fd913f4 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -161,7 +161,7 @@ static const unsigned MaxMemDepDistance = 160;
 /// regions to be handled.
 static const int MinScheduleRegionSize = 16;
 
-/// \brief Predicate for the element types that the SLP vectorizer supports.
+/// Predicate for the element types that the SLP vectorizer supports.
 ///
 /// The most important thing to filter here are types which are invalid in LLVM
 /// vectors. We also filter target specific types which have absolutely no
@@ -554,7 +554,7 @@ public:
     MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
   }
 
-  /// \brief Vectorize the tree that starts with the elements in \p VL.
+  /// Vectorize the tree that starts with the elements in \p VL.
   /// Returns the vectorized root.
   Value *vectorizeTree();
 
@@ -601,7 +601,7 @@ public:
 
   unsigned getTreeSize() const { return VectorizableTree.size(); }
 
-  /// \brief Perform LICM and CSE on the newly generated gather sequences.
+  /// Perform LICM and CSE on the newly generated gather sequences.
   void optimizeGatherSequence();
 
   /// \returns The best order of instructions for vectorization.
@@ -640,7 +640,7 @@ public:
     return MinVecRegSize;
   }
 
-  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
+  /// Check if ArrayType or StructType is isomorphic to some VectorType.
   ///
   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
   unsigned canMapToVector(Type *T, const DataLayout &DL) const;
@@ -686,7 +686,7 @@ private:
   /// roots. This method calculates the cost of extracting the values.
   int getGatherCost(ArrayRef<Value *> VL);
 
-  /// \brief Set the Builder insert point to one after the last instruction in
+  /// Set the Builder insert point to one after the last instruction in
   /// the bundle.
   void setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue);
 
@@ -1290,7 +1290,7 @@ template <> struct GraphTraits<BoUpSLP *> {
   /// NodeRef has to be a pointer per the GraphWriter.
   using NodeRef = TreeEntry *;
 
-  /// \brief Add the VectorizableTree to the index iterator to be able to return
+  /// Add the VectorizableTree to the index iterator to be able to return
   /// TreeEntry pointers.
   struct ChildIteratorType
       : public iterator_adaptor_base<ChildIteratorType,
@@ -4689,7 +4689,7 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
   return Changed;
 }
 
-/// \brief Check that the Values in the slice in VL array are still existent in
+/// Check that the Values in the slice in VL array are still existent in
 /// the WeakTrackingVH array.
 /// Vectorization of part of the VL array may cause later values in the VL array
 /// to become invalid. We track when this has happened in the WeakTrackingVH
@@ -5073,7 +5073,7 @@ bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
   return false;
 }
 
-/// \brief Generate a shuffle mask to be used in a reduction tree.
+/// Generate a shuffle mask to be used in a reduction tree.
 ///
 /// \param VecLen The length of the vector to be reduced.
 /// \param NumEltsToRdx The number of elements that should be reduced in the
@@ -5564,7 +5564,7 @@ class HorizontalReduction {
 public:
   HorizontalReduction() = default;
 
-  /// \brief Try to find a reduction tree.
+  /// Try to find a reduction tree.
   bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
     assert((!Phi || is_contained(Phi->operands(), B)) &&
            "The phi needs to use the binary operator");
@@ -5690,7 +5690,7 @@ public:
     return true;
   }
 
-  /// \brief Attempt to vectorize the tree found by
+  /// Attempt to vectorize the tree found by
   /// matchAssociativeReduction.
   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
     if (ReducedVals.empty())
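The shuffle masks produced above encode a log2-depth reduction tree: each round folds the upper half of the vector onto the lower half until lane 0 holds the result. The scalar equivalent for a width-8 reduction (illustrative only; the name is invented):

#include <cstddef>

float horizontalAdd(float V[8]) {
  // Three rounds for width 8: 4, 2, 1.
  for (std::size_t Width = 4; Width >= 1; Width /= 2)
    for (std::size_t I = 0; I < Width; ++I)
      V[I] += V[I + Width]; // add the "shuffled-down" upper half
  return V[0];
}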
@@ -5815,7 +5815,7 @@ public:
   }
 
 private:
-  /// \brief Calculate the cost of a reduction.
+  /// Calculate the cost of a reduction.
   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
                        unsigned ReduxWidth) {
     Type *ScalarTy = FirstReducedVal->getType();
@@ -5883,7 +5883,7 @@ private:
     return VecReduxCost - ScalarReduxCost;
   }
 
-  /// \brief Emit a horizontal reduction of the vectorized value.
+  /// Emit a horizontal reduction of the vectorized value.
   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
                        unsigned ReduxWidth, const TargetTransformInfo *TTI) {
     assert(VectorizedValue && "Need to have a vectorized tree node");
@@ -5919,7 +5919,7 @@ private:
 
 } // end anonymous namespace
 
-/// \brief Recognize construction of vectors like
+/// Recognize construction of vectors like
 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
@@ -5951,7 +5951,7 @@ static bool findBuildVector(InsertElementInst *LastInsertElem,
   return true;
 }
 
-/// \brief Like findBuildVector, but looks for construction of aggregate.
+/// Like findBuildVector, but looks for construction of an aggregate.
 ///
 /// \return true if it matches.
 static bool findBuildAggregate(InsertValueInst *IV,
@@ -5974,7 +5974,7 @@ static bool PhiTypeSorterFunc(Value *V, Value *V2) {
   return V->getType() < V2->getType();
 }
 
-/// \brief Try and get a reduction value from a phi node.
+/// Try to get a reduction value from a phi node.
 ///
 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
 /// if they come from either \p ParentBB or a containing loop latch.
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 555a31fbb86..f0ef38c0d00 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -902,7 +902,7 @@ public:
   inline const VPRecipeBase &back() const { return Recipes.back(); }
   inline VPRecipeBase &back() { return Recipes.back(); }
 
-  /// \brief Returns a pointer to a member of the recipe list.
+  /// Returns a pointer to a member of the recipe list.
   static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
     return &VPBasicBlock::Recipes;
   }
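findBuildVector above matches a chain of insertelement instructions rooted at undef, one scalar per lane. Its core walk, reduced to a sketch (illustrative only — the real matcher also verifies that the lane indices are consecutive, which is omitted here; collectBuildVector is an invented name):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include <algorithm>

static bool collectBuildVector(llvm::InsertElementInst *Last,
                               llvm::SmallVectorImpl<llvm::Value *> &Ops) {
  using namespace llvm;
  Value *V = Last;
  while (auto *IE = dyn_cast<InsertElementInst>(V)) {
    Ops.push_back(IE->getOperand(1)); // the scalar inserted into this lane
    V = IE->getOperand(0);            // the vector being built up
    if (isa<UndefValue>(V))
      break;
  }
  if (!isa<UndefValue>(V))
    return false; // chain does not start from undef
  std::reverse(Ops.begin(), Ops.end()); // we walked last lane to first
  return true;
}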