author    Adrian Prantl <aprantl@apple.com>  2018-05-01 15:54:18 +0000
committer Adrian Prantl <aprantl@apple.com>  2018-05-01 15:54:18 +0000
commit    5f8f34e459b60efb332337e7cfe902a7cabe4096 (patch)
tree      b80a88887ea8331179e6294f1135d38a66ec28ce /llvm/lib/Analysis
parent    5727011fd552d87351c6229dc0337114a0269848 (diff)
Remove \brief commands from doxygen comments.

We've been running doxygen with the autobrief option for a couple of years
now. This makes the \brief markers in our comments redundant. Since they are
a visual distraction and we don't want to encourage more \brief markers in
new code either, this patch removes them all.

Patch produced by

  for i in $(git grep -l '\\brief'); do perl -pi -e 's/\\brief //g' $i & done

Differential Revision: https://reviews.llvm.org/D46290

llvm-svn: 331272
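To make the effect concrete, here is a minimal before/after sketch of the rewrite on a single doxygen comment (a hypothetical declaration for illustration only, assuming llvm::Value; it does not appear in the patch). With autobrief enabled, doxygen already treats the first sentence of a /// comment as the brief description, so the explicit marker adds nothing:

  // Before the patch (hypothetical example):
  /// \brief Test whether the given value is known to be non-null.
  ///
  /// Looks through bitcasts before checking.
  bool isKnownNonNullSketch(const Value *V);

  // After the patch: autobrief still extracts the first sentence as the
  // brief description.
  /// Test whether the given value is known to be non-null.
  ///
  /// Looks through bitcasts before checking.
  bool isKnownNonNullSketch(const Value *V);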
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp           |  2
-rw-r--r--  llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp  | 12
-rw-r--r--  llvm/lib/Analysis/BranchProbabilityInfo.cpp   | 18
-rw-r--r--  llvm/lib/Analysis/CFLGraph.h                  |  4
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp              | 30
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp     |  8
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp           |  4
-rw-r--r--  llvm/lib/Analysis/Lint.cpp                    |  4
-rw-r--r--  llvm/lib/Analysis/Loads.cpp                   |  4
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp      | 42
-rw-r--r--  llvm/lib/Analysis/LoopUnrollAnalyzer.cpp      |  2
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp          | 14
-rw-r--r--  llvm/lib/Analysis/MemorySSA.cpp               | 30
-rw-r--r--  llvm/lib/Analysis/MemorySSAUpdater.cpp        |  2
-rw-r--r--  llvm/lib/Analysis/MustExecute.cpp             |  2
-rw-r--r--  llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp    |  2
-rw-r--r--  llvm/lib/Analysis/ObjCARCInstKind.cpp         | 18
-rw-r--r--  llvm/lib/Analysis/OrderedBasicBlock.cpp       |  4
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp |  4
-rw-r--r--  llvm/lib/Analysis/StratifiedSets.h            | 20
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp     |  2
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp           |  6
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp             | 18
23 files changed, 126 insertions, 126 deletions
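Since the patch rewrites comment lines one-for-one, every per-file count above is even: each removed \brief line reappears as an inserted line without the marker, which is why insertions and deletions both total 126. A sketch of how to reproduce this listing from a checkout containing the commit:

  git show --stat 5f8f34e459b60efb332337e7cfe902a7cabe4096 -- llvm/lib/Analysis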
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 2e77c2f19a1..a499a6e6413 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -521,7 +521,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
return ModRefInfo::ModRef;
}
-/// \brief Return information about whether a particular call site modifies
+/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
diff --git a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 796916d49d3..748736b6080 100644
--- a/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -74,7 +74,7 @@ using LoopData = BlockFrequencyInfoImplBase::LoopData;
using Weight = BlockFrequencyInfoImplBase::Weight;
using FrequencyData = BlockFrequencyInfoImplBase::FrequencyData;
-/// \brief Dithering mass distributer.
+/// Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
@@ -277,7 +277,7 @@ void BlockFrequencyInfoImplBase::clear() {
Loops.clear();
}
-/// \brief Clear all memory not needed downstream.
+/// Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
@@ -362,7 +362,7 @@ bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
return true;
}
-/// \brief Compute the loop scale for a loop.
+/// Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
// Compute loop scale.
DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");
@@ -396,7 +396,7 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
<< " - scale = " << Loop.Scale << "\n");
}
-/// \brief Package up a loop.
+/// Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");
@@ -492,7 +492,7 @@ static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
}
}
-/// \brief Unwrap a loop package.
+/// Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
@@ -670,7 +670,7 @@ template <> struct GraphTraits<IrreducibleGraph> {
} // end namespace llvm
-/// \brief Find extra irreducible headers.
+/// Find extra irreducible headers.
///
/// Find entry blocks and other blocks with backedges, which exist when \c G
/// contains irreducible sub-SCCs.
diff --git a/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index f4aea51d301..1937e1c20ec 100644
--- a/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -88,14 +88,14 @@ static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
// Unlikely edges within a loop are half as likely as other edges
static const uint32_t LBH_UNLIKELY_WEIGHT = 62;
-/// \brief Unreachable-terminating branch taken probability.
+/// Unreachable-terminating branch taken probability.
///
/// This is the probability for a branch being taken to a block that terminates
/// (eventually) in unreachable. These are predicted as unlikely as possible.
/// All reachable probability will equally share the remaining part.
static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);
-/// \brief Weight for a branch taken going into a cold block.
+/// Weight for a branch taken going into a cold block.
///
/// This is the weight for a branch taken toward a block marked
/// cold. A block is marked cold if it's postdominated by a
@@ -103,7 +103,7 @@ static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);
/// are those marked with attribute 'cold'.
static const uint32_t CC_TAKEN_WEIGHT = 4;
-/// \brief Weight for a branch not-taken into a cold block.
+/// Weight for a branch not-taken into a cold block.
///
/// This is the weight for a branch not taken toward a block marked
/// cold.
@@ -118,20 +118,20 @@ static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
-/// \brief Invoke-terminating normal branch taken weight
+/// Invoke-terminating normal branch taken weight
///
/// This is the weight for branching to the normal destination of an invoke
/// instruction. We expect this to happen most of the time. Set the weight to an
/// absurdly high value so that nested loops subsume it.
static const uint32_t IH_TAKEN_WEIGHT = 1024 * 1024 - 1;
-/// \brief Invoke-terminating normal branch not-taken weight.
+/// Invoke-terminating normal branch not-taken weight.
///
/// This is the weight for branching to the unwind destination of an invoke
/// instruction. This is essentially never taken.
static const uint32_t IH_NONTAKEN_WEIGHT = 1;
-/// \brief Add \p BB to PostDominatedByUnreachable set if applicable.
+/// Add \p BB to PostDominatedByUnreachable set if applicable.
void
BranchProbabilityInfo::updatePostDominatedByUnreachable(const BasicBlock *BB) {
const TerminatorInst *TI = BB->getTerminator();
@@ -162,7 +162,7 @@ BranchProbabilityInfo::updatePostDominatedByUnreachable(const BasicBlock *BB) {
PostDominatedByUnreachable.insert(BB);
}
-/// \brief Add \p BB to PostDominatedByColdCall set if applicable.
+/// Add \p BB to PostDominatedByColdCall set if applicable.
void
BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) {
assert(!PostDominatedByColdCall.count(BB));
@@ -196,7 +196,7 @@ BranchProbabilityInfo::updatePostDominatedByColdCall(const BasicBlock *BB) {
}
}
-/// \brief Calculate edge weights for successors lead to unreachable.
+/// Calculate edge weights for successors lead to unreachable.
///
/// Predict that a successor which leads necessarily to an
/// unreachable-terminated block as extremely unlikely.
@@ -340,7 +340,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
return true;
}
-/// \brief Calculate edge weights for edges leading to cold blocks.
+/// Calculate edge weights for edges leading to cold blocks.
///
/// A cold block is one post-dominated by a block with a call to a
/// cold function. Those edges are unlikely to be taken, so we give
diff --git a/llvm/lib/Analysis/CFLGraph.h b/llvm/lib/Analysis/CFLGraph.h
index e4e92864061..02e1e2b9315 100644
--- a/llvm/lib/Analysis/CFLGraph.h
+++ b/llvm/lib/Analysis/CFLGraph.h
@@ -46,7 +46,7 @@
namespace llvm {
namespace cflaa {
-/// \brief The Program Expression Graph (PEG) of CFL analysis
+/// The Program Expression Graph (PEG) of CFL analysis
/// CFLGraph is auxiliary data structure used by CFL-based alias analysis to
/// describe flow-insensitive pointer-related behaviors. Given an LLVM function,
/// the main purpose of this graph is to abstract away unrelated facts and
@@ -154,7 +154,7 @@ public:
}
};
-///\brief A builder class used to create CFLGraph instance from a given function
+///A builder class used to create CFLGraph instance from a given function
/// The CFL-AA that uses this builder must provide its own type as a template
/// argument. This is necessary for interprocedural processing: CFLGraphBuilder
/// needs a way of obtaining the summary of other functions when callinsts are
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index c575a8be772..c81a66a051a 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -311,12 +311,12 @@ public:
} // namespace
-/// \brief Test whether the given value is an Alloca-derived function argument.
+/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
return SROAArgValues.count(V);
}
-/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
+/// Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
@@ -332,7 +332,7 @@ bool CallAnalyzer::lookupSROAArgAndCost(
return CostIt != SROAArgCosts.end();
}
-/// \brief Disable SROA for the candidate marked by this cost iterator.
+/// Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
@@ -346,7 +346,7 @@ void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
disableLoadElimination();
}
-/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
+/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
Value *SROAArg;
DenseMap<Value *, int>::iterator CostIt;
@@ -354,7 +354,7 @@ void CallAnalyzer::disableSROA(Value *V) {
disableSROA(CostIt);
}
-/// \brief Accumulate the given cost for a particular SROA candidate.
+/// Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
int InstructionCost) {
CostIt->second += InstructionCost;
@@ -369,7 +369,7 @@ void CallAnalyzer::disableLoadElimination() {
}
}
-/// \brief Accumulate a constant GEP offset into an APInt if possible.
+/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
@@ -402,7 +402,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
return true;
}
-/// \brief Use TTI to check whether a GEP is free.
+/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
@@ -543,7 +543,7 @@ bool CallAnalyzer::visitPHI(PHINode &I) {
return true;
}
-/// \brief Check we can fold GEPs of constant-offset call site argument pointers.
+/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
@@ -1163,7 +1163,7 @@ bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
return false;
}
-/// \brief Try to simplify a call site.
+/// Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
@@ -1534,7 +1534,7 @@ bool CallAnalyzer::visitInstruction(Instruction &I) {
return false;
}
-/// \brief Analyze a basic block for its contribution to the inline cost.
+/// Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
@@ -1611,7 +1611,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
return true;
}
-/// \brief Compute the base pointer and cumulative constant offsets for V.
+/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
@@ -1650,7 +1650,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
-/// \brief Find dead blocks due to deleted CFG edges during inlining.
+/// Find dead blocks due to deleted CFG edges during inlining.
///
/// If we know the successor of the current block, \p CurrBB, has to be \p
/// NextBB, the other successors of \p CurrBB are dead if these successors have
@@ -1688,7 +1688,7 @@ void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
}
}
-/// \brief Analyze a call site for potential inlining.
+/// Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
@@ -1881,7 +1881,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-/// \brief Dump stats about this call's analysis.
+/// Dump stats about this call's analysis.
LLVM_DUMP_METHOD void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
DEBUG_PRINT_STAT(NumConstantArgs);
@@ -1901,7 +1901,7 @@ LLVM_DUMP_METHOD void CallAnalyzer::dump() {
}
#endif
-/// \brief Test that there are no attribute conflicts between Caller and Callee
+/// Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
Function *Callee,
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 25c38c06223..cdcb81b8060 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -588,7 +588,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query, RecursionLimit);
}
-/// \brief Compute the base pointer and cumulative constant offsets for V.
+/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
@@ -639,7 +639,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
return OffsetIntPtr;
}
-/// \brief Compute the constant difference between two pointer values.
+/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
Value *RHS) {
@@ -1183,7 +1183,7 @@ static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
return nullptr;
}
-/// \brief Given operands for an Shl, LShr or AShr, see if we can
+/// Given operands for an Shl, LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
Value *Op1, bool isExact, const SimplifyQuery &Q,
@@ -4931,7 +4931,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
return Result == I ? UndefValue::get(I->getType()) : Result;
}
-/// \brief Implementation of recursive simplification through an instruction's
+/// Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 011c01f2ad8..4bb1e56d086 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1222,7 +1222,7 @@ static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
return ValueLatticeElement::getOverdefined();
}
-/// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
+/// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
/// Val is not constrained on the edge. Result is unspecified if return value
/// is false.
static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
@@ -1347,7 +1347,7 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
return false;
}
-/// \brief Compute the value of Val on the edge BBFrom -> BBTo or the value at
+/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val.
bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
BasicBlock *BBTo,
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 327f09a05e9..db919bd233b 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -165,13 +165,13 @@ namespace {
}
}
- /// \brief A check failed, so printout out the condition and the message.
+ /// A check failed, so printout out the condition and the message.
///
/// This provides a nice place to put a breakpoint if you want to see why
/// something is not correct.
void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }
- /// \brief A check failed (with values to print).
+ /// A check failed (with values to print).
///
/// This calls the Message-only version so that the above is easier to set
/// a breakpoint on.
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 9be8456963d..10a71de82e3 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -156,7 +156,7 @@ bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
}
-/// \brief Test if A and B will obviously have the same value.
+/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
@@ -187,7 +187,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
return false;
}
-/// \brief Check if executing a load of this pointer value cannot trap.
+/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 8bbe5a25215..6f94d30855c 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -92,7 +92,7 @@ static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
-/// \brief The maximum iterations used to merge memory checks
+/// The maximum iterations used to merge memory checks
static cl::opt<unsigned> MemoryCheckMergeThreshold(
"memory-check-merge-threshold", cl::Hidden,
cl::desc("Maximum number of comparisons done when trying to merge "
@@ -102,7 +102,7 @@ static cl::opt<unsigned> MemoryCheckMergeThreshold(
/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;
-/// \brief We collect dependences up to this threshold.
+/// We collect dependences up to this threshold.
static cl::opt<unsigned>
MaxDependences("max-dependences", cl::Hidden,
cl::desc("Maximum number of dependences collected by "
@@ -124,7 +124,7 @@ static cl::opt<bool> EnableMemAccessVersioning(
"enable-mem-access-versioning", cl::init(true), cl::Hidden,
cl::desc("Enable symbolic stride memory access versioning"));
-/// \brief Enable store-to-load forwarding conflict detection. This option can
+/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
"store-to-load-forwarding-conflict-detection", cl::Hidden,
@@ -490,13 +490,13 @@ void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
namespace {
-/// \brief Analyses memory accesses in a loop.
+/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
- /// \brief Read or write access location.
+ /// Read or write access location.
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
@@ -506,7 +506,7 @@ public:
: DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
PSE(PSE) {}
- /// \brief Register a load and whether it is only read from.
+ /// Register a load and whether it is only read from.
void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
@@ -515,14 +515,14 @@ public:
ReadOnlyPtr.insert(Ptr);
}
- /// \brief Register a store.
+ /// Register a store.
void addStore(MemoryLocation &Loc) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
Accesses.insert(MemAccessInfo(Ptr, true));
}
- /// \brief Check if we can emit a run-time no-alias check for \p Access.
+ /// Check if we can emit a run-time no-alias check for \p Access.
///
/// Returns true if we can emit a run-time no alias check for \p Access.
/// If we can check this access, this also adds it to a dependence set and
@@ -537,7 +537,7 @@ public:
unsigned ASId, bool ShouldCheckStride,
bool Assume);
- /// \brief Check whether we can check the pointers at runtime for
+ /// Check whether we can check the pointers at runtime for
/// non-intersection.
///
/// Returns true if we need no check or if we do and we can generate them
@@ -546,13 +546,13 @@ public:
Loop *TheLoop, const ValueToValueMap &Strides,
bool ShouldCheckWrap = false);
- /// \brief Goes over all memory accesses, checks whether a RT check is needed
+ /// Goes over all memory accesses, checks whether a RT check is needed
/// and builds sets of dependent accesses.
void buildDependenceSets() {
processMemAccesses();
}
- /// \brief Initial processing of memory accesses determined that we need to
+ /// Initial processing of memory accesses determined that we need to
/// perform dependency checking.
///
/// Note that this can later be cleared if we retry memcheck analysis without
@@ -570,7 +570,7 @@ public:
private:
typedef SetVector<MemAccessInfo> PtrAccessSet;
- /// \brief Go over all memory access and check whether runtime pointer checks
+ /// Go over all memory access and check whether runtime pointer checks
/// are needed and build sets of dependency check candidates.
void processMemAccesses();
@@ -596,7 +596,7 @@ private:
/// dependence check.
MemoryDepChecker::DepCandidates &DepCands;
- /// \brief Initial processing of memory accesses determined that we may need
+ /// Initial processing of memory accesses determined that we may need
/// to add memchecks. Perform the analysis to determine the necessary checks.
///
/// Note that, this is different from isDependencyCheckNeeded. When we retry
@@ -611,7 +611,7 @@ private:
} // end anonymous namespace
-/// \brief Check whether a pointer can participate in a runtime bounds check.
+/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
@@ -634,7 +634,7 @@ static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
return AR->isAffine();
}
-/// \brief Check whether a pointer address cannot wrap.
+/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
const SCEV *PtrScev = PSE.getSCEV(Ptr);
@@ -931,7 +931,7 @@ static bool isInBoundsGep(Value *Ptr) {
return false;
}
-/// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
+/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
PredicatedScalarEvolution &PSE, const Loop *L) {
@@ -979,7 +979,7 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
return false;
}
-/// \brief Check whether the access through \p Ptr has a constant stride.
+/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
const Loop *Lp, const ValueToValueMap &StridesMap,
bool Assume, bool ShouldCheckWrap) {
@@ -1372,7 +1372,7 @@ static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
return false;
}
-/// \brief Check the dependence for two accesses with the same stride \p Stride.
+/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
@@ -2025,7 +2025,7 @@ static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
namespace {
-/// \brief IR Values for the lower and upper bounds of a pointer evolution. We
+/// IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
@@ -2036,7 +2036,7 @@ struct PointerBounds {
} // end anonymous namespace
-/// \brief Expand code for the lower and upper bound of the pointer group \p CG
+/// Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
@@ -2074,7 +2074,7 @@ expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
}
}
-/// \brief Turns a collection of checks into a collection of expanded upper and
+/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
diff --git a/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp b/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
index 0da90dae3d9..c8b91a7a1a5 100644
--- a/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
+++ b/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
@@ -17,7 +17,7 @@
using namespace llvm;
-/// \brief Try to simplify instruction \param I using its SCEV expression.
+/// Try to simplify instruction \param I using its SCEV expression.
///
/// The idea is that some AddRec expressions become constants, which then
/// could trigger folding of other instructions. However, that only happens
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 2e5197a8ff1..186fda18886 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -217,7 +217,7 @@ static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
return CS && CS.hasRetAttr(Attribute::NoAlias);
}
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
@@ -225,7 +225,7 @@ bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast).hasValue();
}
-/// \brief Tests if a value is a call or invoke to a function that returns a
+/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
@@ -235,21 +235,21 @@ bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
hasNoAliasAttr(V, LookThroughBitCast);
}
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
return getAllocationData(V, MallocLike, TLI, LookThroughBitCast).hasValue();
}
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
return getAllocationData(V, CallocLike, TLI, LookThroughBitCast).hasValue();
}
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
@@ -257,7 +257,7 @@ bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
LookThroughBitCast).hasValue();
}
-/// \brief Tests if a value is a call or invoke to a library function that
+/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
@@ -427,7 +427,7 @@ static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
return Data.first - Data.second;
}
-/// \brief Compute the size of the object pointed by Ptr. Returns true and the
+/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index 5130c21cde9..7341188c719 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -83,7 +83,7 @@ static cl::opt<bool>
namespace llvm {
-/// \brief An assembly annotator class to print Memory SSA information in
+/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
friend class MemorySSA;
@@ -906,7 +906,7 @@ struct RenamePassData {
namespace llvm {
-/// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
+/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own,
/// but the name has been retained for the moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
@@ -952,7 +952,7 @@ void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
}
}
-/// \brief Rename a single basic block into MemorySSA form.
+/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
@@ -975,7 +975,7 @@ MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
return IncomingVal;
}
-/// \brief This is the standard SSA renaming algorithm.
+/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
@@ -1024,7 +1024,7 @@ void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
}
}
-/// \brief This handles unreachable block accesses by deleting phi nodes in
+/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
@@ -1525,7 +1525,7 @@ static inline bool isOrdered(const Instruction *I) {
return false;
}
-/// \brief Helper function to create new memory accesses
+/// Helper function to create new memory accesses
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
// The assume intrinsic has a control dependency which we model by claiming
// that it writes arbitrarily. Ignore that fake memory dependency here.
@@ -1562,7 +1562,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
return MUD;
}
-/// \brief Returns true if \p Replacer dominates \p Replacee .
+/// Returns true if \p Replacer dominates \p Replacee .
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
const MemoryAccess *Replacee) const {
if (isa<MemoryUseOrDef>(Replacee))
@@ -1579,7 +1579,7 @@ bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
return true;
}
-/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
+/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
assert(MA->use_empty() &&
"Trying to remove memory access that still has uses");
@@ -1602,7 +1602,7 @@ void MemorySSA::removeFromLookups(MemoryAccess *MA) {
ValueToMemoryAccess.erase(VMA);
}
-/// \brief Properly remove \p MA from all of MemorySSA's lists.
+/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
@@ -1648,7 +1648,7 @@ void MemorySSA::verifyMemorySSA() const {
Walker->verify(this);
}
-/// \brief Verify that the order and existence of MemoryAccesses matches the
+/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
// Walk all the blocks, comparing what the lookups think and what the access
@@ -1711,7 +1711,7 @@ void MemorySSA::verifyOrdering(Function &F) const {
}
}
-/// \brief Verify the domination properties of MemorySSA by checking that each
+/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
@@ -1733,7 +1733,7 @@ void MemorySSA::verifyDomination(Function &F) const {
#endif
}
-/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
+/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
@@ -1747,7 +1747,7 @@ void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#endif
}
-/// \brief Verify the immediate use information, by walking all the memory
+/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list
void MemorySSA::verifyDefUses(Function &F) const {
@@ -1793,7 +1793,7 @@ void MemorySSA::renumberBlock(const BasicBlock *B) const {
BlockNumberingValid.insert(B);
}
-/// \brief Determine, for two memory accesses in the same block,
+/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
@@ -2001,7 +2001,7 @@ void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
MUD->resetOptimized();
}
-/// \brief Walk the use-def chains starting at \p MA and find
+/// Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
diff --git a/llvm/lib/Analysis/MemorySSAUpdater.cpp b/llvm/lib/Analysis/MemorySSAUpdater.cpp
index 57b9cc98a43..e14319bba87 100644
--- a/llvm/lib/Analysis/MemorySSAUpdater.cpp
+++ b/llvm/lib/Analysis/MemorySSAUpdater.cpp
@@ -424,7 +424,7 @@ void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
return moveTo(What, BB, Where);
}
-/// \brief If all arguments of a MemoryPHI are defined by the same incoming
+/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
MemoryAccess *MA = nullptr;
diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp
index 1e922fd44c4..677b5a28222 100644
--- a/llvm/lib/Analysis/MustExecute.cpp
+++ b/llvm/lib/Analysis/MustExecute.cpp
@@ -198,7 +198,7 @@ static bool isMustExecuteIn(const Instruction &I, Loop *L, DominatorTree *DT) {
}
namespace {
-/// \brief An assembly annotator class to print must execute information in
+/// An assembly annotator class to print must execute information in
/// comments.
class MustExecuteAnnotatedWriter : public AssemblyAnnotationWriter {
DenseMap<const Value*, SmallVector<Loop*, 4> > MustExec;
diff --git a/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp b/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp
index 55335f3a7cb..d6db6386c38 100644
--- a/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp
+++ b/llvm/lib/Analysis/ObjCARCAnalysisUtils.cpp
@@ -19,7 +19,7 @@
using namespace llvm;
using namespace llvm::objcarc;
-/// \brief A handy option to enable/disable all ARC Optimizations.
+/// A handy option to enable/disable all ARC Optimizations.
bool llvm::objcarc::EnableARCOpts;
static cl::opt<bool, true> EnableARCOptimizations(
"enable-objc-arc-opts", cl::desc("enable/disable all ARC Optimizations"),
diff --git a/llvm/lib/Analysis/ObjCARCInstKind.cpp b/llvm/lib/Analysis/ObjCARCInstKind.cpp
index f374dd33f86..332c9e894de 100644
--- a/llvm/lib/Analysis/ObjCARCInstKind.cpp
+++ b/llvm/lib/Analysis/ObjCARCInstKind.cpp
@@ -233,7 +233,7 @@ static bool isUseOnlyIntrinsic(unsigned ID) {
}
}
-/// \brief Determine what kind of construct V is.
+/// Determine what kind of construct V is.
ARCInstKind llvm::objcarc::GetARCInstKind(const Value *V) {
if (const Instruction *I = dyn_cast<Instruction>(V)) {
// Any instruction other than bitcast and gep with a pointer operand have a
@@ -331,7 +331,7 @@ ARCInstKind llvm::objcarc::GetARCInstKind(const Value *V) {
return ARCInstKind::None;
}
-/// \brief Test if the given class is a kind of user.
+/// Test if the given class is a kind of user.
bool llvm::objcarc::IsUser(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::User:
@@ -365,7 +365,7 @@ bool llvm::objcarc::IsUser(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class is objc_retain or equivalent.
+/// Test if the given class is objc_retain or equivalent.
bool llvm::objcarc::IsRetain(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Retain:
@@ -401,7 +401,7 @@ bool llvm::objcarc::IsRetain(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class is objc_autorelease or equivalent.
+/// Test if the given class is objc_autorelease or equivalent.
bool llvm::objcarc::IsAutorelease(ARCInstKind Class) {
switch (Class) {
case ARCInstKind::Autorelease:
@@ -435,7 +435,7 @@ bool llvm::objcarc::IsAutorelease(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class represents instructions which return their
+/// Test if the given class represents instructions which return their
/// argument verbatim.
bool llvm::objcarc::IsForwarding(ARCInstKind Class) {
switch (Class) {
@@ -470,7 +470,7 @@ bool llvm::objcarc::IsForwarding(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class represents instructions which do nothing if
+/// Test if the given class represents instructions which do nothing if
/// passed a null pointer.
bool llvm::objcarc::IsNoopOnNull(ARCInstKind Class) {
switch (Class) {
@@ -505,7 +505,7 @@ bool llvm::objcarc::IsNoopOnNull(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class represents instructions which are always safe
+/// Test if the given class represents instructions which are always safe
/// to mark with the "tail" keyword.
bool llvm::objcarc::IsAlwaysTail(ARCInstKind Class) {
// ARCInstKind::RetainBlock may be given a stack argument.
@@ -541,7 +541,7 @@ bool llvm::objcarc::IsAlwaysTail(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class represents instructions which are never safe
+/// Test if the given class represents instructions which are never safe
/// to mark with the "tail" keyword.
bool llvm::objcarc::IsNeverTail(ARCInstKind Class) {
/// It is never safe to tail call objc_autorelease since by tail calling
@@ -580,7 +580,7 @@ bool llvm::objcarc::IsNeverTail(ARCInstKind Class) {
llvm_unreachable("covered switch isn't covered?");
}
-/// \brief Test if the given class represents instructions which are always safe
+/// Test if the given class represents instructions which are always safe
/// to mark with the nounwind attribute.
bool llvm::objcarc::IsNoThrow(ARCInstKind Class) {
// objc_retainBlock is not nounwind because it calls user copy constructors
diff --git a/llvm/lib/Analysis/OrderedBasicBlock.cpp b/llvm/lib/Analysis/OrderedBasicBlock.cpp
index a04c0aef04b..6c47651eae9 100644
--- a/llvm/lib/Analysis/OrderedBasicBlock.cpp
+++ b/llvm/lib/Analysis/OrderedBasicBlock.cpp
@@ -30,7 +30,7 @@ OrderedBasicBlock::OrderedBasicBlock(const BasicBlock *BasicB)
LastInstFound = BB->end();
}
-/// \brief Given no cached results, find if \p A comes before \p B in \p BB.
+/// Given no cached results, find if \p A comes before \p B in \p BB.
/// Cache and number out instruction while walking \p BB.
bool OrderedBasicBlock::comesBefore(const Instruction *A,
const Instruction *B) {
@@ -58,7 +58,7 @@ bool OrderedBasicBlock::comesBefore(const Instruction *A,
return Inst != B;
}
-/// \brief Find out whether \p A dominates \p B, meaning whether \p A
+/// Find out whether \p A dominates \p B, meaning whether \p A
/// comes before \p B in \p BB. This is a simplification that considers
/// cached instruction positions and ignores other basic blocks, being
/// only relevant to compare relative instructions positions inside \p BB.
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index e2113d17196..f2ce0f4aa86 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1051,7 +1051,7 @@ Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
return IncV;
}
-/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
+/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
Instruction *Pos, PHINode *LoopPhi) {
@@ -1067,7 +1067,7 @@ void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
} while (InstToHoist != LoopPhi);
}
-/// \brief Check whether we can cheaply express the requested SCEV in terms of
+/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
const SCEVAddRecExpr *Phi,
diff --git a/llvm/lib/Analysis/StratifiedSets.h b/llvm/lib/Analysis/StratifiedSets.h
index 772df175b38..2f20cd12506 100644
--- a/llvm/lib/Analysis/StratifiedSets.h
+++ b/llvm/lib/Analysis/StratifiedSets.h
@@ -29,7 +29,7 @@ typedef unsigned StratifiedIndex;
/// NOTE: ^ This can't be a short -- bootstrapping clang has a case where
/// ~1M sets exist.
-// \brief Container of information related to a value in a StratifiedSet.
+// Container of information related to a value in a StratifiedSet.
struct StratifiedInfo {
StratifiedIndex Index;
/// For field sensitivity, etc. we can tack fields on here.
@@ -37,7 +37,7 @@ struct StratifiedInfo {
/// A "link" between two StratifiedSets.
struct StratifiedLink {
- /// \brief This is a value used to signify "does not exist" where the
+ /// This is a value used to signify "does not exist" where the
/// StratifiedIndex type is used.
///
/// This is used instead of Optional<StratifiedIndex> because
@@ -63,7 +63,7 @@ struct StratifiedLink {
void clearAbove() { Above = SetSentinel; }
};
-/// \brief These are stratified sets, as described in "Fast algorithms for
+/// These are stratified sets, as described in "Fast algorithms for
/// Dyck-CFL-reachability with applications to Alias Analysis" by Zhang Q, Lyu M
/// R, Yuan H, and Su Z. -- in short, this is meant to represent different sets
/// of Value*s. If two Value*s are in the same set, or if both sets have
@@ -172,7 +172,7 @@ private:
/// remap has occurred, and use this information so we can defer renumbering set
/// elements until build time.
template <typename T> class StratifiedSetsBuilder {
- /// \brief Represents a Stratified Set, with information about the Stratified
+ /// Represents a Stratified Set, with information about the Stratified
/// Set above it, the set below it, and whether the current set has been
/// remapped to another.
struct BuilderLink {
@@ -263,7 +263,7 @@ template <typename T> class StratifiedSetsBuilder {
StratifiedIndex Remap;
};
- /// \brief This function performs all of the set unioning/value renumbering
+ /// This function performs all of the set unioning/value renumbering
/// that we've been putting off, and generates a vector<StratifiedLink> that
/// may be placed in a StratifiedSets instance.
void finalizeSets(std::vector<StratifiedLink> &StratLinks) {
@@ -302,7 +302,7 @@ template <typename T> class StratifiedSetsBuilder {
}
}
- /// \brief There's a guarantee in StratifiedLink where all bits set in a
+ /// There's a guarantee in StratifiedLink where all bits set in a
/// Link.externals will be set in all Link.externals "below" it.
static void propagateAttrs(std::vector<StratifiedLink> &Links) {
const auto getHighestParentAbove = [&Links](StratifiedIndex Idx) {
@@ -351,7 +351,7 @@ public:
return addAtMerging(Main, NewIndex);
}
- /// \brief Restructures the stratified sets as necessary to make "ToAdd" in a
+ /// Restructures the stratified sets as necessary to make "ToAdd" in a
/// set above "Main". There are some cases where this is not possible (see
/// above), so we merge them such that ToAdd and Main are in the same set.
bool addAbove(const T &Main, const T &ToAdd) {
@@ -364,7 +364,7 @@ public:
return addAtMerging(ToAdd, Above);
}
- /// \brief Restructures the stratified sets as necessary to make "ToAdd" in a
+ /// Restructures the stratified sets as necessary to make "ToAdd" in a
/// set below "Main". There are some cases where this is not possible (see
/// above), so we merge them such that ToAdd and Main are in the same set.
bool addBelow(const T &Main, const T &ToAdd) {
@@ -437,7 +437,7 @@ private:
return *Current;
}
- /// \brief Merges two sets into one another. Assumes that these sets are not
+ /// Merges two sets into one another. Assumes that these sets are not
/// already one in the same.
void merge(StratifiedIndex Idx1, StratifiedIndex Idx2) {
assert(inbounds(Idx1) && inbounds(Idx2));
@@ -458,7 +458,7 @@ private:
mergeDirect(Idx1, Idx2);
}
- /// \brief Merges two sets assuming that the set at `Idx1` is unreachable from
+ /// Merges two sets assuming that the set at `Idx1` is unreachable from
/// traversing above or below the set at `Idx2`.
void mergeDirect(StratifiedIndex Idx1, StratifiedIndex Idx2) {
assert(inbounds(Idx1) && inbounds(Idx2));
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 6ac4fbe2dc2..684545243ae 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -31,7 +31,7 @@ static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
cl::desc("Recognize reduction patterns."));
namespace {
-/// \brief No-op implementation of the TTI interface using the utility base
+/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index a799f7c05b2..f757acfade0 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1754,7 +1754,7 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
return false;
}
-/// \brief Test whether a GEP's result is known to be non-null.
+/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
@@ -3380,7 +3380,7 @@ uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
return Len == ~0ULL ? 1 : Len;
}
-/// \brief \p PN defines a loop-variant pointer to an object. Check if the
+/// \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
const LoopInfo *LI) {
@@ -3729,7 +3729,7 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
return OverflowResult::MayOverflow;
}
-/// \brief Return true if we can prove that adding the two values of the
+/// Return true if we can prove that adding the two values of the
/// knownbits will not overflow.
/// Otherwise return false.
static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 7827bcf5c6b..d73d2473643 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -28,7 +28,7 @@
using namespace llvm;
using namespace llvm::PatternMatch;
-/// \brief Identify if the intrinsic is trivially vectorizable.
+/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
@@ -67,7 +67,7 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
}
}
-/// \brief Identifies if the intrinsic has a scalar operand. It check for
+/// Identifies if the intrinsic has a scalar operand. It check for
/// ctlz,cttz and powi special intrinsics whose argument is scalar.
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
@@ -81,7 +81,7 @@ bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
}
}
-/// \brief Returns intrinsic ID for call.
+/// Returns intrinsic ID for call.
/// For the input call instruction it finds mapping intrinsic and returns
/// its ID, in case it does not found it return not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
@@ -97,7 +97,7 @@ Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
return Intrinsic::not_intrinsic;
}
-/// \brief Find the operand of the GEP that should be checked for consecutive
+/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
@@ -121,7 +121,7 @@ unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
return LastOperand;
}
-/// \brief If the argument is a GEP, then returns the operand identified by
+/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
@@ -140,7 +140,7 @@ Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
return GEP->getOperand(InductionOperand);
}
-/// \brief If a value has only one user that is a CastInst, return it.
+/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
Value *UniqueCast = nullptr;
for (User *U : Ptr->users()) {
@@ -155,7 +155,7 @@ Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
return UniqueCast;
}
-/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
+/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
@@ -230,7 +230,7 @@ Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
return Stride;
}
-/// \brief Given a vector and an element number, see if the scalar value is
+/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
@@ -280,7 +280,7 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
return nullptr;
}
-/// \brief Get splat value if the input is a splat vector or return nullptr.
+/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constants vector or (2) a sequence
/// of instructions that broadcast a single value into a vector.
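After applying the patch, one way to confirm that no markers survive under this directory (a sketch mirroring the git-grep invocation from the commit message, run from the repository root):

  git grep -n '\\brief' -- llvm/lib/Analysis
  # Expect no output (and a nonzero exit status) once every \brief is gone.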