author     Adrian Prantl <aprantl@apple.com>   2018-05-01 15:54:18 +0000
committer  Adrian Prantl <aprantl@apple.com>   2018-05-01 15:54:18 +0000
commit     5f8f34e459b60efb332337e7cfe902a7cabe4096 (patch)
tree       b80a88887ea8331179e6294f1135d38a66ec28ce /llvm/lib/Transforms/Scalar
parent     5727011fd552d87351c6229dc0337114a0269848 (diff)
Remove \brief commands from doxygen comments.
We've been running doxygen with the autobrief option for a couple of years
now. This makes the \brief markers in our comments redundant. Since they are
a visual distraction and we don't want to encourage more \brief markers in
new code either, this patch removes them all.

Patch produced by:

    for i in $(git grep -l '\\brief'); do perl -pi -e 's/\\brief //g' $i & done

Differential Revision: https://reviews.llvm.org/D46290

llvm-svn: 331272
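The substitution is purely mechanical. As a quick illustration of what the
one-liner does (a shell sketch added for this page, not part of the commit
itself), the same expression can be applied to a sample line from the diff
below:

    $ echo '/// \brief The constant hoisting pass.' | perl -pe 's/\\brief //g'
    /// The constant hoisting pass.

The commit's variant uses perl -pi to rewrite each file returned by git grep
in place; -pe here merely filters stdin so the effect is visible.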
Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstantHoisting.cpp             32
-rw-r--r--  llvm/lib/Transforms/Scalar/EarlyCSE.cpp                     18
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp                           2
-rw-r--r--  llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp            2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp              2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopDistribute.cpp               86
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInterchange.cpp               4
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp          30
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp           14
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp                6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp           18
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp          2
-rw-r--r--  llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp        20
-rw-r--r--  llvm/lib/Transforms/Scalar/Reassociate.cpp                   6
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                        140
-rw-r--r--  llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp    4
-rw-r--r--  llvm/lib/Transforms/Scalar/StructurizeCFG.cpp               40
-rw-r--r--  llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp      2

18 files changed, 214 insertions(+), 214 deletions(-)
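To reproduce this summary locally (assuming a checkout of a repository that
contains this commit), git can regenerate the per-directory diffstat from the
hash shown above:

    $ git show --stat 5f8f34e459b60efb332337e7cfe902a7cabe4096 -- llvm/lib/Transforms/Scalar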
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 4b53628e442..470e687a722 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -84,7 +84,7 @@ static cl::opt<bool> ConstHoistWithBlockFrequency(
namespace {
-/// \brief The constant hoisting pass.
+/// The constant hoisting pass.
class ConstantHoistingLegacyPass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
@@ -127,7 +127,7 @@ FunctionPass *llvm::createConstantHoistingPass() {
return new ConstantHoistingLegacyPass();
}
-/// \brief Perform the constant hoisting optimization for the given function.
+/// Perform the constant hoisting optimization for the given function.
bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
if (skipFunction(Fn))
return false;
@@ -153,7 +153,7 @@ bool ConstantHoistingLegacyPass::runOnFunction(Function &Fn) {
return MadeChange;
}
-/// \brief Find the constant materialization insertion point.
+/// Find the constant materialization insertion point.
Instruction *ConstantHoistingPass::findMatInsertPt(Instruction *Inst,
unsigned Idx) const {
// If the operand is a cast instruction, then we have to materialize the
@@ -187,7 +187,7 @@ Instruction *ConstantHoistingPass::findMatInsertPt(Instruction *Inst,
return IDom->getBlock()->getTerminator();
}
-/// \brief Given \p BBs as input, find another set of BBs which collectively
+/// Given \p BBs as input, find another set of BBs which collectively
/// dominates \p BBs and have the minimal sum of frequencies. Return the BB
/// set found in \p BBs.
static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
@@ -289,7 +289,7 @@ static void findBestInsertionSet(DominatorTree &DT, BlockFrequencyInfo &BFI,
}
}
-/// \brief Find an insertion point that dominates all uses.
+/// Find an insertion point that dominates all uses.
SmallPtrSet<Instruction *, 8> ConstantHoistingPass::findConstantInsertionPoint(
const ConstantInfo &ConstInfo) const {
assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");
@@ -335,7 +335,7 @@ SmallPtrSet<Instruction *, 8> ConstantHoistingPass::findConstantInsertionPoint(
return InsertPts;
}
-/// \brief Record constant integer ConstInt for instruction Inst at operand
+/// Record constant integer ConstInt for instruction Inst at operand
/// index Idx.
///
/// The operand at index Idx is not necessarily the constant integer itself. It
@@ -375,7 +375,7 @@ void ConstantHoistingPass::collectConstantCandidates(
}
}
-/// \brief Check the operand for instruction Inst at index Idx.
+/// Check the operand for instruction Inst at index Idx.
void ConstantHoistingPass::collectConstantCandidates(
ConstCandMapType &ConstCandMap, Instruction *Inst, unsigned Idx) {
Value *Opnd = Inst->getOperand(Idx);
@@ -416,7 +416,7 @@ void ConstantHoistingPass::collectConstantCandidates(
}
}
-/// \brief Scan the instruction for expensive integer constants and record them
+/// Scan the instruction for expensive integer constants and record them
/// in the constant candidate vector.
void ConstantHoistingPass::collectConstantCandidates(
ConstCandMapType &ConstCandMap, Instruction *Inst) {
@@ -436,7 +436,7 @@ void ConstantHoistingPass::collectConstantCandidates(
} // end of for all operands
}
-/// \brief Collect all integer constants in the function that cannot be folded
+/// Collect all integer constants in the function that cannot be folded
/// into an instruction itself.
void ConstantHoistingPass::collectConstantCandidates(Function &Fn) {
ConstCandMapType ConstCandMap;
@@ -541,7 +541,7 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
return NumUses;
}
-/// \brief Find the base constant within the given range and rebase all other
+/// Find the base constant within the given range and rebase all other
/// constants with respect to the base constant.
void ConstantHoistingPass::findAndMakeBaseConstant(
ConstCandVecType::iterator S, ConstCandVecType::iterator E) {
@@ -567,7 +567,7 @@ void ConstantHoistingPass::findAndMakeBaseConstant(
ConstantVec.push_back(std::move(ConstInfo));
}
-/// \brief Finds and combines constant candidates that can be easily
+/// Finds and combines constant candidates that can be easily
/// rematerialized with an add from a common base constant.
void ConstantHoistingPass::findBaseConstants() {
// Sort the constants by value and type. This invalidates the mapping!
@@ -601,7 +601,7 @@ void ConstantHoistingPass::findBaseConstants() {
findAndMakeBaseConstant(MinValItr, ConstCandVec.end());
}
-/// \brief Updates the operand at Idx in instruction Inst with the result of
+/// Updates the operand at Idx in instruction Inst with the result of
/// instruction Mat. If the instruction is a PHI node then special
/// handling for duplicate values form the same incoming basic block is
/// required.
@@ -629,7 +629,7 @@ static bool updateOperand(Instruction *Inst, unsigned Idx, Instruction *Mat) {
return true;
}
-/// \brief Emit materialization code for all rebased constants and update their
+/// Emit materialization code for all rebased constants and update their
/// users.
void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
Constant *Offset,
@@ -702,7 +702,7 @@ void ConstantHoistingPass::emitBaseConstants(Instruction *Base,
}
}
-/// \brief Hoist and hide the base constant behind a bitcast and emit
+/// Hoist and hide the base constant behind a bitcast and emit
/// materialization code for derived constants.
bool ConstantHoistingPass::emitBaseConstants() {
bool MadeChange = false;
@@ -765,7 +765,7 @@ bool ConstantHoistingPass::emitBaseConstants() {
return MadeChange;
}
-/// \brief Check all cast instructions we made a copy of and remove them if they
+/// Check all cast instructions we made a copy of and remove them if they
/// have no more users.
void ConstantHoistingPass::deleteDeadCastInst() const {
for (auto const &I : ClonedCastMap)
@@ -773,7 +773,7 @@ void ConstantHoistingPass::deleteDeadCastInst() const {
I.first->eraseFromParent();
}
-/// \brief Optimize expensive integer constants in the given function.
+/// Optimize expensive integer constants in the given function.
bool ConstantHoistingPass::runImpl(Function &Fn, TargetTransformInfo &TTI,
DominatorTree &DT, BlockFrequencyInfo *BFI,
BasicBlock &Entry) {
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 7f320d5f95a..4380812968a 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -80,7 +80,7 @@ DEBUG_COUNTER(CSECounter, "early-cse",
namespace {
-/// \brief Struct representing the available values in the scoped hash table.
+/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
Instruction *Inst;
@@ -243,7 +243,7 @@ bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
namespace {
-/// \brief Struct representing the available call values in the scoped hash
+/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
Instruction *Inst;
@@ -309,7 +309,7 @@ bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
namespace {
-/// \brief A simple and fast domtree-based CSE pass.
+/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
@@ -333,7 +333,7 @@ public:
ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
AllocatorTy>;
- /// \brief A scoped hash table of the current values of all of our simple
+ /// A scoped hash table of the current values of all of our simple
/// scalar expressions.
///
/// As we walk down the domtree, we look to see if instructions are in this:
@@ -388,7 +388,7 @@ public:
InvariantMapAllocator>;
InvariantHTType AvailableInvariants;
- /// \brief A scoped hash table of the current values of read-only call
+ /// A scoped hash table of the current values of read-only call
/// values.
///
/// It uses the same generation count as loads.
@@ -396,10 +396,10 @@ public:
ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
CallHTType AvailableCalls;
- /// \brief This is the current generation of the memory value.
+ /// This is the current generation of the memory value.
unsigned CurrentGeneration = 0;
- /// \brief Set up the EarlyCSE runner for a particular function.
+ /// Set up the EarlyCSE runner for a particular function.
EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
const TargetTransformInfo &TTI, DominatorTree &DT,
AssumptionCache &AC, MemorySSA *MSSA)
@@ -473,7 +473,7 @@ private:
bool Processed = false;
};
- /// \brief Wrapper class to handle memory instructions, including loads,
+ /// Wrapper class to handle memory instructions, including loads,
/// stores and intrinsic loads and stores defined by the target.
class ParseMemoryInst {
public:
@@ -1193,7 +1193,7 @@ PreservedAnalyses EarlyCSEPass::run(Function &F,
namespace {
-/// \brief A simple and fast domtree-based CSE pass.
+/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 878b91fa1e2..59b87e9a77d 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -826,7 +826,7 @@ static bool isLifetimeStart(const Instruction *Inst) {
return false;
}
-/// \brief Try to locate the three instruction involved in a missed
+/// Try to locate the three instruction involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
DominatorTree *DT,
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 841a9a31483..454ea254b88 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -140,7 +140,7 @@ namespace {
using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
-/// \brief InferAddressSpaces
+/// InferAddressSpaces
class InferAddressSpaces : public FunctionPass {
/// Target specific address space which uses of should be replaced if
/// possible.
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index 24150b1e471..c804115f415 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -71,7 +71,7 @@ public:
private:
bool runOnLoop(Loop *L);
- /// \brief Check if the stride of the accesses is large enough to
+ /// Check if the stride of the accesses is large enough to
/// warrant a prefetch.
bool isStrideLargeEnough(const SCEVAddRecExpr *AR);
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index 2f7b4923b33..a4da0940e33 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -111,7 +111,7 @@ STATISTIC(NumLoopsDistributed, "Number of loops distributed");
namespace {
-/// \brief Maintains the set of instructions of the loop for a partition before
+/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
using InstructionSet = SmallPtrSet<Instruction *, 8>;
@@ -122,20 +122,20 @@ public:
Set.insert(I);
}
- /// \brief Returns whether this partition contains a dependence cycle.
+ /// Returns whether this partition contains a dependence cycle.
bool hasDepCycle() const { return DepCycle; }
- /// \brief Adds an instruction to this partition.
+ /// Adds an instruction to this partition.
void add(Instruction *I) { Set.insert(I); }
- /// \brief Collection accessors.
+ /// Collection accessors.
InstructionSet::iterator begin() { return Set.begin(); }
InstructionSet::iterator end() { return Set.end(); }
InstructionSet::const_iterator begin() const { return Set.begin(); }
InstructionSet::const_iterator end() const { return Set.end(); }
bool empty() const { return Set.empty(); }
- /// \brief Moves this partition into \p Other. This partition becomes empty
+ /// Moves this partition into \p Other. This partition becomes empty
/// after this.
void moveTo(InstPartition &Other) {
Other.Set.insert(Set.begin(), Set.end());
@@ -143,7 +143,7 @@ public:
Other.DepCycle |= DepCycle;
}
- /// \brief Populates the partition with a transitive closure of all the
+ /// Populates the partition with a transitive closure of all the
/// instructions that the seeded instructions dependent on.
void populateUsedSet() {
// FIXME: We currently don't use control-dependence but simply include all
@@ -166,7 +166,7 @@ public:
}
}
- /// \brief Clones the original loop.
+ /// Clones the original loop.
///
/// Updates LoopInfo and DominatorTree using the information that block \p
/// LoopDomBB dominates the loop.
@@ -179,27 +179,27 @@ public:
return ClonedLoop;
}
- /// \brief The cloned loop. If this partition is mapped to the original loop,
+ /// The cloned loop. If this partition is mapped to the original loop,
/// this is null.
const Loop *getClonedLoop() const { return ClonedLoop; }
- /// \brief Returns the loop where this partition ends up after distribution.
+ /// Returns the loop where this partition ends up after distribution.
/// If this partition is mapped to the original loop then use the block from
/// the loop.
const Loop *getDistributedLoop() const {
return ClonedLoop ? ClonedLoop : OrigLoop;
}
- /// \brief The VMap that is populated by cloning and then used in
+ /// The VMap that is populated by cloning and then used in
/// remapinstruction to remap the cloned instructions.
ValueToValueMapTy &getVMap() { return VMap; }
- /// \brief Remaps the cloned instructions using VMap.
+ /// Remaps the cloned instructions using VMap.
void remapInstructions() {
remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
}
- /// \brief Based on the set of instructions selected for this partition,
+ /// Based on the set of instructions selected for this partition,
/// removes the unnecessary ones.
void removeUnusedInsts() {
SmallVector<Instruction *, 8> Unused;
@@ -239,30 +239,30 @@ public:
}
private:
- /// \brief Instructions from OrigLoop selected for this partition.
+ /// Instructions from OrigLoop selected for this partition.
InstructionSet Set;
- /// \brief Whether this partition contains a dependence cycle.
+ /// Whether this partition contains a dependence cycle.
bool DepCycle;
- /// \brief The original loop.
+ /// The original loop.
Loop *OrigLoop;
- /// \brief The cloned loop. If this partition is mapped to the original loop,
+ /// The cloned loop. If this partition is mapped to the original loop,
/// this is null.
Loop *ClonedLoop = nullptr;
- /// \brief The blocks of ClonedLoop including the preheader. If this
+ /// The blocks of ClonedLoop including the preheader. If this
/// partition is mapped to the original loop, this is empty.
SmallVector<BasicBlock *, 8> ClonedLoopBlocks;
- /// \brief These gets populated once the set of instructions have been
+ /// These gets populated once the set of instructions have been
/// finalized. If this partition is mapped to the original loop, these are not
/// set.
ValueToValueMapTy VMap;
};
-/// \brief Holds the set of Partitions. It populates them, merges them and then
+/// Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
using InstToPartitionIdT = DenseMap<Instruction *, int>;
@@ -271,10 +271,10 @@ public:
InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
: L(L), LI(LI), DT(DT) {}
- /// \brief Returns the number of partitions.
+ /// Returns the number of partitions.
unsigned getSize() const { return PartitionContainer.size(); }
- /// \brief Adds \p Inst into the current partition if that is marked to
+ /// Adds \p Inst into the current partition if that is marked to
/// contain cycles. Otherwise start a new partition for it.
void addToCyclicPartition(Instruction *Inst) {
// If the current partition is non-cyclic. Start a new one.
@@ -284,7 +284,7 @@ public:
PartitionContainer.back().add(Inst);
}
- /// \brief Adds \p Inst into a partition that is not marked to contain
+ /// Adds \p Inst into a partition that is not marked to contain
/// dependence cycles.
///
// Initially we isolate memory instructions into as many partitions as
@@ -293,7 +293,7 @@ public:
PartitionContainer.emplace_back(Inst, L);
}
- /// \brief Merges adjacent non-cyclic partitions.
+ /// Merges adjacent non-cyclic partitions.
///
/// The idea is that we currently only want to isolate the non-vectorizable
/// partition. We could later allow more distribution among these partition
@@ -303,7 +303,7 @@ public:
[](const InstPartition *P) { return !P->hasDepCycle(); });
}
- /// \brief If a partition contains only conditional stores, we won't vectorize
+ /// If a partition contains only conditional stores, we won't vectorize
/// it. Try to merge it with a previous cyclic partition.
void mergeNonIfConvertible() {
mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
@@ -323,14 +323,14 @@ public:
});
}
- /// \brief Merges the partitions according to various heuristics.
+ /// Merges the partitions according to various heuristics.
void mergeBeforePopulating() {
mergeAdjacentNonCyclic();
if (!DistributeNonIfConvertible)
mergeNonIfConvertible();
}
- /// \brief Merges partitions in order to ensure that no loads are duplicated.
+ /// Merges partitions in order to ensure that no loads are duplicated.
///
/// We can't duplicate loads because that could potentially reorder them.
/// LoopAccessAnalysis provides dependency information with the context that
@@ -398,7 +398,7 @@ public:
return true;
}
- /// \brief Sets up the mapping between instructions to partitions. If the
+ /// Sets up the mapping between instructions to partitions. If the
/// instruction is duplicated across multiple partitions, set the entry to -1.
void setupPartitionIdOnInstructions() {
int PartitionID = 0;
@@ -416,14 +416,14 @@ public:
}
}
- /// \brief Populates the partition with everything that the seeding
+ /// Populates the partition with everything that the seeding
/// instructions require.
void populateUsedSet() {
for (auto &P : PartitionContainer)
P.populateUsedSet();
}
- /// \brief This performs the main chunk of the work of cloning the loops for
+ /// This performs the main chunk of the work of cloning the loops for
/// the partitions.
void cloneLoops() {
BasicBlock *OrigPH = L->getLoopPreheader();
@@ -470,13 +470,13 @@ public:
Curr->getDistributedLoop()->getExitingBlock());
}
- /// \brief Removes the dead instructions from the cloned loops.
+ /// Removes the dead instructions from the cloned loops.
void removeUnusedInsts() {
for (auto &Partition : PartitionContainer)
Partition.removeUnusedInsts();
}
- /// \brief For each memory pointer, it computes the partitionId the pointer is
+ /// For each memory pointer, it computes the partitionId the pointer is
/// used in.
///
/// This returns an array of int where the I-th entry corresponds to I-th
@@ -543,10 +543,10 @@ public:
private:
using PartitionContainerT = std::list<InstPartition>;
- /// \brief List of partitions.
+ /// List of partitions.
PartitionContainerT PartitionContainer;
- /// \brief Mapping from Instruction to partition Id. If the instruction
+ /// Mapping from Instruction to partition Id. If the instruction
/// belongs to multiple partitions the entry contains -1.
InstToPartitionIdT InstToPartitionId;
@@ -554,7 +554,7 @@ private:
LoopInfo *LI;
DominatorTree *DT;
- /// \brief The control structure to merge adjacent partitions if both satisfy
+ /// The control structure to merge adjacent partitions if both satisfy
/// the \p Predicate.
template <class UnaryPredicate>
void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
@@ -575,7 +575,7 @@ private:
}
};
-/// \brief For each memory instruction, this class maintains difference of the
+/// For each memory instruction, this class maintains difference of the
/// number of unsafe dependences that start out from this instruction minus
/// those that end here.
///
@@ -619,7 +619,7 @@ private:
AccessesType Accesses;
};
-/// \brief The actual class performing the per-loop work.
+/// The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
@@ -628,7 +628,7 @@ public:
setForced();
}
- /// \brief Try to distribute an inner-most loop.
+ /// Try to distribute an inner-most loop.
bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
assert(L->empty() && "Only process inner loops.");
@@ -793,7 +793,7 @@ public:
return true;
}
- /// \brief Provide diagnostics then \return with false.
+ /// Provide diagnostics then \return with false.
bool fail(StringRef RemarkName, StringRef Message) {
LLVMContext &Ctx = F->getContext();
bool Forced = isForced().getValueOr(false);
@@ -826,7 +826,7 @@ public:
return false;
}
- /// \brief Return if distribution forced to be enabled/disabled for the loop.
+ /// Return if distribution forced to be enabled/disabled for the loop.
///
/// If the optional has a value, it indicates whether distribution was forced
/// to be enabled (true) or disabled (false). If the optional has no value
@@ -834,7 +834,7 @@ public:
const Optional<bool> &isForced() const { return IsForced; }
private:
- /// \brief Filter out checks between pointers from the same partition.
+ /// Filter out checks between pointers from the same partition.
///
/// \p PtrToPartition contains the partition number for pointers. Partition
/// number -1 means that the pointer is used in multiple partitions. In this
@@ -873,7 +873,7 @@ private:
return Checks;
}
- /// \brief Check whether the loop metadata is forcing distribution to be
+ /// Check whether the loop metadata is forcing distribution to be
/// enabled/disabled.
void setForced() {
Optional<const MDOperand *> Value =
@@ -896,7 +896,7 @@ private:
ScalarEvolution *SE;
OptimizationRemarkEmitter *ORE;
- /// \brief Indicates whether distribution is forced to be enabled/disabled for
+ /// Indicates whether distribution is forced to be enabled/disabled for
/// the loop.
///
/// If the optional has a value, it indicates whether distribution was forced
@@ -939,7 +939,7 @@ static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
namespace {
-/// \brief The pass class.
+/// The pass class.
class LoopDistributeLegacy : public FunctionPass {
public:
static char ID;
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 97894726637..272dcaff2bc 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -1330,7 +1330,7 @@ void LoopInterchangeTransform::splitInnerLoopHeader() {
"InnerLoopHeader\n");
}
-/// \brief Move all instructions except the terminator from FromBB right before
+/// Move all instructions except the terminator from FromBB right before
/// InsertBefore
static void moveBBContents(BasicBlock *FromBB, Instruction *InsertBefore) {
auto &ToList = InsertBefore->getParent()->getInstList();
@@ -1353,7 +1353,7 @@ void LoopInterchangeTransform::updateIncomingBlock(BasicBlock *CurrBlock,
}
}
-/// \brief Update BI to jump to NewBB instead of OldBB. Records updates to
+/// Update BI to jump to NewBB instead of OldBB. Records updates to
/// the dominator tree in DTUpdates, if DT should be preserved.
static void updateSuccessor(BranchInst *BI, BasicBlock *OldBB,
BasicBlock *NewBB,
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 46b81355c07..a7c27662aa0 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -80,7 +80,7 @@ STATISTIC(NumLoopLoadEliminted, "Number of loads eliminated by LLE");
namespace {
-/// \brief Represent a store-to-forwarding candidate.
+/// Represent a store-to-forwarding candidate.
struct StoreToLoadForwardingCandidate {
LoadInst *Load;
StoreInst *Store;
@@ -88,7 +88,7 @@ struct StoreToLoadForwardingCandidate {
StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
: Load(Load), Store(Store) {}
- /// \brief Return true if the dependence from the store to the load has a
+ /// Return true if the dependence from the store to the load has a
/// distance of one. E.g. A[i+1] = A[i]
bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
Loop *L) const {
@@ -137,7 +137,7 @@ struct StoreToLoadForwardingCandidate {
} // end anonymous namespace
-/// \brief Check if the store dominates all latches, so as long as there is no
+/// Check if the store dominates all latches, so as long as there is no
/// intervening store this value will be loaded in the next iteration.
static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
DominatorTree *DT) {
@@ -148,21 +148,21 @@ static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
});
}
-/// \brief Return true if the load is not executed on all paths in the loop.
+/// Return true if the load is not executed on all paths in the loop.
static bool isLoadConditional(LoadInst *Load, Loop *L) {
return Load->getParent() != L->getHeader();
}
namespace {
-/// \brief The per-loop class that does most of the work.
+/// The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
DominatorTree *DT)
: L(L), LI(LI), LAI(LAI), DT(DT), PSE(LAI.getPSE()) {}
- /// \brief Look through the loop-carried and loop-independent dependences in
+ /// Look through the loop-carried and loop-independent dependences in
/// this loop and find store->load dependences.
///
/// Note that no candidate is returned if LAA has failed to analyze the loop
@@ -223,14 +223,14 @@ public:
return Candidates;
}
- /// \brief Return the index of the instruction according to program order.
+ /// Return the index of the instruction according to program order.
unsigned getInstrIndex(Instruction *Inst) {
auto I = InstOrder.find(Inst);
assert(I != InstOrder.end() && "No index for instruction");
return I->second;
}
- /// \brief If a load has multiple candidates associated (i.e. different
+ /// If a load has multiple candidates associated (i.e. different
/// stores), it means that it could be forwarding from multiple stores
/// depending on control flow. Remove these candidates.
///
@@ -294,7 +294,7 @@ public:
});
}
- /// \brief Given two pointers operations by their RuntimePointerChecking
+ /// Given two pointers operations by their RuntimePointerChecking
/// indices, return true if they require an alias check.
///
/// We need a check if one is a pointer for a candidate load and the other is
@@ -310,7 +310,7 @@ public:
(PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
}
- /// \brief Return pointers that are possibly written to on the path from a
+ /// Return pointers that are possibly written to on the path from a
/// forwarding store to a load.
///
/// These pointers need to be alias-checked against the forwarding candidates.
@@ -367,7 +367,7 @@ public:
return PtrsWrittenOnFwdingPath;
}
- /// \brief Determine the pointer alias checks to prove that there are no
+ /// Determine the pointer alias checks to prove that there are no
/// intervening stores.
SmallVector<RuntimePointerChecking::PointerCheck, 4> collectMemchecks(
const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
@@ -401,7 +401,7 @@ public:
return Checks;
}
- /// \brief Perform the transformation for a candidate.
+ /// Perform the transformation for a candidate.
void
propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
SCEVExpander &SEE) {
@@ -437,7 +437,7 @@ public:
Cand.Load->replaceAllUsesWith(PHI);
}
- /// \brief Top-level driver for each loop: find store->load forwarding
+ /// Top-level driver for each loop: find store->load forwarding
/// candidates, add run-time checks and perform transformation.
bool processLoop() {
DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
@@ -559,7 +559,7 @@ public:
private:
Loop *L;
- /// \brief Maps the load/store instructions to their index according to
+ /// Maps the load/store instructions to their index according to
/// program order.
DenseMap<Instruction *, unsigned> InstOrder;
@@ -600,7 +600,7 @@ eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT,
namespace {
-/// \brief The pass. Most of the work is delegated to the per-loop
+/// The pass. Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 2d8b5469faf..4c0b3cc808c 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -446,7 +446,7 @@ void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
canonicalize(*L);
}
-/// \brief Check whether or not this formula satisfies the canonical
+/// Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
bool Formula::isCanonical(const Loop &L) const {
@@ -474,7 +474,7 @@ bool Formula::isCanonical(const Loop &L) const {
return I == BaseRegs.end();
}
-/// \brief Helper method to morph a formula into its canonical representation.
+/// Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
/// Every formula having more than one base register, must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
@@ -509,7 +509,7 @@ void Formula::canonicalize(const Loop &L) {
}
}
-/// \brief Get rid of the scale in the formula.
+/// Get rid of the scale in the formula.
/// In other words, this method morphes reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
@@ -974,7 +974,7 @@ class LSRUse;
} // end anonymous namespace
-/// \brief Check if the addressing mode defined by \p F is completely
+/// Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
@@ -3515,7 +3515,7 @@ static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
return false;
}
-/// \brief Helper function for LSRInstance::GenerateReassociations.
+/// Helper function for LSRInstance::GenerateReassociations.
void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
const Formula &Base,
unsigned Depth, size_t Idx,
@@ -3653,7 +3653,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
}
}
-/// \brief Helper function for LSRInstance::GenerateSymbolicOffsets.
+/// Helper function for LSRInstance::GenerateSymbolicOffsets.
void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
const Formula &Base, size_t Idx,
bool IsScaledReg) {
@@ -3685,7 +3685,7 @@ void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
/* IsScaledReg */ true);
}
-/// \brief Helper function for LSRInstance::GenerateConstantOffsets.
+/// Helper function for LSRInstance::GenerateConstantOffsets.
void LSRInstance::GenerateConstantOffsetsImpl(
LSRUse &LU, unsigned LUIdx, const Formula &Base,
const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index a1b25a22a14..822f880f222 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -286,17 +286,17 @@ struct UnrolledInstStateKeyInfo {
};
struct EstimatedUnrollCost {
- /// \brief The estimated cost after unrolling.
+ /// The estimated cost after unrolling.
unsigned UnrolledCost;
- /// \brief The estimated dynamic cost of executing the instructions in the
+ /// The estimated dynamic cost of executing the instructions in the
/// rolled form.
unsigned RolledDynamicCost;
};
} // end anonymous namespace
-/// \brief Figure out if the loop is worth full unrolling.
+/// Figure out if the loop is worth full unrolling.
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
diff --git a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
index ba75b8c705e..e0e2c1938aa 100644
--- a/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopVersioningLICM.cpp
@@ -113,7 +113,7 @@ static cl::opt<unsigned> LVLoopDepthThreshold(
"LoopVersioningLICM's threshold for maximum allowed loop nest/depth"),
cl::init(2), cl::Hidden);
-/// \brief Create MDNode for input string.
+/// Create MDNode for input string.
static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) {
LLVMContext &Context = TheLoop->getHeader()->getContext();
Metadata *MDs[] = {
@@ -122,7 +122,7 @@ static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) {
return MDNode::get(Context, MDs);
}
-/// \brief Set input string into loop metadata by keeping other values intact.
+/// Set input string into loop metadata by keeping other values intact.
void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
unsigned V) {
SmallVector<Metadata *, 4> MDs(1);
@@ -242,7 +242,7 @@ private:
} // end anonymous namespace
-/// \brief Check loop structure and confirms it's good for LoopVersioningLICM.
+/// Check loop structure and confirms it's good for LoopVersioningLICM.
bool LoopVersioningLICM::legalLoopStructure() {
// Loop must be in loop simplify form.
if (!CurLoop->isLoopSimplifyForm()) {
@@ -293,7 +293,7 @@ bool LoopVersioningLICM::legalLoopStructure() {
return true;
}
-/// \brief Check memory accesses in loop and confirms it's good for
+/// Check memory accesses in loop and confirms it's good for
/// LoopVersioningLICM.
bool LoopVersioningLICM::legalLoopMemoryAccesses() {
bool HasMayAlias = false;
@@ -352,7 +352,7 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() {
return true;
}
-/// \brief Check loop instructions safe for Loop versioning.
+/// Check loop instructions safe for Loop versioning.
/// It returns true if it's safe else returns false.
/// Consider following:
/// 1) Check all load store in loop body are non atomic & non volatile.
@@ -403,7 +403,7 @@ bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
return true;
}
-/// \brief Check loop instructions and confirms it's good for
+/// Check loop instructions and confirms it's good for
/// LoopVersioningLICM.
bool LoopVersioningLICM::legalLoopInstructions() {
// Resetting counters.
@@ -480,7 +480,7 @@ bool LoopVersioningLICM::legalLoopInstructions() {
return true;
}
-/// \brief It checks loop is already visited or not.
+/// It checks loop is already visited or not.
/// check loop meta data, if loop revisited return true
/// else false.
bool LoopVersioningLICM::isLoopAlreadyVisited() {
@@ -491,7 +491,7 @@ bool LoopVersioningLICM::isLoopAlreadyVisited() {
return false;
}
-/// \brief Checks legality for LoopVersioningLICM by considering following:
+/// Checks legality for LoopVersioningLICM by considering following:
/// a) loop structure legality b) loop instruction legality
/// c) loop memory access legality.
/// Return true if legal else returns false.
@@ -546,7 +546,7 @@ bool LoopVersioningLICM::isLegalForVersioning() {
return true;
}
-/// \brief Update loop with aggressive aliasing assumptions.
+/// Update loop with aggressive aliasing assumptions.
/// It marks no-alias to any pairs of memory operations by assuming
/// loop should not have any must-alias memory accesses pairs.
/// During LoopVersioningLICM legality we ignore loops having must
diff --git a/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
index 46f8a356426..68bfa003039 100644
--- a/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -357,7 +357,7 @@ PreservedAnalyses LowerExpectIntrinsicPass::run(Function &F,
}
namespace {
-/// \brief Legacy pass for lowering expect intrinsics out of the IR.
+/// Legacy pass for lowering expect intrinsics out of the IR.
///
/// When this pass is run over a function it uses expect intrinsics which feed
/// branches and switches to provide branch weight metadata for those
diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 058da52dd84..cbed9a97c56 100644
--- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
//! \file
-//! \brief This pass performs merges of loads and stores on both sides of a
+//! This pass performs merges of loads and stores on both sides of a
// diamond (hammock). It hoists the loads and sinks the stores.
//
// The algorithm iteratively hoists two loads to the same address out of a
@@ -121,7 +121,7 @@ private:
} // end anonymous namespace
///
-/// \brief Return tail block of a diamond.
+/// Return tail block of a diamond.
///
BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) {
assert(isDiamondHead(BB) && "Basic block is not head of a diamond");
@@ -129,7 +129,7 @@ BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) {
}
///
-/// \brief True when BB is the head of a diamond (hammock)
+/// True when BB is the head of a diamond (hammock)
///
bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
if (!BB)
@@ -156,7 +156,7 @@ bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
///
-/// \brief True when instruction is a sink barrier for a store
+/// True when instruction is a sink barrier for a store
/// located in Loc
///
/// Whenever an instruction could possibly read or modify the
@@ -174,7 +174,7 @@ bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
}
///
-/// \brief Check if \p BB contains a store to the same address as \p SI
+/// Check if \p BB contains a store to the same address as \p SI
///
/// \return The store in \p when it is safe to sink. Otherwise return Null.
///
@@ -199,7 +199,7 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
}
///
-/// \brief Create a PHI node in BB for the operands of S0 and S1
+/// Create a PHI node in BB for the operands of S0 and S1
///
PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
StoreInst *S1) {
@@ -217,7 +217,7 @@ PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
}
///
-/// \brief Merge two stores to same address and sink into \p BB
+/// Merge two stores to same address and sink into \p BB
///
/// Also sinks GEP instruction computing the store address
///
@@ -262,7 +262,7 @@ bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
}
///
-/// \brief True when two stores are equivalent and can sink into the footer
+/// True when two stores are equivalent and can sink into the footer
///
/// Starting from a diamond tail block, iterate over the instructions in one
/// predecessor block and try to match a store in the second predecessor.
@@ -349,7 +349,7 @@ public:
}
///
- /// \brief Run the transformation for each function
+ /// Run the transformation for each function
///
bool runOnFunction(Function &F) override {
if (skipFunction(F))
@@ -370,7 +370,7 @@ char MergedLoadStoreMotionLegacyPass::ID = 0;
} // anonymous namespace
///
-/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
+/// createMergedLoadStoreMotionPass - The public interface to this file.
///
FunctionPass *llvm::createMergedLoadStoreMotionPass() {
return new MergedLoadStoreMotionLegacyPass();
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 36f16618a75..b51e84238b4 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -1634,7 +1634,7 @@ Value *ReassociatePass::OptimizeAdd(Instruction *I,
return nullptr;
}
-/// \brief Build up a vector of value/power pairs factoring a product.
+/// Build up a vector of value/power pairs factoring a product.
///
/// Given a series of multiplication operands, build a vector of factors and
/// the powers each is raised to when forming the final product. Sort them in
@@ -1699,7 +1699,7 @@ static bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
return true;
}
-/// \brief Build a tree of multiplies, computing the product of Ops.
+/// Build a tree of multiplies, computing the product of Ops.
static Value *buildMultiplyTree(IRBuilder<> &Builder,
SmallVectorImpl<Value*> &Ops) {
if (Ops.size() == 1)
@@ -1716,7 +1716,7 @@ static Value *buildMultiplyTree(IRBuilder<> &Builder,
return LHS;
}
-/// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
+/// Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
///
/// Given a vector of values raised to various powers, where no two values are
/// equal and the powers are sorted in decreasing order, compute the minimal
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 255c5b959ad..b4200da99cc 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -127,7 +127,7 @@ static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
namespace {
-/// \brief A custom IRBuilder inserter which prefixes all names, but only in
+/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
std::string Prefix;
@@ -147,23 +147,23 @@ protected:
}
};
-/// \brief Provide a type for IRBuilder that drops names in release builds.
+/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
-/// \brief A used slice of an alloca.
+/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
- /// \brief The beginning offset of the range.
+ /// The beginning offset of the range.
uint64_t BeginOffset = 0;
- /// \brief The ending offset, not included in the range.
+ /// The ending offset, not included in the range.
uint64_t EndOffset = 0;
- /// \brief Storage for both the use of this slice and whether it can be
+ /// Storage for both the use of this slice and whether it can be
/// split.
PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
@@ -185,7 +185,7 @@ public:
bool isDead() const { return getUse() == nullptr; }
void kill() { UseAndIsSplittable.setPointer(nullptr); }
- /// \brief Support for ordering ranges.
+ /// Support for ordering ranges.
///
/// This provides an ordering over ranges such that start offsets are
/// always increasing, and within equal start offsets, the end offsets are
@@ -203,7 +203,7 @@ public:
return false;
}
- /// \brief Support comparison with a single offset to allow binary searches.
+ /// Support comparison with a single offset to allow binary searches.
friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
uint64_t RHSOffset) {
return LHS.beginOffset() < RHSOffset;
@@ -229,7 +229,7 @@ template <> struct isPodLike<Slice> { static const bool value = true; };
} // end namespace llvm
-/// \brief Representation of the alloca slices.
+/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
@@ -238,16 +238,16 @@ template <> struct isPodLike<Slice> { static const bool value = true; };
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
- /// \brief Construct the slices of a particular alloca.
+ /// Construct the slices of a particular alloca.
AllocaSlices(const DataLayout &DL, AllocaInst &AI);
- /// \brief Test whether a pointer to the allocation escapes our analysis.
+ /// Test whether a pointer to the allocation escapes our analysis.
///
/// If this is true, the slices are never fully built and should be
/// ignored.
bool isEscaped() const { return PointerEscapingInstr; }
- /// \brief Support for iterating over the slices.
+ /// Support for iterating over the slices.
/// @{
using iterator = SmallVectorImpl<Slice>::iterator;
using range = iterator_range<iterator>;
@@ -262,10 +262,10 @@ public:
const_iterator end() const { return Slices.end(); }
/// @}
- /// \brief Erase a range of slices.
+ /// Erase a range of slices.
void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
- /// \brief Insert new slices for this alloca.
+ /// Insert new slices for this alloca.
///
/// This moves the slices into the alloca's slices collection, and re-sorts
/// everything so that the usual ordering properties of the alloca's slices
@@ -283,10 +283,10 @@ public:
class partition_iterator;
iterator_range<partition_iterator> partitions();
- /// \brief Access the dead users for this alloca.
+ /// Access the dead users for this alloca.
ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
- /// \brief Access the dead operands referring to this alloca.
+ /// Access the dead operands referring to this alloca.
///
/// These are operands which have cannot actually be used to refer to the
/// alloca as they are outside its range and the user doesn't correct for
@@ -312,11 +312,11 @@ private:
friend class AllocaSlices::SliceBuilder;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- /// \brief Handle to alloca instruction to simplify method interfaces.
+ /// Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
#endif
- /// \brief The instruction responsible for this alloca not having a known set
+ /// The instruction responsible for this alloca not having a known set
/// of slices.
///
/// When an instruction (potentially) escapes the pointer to the alloca, we
@@ -324,7 +324,7 @@ private:
/// alloca. This will be null if the alloca slices are analyzed successfully.
Instruction *PointerEscapingInstr;
- /// \brief The slices of the alloca.
+ /// The slices of the alloca.
///
/// We store a vector of the slices formed by uses of the alloca here. This
/// vector is sorted by increasing begin offset, and then the unsplittable
@@ -332,7 +332,7 @@ private:
/// details.
SmallVector<Slice, 8> Slices;
- /// \brief Instructions which will become dead if we rewrite the alloca.
+ /// Instructions which will become dead if we rewrite the alloca.
///
/// Note that these are not separated by slice. This is because we expect an
/// alloca to be completely rewritten or not rewritten at all. If rewritten,
@@ -340,7 +340,7 @@ private:
/// they come from outside of the allocated space.
SmallVector<Instruction *, 8> DeadUsers;
- /// \brief Operands which will become dead if we rewrite the alloca.
+ /// Operands which will become dead if we rewrite the alloca.
///
/// These are operands that in their particular use can be replaced with
/// undef when we rewrite the alloca. These show up in out-of-bounds inputs
@@ -351,7 +351,7 @@ private:
SmallVector<Use *, 8> DeadOperands;
};
-/// \brief A partition of the slices.
+/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
@@ -367,32 +367,32 @@ private:
using iterator = AllocaSlices::iterator;
- /// \brief The beginning and ending offsets of the alloca for this
+ /// The beginning and ending offsets of the alloca for this
/// partition.
uint64_t BeginOffset, EndOffset;
- /// \brief The start and end iterators of this partition.
+ /// The start and end iterators of this partition.
iterator SI, SJ;
- /// \brief A collection of split slice tails overlapping the partition.
+ /// A collection of split slice tails overlapping the partition.
SmallVector<Slice *, 4> SplitTails;
- /// \brief Raw constructor builds an empty partition starting and ending at
+ /// Raw constructor builds an empty partition starting and ending at
/// the given iterator.
Partition(iterator SI) : SI(SI), SJ(SI) {}
public:
- /// \brief The start offset of this partition.
+ /// The start offset of this partition.
///
/// All of the contained slices start at or after this offset.
uint64_t beginOffset() const { return BeginOffset; }
- /// \brief The end offset of this partition.
+ /// The end offset of this partition.
///
/// All of the contained slices end at or before this offset.
uint64_t endOffset() const { return EndOffset; }
- /// \brief The size of the partition.
+ /// The size of the partition.
///
/// Note that this can never be zero.
uint64_t size() const {
@@ -400,7 +400,7 @@ public:
return EndOffset - BeginOffset;
}
- /// \brief Test whether this partition contains no slices, and merely spans
+ /// Test whether this partition contains no slices, and merely spans
/// a region occupied by split slices.
bool empty() const { return SI == SJ; }
@@ -417,7 +417,7 @@ public:
iterator end() const { return SJ; }
/// @}
- /// \brief Get the sequence of split slice tails.
+ /// Get the sequence of split slice tails.
///
/// These tails are of slices which start before this partition but are
/// split and overlap into the partition. We accumulate these while forming
@@ -425,7 +425,7 @@ public:
ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};
-/// \brief An iterator over partitions of the alloca's slices.
+/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
@@ -439,18 +439,18 @@ class AllocaSlices::partition_iterator
Partition> {
friend class AllocaSlices;
- /// \brief Most of the state for walking the partitions is held in a class
+ /// Most of the state for walking the partitions is held in a class
/// with a nice interface for examining them.
Partition P;
- /// \brief We need to keep the end of the slices to know when to stop.
+ /// We need to keep the end of the slices to know when to stop.
AllocaSlices::iterator SE;
- /// \brief We also need to keep track of the maximum split end offset seen.
+ /// We also need to keep track of the maximum split end offset seen.
/// FIXME: Do we really?
uint64_t MaxSplitSliceEndOffset = 0;
- /// \brief Sets the partition to be empty at given iterator, and sets the
+ /// Sets the partition to be empty at given iterator, and sets the
/// end iterator.
partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
: P(SI), SE(SE) {
@@ -460,7 +460,7 @@ class AllocaSlices::partition_iterator
advance();
}
- /// \brief Advance the iterator to the next partition.
+ /// Advance the iterator to the next partition.
///
/// Requires that the iterator not be at the end of the slices.
void advance() {
@@ -615,7 +615,7 @@ public:
Partition &operator*() { return P; }
};
-/// \brief A forward range over the partitions of the alloca's slices.
+/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
@@ -639,7 +639,7 @@ static Value *foldSelectInst(SelectInst &SI) {
return nullptr;
}
-/// \brief A helper that folds a PHI node or a select.
+/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
if (PHINode *PN = dyn_cast<PHINode>(&I)) {
// If PN merges together the same value, return that value.
@@ -648,7 +648,7 @@ static Value *foldPHINodeOrSelectInst(Instruction &I) {
return foldSelectInst(cast<SelectInst>(I));
}
-/// \brief Builder for the alloca slices.
+/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
@@ -664,7 +664,7 @@ class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
- /// \brief Set to de-duplicate dead instructions found in the use walk.
+ /// Set to de-duplicate dead instructions found in the use walk.
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
@@ -1023,7 +1023,7 @@ private:
void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
- /// \brief Disable SROA entirely if there are unhandled users of the alloca.
+ /// Disable SROA entirely if there are unhandled users of the alloca.
void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};
@@ -1352,7 +1352,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
SI.eraseFromParent();
}
-/// \brief Build a GEP out of a base pointer and indices.
+/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
@@ -1370,7 +1370,7 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
NamePrefix + "sroa_idx");
}
-/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
+/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
@@ -1419,7 +1419,7 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
-/// \brief Recursively compute indices for a natural GEP.
+/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
@@ -1487,7 +1487,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Indices, NamePrefix);
}
-/// \brief Get a natural GEP from a base pointer to a particular offset and
+/// Get a natural GEP from a base pointer to a particular offset,
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
@@ -1522,7 +1522,7 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Indices, NamePrefix);
}
-/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
+/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the offset
@@ -1631,7 +1631,7 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
return Ptr;
}
-/// \brief Compute the adjusted alignment for a load or store from an offset.
+/// Compute the adjusted alignment for a load or store from an offset.
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
const DataLayout &DL) {
unsigned Alignment;
@@ -1652,7 +1652,7 @@ static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
return MinAlign(Alignment, Offset);
}
-/// \brief Test whether we can convert a value from the old to the new type.
+/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
@@ -1703,7 +1703,7 @@ static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
return true;
}
-/// \brief Generic routine to convert an SSA value to a value of a different
+/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
@@ -1755,7 +1755,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
return IRB.CreateBitCast(V, NewTy);
}
-/// \brief Test whether the given slice use can be promoted to a vector.
+/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
@@ -1826,7 +1826,7 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
return true;
}
-/// \brief Test whether the given alloca partitioning and range of slices can be
+/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
@@ -1939,7 +1939,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
return nullptr;
}
-/// \brief Test whether a slice of an alloca is valid for integer widening.
+/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
@@ -2017,7 +2017,7 @@ static bool isIntegerWideningViableForSlice(const Slice &S,
return true;
}
-/// \brief Test whether the given alloca partition's integer operations can be
+/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
@@ -2192,7 +2192,7 @@ static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
return V;
}
-/// \brief Visitor to rewrite instructions using p particular slice of an alloca
+/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
@@ -2365,7 +2365,7 @@ private:
);
}
- /// \brief Compute suitable alignment to access this slice of the *new*
+ /// Compute suitable alignment to access this slice of the *new*
/// alloca.
///
/// You can optionally pass a type to this routine and if that type's ABI
@@ -2652,7 +2652,7 @@ private:
return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
}
- /// \brief Compute an integer value from splatting an i8 across the given
+ /// Compute an integer value from splatting an i8 across the given
/// number of bytes.
///
/// Note that this routine assumes an i8 is a byte. If that isn't true, don't
@@ -2679,7 +2679,7 @@ private:
return V;
}
- /// \brief Compute a vector splat for a given element value.
+ /// Compute a vector splat for a given element value.
Value *getVectorSplat(Value *V, unsigned NumElements) {
V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
DEBUG(dbgs() << " splat: " << *V << "\n");
@@ -3081,7 +3081,7 @@ private:
namespace {
-/// \brief Visitor to rewrite aggregate loads and stores as scalar.
+/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
@@ -3126,7 +3126,7 @@ private:
// Conservative default is to not rewrite anything.
bool visitInstruction(Instruction &I) { return false; }
- /// \brief Generic recursive split emission class.
+ /// Generic recursive split emission class.
template <typename Derived> class OpSplitter {
protected:
/// The builder used to form new instructions.
@@ -3150,7 +3150,7 @@ private:
: IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
public:
- /// \brief Generic recursive split emission routine.
+ /// Generic recursive split emission routine.
///
/// This method recursively splits an aggregate op (load or store) into
/// scalar or vector ops. It splits recursively until it hits a single value
@@ -3303,7 +3303,7 @@ private:
} // end anonymous namespace
-/// \brief Strip aggregate type wrapping.
+/// Strip aggregate type wrapping.
///
/// This removes no-op aggregate types wrapping an underlying type. It will
/// strip as many layers of types as it can without changing either the type
@@ -3333,7 +3333,7 @@ static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
return stripAggregateTypeWrapping(DL, InnerTy);
}
-/// \brief Try to find a partition of the aggregate type passed in for a given
+/// Try to find a partition of the aggregate type passed in for a given
/// offset and size.
///
/// This recurses through the aggregate type and tries to compute a subtype
@@ -3439,7 +3439,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
return SubTy;
}
-/// \brief Pre-split loads and stores to simplify rewriting.
+/// Pre-split loads and stores to simplify rewriting.
///
/// We want to break up the splittable load+store pairs as much as
/// possible. This is important to do as a preprocessing step, as once we
@@ -3938,7 +3938,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
return true;
}
-/// \brief Rewrite an alloca partition's users.
+/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
@@ -4087,7 +4087,7 @@ AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
return NewAI;
}
-/// \brief Walks the slices of an alloca and form partitions based on them,
+/// Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
if (AS.begin() == AS.end())
@@ -4248,7 +4248,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
return Changed;
}
-/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
+/// Clobber a use with undef, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {
Value *OldV = U;
// Replace the use with an undef value.
@@ -4263,7 +4263,7 @@ void SROA::clobberUse(Use &U) {
}
}
-/// \brief Analyze an alloca for SROA.
+/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
@@ -4332,7 +4332,7 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
return Changed;
}
-/// \brief Delete the dead instructions accumulated in this run.
+/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
@@ -4374,7 +4374,7 @@ bool SROA::deleteDeadInstructions(
return Changed;
}
-/// \brief Promote the allocas, using the best available technique.
+/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
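Taken together, the SROA comments above trace the pass's pipeline: build slices for every use of the alloca, form partitions from overlapping slices, rewrite each partition onto a new alloca, then promote. A hedged, source-level sketch (illustrative only, not part of this patch) of the kind of aggregate these routines fully promote:

// Assuming 4-byte ints: P never escapes, so SROA slices its alloca into
// the byte ranges [0,4) and [4,8), rewrites each partition as a scalar
// alloca, and promotion then removes all memory traffic.
struct Pair {
  int A;
  int B;
};

int sumPair(int X, int Y) {
  Pair P;           // aggregate alloca
  P.A = X;          // store to slice [0,4)
  P.B = Y;          // store to slice [4,8)
  return P.A + P.B; // loads of both slices; only the add survives SROA
}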
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index ca9f82c9aa4..e8a8328d24c 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -212,7 +212,7 @@ static cl::opt<bool>
namespace {
-/// \brief A helper class for separating a constant offset from a GEP index.
+/// A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer which can be trivially split. For
@@ -339,7 +339,7 @@ private:
const DominatorTree *DT;
};
-/// \brief A pass that tries to split every GEP in the function into a variadic
+/// A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
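The splitting described by the comments above is easiest to see from the caller's side. A minimal sketch (illustrative, not from the patch; the function name is hypothetical):

// Both accesses share the variadic part `i` of the GEP index and differ
// only by the constant offsets +1 and +2. Once the constants are separated
// out, later passes can compute `&A[i]` once and fold each constant offset
// into the addressing mode instead of emitting two full address chains.
float sumNeighbors(const float *A, int i) {
  return A[i + 1] + A[i + 2];
}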
diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index b54a92325c4..6f5c32cd1bc 100644
--- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -289,7 +289,7 @@ INITIALIZE_PASS_DEPENDENCY(RegionInfoPass)
INITIALIZE_PASS_END(StructurizeCFG, "structurizecfg", "Structurize the CFG",
false, false)
-/// \brief Initialize the types and constants used in the pass
+/// Initialize the types and constants used in the pass
bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
LLVMContext &Context = R->getEntry()->getContext();
@@ -301,7 +301,7 @@ bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
return false;
}
-/// \brief Build up the general order of nodes
+/// Build up the general order of nodes
void StructurizeCFG::orderNodes() {
ReversePostOrderTraversal<Region*> RPOT(ParentRegion);
SmallDenseMap<Loop*, unsigned, 8> LoopBlocks;
@@ -354,7 +354,7 @@ void StructurizeCFG::orderNodes() {
std::reverse(Order.begin(), Order.end());
}
-/// \brief Determine the end of the loops
+/// Determine the end of the loops
void StructurizeCFG::analyzeLoops(RegionNode *N) {
if (N->isSubRegion()) {
// Test for exit as back edge
@@ -373,7 +373,7 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) {
}
}
-/// \brief Invert the given condition
+/// Invert the given condition
Value *StructurizeCFG::invert(Value *Condition) {
// First: Check if it's a constant
if (Constant *C = dyn_cast<Constant>(Condition))
@@ -405,7 +405,7 @@ Value *StructurizeCFG::invert(Value *Condition) {
llvm_unreachable("Unhandled condition to invert");
}
-/// \brief Build the condition for one edge
+/// Build the condition for one edge
Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
bool Invert) {
Value *Cond = Invert ? BoolFalse : BoolTrue;
@@ -418,7 +418,7 @@ Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
return Cond;
}
-/// \brief Analyze the predecessors of each block and build up predicates
+/// Analyze the predecessors of each block and build up predicates
void StructurizeCFG::gatherPredicates(RegionNode *N) {
RegionInfo *RI = ParentRegion->getRegionInfo();
BasicBlock *BB = N->getEntry();
@@ -476,7 +476,7 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {
}
}
-/// \brief Collect various loop and predicate infos
+/// Collect various loop and predicate infos
void StructurizeCFG::collectInfos() {
// Reset predicate
Predicates.clear();
@@ -505,7 +505,7 @@ void StructurizeCFG::collectInfos() {
}
}
-/// \brief Insert the missing branch conditions
+/// Insert the missing branch conditions
void StructurizeCFG::insertConditions(bool Loops) {
BranchVector &Conds = Loops ? LoopConds : Conditions;
Value *Default = Loops ? BoolTrue : BoolFalse;
@@ -551,7 +551,7 @@ void StructurizeCFG::insertConditions(bool Loops) {
}
}
-/// \brief Remove all PHI values coming from "From" into "To" and remember
+/// Remove all PHI values coming from "From" into "To" and remember
/// them in DeletedPhis
void StructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) {
PhiMap &Map = DeletedPhis[To];
@@ -563,7 +563,7 @@ void StructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) {
}
}
-/// \brief Add a dummy PHI value as soon as we knew the new predecessor
+/// Add a dummy PHI value as soon as we know the new predecessor
void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) {
for (PHINode &Phi : To->phis()) {
Value *Undef = UndefValue::get(Phi.getType());
@@ -572,7 +572,7 @@ void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) {
AddedPhis[To].push_back(From);
}
-/// \brief Add the real PHI value as soon as everything is set up
+/// Add the real PHI value as soon as everything is set up
void StructurizeCFG::setPhiValues() {
SSAUpdater Updater;
for (const auto &AddedPhi : AddedPhis) {
@@ -612,7 +612,7 @@ void StructurizeCFG::setPhiValues() {
assert(DeletedPhis.empty());
}
-/// \brief Remove phi values from all successors and then remove the terminator.
+/// Remove phi values from all successors and then remove the terminator.
void StructurizeCFG::killTerminator(BasicBlock *BB) {
TerminatorInst *Term = BB->getTerminator();
if (!Term)
@@ -627,7 +627,7 @@ void StructurizeCFG::killTerminator(BasicBlock *BB) {
Term->eraseFromParent();
}
-/// \brief Let node exit(s) point to NewExit
+/// Let node exit(s) point to NewExit
void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
bool IncludeDominator) {
if (Node->isSubRegion()) {
@@ -673,7 +673,7 @@ void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
}
}
-/// \brief Create a new flow node and update dominator tree and region info
+/// Create a new flow node and update dominator tree and region info
BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) {
LLVMContext &Context = Func->getContext();
BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() :
@@ -685,7 +685,7 @@ BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) {
return Flow;
}
-/// \brief Create a new or reuse the previous node as flow node
+/// Create a new flow node, or reuse the previous node as the flow node
BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) {
BasicBlock *Entry = PrevNode->getEntry();
@@ -704,7 +704,7 @@ BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) {
return Flow;
}
-/// \brief Returns the region exit if possible, otherwise just a new flow node
+/// Returns the region exit if possible, otherwise just a new flow node
BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow,
bool ExitUseAllowed) {
if (!Order.empty() || !ExitUseAllowed)
@@ -716,13 +716,13 @@ BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow,
return Exit;
}
-/// \brief Set the previous node
+/// Set the previous node
void StructurizeCFG::setPrevNode(BasicBlock *BB) {
PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB)
: nullptr;
}
-/// \brief Does BB dominate all the predicates of Node?
+/// Does BB dominate all the predicates of Node?
bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) {
BBPredicates &Preds = Predicates[Node->getEntry()];
return llvm::all_of(Preds, [&](std::pair<BasicBlock *, Value *> Pred) {
@@ -730,7 +730,7 @@ bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) {
});
}
-/// \brief Can we predict that this node will always be called?
+/// Can we predict that this node will always be called?
bool StructurizeCFG::isPredictableTrue(RegionNode *Node) {
BBPredicates &Preds = Predicates[Node->getEntry()];
bool Dominated = false;
@@ -926,7 +926,7 @@ static bool hasOnlyUniformBranches(Region *R, unsigned UniformMDKindID,
return true;
}
-/// \brief Run the transformation for each region found
+/// Run the transformation for each region found
bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
if (R->isTopLevelRegion())
return false;
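The invert() routine documented above is central to how the pass builds predicates. A minimal approximation, assuming only the constant and fallback cases (the real routine also reuses existing compares and `not` instructions and handles placement):

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Fold constants without emitting IR; otherwise materialize the inversion
// as `xor %cond, true` through the builder.
static Value *invertCondition(IRBuilder<> &IRB, Value *Condition) {
  if (auto *C = dyn_cast<Constant>(Condition))
    return ConstantExpr::getNot(C); // constant-folded inversion
  return IRB.CreateNot(Condition, Condition->getName() + ".inv");
}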
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 2a1106b41de..37ea4375a4c 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -87,7 +87,7 @@ STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped, "Number of return duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");
-/// \brief Scan the specified function for alloca instructions.
+/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
// Because of PR962, we don't TRE dynamic allocas.
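The dynamic-alloca check matters because TRE reuses the caller's stack frame. A hedged, source-level sketch of the transformation itself (illustrative, not from this patch):

// The recursive call is in tail position because the multiplication is
// carried in the accumulator `Acc` (the NumAccumAdded statistic above
// counts cases where such an accumulator is introduced automatically).
static long factAccum(long N, long Acc) {
  if (N <= 1)
    return Acc;
  return factAccum(N - 1, Acc * N); // tail call: becomes a back edge
}

// Conceptually, TRE rewrites factAccum into this loop, reusing one frame:
static long factLoop(long N, long Acc) {
  while (N > 1) {
    Acc *= N; // accumulator update
    N -= 1;   // next "recursive" argument
  }
  return Acc;
}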