author     River Riddle <riverriddle@google.com>  2019-03-27 08:55:17 -0700
committer  jpienaar <jpienaar@google.com>         2019-03-29 17:44:56 -0700
commit     9c085406904780d25673ad213ac53a4c6e1558c0 (patch)
tree       0072f8fd28a49ee11bbf0a849982727967952182 /mlir
parent     04b925f1b8af88228991f23b0950a2b530170962 (diff)
Replace usages of Instruction with Operation in the /Analysis directory.
PiperOrigin-RevId: 240569775
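The change itself is mechanical: the 'using Instruction = Operation;' aliases are deleted from the /Analysis headers below, and every call site spells 'Operation' directly. A minimal sketch of a migrated call site, assuming only the walk API visible in the OpStats.cpp hunk below (the helper name and include paths are illustrative):

    #include "mlir/IR/Function.h"
    #include "mlir/IR/Operation.h"

    // Post-commit spelling: walker callbacks take Operation*, since the
    // Instruction alias no longer exists.
    static void visitAllOps(mlir::Function &fn) {
      fn.walk([](mlir::Operation *op) {
        // E.g. bucket statistics by operation name, as OpStats.cpp does.
        (void)op->getName().getStringRef();
      });
    }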
Diffstat (limited to 'mlir')
-rw-r--r--  mlir/include/mlir/Analysis/AffineAnalysis.h               21
-rw-r--r--  mlir/include/mlir/Analysis/AffineStructures.h              6
-rw-r--r--  mlir/include/mlir/Analysis/Dominance.h                    26
-rw-r--r--  mlir/include/mlir/Analysis/LoopAnalysis.h                  3
-rw-r--r--  mlir/include/mlir/Analysis/NestedMatcher.h                47
-rw-r--r--  mlir/include/mlir/Analysis/SliceAnalysis.h                65
-rw-r--r--  mlir/include/mlir/Analysis/Utils.h                        25
-rw-r--r--  mlir/include/mlir/Analysis/VectorAnalysis.h                7
-rw-r--r--  mlir/include/mlir/IR/Operation.h                           6
-rw-r--r--  mlir/lib/AffineOps/AffineOps.cpp                          22
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp                      67
-rw-r--r--  mlir/lib/Analysis/Dominance.cpp                           20
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp                        44
-rw-r--r--  mlir/lib/Analysis/MemRefBoundCheck.cpp                     2
-rw-r--r--  mlir/lib/Analysis/MemRefDependenceCheck.cpp               10
-rw-r--r--  mlir/lib/Analysis/NestedMatcher.cpp                       60
-rw-r--r--  mlir/lib/Analysis/OpStats.cpp                              3
-rw-r--r--  mlir/lib/Analysis/SliceAnalysis.cpp                       93
-rw-r--r--  mlir/lib/Analysis/Utils.cpp                              110
-rw-r--r--  mlir/lib/Analysis/VectorAnalysis.cpp                      53
-rw-r--r--  mlir/lib/Analysis/Verifier.cpp                            54
-rw-r--r--  mlir/lib/IR/Block.cpp                                      2
-rw-r--r--  mlir/lib/IR/Operation.cpp                                  6
-rw-r--r--  mlir/lib/Transforms/LoopFusion.cpp                         4
-rw-r--r--  mlir/lib/Transforms/LoopTiling.cpp                         3
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp                 4
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp  22
-rw-r--r--  mlir/lib/Transforms/Vectorize.cpp                         10
28 files changed, 390 insertions, 405 deletions
diff --git a/mlir/include/mlir/Analysis/AffineAnalysis.h b/mlir/include/mlir/Analysis/AffineAnalysis.h
index 2944a6bca97..4873475e58b 100644
--- a/mlir/include/mlir/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Analysis/AffineAnalysis.h
@@ -36,15 +36,14 @@ class AffineForOp;
class AffineValueMap;
class FlatAffineConstraints;
class Operation;
-using Instruction = Operation;
class Value;
/// Returns in `affineApplyOps`, the sequence of those AffineApplyOp
-/// Instructions that are reachable via a search starting from `operands` and
+/// Operations that are reachable via a search starting from `operands` and
/// ending at those operands that are not the result of an AffineApplyOp.
void getReachableAffineApplyOps(
llvm::ArrayRef<Value *> operands,
- llvm::SmallVectorImpl<Instruction *> &affineApplyOps);
+ llvm::SmallVectorImpl<Operation *> &affineApplyOps);
/// Builds a system of constraints with dimensional identifiers corresponding to
/// the loop IVs of the forOps appearing in that order. Bounds of the loop are
@@ -58,13 +57,13 @@ LogicalResult getIndexSet(llvm::MutableArrayRef<AffineForOp> forOps,
/// Encapsulates a memref load or store access information.
struct MemRefAccess {
Value *memref;
- Instruction *opInst;
+ Operation *opInst;
llvm::SmallVector<Value *, 4> indices;
- /// Constructs a MemRefAccess from a load or store operation instruction.
+ /// Constructs a MemRefAccess from a load or store operation.
// TODO(b/119949820): add accessors to standard op's load, store, DMA op's to
// return MemRefAccess, i.e., loadOp->getAccess(), dmaOp->getRead/WriteAccess.
- explicit MemRefAccess(Instruction *opInst);
+ explicit MemRefAccess(Operation *opInst);
// Returns the rank of the memref associated with this access.
unsigned getRank() const;
@@ -92,11 +91,11 @@ struct DependenceComponent {
/// Checks whether two accesses to the same memref access the same element.
/// Each access is specified using the MemRefAccess structure, which contains
-/// the operation instruction, indices and memref associated with the access.
-/// Returns 'success' if it can be determined conclusively that the accesses do
-/// not access the same memref element.
-/// If 'allowRAR' is true, will consider read-after-read dependences (typically
-/// used by applications trying to optimize input reuse).
+/// the operation, indices and memref associated with the access. Returns
+/// 'false' if it can be determined conclusively that the accesses do not
+/// access the same memref element. If 'allowRAR' is true, will consider
+/// read-after-read dependences (typically used by applications trying to
+/// optimize input reuse).
// TODO(andydavis) Wrap 'dependenceConstraints' and 'dependenceComponents' into
// a single struct.
// TODO(andydavis) Make 'dependenceConstraints' optional arg.
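As a usage illustration of the renamed interface, a sketch only: the parameter order of checkMemrefAccessDependence and passing nullptr for 'dependenceComponents' are assumptions beyond what this hunk shows.

    #include "mlir/Analysis/AffineAnalysis.h"
    #include "mlir/Analysis/AffineStructures.h"

    // Hypothetical helper: true when a dependence between two load/store
    // operations cannot be ruled out at the given loop depth.
    static bool mayDepend(mlir::Operation *srcOpInst,
                          mlir::Operation *dstOpInst, unsigned loopDepth) {
      mlir::MemRefAccess srcAccess(srcOpInst); // memref, opInst, indices
      mlir::MemRefAccess dstAccess(dstOpInst);
      mlir::FlatAffineConstraints dependenceConstraints;
      return mlir::checkMemrefAccessDependence(
          srcAccess, dstAccess, loopDepth, &dependenceConstraints,
          /*dependenceComponents=*/nullptr);
    }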
diff --git a/mlir/include/mlir/Analysis/AffineStructures.h b/mlir/include/mlir/Analysis/AffineStructures.h
index f9ea873d0f7..7dcab234143 100644
--- a/mlir/include/mlir/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Analysis/AffineStructures.h
@@ -377,12 +377,12 @@ public:
AffineExpr toAffineExpr(unsigned idx, MLIRContext *context);
/// Adds constraints (lower and upper bounds) for the specified 'affine.for'
- /// instruction's Value using IR information stored in its bound maps. The
+ /// operation's Value using IR information stored in its bound maps. The
/// right identifier is first looked up using forOp's Value. Asserts if the
- /// Value corresponding to the 'affine.for' instruction isn't found in the
+ /// Value corresponding to the 'affine.for' operation isn't found in the
/// constraint system. Returns failure for the yet unimplemented/unsupported
/// cases. Any new identifiers that are found in the bound operands of the
- /// 'affine.for' instruction are added as trailing identifiers (either
+ /// 'affine.for' operation are added as trailing identifiers (either
/// dimensional or symbolic depending on whether the operand is a valid ML
/// Function symbol).
// TODO(bondhugula): add support for non-unit strides.
diff --git a/mlir/include/mlir/Analysis/Dominance.h b/mlir/include/mlir/Analysis/Dominance.h
index 1c3ca02e41c..f22def7699d 100644
--- a/mlir/include/mlir/Analysis/Dominance.h
+++ b/mlir/include/mlir/Analysis/Dominance.h
@@ -65,20 +65,20 @@ class DominanceInfo : public detail::DominanceInfoBase</*IsPostDom=*/false> {
public:
using super::super;
- /// Return true if instruction A properly dominates instruction B.
- bool properlyDominates(Instruction *a, Instruction *b);
+ /// Return true if operation A properly dominates operation B.
+ bool properlyDominates(Operation *a, Operation *b);
- /// Return true if instruction A dominates instruction B.
- bool dominates(Instruction *a, Instruction *b) {
+ /// Return true if operation A dominates operation B.
+ bool dominates(Operation *a, Operation *b) {
return a == b || properlyDominates(a, b);
}
- /// Return true if value A properly dominates instruction B.
- bool properlyDominates(Value *a, Instruction *b);
+ /// Return true if value A properly dominates operation B.
+ bool properlyDominates(Value *a, Operation *b);
- /// Return true if instruction A dominates instruction B.
- bool dominates(Value *a, Instruction *b) {
- return (Instruction *)a->getDefiningOp() == b || properlyDominates(a, b);
+ /// Return true if operation A dominates operation B.
+ bool dominates(Value *a, Operation *b) {
+ return (Operation *)a->getDefiningOp() == b || properlyDominates(a, b);
}
/// Return true if the specified block A dominates block B.
@@ -97,11 +97,11 @@ class PostDominanceInfo : public detail::DominanceInfoBase</*IsPostDom=*/true> {
public:
using super::super;
- /// Return true if instruction A properly postdominates instruction B.
- bool properlyPostDominates(Instruction *a, Instruction *b);
+ /// Return true if operation A properly postdominates operation B.
+ bool properlyPostDominates(Operation *a, Operation *b);
- /// Return true if instruction A postdominates instruction B.
- bool postDominates(Instruction *a, Instruction *b) {
+ /// Return true if operation A postdominates operation B.
+ bool postDominates(Operation *a, Operation *b) {
return a == b || properlyPostDominates(a, b);
}
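A minimal sketch of the renamed dominance queries, assuming only the signatures shown in this hunk (the helper name is illustrative):

    #include "mlir/Analysis/Dominance.h"

    // A Value dominates an operation via its defining op, or, for block
    // arguments, via block dominance (see dominates(Value*, Operation*)).
    static bool defIsAvailableAt(mlir::DominanceInfo &domInfo,
                                 mlir::Value *def, mlir::Operation *user) {
      return domInfo.dominates(def, user);
    }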
diff --git a/mlir/include/mlir/Analysis/LoopAnalysis.h b/mlir/include/mlir/Analysis/LoopAnalysis.h
index cc7af1184b3..b364f084295 100644
--- a/mlir/include/mlir/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Analysis/LoopAnalysis.h
@@ -32,7 +32,6 @@ class AffineExpr;
class AffineForOp;
class AffineMap;
class Operation;
-using Instruction = Operation;
class MemRefType;
class Value;
@@ -102,7 +101,7 @@ bool isVectorizableLoopAlongFastestVaryingMemRefDim(AffineForOp loop,
unsigned fastestVaryingDim);
/// Checks whether SSA dominance would be violated if a for inst's body
-/// instructions are shifted by the specified shifts. This method checks if a
+/// operations are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
diff --git a/mlir/include/mlir/Analysis/NestedMatcher.h b/mlir/include/mlir/Analysis/NestedMatcher.h
index 393abdb33a4..8ee5ba826b2 100644
--- a/mlir/include/mlir/Analysis/NestedMatcher.h
+++ b/mlir/include/mlir/Analysis/NestedMatcher.h
@@ -25,7 +25,6 @@ namespace mlir {
struct NestedPattern;
class Operation;
-using Instruction = Operation;
/// A NestedPattern captures nested patterns in the IR.
/// It is used in conjunction with a scoped NestedPatternContext which is an
@@ -47,20 +46,20 @@ using Instruction = Operation;
///
///
/// Nested abstraction for matching results.
-/// Provides access to the nested Instruction* captured by a Matcher.
+/// Provides access to the nested Operation* captured by a Matcher.
///
-/// A NestedMatch contains an Instruction* and the children NestedMatch and is
+/// A NestedMatch contains an Operation* and the children NestedMatch and is
/// thus cheap to copy. NestedMatch is stored in a scoped bumper allocator whose
/// lifetime is managed by an RAII NestedPatternContext.
struct NestedMatch {
- static NestedMatch build(Instruction *instruction,
+ static NestedMatch build(Operation *operation,
ArrayRef<NestedMatch> nestedMatches);
NestedMatch(const NestedMatch &) = default;
NestedMatch &operator=(const NestedMatch &) = default;
- explicit operator bool() { return matchedInstruction != nullptr; }
+ explicit operator bool() { return matchedOperation != nullptr; }
- Instruction *getMatchedInstruction() { return matchedInstruction; }
+ Operation *getMatchedOperation() { return matchedOperation; }
ArrayRef<NestedMatch> getMatchedChildren() { return matchedChildren; }
private:
@@ -73,11 +72,11 @@ private:
NestedMatch() = default;
/// Payload, holds a NestedMatch and all its children along this branch.
- Instruction *matchedInstruction;
+ Operation *matchedOperation;
ArrayRef<NestedMatch> matchedChildren;
};
-/// A NestedPattern is a nested instruction walker that:
+/// A NestedPattern is a nested operation walker that:
/// 1. recursively matches a substructure in the tree;
/// 2. uses a filter function to refine matches with extra semantic
/// constraints (passed via a lambda of type FilterFunctionType);
@@ -93,10 +92,10 @@ private:
///
/// The NestedMatches captured in the IR can grow large, especially after
/// aggressive unrolling. As experience has shown, it is generally better to use
-/// a plain walk over instructions to match flat patterns but the current
+/// a plain walk over operations to match flat patterns but the current
/// implementation is competitive nonetheless.
-using FilterFunctionType = std::function<bool(Instruction &)>;
-static bool defaultFilterFunction(Instruction &) { return true; };
+using FilterFunctionType = std::function<bool(Operation &)>;
+static bool defaultFilterFunction(Operation &) { return true; };
struct NestedPattern {
NestedPattern(ArrayRef<NestedPattern> nested,
FilterFunctionType filter = defaultFilterFunction);
@@ -105,12 +104,12 @@ struct NestedPattern {
/// Returns all the top-level matches in `func`.
void match(Function *func, SmallVectorImpl<NestedMatch> *matches) {
- func->walkPostOrder([&](Instruction *inst) { matchOne(inst, matches); });
+ func->walkPostOrder([&](Operation *op) { matchOne(op, matches); });
}
- /// Returns all the top-level matches in `inst`.
- void match(Instruction *inst, SmallVectorImpl<NestedMatch> *matches) {
- inst->walkPostOrder([&](Instruction *child) { matchOne(child, matches); });
+ /// Returns all the top-level matches in `op`.
+ void match(Operation *op, SmallVectorImpl<NestedMatch> *matches) {
+ op->walkPostOrder([&](Operation *child) { matchOne(child, matches); });
}
/// Returns the depth of the pattern.
@@ -124,9 +123,9 @@ private:
/// Underlying global bump allocator managed by a NestedPatternContext.
static llvm::BumpPtrAllocator *&allocator();
- /// Matches this pattern against a single `inst` and fills matches with the
+ /// Matches this pattern against a single `op` and fills matches with the
/// result.
- void matchOne(Instruction *inst, SmallVectorImpl<NestedMatch> *matches);
+ void matchOne(Operation *op, SmallVectorImpl<NestedMatch> *matches);
/// Nested patterns to be matched.
ArrayRef<NestedPattern> nestedPatterns;
@@ -135,19 +134,19 @@ private:
FilterFunctionType filter;
/// skip is an implementation detail needed so that we can implement match
- /// without switching on the type of the Instruction. The idea is that a
+ /// without switching on the type of the Operation. The idea is that a
/// NestedPattern first checks if it matches locally and then recursively
/// applies its nested matchers to its elem->nested. Since we want to rely on
- /// the existing instruction walking functionality rather than duplicate
+ /// the existing operation walking functionality rather than duplicate
/// it, we allow an off-by-one traversal to account for the fact that we
/// write:
///
- /// void match(Instruction *elem) {
+ /// void match(Operation *elem) {
/// for (auto &c : getNestedPatterns()) {
/// NestedPattern childPattern(...);
/// ^~~~ Needs off-by-one skip.
///
- Instruction *skip;
+ Operation *skip;
};
/// RAII structure to transparently manage the bump allocator for
@@ -183,9 +182,9 @@ NestedPattern For(ArrayRef<NestedPattern> nested = {});
NestedPattern For(FilterFunctionType filter,
ArrayRef<NestedPattern> nested = {});
-bool isParallelLoop(Instruction &inst);
-bool isReductionLoop(Instruction &inst);
-bool isLoadOrStore(Instruction &inst);
+bool isParallelLoop(Operation &op);
+bool isReductionLoop(Operation &op);
+bool isLoadOrStore(Operation &op);
} // end namespace matcher
} // end namespace mlir
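The combinators above compose; a sketch of a nested query, assuming matcher::Op accepts a FilterFunctionType as it does in the LoopAnalysis.cpp hunk further down (the helper name is illustrative):

    #include "mlir/Analysis/NestedMatcher.h"

    // Hypothetical query: collect every affine loop whose body contains a
    // load or store, using the post-commit Operation-based filters.
    static void matchLoadStoreLoops(
        mlir::Function *f, llvm::SmallVectorImpl<mlir::NestedMatch> *matches) {
      // Matches are bump-allocated; the RAII context owns their lifetime.
      mlir::NestedPatternContext context;
      auto pattern =
          mlir::matcher::For(mlir::matcher::Op(mlir::matcher::isLoadOrStore));
      pattern.match(f, matches);
    }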
diff --git a/mlir/include/mlir/Analysis/SliceAnalysis.h b/mlir/include/mlir/Analysis/SliceAnalysis.h
index a3fb841092a..c76f0b2a03c 100644
--- a/mlir/include/mlir/Analysis/SliceAnalysis.h
+++ b/mlir/include/mlir/Analysis/SliceAnalysis.h
@@ -28,24 +28,23 @@
namespace mlir {
class Operation;
-using Instruction = Operation;
/// Type of the condition to limit the propagation of transitive use-defs.
/// This can be used in particular to limit the propagation to a given Scope or
-/// to avoid passing through certain types of instruction in a configurable
+/// to avoid passing through certain types of operation in a configurable
/// manner.
-using TransitiveFilter = std::function<bool(Instruction *)>;
+using TransitiveFilter = std::function<bool(Operation *)>;
/// Fills `forwardSlice` with the computed forward slice (i.e. all
-/// the transitive uses of inst), **without** including that instruction.
+/// the transitive uses of op), **without** including that operation.
///
/// This additionally takes a TransitiveFilter which acts as a frontier:
-/// when looking at uses transitively, a instruction that does not pass the
+/// when looking at uses transitively, an operation that does not pass the
/// filter is never propagated through. This allows in particular to carve out
/// the scope within a ForInst or the scope within an IfInst.
///
/// The implementation traverses the use chains in postorder traversal for
-/// efficiency reasons: if a instruction is already in `forwardSlice`, no
+/// efficiency reasons: if an operation is already in `forwardSlice`, no
/// need to traverse its uses again. Since use-def chains form a DAG, this
/// terminates.
///
@@ -78,20 +77,20 @@ using TransitiveFilter = std::function<bool(Instruction *)>;
/// {4, 3, 6, 2, 1, 5, 8, 7, 9}
///
void getForwardSlice(
- Instruction *inst, llvm::SetVector<Instruction *> *forwardSlice,
+ Operation *op, llvm::SetVector<Operation *> *forwardSlice,
TransitiveFilter filter = /* pass-through*/
- [](Instruction *) { return true; });
+ [](Operation *) { return true; });
/// Fills `backwardSlice` with the computed backward slice (i.e.
-/// all the transitive defs of inst), **without** including that instruction.
+/// all the transitive defs of op), **without** including that operation.
///
/// This additionally takes a TransitiveFilter which acts as a frontier:
-/// when looking at defs transitively, a instruction that does not pass the
+/// when looking at defs transitively, an operation that does not pass the
/// filter is never propagated through. This allows in particular to carve out
/// the scope within a ForInst or the scope within an IfInst.
///
/// The implementation traverses the def chains in postorder traversal for
-/// efficiency reasons: if a instruction is already in `backwardSlice`, no
+/// efficiency reasons: if an operation is already in `backwardSlice`, no
/// need to traverse its definitions again. Since use-def chains form a DAG,
/// this terminates.
///
@@ -117,18 +116,18 @@ void getForwardSlice(
/// {1, 2, 5, 7, 3, 4, 6, 8}
///
void getBackwardSlice(
- Instruction *inst, llvm::SetVector<Instruction *> *backwardSlice,
+ Operation *op, llvm::SetVector<Operation *> *backwardSlice,
TransitiveFilter filter = /* pass-through*/
- [](Instruction *) { return true; });
+ [](Operation *) { return true; });
/// Iteratively computes backward slices and forward slices until
-/// a fixed point is reached. Returns an `llvm::SetVector<Instruction *>` which
-/// **includes** the original instruction.
+/// a fixed point is reached. Returns an `llvm::SetVector<Operation *>` which
+/// **includes** the original operation.
///
/// This allows building a slice (i.e. multi-root DAG where everything
/// that is reachable from a Value in forward and backward direction is
/// contained in the slice).
-/// This is the abstraction we need to materialize all the instructions for
+/// This is the abstraction we need to materialize all the operations for
/// supervectorization without worrying about orderings and Value
/// replacements.
///
@@ -157,20 +156,20 @@ void getBackwardSlice(
///
/// Additional implementation considerations
/// ========================================
-/// Consider the defs-inst-uses hourglass.
+/// Consider the defs-op-uses hourglass.
/// ____
/// \ / defs (in some topological order)
/// \/
-/// inst
+/// op
/// /\
/// / \ uses (in some topological order)
/// /____\
///
/// We want to iteratively apply `getSlice` to construct the whole
-/// list of Instruction that are reachable by (use|def)+ from inst.
+/// list of Operation that are reachable by (use|def)+ from op.
/// We want the resulting slice in topological order.
/// Ideally we would like the ordering to be maintained in-place to avoid
-/// copying Instruction at each step. Keeping this ordering by construction
+/// copying Operations at each step. Keeping this ordering by construction
/// seems very unclear, so we list invariants in the hope of seeing whether
/// useful properties pop up.
///
@@ -182,34 +181,34 @@ void getBackwardSlice(
/// ===========
/// We wish to maintain the following property by a recursive argument:
/// """
-/// defs << {inst} <<uses are in topological order.
+/// defs << {op} << uses are in topological order.
/// """
/// The property clearly holds for 0 and 1-sized uses and defs;
///
/// Invariants:
/// 2. defs and uses are in topological order internally, by construction;
-/// 3. for any {x} |= defs, defs(x) |= defs; because all go through inst
-/// 4. for any {x} |= uses, defs |= defs(x); because all go through inst
-/// 5. for any {x} |= defs, uses |= uses(x); because all go through inst
-/// 6. for any {x} |= uses, uses(x) |= uses; because all go through inst
+/// 3. for any {x} |= defs, defs(x) |= defs; because all go through op
+/// 4. for any {x} |= uses, defs |= defs(x); because all go through op
+/// 5. for any {x} |= defs, uses |= uses(x); because all go through op
+/// 6. for any {x} |= uses, uses(x) |= uses; because all go through op
///
/// Intuitively, we should be able to recurse like:
-/// preorder(defs) - inst - postorder(uses)
+/// preorder(defs) - op - postorder(uses)
/// and keep things ordered but this is still hand-wavy and not worth the
/// trouble for now: punt to a simple worklist-based solution.
///
-llvm::SetVector<Instruction *> getSlice(
- Instruction *inst,
+llvm::SetVector<Operation *> getSlice(
+ Operation *op,
TransitiveFilter backwardFilter = /* pass-through*/
- [](Instruction *) { return true; },
+ [](Operation *) { return true; },
TransitiveFilter forwardFilter = /* pass-through*/
- [](Instruction *) { return true; });
+ [](Operation *) { return true; });
/// Multi-root DAG topological sort.
-/// Performs a topological sort of the Instruction in the `toSort` SetVector.
+/// Performs a topological sort of the Operations in the `toSort` SetVector.
/// Returns a topologically sorted SetVector.
-llvm::SetVector<Instruction *>
-topologicalSort(const llvm::SetVector<Instruction *> &toSort);
+llvm::SetVector<Operation *>
+topologicalSort(const llvm::SetVector<Operation *> &toSort);
} // end namespace mlir
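A sketch of the slice utilities with a custom frontier, assuming only the declarations above (the same-block filter is illustrative):

    #include "mlir/Analysis/SliceAnalysis.h"

    // Restrict both traversals to operations in the block of 'op'; getSlice
    // returns a set that **includes** 'op' itself.
    static llvm::SetVector<mlir::Operation *>
    sliceWithinBlock(mlir::Operation *op) {
      auto *block = op->getBlock();
      auto sameBlock = [block](mlir::Operation *other) {
        return other->getBlock() == block;
      };
      auto slice = mlir::getSlice(op, /*backwardFilter=*/sameBlock,
                                  /*forwardFilter=*/sameBlock);
      // Put the multi-root DAG into topological order.
      return mlir::topologicalSort(slice);
    }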
diff --git a/mlir/include/mlir/Analysis/Utils.h b/mlir/include/mlir/Analysis/Utils.h
index e6af0ce3ff2..8ce4de10eb7 100644
--- a/mlir/include/mlir/Analysis/Utils.h
+++ b/mlir/include/mlir/Analysis/Utils.h
@@ -41,17 +41,16 @@ class FlatAffineConstraints;
class Location;
class MemRefAccess;
class Operation;
-using Instruction = Operation;
class Value;
-/// Populates 'loops' with IVs of the loops surrounding 'inst' ordered from
-/// the outermost 'affine.for' instruction to the innermost one.
-// TODO(bondhugula): handle 'affine.if' inst's.
-void getLoopIVs(Instruction &inst, SmallVectorImpl<AffineForOp> *loops);
+/// Populates 'loops' with IVs of the loops surrounding 'op' ordered from
+/// the outermost 'affine.for' operation to the innermost one.
+// TODO(bondhugula): handle 'affine.if' ops.
+void getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops);
-/// Returns the nesting depth of this instruction, i.e., the number of loops
-/// surrounding this instruction.
-unsigned getNestingDepth(Instruction &inst);
+/// Returns the nesting depth of this operation, i.e., the number of loops
+/// surrounding this operation.
+unsigned getNestingDepth(Operation &op);
/// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
/// at 'forOp'.
@@ -96,15 +95,15 @@ LogicalResult getBackwardComputationSliceState(
/// Creates a clone of the computation contained in the loop nest surrounding
/// 'srcOpInst', slices the iteration space of src loop based on slice bounds
/// in 'sliceState', and inserts the computation slice at the beginning of the
-/// instruction block of the loop at 'dstLoopDepth' in the loop nest surrounding
+/// operation block of the loop at 'dstLoopDepth' in the loop nest surrounding
/// 'dstOpInst'. Returns the top-level loop of the computation slice on
/// success, returns nullptr otherwise.
// Loop depth is a crucial optimization choice that determines where to
// materialize the results of the backward slice - presenting a trade-off b/w
// storage and redundant computation in several cases.
// TODO(andydavis) Support computation slices with common surrounding loops.
-AffineForOp insertBackwardComputationSlice(Instruction *srcOpInst,
- Instruction *dstOpInst,
+AffineForOp insertBackwardComputationSlice(Operation *srcOpInst,
+ Operation *dstOpInst,
unsigned dstLoopDepth,
ComputationSliceState *sliceState);
@@ -155,7 +154,7 @@ struct MemRefRegion {
/// {memref = %A, write = false, {%i <= m0 <= %i + 7} }
/// The last field is a 2-d FlatAffineConstraints symbolic in %i.
///
- LogicalResult compute(Instruction *inst, unsigned loopDepth,
+ LogicalResult compute(Operation *op, unsigned loopDepth,
ComputationSliceState *sliceState = nullptr);
FlatAffineConstraints *getConstraints() { return &cst; }
@@ -229,7 +228,7 @@ LogicalResult boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
bool emitError = true);
/// Returns the number of surrounding loops common to both A and B.
-unsigned getNumCommonSurroundingLoops(Instruction &A, Instruction &B);
+unsigned getNumCommonSurroundingLoops(Operation &A, Operation &B);
/// Gets the memory footprint of all data touched in the specified memory space
/// in bytes; if the memory space is unspecified, considers all memory spaces.
diff --git a/mlir/include/mlir/Analysis/VectorAnalysis.h b/mlir/include/mlir/Analysis/VectorAnalysis.h
index deb630b1708..c7726ed8a89 100644
--- a/mlir/include/mlir/Analysis/VectorAnalysis.h
+++ b/mlir/include/mlir/Analysis/VectorAnalysis.h
@@ -31,7 +31,6 @@ class FuncBuilder;
class Location;
class MemRefType;
class Operation;
-using Instruction = Operation;
class Value;
class VectorType;
@@ -123,8 +122,8 @@ shapeRatio(VectorType superVectorType, VectorType subVectorType);
/// `%arg0[%c0, %c0]` into vector<128xf32> which needs a 1-D vector broadcast.
///
AffineMap makePermutationMap(
- Instruction *opInst,
- const llvm::DenseMap<Instruction *, unsigned> &loopToVectorDim);
+ Operation *op,
+ const llvm::DenseMap<Operation *, unsigned> &loopToVectorDim);
namespace matcher {
@@ -136,7 +135,7 @@ namespace matcher {
/// TODO(ntv): this could all be much simpler if we added a bit that a vector
/// type to mark that a vector is a strict super-vector but it still does not
/// warrant adding even 1 extra bit in the IR for now.
-bool operatesOnSuperVectors(Instruction &inst, VectorType subVectorType);
+bool operatesOnSuperVectors(Operation &op, VectorType subVectorType);
} // end namespace matcher
} // end namespace mlir
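For makePermutationMap, a sketch of building the Operation-keyed map it now takes (the loop-to-dimension pairing and helper name are illustrative):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/DenseMap.h"
    #include "mlir/Analysis/VectorAnalysis.h"
    #include "mlir/IR/AffineMap.h"

    // Hypothetical setup: assign each enclosing loop operation the vector
    // dimension it is materialized along, then query the permutation map.
    static mlir::AffineMap
    permutationFor(mlir::Operation *opInst,
                   llvm::ArrayRef<mlir::Operation *> loops) {
      llvm::DenseMap<mlir::Operation *, unsigned> loopToVectorDim;
      for (unsigned i = 0, e = loops.size(); i < e; ++i)
        loopToVectorDim[loops[i]] = i;
      return mlir::makePermutationMap(opInst, loopToVectorDim);
    }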
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index ef7a6e56368..f2dd357a1ae 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -108,7 +108,7 @@ public:
/// Returns the closest surrounding operation that contains this operation
/// or nullptr if this is a top-level operation.
- Operation *getParentInst();
+ Operation *getParentOp();
/// Returns the function that this operation is part of.
/// The function is determined by traversing the chain of parent operations.
@@ -131,8 +131,8 @@ public:
/// function.
void moveBefore(Operation *existingInst);
- /// Unlink this operation operation from its current block and insert it
- /// right before `iterator` in the specified block.
+ /// Unlink this operation from its current block and insert it right before
+ /// `iterator` in the specified block.
void moveBefore(Block *block, llvm::iplist<Operation>::iterator iterator);
/// Given an operation 'other' that is within the same parent block, return
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index d23f2841e15..76889168d09 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -45,7 +45,7 @@ AffineOpsDialect::AffineOpsDialect(MLIRContext *context)
bool mlir::isTopLevelSymbol(Value *value) {
if (auto *arg = dyn_cast<BlockArgument>(value))
return arg->getOwner()->getParent()->getContainingFunction();
- return value->getDefiningOp()->getParentInst() == nullptr;
+ return value->getDefiningOp()->getParentOp() == nullptr;
}
// Value can be used as a dimension id if it is valid as a symbol, or
@@ -56,16 +56,16 @@ bool mlir::isValidDim(Value *value) {
if (!value->getType().isIndex())
return false;
- if (auto *inst = value->getDefiningOp()) {
+ if (auto *op = value->getDefiningOp()) {
// Top level instruction or constant operation is ok.
- if (inst->getParentInst() == nullptr || inst->isa<ConstantOp>())
+ if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
- if (auto op = inst->dyn_cast<AffineApplyOp>())
- return op.isValidDim();
+ if (auto applyOp = op->dyn_cast<AffineApplyOp>())
+ return applyOp.isValidDim();
// The dim op is okay if its operand memref/tensor is defined at the top
// level.
- if (auto dimOp = inst->dyn_cast<DimOp>())
+ if (auto dimOp = op->dyn_cast<DimOp>())
return isTopLevelSymbol(dimOp.getOperand());
return false;
}
@@ -81,16 +81,16 @@ bool mlir::isValidSymbol(Value *value) {
if (!value->getType().isIndex())
return false;
- if (auto *inst = value->getDefiningOp()) {
+ if (auto *op = value->getDefiningOp()) {
// Top level instruction or constant operation is ok.
- if (inst->getParentInst() == nullptr || inst->isa<ConstantOp>())
+ if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
- if (auto op = inst->dyn_cast<AffineApplyOp>())
- return op.isValidSymbol();
+ if (auto applyOp = op->dyn_cast<AffineApplyOp>())
+ return applyOp.isValidSymbol();
// The dim op is okay if its operand memref/tensor is defined at the top
// level.
- if (auto dimOp = inst->dyn_cast<DimOp>())
+ if (auto dimOp = op->dyn_cast<DimOp>())
return isTopLevelSymbol(dimOp.getOperand());
return false;
}
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index b3548f96b29..9fac3c8d11b 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -41,14 +41,13 @@ using namespace mlir;
using llvm::dbgs;
-/// Returns the sequence of AffineApplyOp Instructions operation in
+/// Returns the sequence of AffineApplyOp operations in
/// 'affineApplyOps', which are reachable via a search starting from 'operands',
/// and ending at operands which are not defined by AffineApplyOps.
// TODO(andydavis) Add a method to AffineApplyOp which forward substitutes
// the AffineApplyOp into any user AffineApplyOps.
void mlir::getReachableAffineApplyOps(
- ArrayRef<Value *> operands,
- SmallVectorImpl<Instruction *> &affineApplyOps) {
+ ArrayRef<Value *> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
struct State {
// The ssa value for this node in the DFS traversal.
Value *value;
@@ -64,28 +63,27 @@ void mlir::getReachableAffineApplyOps(
State &state = worklist.back();
auto *opInst = state.value->getDefiningOp();
// Note: getDefiningOp will return nullptr if the operand is not an
- // Instruction (i.e. AffineForOp), which is a terminator for the search.
+ // Operation (i.e. block argument), which is a terminator for the search.
if (opInst == nullptr || !opInst->isa<AffineApplyOp>()) {
worklist.pop_back();
continue;
}
- if (auto affineApplyOp = opInst->dyn_cast<AffineApplyOp>()) {
- if (state.operandIndex == 0) {
- // Pre-Visit: Add 'opInst' to reachable sequence.
- affineApplyOps.push_back(opInst);
- }
- if (state.operandIndex < opInst->getNumOperands()) {
- // Visit: Add next 'affineApplyOp' operand to worklist.
- // Get next operand to visit at 'operandIndex'.
- auto *nextOperand = opInst->getOperand(state.operandIndex);
- // Increment 'operandIndex' in 'state'.
- ++state.operandIndex;
- // Add 'nextOperand' to worklist.
- worklist.push_back({nextOperand, 0});
- } else {
- // Post-visit: done visiting operands AffineApplyOp, pop off stack.
- worklist.pop_back();
- }
+
+ if (state.operandIndex == 0) {
+ // Pre-Visit: Add 'opInst' to reachable sequence.
+ affineApplyOps.push_back(opInst);
+ }
+ if (state.operandIndex < opInst->getNumOperands()) {
+ // Visit: Add next 'affineApplyOp' operand to worklist.
+ // Get next operand to visit at 'operandIndex'.
+ auto *nextOperand = opInst->getOperand(state.operandIndex);
+ // Increment 'operandIndex' in 'state'.
+ ++state.operandIndex;
+ // Add 'nextOperand' to worklist.
+ worklist.push_back({nextOperand, 0});
+ } else {
+ // Post-visit: done visiting the AffineApplyOp's operands, pop off stack.
+ worklist.pop_back();
}
}
}
@@ -115,15 +113,15 @@ LogicalResult mlir::getIndexSet(MutableArrayRef<AffineForOp> forOps,
// Computes the iteration domain for 'opInst' and populates 'indexSet', which
// encapsulates the constraints involving loops surrounding 'opInst' and
// potentially involving any Function symbols. The dimensional identifiers in
-// 'indexSet' correspond to the loops surounding 'inst' from outermost to
+// 'indexSet' correspond to the loops surrounding 'op' from outermost to
// innermost.
-// TODO(andydavis) Add support to handle IfInsts surrounding 'inst'.
-static LogicalResult getInstIndexSet(Instruction *inst,
+// TODO(andydavis) Add support to handle IfInsts surrounding 'op'.
+static LogicalResult getInstIndexSet(Operation *op,
FlatAffineConstraints *indexSet) {
// TODO(andydavis) Extend this to gather enclosing IfInsts and consider
// factoring it out into a utility function.
SmallVector<AffineForOp, 4> loops;
- getLoopIVs(*inst, &loops);
+ getLoopIVs(*op, &loops);
return getIndexSet(loops, indexSet);
}
@@ -549,13 +547,12 @@ static Block *getCommonBlock(const MemRefAccess &srcAccess,
return forOp.getBody();
}
-// Returns true if the ancestor operation instruction of 'srcAccess' appears
-// before the ancestor operation instruction of 'dstAccess' in the common
-// ancestral block. Returns false otherwise.
+// Returns true if the ancestor operation of 'srcAccess' appears before the
+// ancestor operation of 'dstAccess' in the common ancestral block. Returns
+// false otherwise.
// Note that because 'srcAccess' or 'dstAccess' may be nested in conditionals,
-// the function is named 'srcAppearsBeforeDstInCommonBlock'.
-// Note that 'numCommonLoops' is the number of contiguous surrounding outer
-// loops.
+// the function is named 'srcAppearsBeforeDstInAncestralBlock'. Note that
+// 'numCommonLoops' is the number of contiguous surrounding outer loops.
static bool srcAppearsBeforeDstInAncestralBlock(
const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
const FlatAffineConstraints &srcDomain, unsigned numCommonLoops) {
@@ -791,19 +788,19 @@ bool mlir::checkMemrefAccessDependence(
AffineValueMap dstAccessMap;
dstAccess.getAccessMap(&dstAccessMap);
- // Get iteration domain for the 'srcAccess' instruction.
+ // Get iteration domain for the 'srcAccess' operation.
FlatAffineConstraints srcDomain;
if (failed(getInstIndexSet(srcAccess.opInst, &srcDomain)))
return false;
- // Get iteration domain for 'dstAccess' instruction.
+ // Get iteration domain for 'dstAccess' operation.
FlatAffineConstraints dstDomain;
if (failed(getInstIndexSet(dstAccess.opInst, &dstDomain)))
return false;
// Return 'false' if loopDepth > numCommonLoops and if the ancestor operation
- // instruction of 'srcAccess' does not properly dominate the ancestor
- // operation instruction of 'dstAccess' in the same common instruction block.
+ // operation of 'srcAccess' does not properly dominate the ancestor
+ // operation of 'dstAccess' in the same common operation block.
// Note: this check is skipped if 'allowRAR' is true, because RAR
// deps can exist irrespective of lexicographic ordering b/w src and dst.
unsigned numCommonLoops = getNumCommonLoops(srcDomain, dstDomain);
diff --git a/mlir/lib/Analysis/Dominance.cpp b/mlir/lib/Analysis/Dominance.cpp
index b8a9e1c0218..d914f36cdaf 100644
--- a/mlir/lib/Analysis/Dominance.cpp
+++ b/mlir/lib/Analysis/Dominance.cpp
@@ -46,8 +46,8 @@ void DominanceInfoBase<IsPostDom>::recalculate(Function *function) {
std::move(functionDominance));
/// Build the dominance for each of the operation regions.
- function->walk([&](Instruction *inst) {
- for (auto &region : inst->getRegions()) {
+ function->walk([&](Operation *op) {
+ for (auto &region : op->getRegions()) {
// Don't compute dominance if the region is empty.
if (region.empty())
continue;
@@ -66,11 +66,11 @@ bool DominanceInfoBase<IsPostDom>::properlyDominates(Block *a, Block *b) {
return false;
// If both blocks are not in the same region, 'a' properly dominates 'b' if
- // 'b' is defined in an instruction region that (recursively) ends up being
+ // 'b' is defined in an operation region that (recursively) ends up being
// dominated by 'a'. Walk up the list of containers enclosing B.
auto *regionA = a->getParent(), *regionB = b->getParent();
if (regionA != regionB) {
- Instruction *bAncestor;
+ Operation *bAncestor;
do {
bAncestor = regionB->getContainingOp();
// If 'bAncestor' is the top level function, then 'a' is a block
@@ -100,8 +100,8 @@ template class mlir::detail::DominanceInfoBase</*IsPostDom=*/false>;
// DominanceInfo
//===----------------------------------------------------------------------===//
-/// Return true if instruction A properly dominates instruction B.
-bool DominanceInfo::properlyDominates(Instruction *a, Instruction *b) {
+/// Return true if operation A properly dominates operation B.
+bool DominanceInfo::properlyDominates(Operation *a, Operation *b) {
auto *aBlock = a->getBlock(), *bBlock = b->getBlock();
// If the blocks are the same, then check if b is before a in the block.
@@ -120,12 +120,12 @@ bool DominanceInfo::properlyDominates(Instruction *a, Instruction *b) {
return properlyDominates(aBlock, bBlock);
}
-/// Return true if value A properly dominates instruction B.
-bool DominanceInfo::properlyDominates(Value *a, Instruction *b) {
+/// Return true if value A properly dominates operation B.
+bool DominanceInfo::properlyDominates(Value *a, Operation *b) {
if (auto *aInst = a->getDefiningOp())
return properlyDominates(aInst, b);
- // block arguments properly dominate all instructions in their own block, so
+ // block arguments properly dominate all operations in their own block, so
// we use a dominates check here, not a properlyDominates check.
return dominates(cast<BlockArgument>(a)->getOwner(), b->getBlock());
}
@@ -135,7 +135,7 @@ bool DominanceInfo::properlyDominates(Value *a, Instruction *b) {
//===----------------------------------------------------------------------===//
/// Returns true if statement 'a' properly postdominates statement 'b'.
-bool PostDominanceInfo::properlyPostDominates(Instruction *a, Instruction *b) {
+bool PostDominanceInfo::properlyPostDominates(Operation *a, Operation *b) {
auto *aBlock = a->getBlock(), *bBlock = b->getBlock();
// If the blocks are the same, check if b is before a in the block.
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index eb272389957..e720e194814 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -177,7 +177,7 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
bool mlir::isAccessInvariant(Value &iv, Value &index) {
assert(isForInductionVar(&iv) && "iv must be a AffineForOp");
assert(index.getType().isa<IndexType>() && "index must be of IndexType");
- SmallVector<Instruction *, 4> affineApplyOps;
+ SmallVector<Operation *, 4> affineApplyOps;
getReachableAffineApplyOps({&index}, affineApplyOps);
if (affineApplyOps.empty()) {
@@ -272,11 +272,11 @@ static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
return memRefType.getElementType().template isa<VectorType>();
}
-static bool isVectorTransferReadOrWrite(Instruction &inst) {
- return inst.isa<VectorTransferReadOp>() || inst.isa<VectorTransferWriteOp>();
+static bool isVectorTransferReadOrWrite(Operation &op) {
+ return op.isa<VectorTransferReadOp>() || op.isa<VectorTransferWriteOp>();
}
-using VectorizableInstFun = std::function<bool(AffineForOp, Instruction &)>;
+using VectorizableInstFun = std::function<bool(AffineForOp, Operation &)>;
static bool isVectorizableLoopWithCond(AffineForOp loop,
VectorizableInstFun isVectorizableInst) {
@@ -295,9 +295,9 @@ static bool isVectorizableLoopWithCond(AffineForOp loop,
}
// No vectorization across unknown regions.
- auto regions = matcher::Op([](Instruction &inst) -> bool {
- return inst.getNumRegions() != 0 &&
- !(inst.isa<AffineIfOp>() || inst.isa<AffineForOp>());
+ auto regions = matcher::Op([](Operation &op) -> bool {
+ return op.getNumRegions() != 0 &&
+ !(op.isa<AffineIfOp>() || op.isa<AffineForOp>());
});
SmallVector<NestedMatch, 8> regionsMatched;
regions.match(forInst, &regionsMatched);
@@ -316,7 +316,7 @@ static bool isVectorizableLoopWithCond(AffineForOp loop,
SmallVector<NestedMatch, 8> loadAndStoresMatched;
loadAndStores.match(forInst, &loadAndStoresMatched);
for (auto ls : loadAndStoresMatched) {
- auto *op = ls.getMatchedInstruction();
+ auto *op = ls.getMatchedOperation();
auto load = op->dyn_cast<LoadOp>();
auto store = op->dyn_cast<StoreOp>();
// Only scalar types are considered vectorizable, all load/store must be
@@ -336,7 +336,7 @@ static bool isVectorizableLoopWithCond(AffineForOp loop,
bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
AffineForOp loop, unsigned fastestVaryingDim) {
VectorizableInstFun fun(
- [fastestVaryingDim](AffineForOp loop, Instruction &op) {
+ [fastestVaryingDim](AffineForOp loop, Operation &op) {
auto load = op.dyn_cast<LoadOp>();
auto store = op.dyn_cast<StoreOp>();
return load ? isContiguousAccess(*loop.getInductionVar(), load,
@@ -350,12 +350,12 @@ bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
bool mlir::isVectorizableLoop(AffineForOp loop) {
VectorizableInstFun fun(
// TODO: implement me
- [](AffineForOp loop, Instruction &op) { return true; });
+ [](AffineForOp loop, Operation &op) { return true; });
return isVectorizableLoopWithCond(loop, fun);
}
-/// Checks whether SSA dominance would be violated if a for inst's body
-/// instructions are shifted by the specified shifts. This method checks if a
+/// Checks whether SSA dominance would be violated if a for op's body
+/// operations are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
@@ -364,24 +364,24 @@ bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
assert(shifts.size() == forBody->getOperations().size());
// Work backwards over the body of the block so that the shift of a use's
- // ancestor instruction in the block gets recorded before it's looked up.
- DenseMap<Instruction *, uint64_t> forBodyShift;
+ // ancestor operation in the block gets recorded before it's looked up.
+ DenseMap<Operation *, uint64_t> forBodyShift;
for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
- auto &inst = it.value();
+ auto &op = it.value();
- // Get the index of the current instruction, note that we are iterating in
+ // Get the index of the current operation, note that we are iterating in
// reverse so we need to fix it up.
size_t index = shifts.size() - it.index() - 1;
- // Remember the shift of this instruction.
+ // Remember the shift of this operation.
uint64_t shift = shifts[index];
- forBodyShift.try_emplace(&inst, shift);
+ forBodyShift.try_emplace(&op, shift);
- // Validate the results of this instruction if it were to be shifted.
- for (unsigned i = 0, e = inst.getNumResults(); i < e; ++i) {
- Value *result = inst.getResult(i);
+ // Validate the results of this operation if it were to be shifted.
+ for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
+ Value *result = op.getResult(i);
for (const InstOperand &use : result->getUses()) {
- // If an ancestor instruction doesn't lie in the block of forOp,
+ // If an ancestor operation doesn't lie in the block of forOp,
// there is no shift to check.
if (auto *ancInst = forBody->findAncestorInstInBlock(*use.getOwner())) {
assert(forBodyShift.count(ancInst) > 0 && "ancestor expected in map");
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index 8edf79d6db3..0fb88620fa1 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -47,7 +47,7 @@ FunctionPassBase *mlir::createMemRefBoundCheckPass() {
}
void MemRefBoundCheck::runOnFunction() {
- getFunction().walk([](Instruction *opInst) {
+ getFunction().walk([](Operation *opInst) {
if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
boundCheckLoadOrStoreOp(loadOp);
} else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
diff --git a/mlir/lib/Analysis/MemRefDependenceCheck.cpp b/mlir/lib/Analysis/MemRefDependenceCheck.cpp
index 8e438108bce..2872c4cf256 100644
--- a/mlir/lib/Analysis/MemRefDependenceCheck.cpp
+++ b/mlir/lib/Analysis/MemRefDependenceCheck.cpp
@@ -37,7 +37,7 @@ namespace {
// TODO(andydavis) Add common surrounding loop depth-wise dependence checks.
/// Checks dependences between all pairs of memref accesses in a Function.
struct MemRefDependenceCheck : public FunctionPass<MemRefDependenceCheck> {
- SmallVector<Instruction *, 4> loadsAndStores;
+ SmallVector<Operation *, 4> loadsAndStores;
void runOnFunction() override;
};
@@ -79,7 +79,7 @@ getDirectionVectorStr(bool ret, unsigned numCommonLoops, unsigned loopNestDepth,
// "source" access and all subsequent "destination" accesses in
// 'loadsAndStores'. Emits the result of the dependence check as a note with
// the source access.
-static void checkDependences(ArrayRef<Instruction *> loadsAndStores) {
+static void checkDependences(ArrayRef<Operation *> loadsAndStores) {
for (unsigned i = 0, e = loadsAndStores.size(); i < e; ++i) {
auto *srcOpInst = loadsAndStores[i];
MemRefAccess srcAccess(srcOpInst);
@@ -113,9 +113,9 @@ static void checkDependences(ArrayRef<Instruction *> loadsAndStores) {
void MemRefDependenceCheck::runOnFunction() {
// Collect the loads and stores within the function.
loadsAndStores.clear();
- getFunction().walk([&](Instruction *inst) {
- if (inst->isa<LoadOp>() || inst->isa<StoreOp>())
- loadsAndStores.push_back(inst);
+ getFunction().walk([&](Operation *op) {
+ if (op->isa<LoadOp>() || op->isa<StoreOp>())
+ loadsAndStores.push_back(op);
});
checkDependences(loadsAndStores);
diff --git a/mlir/lib/Analysis/NestedMatcher.cpp b/mlir/lib/Analysis/NestedMatcher.cpp
index 83b3591ce5c..43a725a3b7d 100644
--- a/mlir/lib/Analysis/NestedMatcher.cpp
+++ b/mlir/lib/Analysis/NestedMatcher.cpp
@@ -31,13 +31,13 @@ llvm::BumpPtrAllocator *&NestedMatch::allocator() {
return allocator;
}
-NestedMatch NestedMatch::build(Instruction *instruction,
+NestedMatch NestedMatch::build(Operation *operation,
ArrayRef<NestedMatch> nestedMatches) {
auto *result = allocator()->Allocate<NestedMatch>();
auto *children = allocator()->Allocate<NestedMatch>(nestedMatches.size());
std::uninitialized_copy(nestedMatches.begin(), nestedMatches.end(), children);
new (result) NestedMatch();
- result->matchedInstruction = instruction;
+ result->matchedOperation = operation;
result->matchedChildren =
ArrayRef<NestedMatch>(children, nestedMatches.size());
return *result;
@@ -69,29 +69,29 @@ unsigned NestedPattern::getDepth() const {
return depth + 1;
}
-/// Matches a single instruction in the following way:
-/// 1. checks the kind of instruction against the matcher, if different then
+/// Matches a single operation in the following way:
+/// 1. checks the kind of operation against the matcher, if different then
/// there is no match;
-/// 2. calls the customizable filter function to refine the single instruction
+/// 2. calls the customizable filter function to refine the single operation
/// match with extra semantic constraints;
/// 3. if all is good, recursively matches the nested patterns;
-/// 4. if all nested match then the single instruction matches too and is
+/// 4. if all nested match then the single operation matches too and is
/// appended to the list of matches;
/// 5. TODO(ntv) Optionally applies actions (lambda), in which case we will
/// want to traverse in post-order DFS to avoid invalidating iterators.
-void NestedPattern::matchOne(Instruction *inst,
+void NestedPattern::matchOne(Operation *op,
SmallVectorImpl<NestedMatch> *matches) {
- if (skip == inst) {
+ if (skip == op) {
return;
}
// Local custom filter function
- if (!filter(*inst)) {
+ if (!filter(*op)) {
return;
}
if (nestedPatterns.empty()) {
SmallVector<NestedMatch, 8> nestedMatches;
- matches->push_back(NestedMatch::build(inst, nestedMatches));
+ matches->push_back(NestedMatch::build(op, nestedMatches));
return;
}
// Take a copy of each nested pattern so we can match it.
@@ -99,20 +99,20 @@ void NestedPattern::matchOne(Instruction *inst,
SmallVector<NestedMatch, 8> nestedMatches;
// Skip elem in the walk immediately following. Without this we would
// essentially need to reimplement walkPostOrder here.
- nestedPattern.skip = inst;
- nestedPattern.match(inst, &nestedMatches);
+ nestedPattern.skip = op;
+ nestedPattern.match(op, &nestedMatches);
// If we could not match even one of the specified nestedPattern, early exit
// as this whole branch is not a match.
if (nestedMatches.empty()) {
return;
}
- matches->push_back(NestedMatch::build(inst, nestedMatches));
+ matches->push_back(NestedMatch::build(op, nestedMatches));
}
}
-static bool isAffineForOp(Instruction &inst) { return inst.isa<AffineForOp>(); }
+static bool isAffineForOp(Operation &op) { return op.isa<AffineForOp>(); }
-static bool isAffineIfOp(Instruction &inst) { return inst.isa<AffineIfOp>(); }
+static bool isAffineIfOp(Operation &op) { return op.isa<AffineIfOp>(); }
namespace mlir {
namespace matcher {
@@ -125,16 +125,16 @@ NestedPattern If(NestedPattern child) {
return NestedPattern(child, isAffineIfOp);
}
NestedPattern If(FilterFunctionType filter, NestedPattern child) {
- return NestedPattern(child, [filter](Instruction &inst) {
- return isAffineIfOp(inst) && filter(inst);
+ return NestedPattern(child, [filter](Operation &op) {
+ return isAffineIfOp(op) && filter(op);
});
}
NestedPattern If(ArrayRef<NestedPattern> nested) {
return NestedPattern(nested, isAffineIfOp);
}
NestedPattern If(FilterFunctionType filter, ArrayRef<NestedPattern> nested) {
- return NestedPattern(nested, [filter](Instruction &inst) {
- return isAffineIfOp(inst) && filter(inst);
+ return NestedPattern(nested, [filter](Operation &op) {
+ return isAffineIfOp(op) && filter(op);
});
}
@@ -142,33 +142,31 @@ NestedPattern For(NestedPattern child) {
return NestedPattern(child, isAffineForOp);
}
NestedPattern For(FilterFunctionType filter, NestedPattern child) {
- return NestedPattern(child, [=](Instruction &inst) {
- return isAffineForOp(inst) && filter(inst);
- });
+ return NestedPattern(
+ child, [=](Operation &op) { return isAffineForOp(op) && filter(op); });
}
NestedPattern For(ArrayRef<NestedPattern> nested) {
return NestedPattern(nested, isAffineForOp);
}
NestedPattern For(FilterFunctionType filter, ArrayRef<NestedPattern> nested) {
- return NestedPattern(nested, [=](Instruction &inst) {
- return isAffineForOp(inst) && filter(inst);
- });
+ return NestedPattern(
+ nested, [=](Operation &op) { return isAffineForOp(op) && filter(op); });
}
// TODO(ntv): parallel annotation on loops.
-bool isParallelLoop(Instruction &inst) {
- auto loop = inst.cast<AffineForOp>();
+bool isParallelLoop(Operation &op) {
+ auto loop = op.cast<AffineForOp>();
return loop || true; // loop->isParallel();
};
// TODO(ntv): reduction annotation on loops.
-bool isReductionLoop(Instruction &inst) {
- auto loop = inst.cast<AffineForOp>();
+bool isReductionLoop(Operation &op) {
+ auto loop = op.cast<AffineForOp>();
return loop || true; // loop->isReduction();
};
-bool isLoadOrStore(Instruction &inst) {
- return inst.isa<LoadOp>() || inst.isa<StoreOp>();
+bool isLoadOrStore(Operation &op) {
+ return op.isa<LoadOp>() || op.isa<StoreOp>();
};
} // end namespace matcher
diff --git a/mlir/lib/Analysis/OpStats.cpp b/mlir/lib/Analysis/OpStats.cpp
index 0986ac480fe..7be7e9d9f12 100644
--- a/mlir/lib/Analysis/OpStats.cpp
+++ b/mlir/lib/Analysis/OpStats.cpp
@@ -46,8 +46,7 @@ void PrintOpStatsPass::runOnModule() {
// Compute the operation statistics for each function in the module.
for (auto &fn : getModule())
- fn.walk(
- [&](Instruction *inst) { ++opCount[inst->getName().getStringRef()]; });
+ fn.walk([&](Operation *op) { ++opCount[op->getName().getStringRef()]; });
printSummary();
}
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 82320bd26ff..496c0b33e1e 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -38,21 +38,21 @@ using namespace mlir;
using llvm::DenseSet;
using llvm::SetVector;
-static void getForwardSliceImpl(Instruction *inst,
- SetVector<Instruction *> *forwardSlice,
+static void getForwardSliceImpl(Operation *op,
+ SetVector<Operation *> *forwardSlice,
TransitiveFilter filter) {
- if (!inst) {
+ if (!op) {
return;
}
// Evaluate whether we should keep this use.
// This is useful in particular to implement scoping; i.e. return the
// transitive forwardSlice in the current scope.
- if (!filter(inst)) {
+ if (!filter(op)) {
return;
}
- if (auto forOp = inst->dyn_cast<AffineForOp>()) {
+ if (auto forOp = op->dyn_cast<AffineForOp>()) {
for (auto &u : forOp.getInductionVar()->getUses()) {
auto *ownerInst = u.getOwner();
if (forwardSlice->count(ownerInst) == 0) {
@@ -60,9 +60,9 @@ static void getForwardSliceImpl(Instruction *inst,
}
}
} else {
- assert(inst->getNumResults() <= 1 && "NYI: multiple results");
- if (inst->getNumResults() > 0) {
- for (auto &u : inst->getResult(0)->getUses()) {
+ assert(op->getNumResults() <= 1 && "NYI: multiple results");
+ if (op->getNumResults() > 0) {
+ for (auto &u : op->getResult(0)->getUses()) {
auto *ownerInst = u.getOwner();
if (forwardSlice->count(ownerInst) == 0) {
getForwardSliceImpl(ownerInst, forwardSlice, filter);
@@ -71,67 +71,66 @@ static void getForwardSliceImpl(Instruction *inst,
}
}
- forwardSlice->insert(inst);
+ forwardSlice->insert(op);
}
-void mlir::getForwardSlice(Instruction *inst,
- SetVector<Instruction *> *forwardSlice,
+void mlir::getForwardSlice(Operation *op, SetVector<Operation *> *forwardSlice,
TransitiveFilter filter) {
- getForwardSliceImpl(inst, forwardSlice, filter);
- // Don't insert the top level instruction, we just queried on it and don't
+ getForwardSliceImpl(op, forwardSlice, filter);
+ // Don't insert the top level operation, we just queried on it and don't
// want it in the results.
- forwardSlice->remove(inst);
+ forwardSlice->remove(op);
// Reverse to get back the actual topological order.
// std::reverse does not work out of the box on SetVector and I want an
// in-place swap based thing (the real std::reverse, not the LLVM adapter).
- std::vector<Instruction *> v(forwardSlice->takeVector());
+ std::vector<Operation *> v(forwardSlice->takeVector());
forwardSlice->insert(v.rbegin(), v.rend());
}
-static void getBackwardSliceImpl(Instruction *inst,
- SetVector<Instruction *> *backwardSlice,
+static void getBackwardSliceImpl(Operation *op,
+ SetVector<Operation *> *backwardSlice,
TransitiveFilter filter) {
- if (!inst) {
+ if (!op) {
return;
}
// Evaluate whether we should keep this def.
// This is useful in particular to implement scoping; i.e. return the
// transitive forwardSlice in the current scope.
- if (!filter(inst)) {
+ if (!filter(op)) {
return;
}
- for (auto *operand : inst->getOperands()) {
- auto *inst = operand->getDefiningOp();
- if (backwardSlice->count(inst) == 0) {
- getBackwardSliceImpl(inst, backwardSlice, filter);
+ for (auto *operand : op->getOperands()) {
+ auto *op = operand->getDefiningOp();
+ if (backwardSlice->count(op) == 0) {
+ getBackwardSliceImpl(op, backwardSlice, filter);
}
}
- backwardSlice->insert(inst);
+ backwardSlice->insert(op);
}
-void mlir::getBackwardSlice(Instruction *inst,
- SetVector<Instruction *> *backwardSlice,
+void mlir::getBackwardSlice(Operation *op,
+ SetVector<Operation *> *backwardSlice,
TransitiveFilter filter) {
- getBackwardSliceImpl(inst, backwardSlice, filter);
+ getBackwardSliceImpl(op, backwardSlice, filter);
- // Don't insert the top level instruction, we just queried on it and don't
+ // Don't insert the top level operation, we just queried on it and don't
// want it in the results.
- backwardSlice->remove(inst);
+ backwardSlice->remove(op);
}
-SetVector<Instruction *> mlir::getSlice(Instruction *inst,
- TransitiveFilter backwardFilter,
- TransitiveFilter forwardFilter) {
- SetVector<Instruction *> slice;
- slice.insert(inst);
+SetVector<Operation *> mlir::getSlice(Operation *op,
+ TransitiveFilter backwardFilter,
+ TransitiveFilter forwardFilter) {
+ SetVector<Operation *> slice;
+ slice.insert(op);
unsigned currentIndex = 0;
- SetVector<Instruction *> backwardSlice;
- SetVector<Instruction *> forwardSlice;
+ SetVector<Operation *> backwardSlice;
+ SetVector<Operation *> forwardSlice;
while (currentIndex != slice.size()) {
auto *currentInst = (slice)[currentIndex];
// Compute and insert the backwardSlice starting from currentInst.
@@ -151,23 +150,23 @@ SetVector<Instruction *> mlir::getSlice(Instruction *inst,
namespace {
/// DFS post-order implementation that maintains a global count to work across
/// multiple invocations, to help implement topological sort on multi-root DAGs.
-/// We traverse all instructions but only record the ones that appear in
+/// We traverse all operations but only record the ones that appear in
/// `toSort` for the final result.
struct DFSState {
- DFSState(const SetVector<Instruction *> &set)
+ DFSState(const SetVector<Operation *> &set)
: toSort(set), topologicalCounts(), seen() {}
- const SetVector<Instruction *> &toSort;
- SmallVector<Instruction *, 16> topologicalCounts;
- DenseSet<Instruction *> seen;
+ const SetVector<Operation *> &toSort;
+ SmallVector<Operation *, 16> topologicalCounts;
+ DenseSet<Operation *> seen;
};
} // namespace
-static void DFSPostorder(Instruction *current, DFSState *state) {
+static void DFSPostorder(Operation *current, DFSState *state) {
assert(current->getNumResults() <= 1 && "NYI: multi-result");
if (current->getNumResults() > 0) {
for (auto &u : current->getResult(0)->getUses()) {
- auto *inst = u.getOwner();
- DFSPostorder(inst, state);
+ auto *op = u.getOwner();
+ DFSPostorder(op, state);
}
}
bool inserted;
@@ -181,8 +180,8 @@ static void DFSPostorder(Instruction *current, DFSState *state) {
}
}
-SetVector<Instruction *>
-mlir::topologicalSort(const SetVector<Instruction *> &toSort) {
+SetVector<Operation *>
+mlir::topologicalSort(const SetVector<Operation *> &toSort) {
if (toSort.empty()) {
return toSort;
}
@@ -195,7 +194,7 @@ mlir::topologicalSort(const SetVector<Instruction *> &toSort) {
}
// Reorder and return.
- SetVector<Instruction *> res;
+ SetVector<Operation *> res;
for (auto it = state.topologicalCounts.rbegin(),
eit = state.topologicalCounts.rend();
it != eit; ++it) {
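For reference, a minimal usage sketch of the renamed slicing entry points. The
caller below is hypothetical and assumes the default TransitiveFilter
arguments declared in SliceAnalysis.h (a filter that keeps every operation):

// Hypothetical caller exercising the post-rename slicing API.
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/SetVector.h"

using namespace mlir;

static void exerciseSlicing(Operation *root) {
  // Transitive users of root's results, returned in topological order.
  llvm::SetVector<Operation *> forward;
  getForwardSlice(root, &forward);

  // Transitive defining operations of root's operands.
  llvm::SetVector<Operation *> backward;
  getBackwardSlice(root, &backward);

  // Combined slice around root, re-ordered by the multi-root DFS post-order.
  llvm::SetVector<Operation *> sorted = topologicalSort(getSlice(root));
  (void)sorted;
}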
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index a9c22d62f0b..5999b357e96 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -37,18 +37,18 @@ using namespace mlir;
using llvm::SmallDenseMap;
-/// Populates 'loops' with IVs of the loops surrounding 'inst' ordered from
-/// the outermost 'affine.for' instruction to the innermost one.
-void mlir::getLoopIVs(Instruction &inst, SmallVectorImpl<AffineForOp> *loops) {
- auto *currInst = inst.getParentInst();
+/// Populates 'loops' with IVs of the loops surrounding 'op' ordered from
+/// the outermost 'affine.for' operation to the innermost one.
+void mlir::getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
+ auto *currOp = op.getParentOp();
AffineForOp currAffineForOp;
- // Traverse up the hierarchy collecing all 'affine.for' instruction while
- // skipping over 'affine.if' instructions.
- while (currInst && ((currAffineForOp = currInst->dyn_cast<AffineForOp>()) ||
- currInst->isa<AffineIfOp>())) {
+  // Traverse up the hierarchy collecting all 'affine.for' operations while
+ // skipping over 'affine.if' operations.
+ while (currOp && ((currAffineForOp = currOp->dyn_cast<AffineForOp>()) ||
+ currOp->isa<AffineIfOp>())) {
if (currAffineForOp)
loops->push_back(currAffineForOp);
- currInst = currInst->getParentInst();
+ currOp = currOp->getParentOp();
}
std::reverse(loops->begin(), loops->end());
}
@@ -73,8 +73,8 @@ ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) {
assert(cst->containsId(*value) && "value expected to be present");
if (isValidSymbol(value)) {
// Check if the symbol is a constant.
- if (auto *inst = value->getDefiningOp()) {
- if (auto constOp = inst->dyn_cast<ConstantIndexOp>()) {
+ if (auto *op = value->getDefiningOp()) {
+ if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
cst->setIdToConstant(*value, constOp.getValue());
}
}
@@ -173,23 +173,22 @@ LogicalResult MemRefRegion::unionBoundingBox(const MemRefRegion &other) {
//
// TODO(bondhugula): extend this to any other memref dereferencing ops
// (dma_start, dma_wait).
-LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
+LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
ComputationSliceState *sliceState) {
- assert((inst->isa<LoadOp>() || inst->isa<StoreOp>()) &&
- "load/store op expected");
+ assert((op->isa<LoadOp>() || op->isa<StoreOp>()) && "load/store op expected");
- MemRefAccess access(inst);
+ MemRefAccess access(op);
memref = access.memref;
write = access.isStore();
unsigned rank = access.getRank();
- LLVM_DEBUG(llvm::dbgs() << "MemRefRegion::compute: " << *inst
+ LLVM_DEBUG(llvm::dbgs() << "MemRefRegion::compute: " << *op
<< "depth: " << loopDepth << "\n";);
if (rank == 0) {
SmallVector<AffineForOp, 4> ivs;
- getLoopIVs(*inst, &ivs);
+ getLoopIVs(*op, &ivs);
SmallVector<Value *, 8> regionSymbols;
extractForInductionVars(ivs, &regionSymbols);
// A rank 0 memref has a 0-d region.
@@ -242,8 +241,8 @@ LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
auto *symbol = operand;
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
- if (auto *inst = symbol->getDefiningOp()) {
- if (auto constOp = inst->dyn_cast<ConstantIndexOp>()) {
+ if (auto *op = symbol->getDefiningOp()) {
+ if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
cst.setIdToConstant(*symbol, constOp.getValue());
}
}
@@ -267,7 +266,7 @@ LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
// Add access function equalities to connect loop IVs to data dimensions.
if (failed(cst.composeMap(&accessValueMap))) {
- inst->emitError("getMemRefRegion: compose affine map failed");
+ op->emitError("getMemRefRegion: compose affine map failed");
LLVM_DEBUG(accessValueMap.getAffineMap().dump());
return failure();
}
@@ -280,7 +279,7 @@ LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
// Eliminate any loop IVs other than the outermost 'loopDepth' IVs, on which
// this memref region is symbolic.
SmallVector<AffineForOp, 4> enclosingIVs;
- getLoopIVs(*inst, &enclosingIVs);
+ getLoopIVs(*op, &enclosingIVs);
assert(loopDepth <= enclosingIVs.size() && "invalid loop depth");
enclosingIVs.resize(loopDepth);
SmallVector<Value *, 4> ids;
@@ -374,7 +373,7 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
std::is_same<LoadOrStoreOpPointer, StoreOp>::value,
"argument should be either a LoadOp or a StoreOp");
- Instruction *opInst = loadOrStoreOp.getOperation();
+ Operation *opInst = loadOrStoreOp.getOperation();
MemRefRegion region(opInst->getLoc());
if (failed(region.compute(opInst, /*loopDepth=*/0)))
@@ -427,40 +426,40 @@ template LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOp loadOp,
template LogicalResult mlir::boundCheckLoadOrStoreOp(StoreOp storeOp,
bool emitError);
-// Returns in 'positions' the Block positions of 'inst' in each ancestor
-// Block from the Block containing instruction, stopping at 'limitBlock'.
-static void findInstPosition(Instruction *inst, Block *limitBlock,
+// Returns in 'positions' the Block positions of 'op' in each ancestor
+// Block from the Block containing 'op', stopping at 'limitBlock'.
+static void findInstPosition(Operation *op, Block *limitBlock,
SmallVectorImpl<unsigned> *positions) {
- Block *block = inst->getBlock();
+ Block *block = op->getBlock();
while (block != limitBlock) {
// FIXME: This algorithm is unnecessarily O(n) and should be improved to not
// rely on linear scans.
- int instPosInBlock = std::distance(block->begin(), inst->getIterator());
+ int instPosInBlock = std::distance(block->begin(), op->getIterator());
positions->push_back(instPosInBlock);
- inst = block->getContainingOp();
- block = inst->getBlock();
+ op = block->getContainingOp();
+ block = op->getBlock();
}
std::reverse(positions->begin(), positions->end());
}
-// Returns the Instruction in a possibly nested set of Blocks, where the
-// position of the instruction is represented by 'positions', which has a
+// Returns the Operation in a possibly nested set of Blocks, where the
+// position of the operation is represented by 'positions', which has a
// Block position for each level of nesting.
-static Instruction *getInstAtPosition(ArrayRef<unsigned> positions,
- unsigned level, Block *block) {
+static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
+ unsigned level, Block *block) {
unsigned i = 0;
- for (auto &inst : *block) {
+ for (auto &op : *block) {
if (i != positions[level]) {
++i;
continue;
}
if (level == positions.size() - 1)
- return &inst;
- if (auto childAffineForOp = inst.dyn_cast<AffineForOp>())
+ return &op;
+ if (auto childAffineForOp = op.dyn_cast<AffineForOp>())
return getInstAtPosition(positions, level + 1,
childAffineForOp.getBody());
- for (auto &region : inst.getRegions()) {
+ for (auto &region : op.getRegions()) {
for (auto &b : region)
if (auto *ret = getInstAtPosition(positions, level + 1, &b))
return ret;
@@ -563,9 +562,10 @@ LogicalResult mlir::getBackwardComputationSliceState(
// entire destination index set. Subtract out the dependent destination
// iterations from destination index set and check for emptiness --- this is one
// solution.
-AffineForOp mlir::insertBackwardComputationSlice(
- Instruction *srcOpInst, Instruction *dstOpInst, unsigned dstLoopDepth,
- ComputationSliceState *sliceState) {
+AffineForOp
+mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
+ unsigned dstLoopDepth,
+ ComputationSliceState *sliceState) {
// Get loop nest surrounding src operation.
SmallVector<AffineForOp, 4> srcLoopIVs;
getLoopIVs(*srcOpInst, &srcLoopIVs);
@@ -580,20 +580,20 @@ AffineForOp mlir::insertBackwardComputationSlice(
return AffineForOp();
}
- // Find the inst block positions of 'srcOpInst' within 'srcLoopIVs'.
+ // Find the op block positions of 'srcOpInst' within 'srcLoopIVs'.
SmallVector<unsigned, 4> positions;
// TODO(andydavis): This code is incorrect since srcLoopIVs can be 0-d.
findInstPosition(srcOpInst, srcLoopIVs[0].getOperation()->getBlock(),
&positions);
- // Clone src loop nest and insert it a the beginning of the instruction block
+  // Clone src loop nest and insert it at the beginning of the operation block
// of the loop at 'dstLoopDepth' in 'dstLoopIVs'.
auto dstAffineForOp = dstLoopIVs[dstLoopDepth - 1];
FuncBuilder b(dstAffineForOp.getBody(), dstAffineForOp.getBody()->begin());
auto sliceLoopNest =
b.clone(*srcLoopIVs[0].getOperation())->cast<AffineForOp>();
- Instruction *sliceInst =
+ Operation *sliceInst =
getInstAtPosition(positions, /*level=*/0, sliceLoopNest.getBody());
// Get loop nest surrounding 'sliceInst'.
SmallVector<AffineForOp, 4> sliceSurroundingLoops;
@@ -620,7 +620,7 @@ AffineForOp mlir::insertBackwardComputationSlice(
// Constructs MemRefAccess populating it with the memref, its indices and
// opinst from 'loadOrStoreOpInst'.
-MemRefAccess::MemRefAccess(Instruction *loadOrStoreOpInst) {
+MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
memref = loadOp.getMemRef();
opInst = loadOrStoreOpInst;
@@ -650,11 +650,11 @@ bool MemRefAccess::isStore() const { return opInst->isa<StoreOp>(); }
/// Returns the nesting depth of this statement, i.e., the number of loops
/// surrounding this statement.
-unsigned mlir::getNestingDepth(Instruction &inst) {
- Instruction *currInst = &inst;
+unsigned mlir::getNestingDepth(Operation &op) {
+ Operation *currOp = &op;
unsigned depth = 0;
- while ((currInst = currInst->getParentInst())) {
- if (currInst->isa<AffineForOp>())
+ while ((currOp = currOp->getParentOp())) {
+ if (currOp->isa<AffineForOp>())
depth++;
}
return depth;
@@ -662,7 +662,7 @@ unsigned mlir::getNestingDepth(Instruction &inst) {
/// Returns the number of surrounding loops common to 'loopsA' and 'loopsB',
/// where each lists loops from outer-most to inner-most in loop nest.
-unsigned mlir::getNumCommonSurroundingLoops(Instruction &A, Instruction &B) {
+unsigned mlir::getNumCommonSurroundingLoops(Operation &A, Operation &B) {
SmallVector<AffineForOp, 4> loopsA, loopsB;
getLoopIVs(A, &loopsA);
getLoopIVs(B, &loopsB);
@@ -683,9 +683,9 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
int memorySpace) {
SmallDenseMap<Value *, std::unique_ptr<MemRefRegion>, 4> regions;
- // Walk this 'affine.for' instruction to gather all memory regions.
+ // Walk this 'affine.for' operation to gather all memory regions.
bool error = false;
- block.walk(start, end, [&](Instruction *opInst) {
+ block.walk(start, end, [&](Operation *opInst) {
if (!opInst->isa<LoadOp>() && !opInst->isa<StoreOp>()) {
// Neither load nor a store op.
return;
@@ -737,8 +737,8 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
/// at 'forOp'.
void mlir::getSequentialLoops(
AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
- forOp.getOperation()->walk([&](Instruction *inst) {
- if (auto innerFor = inst->dyn_cast<AffineForOp>())
+ forOp.getOperation()->walk([&](Operation *op) {
+ if (auto innerFor = op->dyn_cast<AffineForOp>())
if (!isLoopParallel(innerFor))
sequentialLoops->insert(innerFor.getInductionVar());
});
@@ -747,8 +747,8 @@ void mlir::getSequentialLoops(
/// Returns true if 'forOp' is parallel.
bool mlir::isLoopParallel(AffineForOp forOp) {
// Collect all load and store ops in loop nest rooted at 'forOp'.
- SmallVector<Instruction *, 8> loadAndStoreOpInsts;
- forOp.getOperation()->walk([&](Instruction *opInst) {
+ SmallVector<Operation *, 8> loadAndStoreOpInsts;
+ forOp.getOperation()->walk([&](Operation *opInst) {
if (opInst->isa<LoadOp>() || opInst->isa<StoreOp>())
loadAndStoreOpInsts.push_back(opInst);
});
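As a usage sketch of the loop utilities updated above (hypothetical helper;
only the signatures shown in these hunks are assumed):

#include "mlir/Analysis/Utils.h"

using namespace mlir;

// Hypothetical helper: gather the affine.for loops surrounding `op` and count
// how many of them are parallel.
static unsigned countParallelEnclosingLoops(Operation &op) {
  SmallVector<AffineForOp, 4> loops;
  getLoopIVs(op, &loops); // outermost affine.for first
  unsigned numParallel = 0;
  for (auto forOp : loops)
    if (isLoopParallel(forOp))
      ++numParallel;
  // getNestingDepth(op) counts every enclosing affine.for, even those past a
  // non-affine parent, so it can exceed loops.size().
  return numParallel;
}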
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 232fe1a16ff..9a2e72f66be 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -105,8 +105,8 @@ Optional<SmallVector<unsigned, 4>> mlir::shapeRatio(VectorType superVectorType,
/// header file.
static AffineMap makePermutationMap(
MLIRContext *context,
- llvm::iterator_range<Instruction::operand_iterator> indices,
- const DenseMap<Instruction *, unsigned> &enclosingLoopToVectorDim) {
+ llvm::iterator_range<Operation::operand_iterator> indices,
+ const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
using functional::makePtrDynCaster;
using functional::map;
auto unwrappedIndices = map(makePtrDynCaster<Value, Value>(), indices);
@@ -140,10 +140,10 @@ static AffineMap makePermutationMap(
/// TODO(ntv): could also be implemented as a collect parents followed by a
/// filter and made available outside this file.
template <typename T>
-static SetVector<Instruction *> getParentsOfType(Instruction *inst) {
- SetVector<Instruction *> res;
- auto *current = inst;
- while (auto *parent = current->getParentInst()) {
+static SetVector<Operation *> getParentsOfType(Operation *op) {
+ SetVector<Operation *> res;
+ auto *current = op;
+ while (auto *parent = current->getParentOp()) {
if (auto typedParent = parent->template dyn_cast<T>()) {
assert(res.count(parent) == 0 && "Already inserted");
res.insert(parent);
@@ -154,15 +154,14 @@ static SetVector<Instruction *> getParentsOfType(Instruction *inst) {
}
/// Returns the enclosing AffineForOp, from closest to farthest.
-static SetVector<Instruction *> getEnclosingforOps(Instruction *inst) {
- return getParentsOfType<AffineForOp>(inst);
+static SetVector<Operation *> getEnclosingforOps(Operation *op) {
+ return getParentsOfType<AffineForOp>(op);
}
AffineMap mlir::makePermutationMap(
- Instruction *opInst,
- const DenseMap<Instruction *, unsigned> &loopToVectorDim) {
- DenseMap<Instruction *, unsigned> enclosingLoopToVectorDim;
- auto enclosingLoops = getEnclosingforOps(opInst);
+ Operation *op, const DenseMap<Operation *, unsigned> &loopToVectorDim) {
+ DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
+ auto enclosingLoops = getEnclosingforOps(op);
for (auto *forInst : enclosingLoops) {
auto it = loopToVectorDim.find(forInst);
if (it != loopToVectorDim.end()) {
@@ -170,17 +169,17 @@ AffineMap mlir::makePermutationMap(
}
}
- if (auto load = opInst->dyn_cast<LoadOp>()) {
- return ::makePermutationMap(opInst->getContext(), load.getIndices(),
+ if (auto load = op->dyn_cast<LoadOp>()) {
+ return ::makePermutationMap(op->getContext(), load.getIndices(),
enclosingLoopToVectorDim);
}
- auto store = opInst->cast<StoreOp>();
- return ::makePermutationMap(opInst->getContext(), store.getIndices(),
+ auto store = op->cast<StoreOp>();
+ return ::makePermutationMap(op->getContext(), store.getIndices(),
enclosingLoopToVectorDim);
}
-bool mlir::matcher::operatesOnSuperVectors(Instruction &opInst,
+bool mlir::matcher::operatesOnSuperVectors(Operation &op,
VectorType subVectorType) {
  // First, extract the vector type and distinguish between:
// a. ops that *must* lower a super-vector (i.e. vector_transfer_read,
@@ -193,20 +192,20 @@ bool mlir::matcher::operatesOnSuperVectors(Instruction &opInst,
/// do not have to special case. Maybe a trait, or just a method, unclear atm.
bool mustDivide = false;
VectorType superVectorType;
- if (auto read = opInst.dyn_cast<VectorTransferReadOp>()) {
+ if (auto read = op.dyn_cast<VectorTransferReadOp>()) {
superVectorType = read.getResultType();
mustDivide = true;
- } else if (auto write = opInst.dyn_cast<VectorTransferWriteOp>()) {
+ } else if (auto write = op.dyn_cast<VectorTransferWriteOp>()) {
superVectorType = write.getVectorType();
mustDivide = true;
- } else if (opInst.getNumResults() == 0) {
- if (!opInst.isa<ReturnOp>()) {
- opInst.emitError("NYI: assuming only return instructions can have 0 "
- " results at this point");
+ } else if (op.getNumResults() == 0) {
+ if (!op.isa<ReturnOp>()) {
+      op.emitError("NYI: assuming only return operations can have 0 "
+                   "results at this point");
}
return false;
- } else if (opInst.getNumResults() == 1) {
- if (auto v = opInst.getResult(0)->getType().dyn_cast<VectorType>()) {
+ } else if (op.getNumResults() == 1) {
+ if (auto v = op.getResult(0)->getType().dyn_cast<VectorType>()) {
superVectorType = v;
} else {
// Not a vector type.
@@ -215,7 +214,7 @@ bool mlir::matcher::operatesOnSuperVectors(Instruction &opInst,
} else {
// Not a vector_transfer and has more than 1 result, fail hard for now to
// wake us up when something changes.
- opInst.emitError("NYI: instruction has more than 1 result");
+ op.emitError("NYI: operation has more than 1 result");
return false;
}
@@ -224,7 +223,7 @@ bool mlir::matcher::operatesOnSuperVectors(Instruction &opInst,
// Sanity check.
assert((ratio.hasValue() || !mustDivide) &&
- "vector_transfer instruction in which super-vector size is not an"
+ "vector_transfer operation in which super-vector size is not an"
" integer multiple of sub-vector size");
// This catches cases that are not strictly necessary to have multiplicity but
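A sketch of the updated makePermutationMap entry point (hypothetical wrapper;
the LoadOp include from the standard ops dialect is omitted):

#include "mlir/Analysis/VectorAnalysis.h"

using namespace mlir;

// Hypothetical wrapper: the permutation map of a load, given the
// enclosing-loop-to-vector-dimension mapping maintained by the vectorizer.
static AffineMap permutationMapForLoad(
    LoadOp load, const llvm::DenseMap<Operation *, unsigned> &loopToVectorDim) {
  return makePermutationMap(load.getOperation(), loopToVectorDim);
}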
diff --git a/mlir/lib/Analysis/Verifier.cpp b/mlir/lib/Analysis/Verifier.cpp
index d7eb578ab1a..fddd9ac25e4 100644
--- a/mlir/lib/Analysis/Verifier.cpp
+++ b/mlir/lib/Analysis/Verifier.cpp
@@ -23,9 +23,9 @@
// The checks in this file are only for things that can occur as part of IR
// transformations: e.g. violation of dominance information, malformed operation
// attributes, etc. MLIR supports transformations moving IR through locally
-// invalid states (e.g. unlinking an instruction from an instruction before
-// re-inserting it in a new place), but each transformation must complete with
-// the IR in a valid form.
+// invalid states (e.g. unlinking an operation from a block before re-inserting
+// it in a new place), but each transformation must complete with the IR in a
+// valid form.
//
// This should not check for things that are always wrong by construction (e.g.
// affine maps or other immutable structures that are incorrect), because those
@@ -52,7 +52,7 @@ namespace {
///
class FuncVerifier {
public:
- bool failure(const Twine &message, Instruction &value) {
+ bool failure(const Twine &message, Operation &value) {
return value.emitError(message);
}
@@ -61,7 +61,7 @@ public:
}
bool failure(const Twine &message, Block &bb) {
- // Take the location information for the first instruction in the block.
+ // Take the location information for the first operation in the block.
if (!bb.empty())
return failure(message, bb.front());
@@ -108,9 +108,9 @@ public:
bool verify();
bool verifyBlock(Block &block, bool isTopLevel);
- bool verifyOperation(Instruction &op);
+ bool verifyOperation(Operation &op);
bool verifyDominance(Block &block);
- bool verifyInstDominance(Instruction &inst);
+ bool verifyOpDominance(Operation &op);
explicit FuncVerifier(Function &fn)
: fn(fn), identifierRegex("^[a-zA-Z_][a-zA-Z_0-9\\.\\$]*$") {}
@@ -231,15 +231,15 @@ bool FuncVerifier::verifyBlock(Block &block, bool isTopLevel) {
return failure("block with no terminator", block);
}
- // Verify the non-terminator instructions separately so that we can verify
+ // Verify the non-terminator operations separately so that we can verify
  // they have no successors.
- for (auto &inst : llvm::make_range(block.begin(), std::prev(block.end()))) {
- if (inst.getNumSuccessors() != 0)
+ for (auto &op : llvm::make_range(block.begin(), std::prev(block.end()))) {
+ if (op.getNumSuccessors() != 0)
return failure(
- "instruction with block successors must terminate its parent block",
- inst);
+ "operation with block successors must terminate its parent block",
+ op);
- if (verifyOperation(inst))
+ if (verifyOperation(op))
return true;
}
@@ -259,7 +259,7 @@ bool FuncVerifier::verifyBlock(Block &block, bool isTopLevel) {
}
/// Check the invariants of the specified operation.
-bool FuncVerifier::verifyOperation(Instruction &op) {
+bool FuncVerifier::verifyOperation(Operation &op) {
if (op.getFunction() != &fn)
return failure("operation in the wrong function", op);
@@ -304,30 +304,30 @@ bool FuncVerifier::verifyOperation(Instruction &op) {
}
bool FuncVerifier::verifyDominance(Block &block) {
- // Verify the dominance of each of the held instructions.
- for (auto &inst : block)
- if (verifyInstDominance(inst))
+ // Verify the dominance of each of the held operations.
+ for (auto &op : block)
+ if (verifyOpDominance(op))
return true;
return false;
}
-bool FuncVerifier::verifyInstDominance(Instruction &inst) {
+bool FuncVerifier::verifyOpDominance(Operation &op) {
// Check that operands properly dominate this use.
- for (unsigned operandNo = 0, e = inst.getNumOperands(); operandNo != e;
+ for (unsigned operandNo = 0, e = op.getNumOperands(); operandNo != e;
++operandNo) {
- auto *op = inst.getOperand(operandNo);
- if (domInfo->properlyDominates(op, &inst))
+ auto *operand = op.getOperand(operandNo);
+ if (domInfo->properlyDominates(operand, &op))
continue;
- inst.emitError("operand #" + Twine(operandNo) +
- " does not dominate this use");
- if (auto *useInst = op->getDefiningOp())
- useInst->emitNote("operand defined here");
+ op.emitError("operand #" + Twine(operandNo) +
+ " does not dominate this use");
+ if (auto *useOp = operand->getDefiningOp())
+ useOp->emitNote("operand defined here");
return true;
}
- // Verify the dominance of each of the nested blocks within this instruction.
- for (auto &region : inst.getRegions())
+ // Verify the dominance of each of the nested blocks within this operation.
+ for (auto &region : op.getRegions())
for (auto &block : region)
if (verifyDominance(block))
return true;
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
index cd5816009a5..455f2a0b5fe 100644
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -83,7 +83,7 @@ Operation *Block::findAncestorInstInBlock(Operation &op) {
// find the ancestor operation that resides in the block of 'forInst'.
auto *currInst = &op;
while (currInst->getBlock() != this) {
- currInst = currInst->getParentInst();
+ currInst = currInst->getParentOp();
if (!currInst)
return nullptr;
}
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index 3de620b524c..c54b5a24a3d 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -273,7 +273,7 @@ Dialect *Operation::getDialect() {
return getContext()->getRegisteredDialect(dialectPrefix);
}
-Operation *Operation::getParentInst() {
+Operation *Operation::getParentOp() {
return block ? block->getContainingOp() : nullptr;
}
@@ -437,8 +437,8 @@ void Operation::moveBefore(Operation *existingInst) {
moveBefore(existingInst->getBlock(), existingInst->getIterator());
}
-/// Unlink this operation operation from its current basic block and insert
-/// it right before `iterator` in the specified basic block.
+/// Unlink this operation from its current basic block and insert it right
+/// before `iterator` in the specified basic block.
void Operation::moveBefore(Block *block,
llvm::iplist<Operation>::iterator iterator) {
block->getOperations().splice(iterator, getBlock()->getOperations(),
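The renamed parent accessor composes the same way the old one did; a minimal
sketch (hypothetical helper, not part of this change):

#include "mlir/IR/Operation.h"

// Counts the ancestors of `op`; getParentOp() returns nullptr once the top of
// the function body is reached.
static unsigned countAncestorOps(mlir::Operation *op) {
  unsigned depth = 0;
  while ((op = op->getParentOp()))
    ++depth;
  return depth;
}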
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 8c29d1a76b4..7a6f188e6af 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -746,7 +746,7 @@ struct LoopNestStatsCollector {
void collect(Instruction *inst) {
inst->walk<AffineForOp>([&](AffineForOp forOp) {
auto *forInst = forOp.getOperation();
- auto *parentInst = forOp.getOperation()->getParentInst();
+ auto *parentInst = forOp.getOperation()->getParentOp();
if (parentInst != nullptr) {
assert(parentInst->isa<AffineForOp>() && "Expected parent AffineForOp");
// Add mapping to 'forOp' from its parent AffineForOp.
@@ -1545,7 +1545,7 @@ static bool isFusionProfitable(Instruction *srcOpInst,
// A single store disappears: -1 for that.
computeCostMap[srcLoopIVs[numSrcLoopIVs - 1].getOperation()] = -1;
for (auto *loadOp : dstLoadOpInsts) {
- auto *parentInst = loadOp->getParentInst();
+ auto *parentInst = loadOp->getParentOp();
if (parentInst && parentInst->isa<AffineForOp>())
computeCostMap[parentInst] = -1;
}
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index c235190b4b7..f99b602cf0b 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -182,8 +182,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
  // Check if the supplied for ops are all successively nested.
for (unsigned i = 1, e = band.size(); i < e; i++) {
- assert(band[i].getOperation()->getParentInst() ==
- band[i - 1].getOperation());
+ assert(band[i].getOperation()->getParentOp() == band[i - 1].getOperation());
}
auto origLoops = band;
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 8ea9d4e8020..c15108530fb 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -687,7 +687,7 @@ static bool materialize(Function *f,
// current enclosing scope of the terminator. See the top of the function
// Note for the justification of this restriction.
// TODO(ntv): relax scoping constraints.
- auto *enclosingScope = term->getParentInst();
+ auto *enclosingScope = term->getParentOp();
auto keepIfInSameScope = [enclosingScope, &domInfo](Instruction *inst) {
assert(inst && "NULL inst");
if (!enclosingScope) {
@@ -760,7 +760,7 @@ void MaterializeVectorsPass::runOnFunction() {
pat.match(f, &matches);
SetVector<Instruction *> terminators;
for (auto m : matches) {
- terminators.insert(m.getMatchedInstruction());
+ terminators.insert(m.getMatchedOperation());
}
if (materialize(f, terminators, &state))
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index b5109a20ba9..c06e9359324 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -122,7 +122,7 @@ void VectorizerTestPass::testVectorShapeRatio() {
SmallVector<NestedMatch, 8> matches;
pat.match(f, &matches);
for (auto m : matches) {
- auto *opInst = m.getMatchedInstruction();
+ auto *opInst = m.getMatchedOperation();
// This is a unit test that only checks and prints shape ratio.
// As a consequence we write only Ops with a single return type for the
// purpose of this test. If we need to test more intricate behavior in the
@@ -164,9 +164,9 @@ void VectorizerTestPass::testBackwardSlicing() {
patternTestSlicingOps().match(f, &matches);
for (auto m : matches) {
SetVector<Instruction *> backwardSlice;
- getBackwardSlice(m.getMatchedInstruction(), &backwardSlice);
+ getBackwardSlice(m.getMatchedOperation(), &backwardSlice);
auto strs = map(toString, backwardSlice);
- outs() << "\nmatched: " << *m.getMatchedInstruction()
+ outs() << "\nmatched: " << *m.getMatchedOperation()
<< " backward static slice: ";
for (const auto &s : strs) {
outs() << "\n" << s;
@@ -180,9 +180,9 @@ void VectorizerTestPass::testForwardSlicing() {
patternTestSlicingOps().match(f, &matches);
for (auto m : matches) {
SetVector<Instruction *> forwardSlice;
- getForwardSlice(m.getMatchedInstruction(), &forwardSlice);
+ getForwardSlice(m.getMatchedOperation(), &forwardSlice);
auto strs = map(toString, forwardSlice);
- outs() << "\nmatched: " << *m.getMatchedInstruction()
+ outs() << "\nmatched: " << *m.getMatchedOperation()
<< " forward static slice: ";
for (const auto &s : strs) {
outs() << "\n" << s;
@@ -196,9 +196,9 @@ void VectorizerTestPass::testSlicing() {
SmallVector<NestedMatch, 8> matches;
patternTestSlicingOps().match(f, &matches);
for (auto m : matches) {
- SetVector<Instruction *> staticSlice = getSlice(m.getMatchedInstruction());
+ SetVector<Instruction *> staticSlice = getSlice(m.getMatchedOperation());
auto strs = map(toString, staticSlice);
- outs() << "\nmatched: " << *m.getMatchedInstruction() << " static slice: ";
+ outs() << "\nmatched: " << *m.getMatchedOperation() << " static slice: ";
for (const auto &s : strs) {
outs() << "\n" << s;
}
@@ -220,7 +220,7 @@ void VectorizerTestPass::testComposeMaps() {
SmallVector<AffineMap, 4> maps;
maps.reserve(matches.size());
for (auto m : llvm::reverse(matches)) {
- auto *opInst = m.getMatchedInstruction();
+ auto *opInst = m.getMatchedOperation();
auto map = opInst->getAttr(VectorizerTestPass::kTestAffineMapAttrName)
.cast<AffineMapAttr>()
.getValue();
@@ -257,15 +257,15 @@ void VectorizerTestPass::testNormalizeMaps() {
SmallVector<NestedMatch, 8> matches;
pattern.match(f, &matches);
for (auto m : matches) {
- auto app = m.getMatchedInstruction()->cast<AffineApplyOp>();
- FuncBuilder b(m.getMatchedInstruction());
+ auto app = m.getMatchedOperation()->cast<AffineApplyOp>();
+ FuncBuilder b(m.getMatchedOperation());
SmallVector<Value *, 8> operands(app.getOperands());
makeComposedAffineApply(&b, app.getLoc(), app.getAffineMap(), operands);
}
}
// We should now be able to erase everything in reverse order in this test.
for (auto m : llvm::reverse(toErase)) {
- m.getMatchedInstruction()->erase();
+ m.getMatchedOperation()->erase();
}
}
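All pattern-driven passes above now use the renamed NestedMatch accessor; a
minimal sketch of the idiom (hypothetical helper):

#include "mlir/Analysis/NestedMatcher.h"
#include "llvm/ADT/SetVector.h"

// Collects matched operations via the renamed accessor.
static void collectMatchedOps(llvm::ArrayRef<mlir::NestedMatch> matches,
                              llvm::SetVector<mlir::Operation *> &ops) {
  for (auto m : matches)
    ops.insert(m.getMatchedOperation()); // was getMatchedInstruction()
}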
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 3c6ab6c2cac..8a7a7a6dbba 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -705,7 +705,7 @@ static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
patternDepth, strategy))) {
return failure();
}
- vectorizeLoopIfProfitable(m.getMatchedInstruction(), depthInPattern,
+ vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
patternDepth, strategy);
}
return success();
@@ -869,7 +869,7 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
SmallVector<NestedMatch, 8> loadAndStoresMatches;
loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
for (auto ls : loadAndStoresMatches) {
- auto *opInst = ls.getMatchedInstruction();
+ auto *opInst = ls.getMatchedOperation();
auto load = opInst->dyn_cast<LoadOp>();
auto store = opInst->dyn_cast<StoreOp>();
LLVM_DEBUG(opInst->print(dbgs()));
@@ -904,7 +904,7 @@ isVectorizableLoopPtrFactory(unsigned fastestVaryingMemRefDimension) {
static LogicalResult
vectorizeLoopsAndLoadsRecursively(NestedMatch oneMatch,
VectorizationState *state) {
- auto *loopInst = oneMatch.getMatchedInstruction();
+ auto *loopInst = oneMatch.getMatchedOperation();
auto loop = loopInst->cast<AffineForOp>();
auto childrenMatches = oneMatch.getMatchedChildren();
@@ -1144,7 +1144,7 @@ static LogicalResult vectorizeNonTerminals(VectorizationState *state) {
/// anything below it fails.
static LogicalResult vectorizeRootMatch(NestedMatch m,
VectorizationStrategy *strategy) {
- auto loop = m.getMatchedInstruction()->cast<AffineForOp>();
+ auto loop = m.getMatchedOperation()->cast<AffineForOp>();
VectorizationState state;
state.strategy = strategy;
@@ -1248,7 +1248,7 @@ void Vectorize::runOnFunction() {
&strategy))) {
continue;
}
- vectorizeLoopIfProfitable(m.getMatchedInstruction(), 0, patternDepth,
+ vectorizeLoopIfProfitable(m.getMatchedOperation(), 0, patternDepth,
&strategy);
// TODO(ntv): if pattern does not apply, report it; alter the
// cost/benefit.