summaryrefslogtreecommitdiffstats
path: root/mlir/include
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-12-22 21:59:55 -0800
committerA. Unique TensorFlower <gardener@tensorflow.org>2019-12-22 22:00:23 -0800
commit35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch)
treed083d37d993a774239081509a50e3e6c65366421 /mlir/include
parent22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff)
downloadbcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.tar.gz
bcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.zip
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step to refactoring the representation of OpResult as proposed in: https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics. PiperOrigin-RevId: 286844725
Diffstat (limited to 'mlir/include')
-rw-r--r--mlir/include/mlir/Analysis/AffineAnalysis.h9
-rw-r--r--mlir/include/mlir/Analysis/AffineStructures.h72
-rw-r--r--mlir/include/mlir/Analysis/CallInterfaces.h4
-rw-r--r--mlir/include/mlir/Analysis/Dominance.h4
-rw-r--r--mlir/include/mlir/Analysis/Liveness.h17
-rw-r--r--mlir/include/mlir/Analysis/LoopAnalysis.h9
-rw-r--r--mlir/include/mlir/Analysis/Utils.h10
-rw-r--r--mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h13
-rw-r--r--mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h7
-rw-r--r--mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h57
-rw-r--r--mlir/include/mlir/Dialect/AffineOps/AffineOps.h105
-rw-r--r--mlir/include/mlir/Dialect/AffineOps/AffineOps.td8
-rw-r--r--mlir/include/mlir/Dialect/GPU/GPUDialect.h6
-rw-r--r--mlir/include/mlir/Dialect/GPU/GPUOps.td16
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h6
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td22
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h16
-rw-r--r--mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h20
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td10
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td16
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td10
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h8
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h4
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Utils/Utils.h36
-rw-r--r--mlir/include/mlir/Dialect/LoopOps/LoopOps.h2
-rw-r--r--mlir/include/mlir/Dialect/LoopOps/LoopOps.td12
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td2
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td2
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td4
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h4
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td6
-rw-r--r--mlir/include/mlir/Dialect/StandardOps/Ops.h35
-rw-r--r--mlir/include/mlir/Dialect/StandardOps/Ops.td78
-rw-r--r--mlir/include/mlir/Dialect/VectorOps/Utils.h5
-rw-r--r--mlir/include/mlir/Dialect/VectorOps/VectorOps.td22
-rw-r--r--mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h5
-rw-r--r--mlir/include/mlir/EDSC/Builders.h32
-rw-r--r--mlir/include/mlir/EDSC/Helpers.h10
-rw-r--r--mlir/include/mlir/EDSC/Intrinsics.h26
-rw-r--r--mlir/include/mlir/IR/Block.h8
-rw-r--r--mlir/include/mlir/IR/BlockAndValueMapping.h8
-rw-r--r--mlir/include/mlir/IR/Builders.h10
-rw-r--r--mlir/include/mlir/IR/FunctionSupport.h2
-rw-r--r--mlir/include/mlir/IR/Matchers.h14
-rw-r--r--mlir/include/mlir/IR/OpDefinition.h40
-rw-r--r--mlir/include/mlir/IR/OpImplementation.h30
-rw-r--r--mlir/include/mlir/IR/Operation.h22
-rw-r--r--mlir/include/mlir/IR/OperationSupport.h45
-rw-r--r--mlir/include/mlir/IR/TypeUtilities.h12
-rw-r--r--mlir/include/mlir/IR/Value.h22
-rw-r--r--mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h10
-rw-r--r--mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h2
-rw-r--r--mlir/include/mlir/Transforms/DialectConversion.h46
-rw-r--r--mlir/include/mlir/Transforms/FoldUtils.h10
-rw-r--r--mlir/include/mlir/Transforms/InliningUtils.h14
-rw-r--r--mlir/include/mlir/Transforms/LoopLikeInterface.td2
-rw-r--r--mlir/include/mlir/Transforms/LoopUtils.h12
-rw-r--r--mlir/include/mlir/Transforms/RegionUtils.h8
-rw-r--r--mlir/include/mlir/Transforms/Utils.h20
60 files changed, 558 insertions, 511 deletions
diff --git a/mlir/include/mlir/Analysis/AffineAnalysis.h b/mlir/include/mlir/Analysis/AffineAnalysis.h
index 8243d1f6f63..f506470f36a 100644
--- a/mlir/include/mlir/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Analysis/AffineAnalysis.h
@@ -39,10 +39,13 @@ class FlatAffineConstraints;
class Operation;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Returns in `affineApplyOps`, the sequence of those AffineApplyOp
/// Operations that are reachable via a search starting from `operands` and
/// ending at those operands that are not the result of an AffineApplyOp.
-void getReachableAffineApplyOps(ArrayRef<Value *> operands,
+void getReachableAffineApplyOps(ArrayRef<ValuePtr> operands,
SmallVectorImpl<Operation *> &affineApplyOps);
/// Builds a system of constraints with dimensional identifiers corresponding to
@@ -56,9 +59,9 @@ LogicalResult getIndexSet(MutableArrayRef<AffineForOp> forOps,
/// Encapsulates a memref load or store access information.
struct MemRefAccess {
- Value *memref;
+ ValuePtr memref;
Operation *opInst;
- SmallVector<Value *, 4> indices;
+ SmallVector<ValuePtr, 4> indices;
/// Constructs a MemRefAccess from a load or store operation.
// TODO(b/119949820): add accessors to standard op's load, store, DMA op's to
diff --git a/mlir/include/mlir/Analysis/AffineStructures.h b/mlir/include/mlir/Analysis/AffineStructures.h
index e53af5024da..65cf13a0ce6 100644
--- a/mlir/include/mlir/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Analysis/AffineStructures.h
@@ -123,8 +123,8 @@ public:
// Creates an empty AffineValueMap (users should call 'reset' to reset map
// and operands).
AffineValueMap() {}
- AffineValueMap(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results = llvm::None);
+ AffineValueMap(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results = llvm::None);
explicit AffineValueMap(AffineApplyOp applyOp);
explicit AffineValueMap(AffineBound bound);
@@ -132,8 +132,8 @@ public:
~AffineValueMap();
// Resets this AffineValueMap with 'map', 'operands', and 'results'.
- void reset(AffineMap map, ArrayRef<Value *> operands,
- ArrayRef<Value *> results = llvm::None);
+ void reset(AffineMap map, ArrayRef<ValuePtr> operands,
+ ArrayRef<ValuePtr> results = llvm::None);
/// Return the value map that is the difference of value maps 'a' and 'b',
/// represented as an affine map and its operands. The output map + operands
@@ -146,7 +146,7 @@ public:
inline bool isMultipleOf(unsigned idx, int64_t factor) const;
/// Return true if the idx^th result depends on 'value', false otherwise.
- bool isFunctionOf(unsigned idx, Value *value) const;
+ bool isFunctionOf(unsigned idx, ValuePtr value) const;
/// Return true if the result at 'idx' is a constant, false
/// otherwise.
@@ -162,8 +162,8 @@ public:
inline unsigned getNumSymbols() const { return map.getNumSymbols(); }
inline unsigned getNumResults() const { return map.getNumResults(); }
- Value *getOperand(unsigned i) const;
- ArrayRef<Value *> getOperands() const;
+ ValuePtr getOperand(unsigned i) const;
+ ArrayRef<ValuePtr> getOperands() const;
AffineMap getAffineMap() const;
private:
@@ -172,9 +172,9 @@ private:
// TODO: make these trailing objects?
/// The SSA operands binding to the dim's and symbols of 'map'.
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
/// The SSA results binding to the results of 'map'.
- SmallVector<Value *, 4> results;
+ SmallVector<ValuePtr, 4> results;
};
/// An IntegerValueSet is an integer set plus its operands.
@@ -207,7 +207,7 @@ private:
// 'AffineCondition'.
MutableIntegerSet set;
/// The SSA operands binding to the dim's and symbols of 'set'.
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
};
/// A flat list of affine equalities and inequalities in the form.
@@ -245,7 +245,7 @@ public:
unsigned numReservedEqualities,
unsigned numReservedCols, unsigned numDims = 0,
unsigned numSymbols = 0, unsigned numLocals = 0,
- ArrayRef<Optional<Value *>> idArgs = {})
+ ArrayRef<Optional<ValuePtr>> idArgs = {})
: numReservedCols(numReservedCols), numDims(numDims),
numSymbols(numSymbols) {
assert(numReservedCols >= numDims + numSymbols + 1);
@@ -264,7 +264,7 @@ public:
/// dimensions and symbols.
FlatAffineConstraints(unsigned numDims = 0, unsigned numSymbols = 0,
unsigned numLocals = 0,
- ArrayRef<Optional<Value *>> idArgs = {})
+ ArrayRef<Optional<ValuePtr>> idArgs = {})
: numReservedCols(numDims + numSymbols + numLocals + 1), numDims(numDims),
numSymbols(numSymbols) {
assert(numReservedCols >= numDims + numSymbols + 1);
@@ -304,10 +304,10 @@ public:
// Clears any existing data and reserves memory for the specified constraints.
void reset(unsigned numReservedInequalities, unsigned numReservedEqualities,
unsigned numReservedCols, unsigned numDims, unsigned numSymbols,
- unsigned numLocals = 0, ArrayRef<Value *> idArgs = {});
+ unsigned numLocals = 0, ArrayRef<ValuePtr> idArgs = {});
void reset(unsigned numDims = 0, unsigned numSymbols = 0,
- unsigned numLocals = 0, ArrayRef<Value *> idArgs = {});
+ unsigned numLocals = 0, ArrayRef<ValuePtr> idArgs = {});
/// Appends constraints from 'other' into this. This is equivalent to an
/// intersection with no simplification of any sort attempted.
@@ -396,7 +396,7 @@ public:
/// operands. If `eq` is true, add a single equality equal to the bound map's
/// first result expr.
LogicalResult addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
- ArrayRef<Value *> operands, bool eq,
+ ArrayRef<ValuePtr> operands, bool eq,
bool lower = true);
/// Computes the lower and upper bounds of the first 'num' dimensional
@@ -415,10 +415,10 @@ public:
/// operand list 'operands'.
/// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size'.
/// Note that both lower/upper bounds use operands from 'operands'.
- LogicalResult addSliceBounds(ArrayRef<Value *> values,
+ LogicalResult addSliceBounds(ArrayRef<ValuePtr> values,
ArrayRef<AffineMap> lbMaps,
ArrayRef<AffineMap> ubMaps,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
// Adds an inequality (>= 0) from the coefficients specified in inEq.
void addInequality(ArrayRef<int64_t> inEq);
@@ -447,25 +447,25 @@ public:
/// Sets the identifier corresponding to the specified Value id to a
/// constant. Asserts if the 'id' is not found.
- void setIdToConstant(Value &id, int64_t val);
+ void setIdToConstant(ValueRef id, int64_t val);
/// Looks up the position of the identifier with the specified Value. Returns
/// true if found (false otherwise). `pos' is set to the (column) position of
/// the identifier.
- bool findId(Value &id, unsigned *pos) const;
+ bool findId(ValueRef id, unsigned *pos) const;
/// Returns true if an identifier with the specified Value exists, false
/// otherwise.
- bool containsId(Value &id) const;
+ bool containsId(ValueRef id) const;
// Add identifiers of the specified kind - specified positions are relative to
// the kind of identifier. The coefficient column corresponding to the added
// identifier is initialized to zero. 'id' is the Value corresponding to the
// identifier that can optionally be provided.
- void addDimId(unsigned pos, Value *id = nullptr);
- void addSymbolId(unsigned pos, Value *id = nullptr);
+ void addDimId(unsigned pos, ValuePtr id = nullptr);
+ void addSymbolId(unsigned pos, ValuePtr id = nullptr);
void addLocalId(unsigned pos);
- void addId(IdKind kind, unsigned pos, Value *id = nullptr);
+ void addId(IdKind kind, unsigned pos, ValuePtr id = nullptr);
/// Add the specified value as a dim or symbol id depending on its nature, if
/// it doesn't already exist in the system. `id' has to be either a terminal
@@ -473,7 +473,7 @@ public:
/// symbols or loop IVs. The identifier is added to the end of the existing
/// dims or symbols. Additional information on the identifier is extracted
/// from the IR and added to the constraint system.
- void addInductionVarOrTerminalSymbol(Value *id);
+ void addInductionVarOrTerminalSymbol(ValuePtr id);
/// Composes the affine value map with this FlatAffineConstraints, adding the
/// results of the map as dimensions at the front [0, vMap->getNumResults())
@@ -500,8 +500,8 @@ public:
void projectOut(unsigned pos, unsigned num);
inline void projectOut(unsigned pos) { return projectOut(pos, 1); }
- /// Projects out the identifier that is associate with Value *.
- void projectOut(Value *id);
+ /// Projects out the identifier that is associated with ValuePtr.
+ void projectOut(ValuePtr id);
void removeId(IdKind idKind, unsigned pos);
void removeId(unsigned pos);
@@ -577,20 +577,20 @@ public:
return numIds - numDims - numSymbols;
}
- inline ArrayRef<Optional<Value *>> getIds() const {
+ inline ArrayRef<Optional<ValuePtr>> getIds() const {
return {ids.data(), ids.size()};
}
- inline MutableArrayRef<Optional<Value *>> getIds() {
+ inline MutableArrayRef<Optional<ValuePtr>> getIds() {
return {ids.data(), ids.size()};
}
/// Returns the optional Value corresponding to the pos^th identifier.
- inline Optional<Value *> getId(unsigned pos) const { return ids[pos]; }
- inline Optional<Value *> &getId(unsigned pos) { return ids[pos]; }
+ inline Optional<ValuePtr> getId(unsigned pos) const { return ids[pos]; }
+ inline Optional<ValuePtr> &getId(unsigned pos) { return ids[pos]; }
/// Returns the Value associated with the pos^th identifier. Asserts if
/// no Value identifier was associated.
- inline Value *getIdValue(unsigned pos) const {
+ inline ValuePtr getIdValue(unsigned pos) const {
assert(ids[pos].hasValue() && "identifier's Value not set");
return ids[pos].getValue();
}
@@ -598,7 +598,7 @@ public:
/// Returns the Values associated with identifiers in range [start, end).
/// Asserts if no Value was associated with one of these identifiers.
void getIdValues(unsigned start, unsigned end,
- SmallVectorImpl<Value *> *values) const {
+ SmallVectorImpl<ValuePtr> *values) const {
assert((start < numIds || start == end) && "invalid start position");
assert(end <= numIds && "invalid end position");
values->clear();
@@ -607,17 +607,17 @@ public:
values->push_back(getIdValue(i));
}
}
- inline void getAllIdValues(SmallVectorImpl<Value *> *values) const {
+ inline void getAllIdValues(SmallVectorImpl<ValuePtr> *values) const {
getIdValues(0, numIds, values);
}
/// Sets Value associated with the pos^th identifier.
- inline void setIdValue(unsigned pos, Value *val) {
+ inline void setIdValue(unsigned pos, ValuePtr val) {
assert(pos < numIds && "invalid id position");
ids[pos] = val;
}
/// Sets Values associated with identifiers in the range [start, end).
- void setIdValues(unsigned start, unsigned end, ArrayRef<Value *> values) {
+ void setIdValues(unsigned start, unsigned end, ArrayRef<ValuePtr> values) {
assert((start < numIds || end == start) && "invalid start position");
assert(end <= numIds && "invalid end position");
assert(values.size() == end - start);
@@ -766,7 +766,7 @@ private:
/// system appearing in the order the identifiers correspond to columns.
/// Temporary ones or those that aren't associated to any Value are set to
/// None.
- SmallVector<Optional<Value *>, 8> ids;
+ SmallVector<Optional<ValuePtr>, 8> ids;
/// A parameter that controls detection of an unrealistic number of
/// constraints. If the number of constraints is this many times the number of
diff --git a/mlir/include/mlir/Analysis/CallInterfaces.h b/mlir/include/mlir/Analysis/CallInterfaces.h
index dd23d77889f..a18cfa7aba4 100644
--- a/mlir/include/mlir/Analysis/CallInterfaces.h
+++ b/mlir/include/mlir/Analysis/CallInterfaces.h
@@ -30,8 +30,8 @@ namespace mlir {
/// A callable is either a symbol, or an SSA value, that is referenced by a
/// call-like operation. This represents the destination of the call.
-struct CallInterfaceCallable : public PointerUnion<SymbolRefAttr, Value *> {
- using PointerUnion<SymbolRefAttr, Value *>::PointerUnion;
+struct CallInterfaceCallable : public PointerUnion<SymbolRefAttr, ValuePtr> {
+ using PointerUnion<SymbolRefAttr, ValuePtr>::PointerUnion;
};
#include "mlir/Analysis/CallInterfaces.h.inc"
diff --git a/mlir/include/mlir/Analysis/Dominance.h b/mlir/include/mlir/Analysis/Dominance.h
index 09114eafbb1..f46241e2af0 100644
--- a/mlir/include/mlir/Analysis/Dominance.h
+++ b/mlir/include/mlir/Analysis/Dominance.h
@@ -74,10 +74,10 @@ public:
}
/// Return true if value A properly dominates operation B.
- bool properlyDominates(Value *a, Operation *b);
+ bool properlyDominates(ValuePtr a, Operation *b);
/// Return true if operation A dominates operation B.
- bool dominates(Value *a, Operation *b) {
+ bool dominates(ValuePtr a, Operation *b) {
return (Operation *)a->getDefiningOp() == b || properlyDominates(a, b);
}
diff --git a/mlir/include/mlir/Analysis/Liveness.h b/mlir/include/mlir/Analysis/Liveness.h
index 0bdb474fd92..0aa9d9693e4 100644
--- a/mlir/include/mlir/Analysis/Liveness.h
+++ b/mlir/include/mlir/Analysis/Liveness.h
@@ -41,6 +41,9 @@ class Operation;
class Region;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Represents an analysis for computing liveness information from a
/// given top-level operation. The analysis iterates over all associated
/// regions that are attached to the given top-level operation. It
@@ -57,7 +60,7 @@ class Liveness {
public:
using OperationListT = std::vector<Operation *>;
using BlockMapT = DenseMap<Block *, LivenessBlockInfo>;
- using ValueSetT = SmallPtrSet<Value *, 16>;
+ using ValueSetT = SmallPtrSet<ValuePtr, 16>;
public:
/// Creates a new Liveness analysis that computes liveness
@@ -72,7 +75,7 @@ public:
/// Note that the operations in this list are not ordered and the current
/// implementation is computationally expensive (as it iterates over all
/// blocks in which the given value is live).
- OperationListT resolveLiveness(Value *value) const;
+ OperationListT resolveLiveness(ValuePtr value) const;
/// Gets liveness info (if any) for the block.
const LivenessBlockInfo *getLiveness(Block *block) const;
@@ -85,7 +88,7 @@ public:
/// Returns true if the given operation represents the last use of the
/// given value.
- bool isLastUse(Value *value, Operation *operation) const;
+ bool isLastUse(ValuePtr value, Operation *operation) const;
/// Dumps the liveness information in a human readable format.
void dump() const;
@@ -124,20 +127,20 @@ public:
const ValueSetT &out() const { return outValues; }
/// Returns true if the given value is in the live-in set.
- bool isLiveIn(Value *value) const;
+ bool isLiveIn(ValuePtr value) const;
/// Returns true if the given value is in the live-out set.
- bool isLiveOut(Value *value) const;
+ bool isLiveOut(ValuePtr value) const;
/// Gets the start operation for the given value. This is the first operation
/// the given value is considered to be live. This could either be the start
/// operation of the current block (in case the value is live-in) or the
/// operation that defines the given value (must be referenced in this block).
- Operation *getStartOperation(Value *value) const;
+ Operation *getStartOperation(ValuePtr value) const;
/// Gets the end operation for the given value using the start operation
/// provided (must be referenced in this block).
- Operation *getEndOperation(Value *value, Operation *startOperation) const;
+ Operation *getEndOperation(ValuePtr value, Operation *startOperation) const;
private:
/// The underlying block.
diff --git a/mlir/include/mlir/Analysis/LoopAnalysis.h b/mlir/include/mlir/Analysis/LoopAnalysis.h
index 47cc22a4923..ad7dc6d6092 100644
--- a/mlir/include/mlir/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Analysis/LoopAnalysis.h
@@ -36,6 +36,9 @@ class NestedPattern;
class Operation;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Returns the trip count of the loop as an affine map with its corresponding
/// operands if the latter is expressible as an affine expression, and nullptr
/// otherwise. This method always succeeds as long as the lower bound is not a
@@ -45,7 +48,7 @@ class Value;
// TODO(mlir-team): this should be moved into 'Transforms/' and be replaced by a
// pure analysis method relying on FlatAffineConstraints
void buildTripCountMapAndOperands(AffineForOp forOp, AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Returns the trip count of the loop if it's a constant, None otherwise. This
/// uses affine expression analysis and is able to determine constant trip count
@@ -66,8 +69,8 @@ uint64_t getLargestDivisorOfTripCount(AffineForOp forOp);
///
/// Emits a note if it encounters a chain of affine.apply and conservatively
/// handles those cases.
-DenseSet<Value *, DenseMapInfo<Value *>>
-getInvariantAccesses(Value *iv, ArrayRef<Value *> indices);
+DenseSet<ValuePtr, DenseMapInfo<ValuePtr>>
+getInvariantAccesses(ValuePtr iv, ArrayRef<ValuePtr> indices);
using VectorizableLoopFun = std::function<bool(AffineForOp)>;
diff --git a/mlir/include/mlir/Analysis/Utils.h b/mlir/include/mlir/Analysis/Utils.h
index cffa222154f..ea0987df3fe 100644
--- a/mlir/include/mlir/Analysis/Utils.h
+++ b/mlir/include/mlir/Analysis/Utils.h
@@ -55,7 +55,7 @@ unsigned getNestingDepth(Operation &op);
/// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
/// at 'forOp'.
void getSequentialLoops(AffineForOp forOp,
- llvm::SmallDenseSet<Value *, 8> *sequentialLoops);
+ llvm::SmallDenseSet<ValuePtr, 8> *sequentialLoops);
/// ComputationSliceState aggregates loop IVs, loop bound AffineMaps and their
/// associated operands for a set of loops within a loop nest (typically the
@@ -64,15 +64,15 @@ void getSequentialLoops(AffineForOp forOp,
struct ComputationSliceState {
// List of sliced loop IVs (ordered from outermost to innermost).
// EX: 'ivs[i]' has lower bound 'lbs[i]' and upper bound 'ubs[i]'.
- SmallVector<Value *, 4> ivs;
+ SmallVector<ValuePtr, 4> ivs;
// List of lower bound AffineMaps.
SmallVector<AffineMap, 4> lbs;
// List of upper bound AffineMaps.
SmallVector<AffineMap, 4> ubs;
// List of lower bound operands (lbOperands[i] are used by 'lbs[i]').
- std::vector<SmallVector<Value *, 4>> lbOperands;
+ std::vector<SmallVector<ValuePtr, 4>> lbOperands;
// List of upper bound operands (ubOperands[i] are used by 'ubs[i]').
- std::vector<SmallVector<Value *, 4>> ubOperands;
+ std::vector<SmallVector<ValuePtr, 4>> ubOperands;
// Slice loop nest insertion point in target loop nest.
Block::iterator insertPoint;
// Adds to 'cst' with constraints which represent the slice bounds on 'ivs'
@@ -257,7 +257,7 @@ struct MemRefRegion {
unsigned getRank() const;
/// Memref that this region corresponds to.
- Value *memref;
+ ValuePtr memref;
/// Read or write.
bool write;
diff --git a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
index b5c51ad4b4c..4bbe6610e31 100644
--- a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
+++ b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
@@ -30,14 +30,17 @@ class OpBuilder;
class RewritePattern;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
// Owning list of rewriting patterns.
class OwningRewritePatternList;
/// Emit code that computes the given affine expression using standard
/// arithmetic operations applied to the provided dimension and symbol values.
-Value *expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr,
- ArrayRef<Value *> dimValues,
- ArrayRef<Value *> symbolValues);
+ValuePtr expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr,
+ ArrayRef<ValuePtr> dimValues,
+ ArrayRef<ValuePtr> symbolValues);
/// Collect a set of patterns to convert from the Affine dialect to the Standard
/// dialect, in particular convert structured affine control flow into CFG
@@ -47,11 +50,11 @@ void populateAffineToStdConversionPatterns(OwningRewritePatternList &patterns,
/// Emit code that computes the lower bound of the given affine loop using
/// standard arithmetic operations.
-Value *lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
+ValuePtr lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
/// Emit code that computes the upper bound of the given affine loop using
/// standard arithmetic operations.
-Value *lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
+ValuePtr lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
} // namespace mlir
#endif // MLIR_CONVERSION_AFFINETOSTANDARD_AFFINETOSTANDARD_H
diff --git a/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h b/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
index 0aab8723eab..58d49a13391 100644
--- a/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
+++ b/mlir/include/mlir/Conversion/LoopsToGPU/LoopsToGPU.h
@@ -24,6 +24,9 @@ class AffineForOp;
struct LogicalResult;
class Value;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
namespace loop {
class ForOp;
} // end namespace loop
@@ -78,8 +81,8 @@ LogicalResult convertLoopNestToGPULaunch(loop::ForOp forOp,
/// The above conditions are assumed to be satisfied by the computation rooted
/// at `forOp`.
LogicalResult convertLoopToGPULaunch(loop::ForOp forOp,
- ArrayRef<Value *> numWorkGroups,
- ArrayRef<Value *> workGroupSizes);
+ ArrayRef<ValuePtr> numWorkGroups,
+ ArrayRef<ValuePtr> workGroupSizes);
} // namespace mlir
diff --git a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
index e8d16f064a8..6f41fb68633 100644
--- a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
+++ b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
@@ -74,16 +74,16 @@ public:
/// Promote the LLVM struct representation of all MemRef descriptors to stack
/// and use pointers to struct to avoid the complexity of the
/// platform-specific C/C++ ABI lowering related to struct argument passing.
- SmallVector<Value *, 4> promoteMemRefDescriptors(Location loc,
- ValueRange opOperands,
- ValueRange operands,
- OpBuilder &builder);
+ SmallVector<ValuePtr, 4> promoteMemRefDescriptors(Location loc,
+ ValueRange opOperands,
+ ValueRange operands,
+ OpBuilder &builder);
/// Promote the LLVM struct representation of one MemRef descriptor to stack
/// and use pointer to struct to avoid the complexity of the platform-specific
/// C/C++ ABI lowering related to struct argument passing.
- Value *promoteOneMemRefDescriptor(Location loc, Value *operand,
- OpBuilder &builder);
+ ValuePtr promoteOneMemRefDescriptor(Location loc, ValuePtr operand,
+ OpBuilder &builder);
protected:
/// LLVM IR module used to parse/create types.
@@ -139,24 +139,24 @@ private:
class StructBuilder {
public:
/// Construct a helper for the given value.
- explicit StructBuilder(Value *v);
+ explicit StructBuilder(ValuePtr v);
/// Builds IR creating an `undef` value of the descriptor type.
static StructBuilder undef(OpBuilder &builder, Location loc,
Type descriptorType);
- /*implicit*/ operator Value *() { return value; }
+ /*implicit*/ operator ValuePtr() { return value; }
protected:
// LLVM value
- Value *value;
+ ValuePtr value;
// Cached struct type.
Type structType;
protected:
/// Builds IR to extract a value from the struct at position pos
- Value *extractPtr(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr extractPtr(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR to set a value in the struct at position pos
- void setPtr(OpBuilder &builder, Location loc, unsigned pos, Value *ptr);
+ void setPtr(OpBuilder &builder, Location loc, unsigned pos, ValuePtr ptr);
};
/// Helper class to produce LLVM dialect operations extracting or inserting
/// elements of a MemRef descriptor. Wraps a Value pointing to the descriptor.
@@ -164,7 +164,7 @@ protected:
class MemRefDescriptor : public StructBuilder {
public:
/// Construct a helper for the given descriptor value.
- explicit MemRefDescriptor(Value *descriptor);
+ explicit MemRefDescriptor(ValuePtr descriptor);
/// Builds IR creating an `undef` value of the descriptor type.
static MemRefDescriptor undef(OpBuilder &builder, Location loc,
Type descriptorType);
@@ -173,39 +173,40 @@ public:
/// type.
static MemRefDescriptor fromStaticShape(OpBuilder &builder, Location loc,
LLVMTypeConverter &typeConverter,
- MemRefType type, Value *memory);
+ MemRefType type, ValuePtr memory);
/// Builds IR extracting the allocated pointer from the descriptor.
- Value *allocatedPtr(OpBuilder &builder, Location loc);
+ ValuePtr allocatedPtr(OpBuilder &builder, Location loc);
/// Builds IR inserting the allocated pointer into the descriptor.
- void setAllocatedPtr(OpBuilder &builder, Location loc, Value *ptr);
+ void setAllocatedPtr(OpBuilder &builder, Location loc, ValuePtr ptr);
/// Builds IR extracting the aligned pointer from the descriptor.
- Value *alignedPtr(OpBuilder &builder, Location loc);
+ ValuePtr alignedPtr(OpBuilder &builder, Location loc);
/// Builds IR inserting the aligned pointer into the descriptor.
- void setAlignedPtr(OpBuilder &builder, Location loc, Value *ptr);
+ void setAlignedPtr(OpBuilder &builder, Location loc, ValuePtr ptr);
/// Builds IR extracting the offset from the descriptor.
- Value *offset(OpBuilder &builder, Location loc);
+ ValuePtr offset(OpBuilder &builder, Location loc);
/// Builds IR inserting the offset into the descriptor.
- void setOffset(OpBuilder &builder, Location loc, Value *offset);
+ void setOffset(OpBuilder &builder, Location loc, ValuePtr offset);
void setConstantOffset(OpBuilder &builder, Location loc, uint64_t offset);
/// Builds IR extracting the pos-th size from the descriptor.
- Value *size(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr size(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR inserting the pos-th size into the descriptor
- void setSize(OpBuilder &builder, Location loc, unsigned pos, Value *size);
+ void setSize(OpBuilder &builder, Location loc, unsigned pos, ValuePtr size);
void setConstantSize(OpBuilder &builder, Location loc, unsigned pos,
uint64_t size);
/// Builds IR extracting the pos-th stride from the descriptor.
- Value *stride(OpBuilder &builder, Location loc, unsigned pos);
+ ValuePtr stride(OpBuilder &builder, Location loc, unsigned pos);
/// Builds IR inserting the pos-th stride into the descriptor
- void setStride(OpBuilder &builder, Location loc, unsigned pos, Value *stride);
+ void setStride(OpBuilder &builder, Location loc, unsigned pos,
+ ValuePtr stride);
void setConstantStride(OpBuilder &builder, Location loc, unsigned pos,
uint64_t stride);
@@ -220,19 +221,19 @@ private:
class UnrankedMemRefDescriptor : public StructBuilder {
public:
/// Construct a helper for the given descriptor value.
- explicit UnrankedMemRefDescriptor(Value *descriptor);
+ explicit UnrankedMemRefDescriptor(ValuePtr descriptor);
/// Builds IR creating an `undef` value of the descriptor type.
static UnrankedMemRefDescriptor undef(OpBuilder &builder, Location loc,
Type descriptorType);
/// Builds IR extracting the rank from the descriptor
- Value *rank(OpBuilder &builder, Location loc);
+ ValuePtr rank(OpBuilder &builder, Location loc);
/// Builds IR setting the rank in the descriptor
- void setRank(OpBuilder &builder, Location loc, Value *value);
+ void setRank(OpBuilder &builder, Location loc, ValuePtr value);
/// Builds IR extracting ranked memref descriptor ptr
- Value *memRefDescPtr(OpBuilder &builder, Location loc);
+ ValuePtr memRefDescPtr(OpBuilder &builder, Location loc);
/// Builds IR setting ranked memref descriptor ptr
- void setMemRefDescPtr(OpBuilder &builder, Location loc, Value *value);
+ void setMemRefDescPtr(OpBuilder &builder, Location loc, ValuePtr value);
};
/// Base class for operation conversions targeting the LLVM IR dialect. Provides
/// conversion patterns with an access to the containing LLVMLowering for the
diff --git a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
index 36b4e55e77c..764f439e020 100644
--- a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
@@ -41,7 +41,7 @@ class OpBuilder;
/// A utility function to check if a value is defined at the top level of a
/// function. A value of index type defined at the top level is always a valid
/// symbol.
-bool isTopLevelValue(Value *value);
+bool isTopLevelValue(ValuePtr value);
class AffineOpsDialect : public Dialect {
public:
@@ -148,18 +148,19 @@ class AffineDmaStartOp : public Op<AffineDmaStartOp, OpTrait::VariadicOperands,
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *srcMemRef,
- AffineMap srcMap, ValueRange srcIndices, Value *destMemRef,
- AffineMap dstMap, ValueRange destIndices, Value *tagMemRef,
- AffineMap tagMap, ValueRange tagIndices, Value *numElements,
- Value *stride = nullptr,
- Value *elementsPerStride = nullptr);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr srcMemRef, AffineMap srcMap, ValueRange srcIndices,
+ ValuePtr destMemRef, AffineMap dstMap,
+ ValueRange destIndices, ValuePtr tagMemRef,
+ AffineMap tagMap, ValueRange tagIndices,
+ ValuePtr numElements, ValuePtr stride = nullptr,
+ ValuePtr elementsPerStride = nullptr);
/// Returns the operand index of the src memref.
unsigned getSrcMemRefOperandIndex() { return 0; }
/// Returns the source MemRefType for this DMA operation.
- Value *getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
+ ValuePtr getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
MemRefType getSrcMemRefType() {
return getSrcMemRef()->getType().cast<MemRefType>();
}
@@ -191,7 +192,7 @@ public:
}
/// Returns the destination MemRefType for this DMA operations.
- Value *getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
+ ValuePtr getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
MemRefType getDstMemRefType() {
return getDstMemRef()->getType().cast<MemRefType>();
}
@@ -225,7 +226,7 @@ public:
}
/// Returns the Tag MemRef for this DMA operation.
- Value *getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
+ ValuePtr getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
MemRefType getTagMemRefType() {
return getTagMemRef()->getType().cast<MemRefType>();
}
@@ -249,13 +250,13 @@ public:
}
/// Returns the number of elements being transferred by this DMA operation.
- Value *getNumElements() {
+ ValuePtr getNumElements() {
return getOperand(getTagMemRefOperandIndex() + 1 +
getTagMap().getNumInputs());
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
if (memref == getSrcMemRef())
return {Identifier::get(getSrcMapAttrName(), getContext()),
getSrcMapAttr()};
@@ -305,14 +306,14 @@ public:
}
/// Returns the stride value for this DMA operation.
- Value *getStride() {
+ ValuePtr getStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1 - 1);
}
/// Returns the number of elements to transfer per stride for this DMA op.
- Value *getNumElementsPerStride() {
+ ValuePtr getNumElementsPerStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1);
@@ -337,14 +338,14 @@ class AffineDmaWaitOp : public Op<AffineDmaWaitOp, OpTrait::VariadicOperands,
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *tagMemRef,
- AffineMap tagMap, ValueRange tagIndices,
- Value *numElements);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr tagMemRef, AffineMap tagMap, ValueRange tagIndices,
+ ValuePtr numElements);
static StringRef getOperationName() { return "affine.dma_wait"; }
// Returns the Tag MemRef associated with the DMA operation being waited on.
- Value *getTagMemRef() { return getOperand(0); }
+ ValuePtr getTagMemRef() { return getOperand(0); }
MemRefType getTagMemRefType() {
return getTagMemRef()->getType().cast<MemRefType>();
}
@@ -367,14 +368,16 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getTagMemRef());
return {Identifier::get(getTagMapAttrName(), getContext()),
getTagMapAttr()};
}
/// Returns the number of elements transferred in the associated DMA op.
- Value *getNumElements() { return getOperand(1 + getTagMap().getNumInputs()); }
+ ValuePtr getNumElements() {
+ return getOperand(1 + getTagMap().getNumInputs());
+ }
static StringRef getTagMapAttrName() { return "tag_map"; }
static ParseResult parse(OpAsmParser &parser, OperationState &result);
@@ -409,18 +412,18 @@ public:
static void build(Builder *builder, OperationState &result, AffineMap map,
ValueRange operands);
/// Builds an affine load op with an identity map and operands.
- static void build(Builder *builder, OperationState &result, Value *memref,
+ static void build(Builder *builder, OperationState &result, ValuePtr memref,
ValueRange indices = {});
/// Builds an affine load op with the specified map and its operands.
- static void build(Builder *builder, OperationState &result, Value *memref,
+ static void build(Builder *builder, OperationState &result, ValuePtr memref,
AffineMap map, ValueRange mapOperands);
/// Returns the operand index of the memref.
unsigned getMemRefOperandIndex() { return 0; }
/// Get memref operand.
- Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
- void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
+ ValuePtr getMemRef() { return getOperand(getMemRefOperandIndex()); }
+ void setMemRef(ValuePtr value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -435,7 +438,7 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getMemRef());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
@@ -476,21 +479,21 @@ public:
/// Builds an affine store operation with the provided indices (identity map).
static void build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref, ValueRange indices);
+ ValuePtr valueToStore, ValuePtr memref, ValueRange indices);
/// Builds an affine store operation with the specified map and its operands.
static void build(Builder *builder, OperationState &result,
- Value *valueToStore, Value *memref, AffineMap map,
+ ValuePtr valueToStore, ValuePtr memref, AffineMap map,
ValueRange mapOperands);
/// Get value to be stored by store operation.
- Value *getValueToStore() { return getOperand(0); }
+ ValuePtr getValueToStore() { return getOperand(0); }
/// Returns the operand index of the memref.
unsigned getMemRefOperandIndex() { return 1; }
/// Get memref operand.
- Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
- void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
+ ValuePtr getMemRef() { return getOperand(getMemRefOperandIndex()); }
+ void setMemRef(ValuePtr value) { setOperand(getMemRefOperandIndex(), value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
@@ -506,7 +509,7 @@ public:
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *memref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr memref) {
assert(memref == getMemRef());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
@@ -526,10 +529,10 @@ public:
};
/// Returns true if the given Value can be used as a dimension id.
-bool isValidDim(Value *value);
+bool isValidDim(ValuePtr value);
/// Returns true if the given Value can be used as a symbol.
-bool isValidSymbol(Value *value);
+bool isValidSymbol(ValuePtr value);
/// Modifies both `map` and `operands` in-place so as to:
/// 1. drop duplicate operands
@@ -538,17 +541,17 @@ bool isValidSymbol(Value *value);
/// dimensional operands
/// 4. propagate constant operands and drop them
void canonicalizeMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Canonicalizes an integer set the same way canonicalizeMapAndOperands does
/// for affine maps.
void canonicalizeSetAndOperands(IntegerSet *set,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the resulting
/// AffineApplyOp do not change the length of AffineApplyOp chains.
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
/// Given an affine map `map` and its input `operands`, this method composes
/// into `map`, maps of AffineApplyOps whose results are the values in
@@ -558,22 +561,22 @@ AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
/// terminal symbol, i.e., a symbol defined at the top level or a block/function
/// argument.
void fullyComposeAffineMapAndOperands(AffineMap *map,
- SmallVectorImpl<Value *> *operands);
+ SmallVectorImpl<ValuePtr> *operands);
#define GET_OP_CLASSES
#include "mlir/Dialect/AffineOps/AffineOps.h.inc"
/// Returns if the provided value is the induction variable of a AffineForOp.
-bool isForInductionVar(Value *val);
+bool isForInductionVar(ValuePtr val);
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-AffineForOp getForInductionVarOwner(Value *val);
+AffineForOp getForInductionVarOwner(ValuePtr val);
/// Extracts the induction variables from a list of AffineForOps and places them
/// in the output argument `ivs`.
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
- SmallVectorImpl<Value *> *ivs);
+ SmallVectorImpl<ValuePtr> *ivs);
/// AffineBound represents a lower or upper bound in the for operation.
/// This class does not own the underlying operands. Instead, it refers
@@ -588,7 +591,7 @@ public:
AffineValueMap getAsAffineValueMap();
unsigned getNumOperands() { return opEnd - opStart; }
- Value *getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
+ ValuePtr getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
using operand_iterator = AffineForOp::operand_iterator;
using operand_range = AffineForOp::operand_range;
@@ -613,7 +616,7 @@ private:
};
/// An `AffineApplyNormalizer` is a helper class that supports renumbering
-/// operands of AffineApplyOp. This acts as a reindexing map of Value* to
+/// operands of AffineApplyOp. This acts as a reindexing map of Value to
/// positional dims or symbols and allows simplifications such as:
///
/// ```mlir
@@ -626,13 +629,13 @@ private:
/// %1 = affine.apply () -> (0)
/// ```
struct AffineApplyNormalizer {
- AffineApplyNormalizer(AffineMap map, ArrayRef<Value *> operands);
+ AffineApplyNormalizer(AffineMap map, ArrayRef<ValuePtr> operands);
/// Returns the AffineMap resulting from normalization.
AffineMap getAffineMap() { return affineMap; }
- SmallVector<Value *, 8> getOperands() {
- SmallVector<Value *, 8> res(reorderedDims);
+ SmallVector<ValuePtr, 8> getOperands() {
+ SmallVector<ValuePtr, 8> res(reorderedDims);
res.append(concatenatedSymbols.begin(), concatenatedSymbols.end());
return res;
}
@@ -642,13 +645,13 @@ struct AffineApplyNormalizer {
/// Normalizes 'otherMap' and its operands 'otherOperands' to map to this
/// normalizer's coordinate space.
- void normalize(AffineMap *otherMap, SmallVectorImpl<Value *> *otherOperands);
+ void normalize(AffineMap *otherMap, SmallVectorImpl<ValuePtr> *otherOperands);
private:
/// Helper function to insert `v` into the coordinate system of the current
/// AffineApplyNormalizer. Returns the AffineDimExpr with the corresponding
/// renumbered position.
- AffineDimExpr renumberOneDim(Value *v);
+ AffineDimExpr renumberOneDim(ValuePtr v);
/// Given an `other` normalizer, this rewrites `other.affineMap` in the
/// coordinate system of the current AffineApplyNormalizer.
@@ -656,13 +659,13 @@ private:
/// `this`.
AffineMap renumber(const AffineApplyNormalizer &other);
- /// Maps of Value* to position in `affineMap`.
- DenseMap<Value *, unsigned> dimValueToPosition;
+ /// Maps of Value to position in `affineMap`.
+ DenseMap<ValuePtr, unsigned> dimValueToPosition;
/// Ordered dims and symbols matching positional dims and symbols in
/// `affineMap`.
- SmallVector<Value *, 8> reorderedDims;
- SmallVector<Value *, 8> concatenatedSymbols;
+ SmallVector<ValuePtr, 8> reorderedDims;
+ SmallVector<ValuePtr, 8> concatenatedSymbols;
AffineMap affineMap;
diff --git a/mlir/include/mlir/Dialect/AffineOps/AffineOps.td b/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
index b40990ecb5d..befdc2f6237 100644
--- a/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
+++ b/mlir/include/mlir/Dialect/AffineOps/AffineOps.td
@@ -101,7 +101,7 @@ def AffineForOp : Affine_Op<"for",
static StringRef getUpperBoundAttrName() { return "upper_bound"; }
Block *getBody() { return &region().front(); }
- Value *getInductionVar() { return getBody()->getArgument(0); }
+ ValuePtr getInductionVar() { return getBody()->getArgument(0); }
OpBuilder getBodyBuilder() {
return OpBuilder(getBody(), std::prev(getBody()->end()));
}
@@ -286,8 +286,8 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> {
BoolAttr:$isDataCache);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref,"
- "AffineMap map, ArrayRef<Value *> mapOperands, bool isWrite,"
+ "Builder *builder, OperationState &result, ValuePtr memref,"
+ "AffineMap map, ArrayRef<ValuePtr> mapOperands, bool isWrite,"
"unsigned localityHint, bool isDataCache",
[{
assert(map.getNumInputs() == mapOperands.size()
@@ -315,7 +315,7 @@ def AffinePrefetchOp : Affine_Op<"prefetch"> {
}
/// Returns the AffineMapAttr associated with 'memref'.
- NamedAttribute getAffineMapAttrForMemRef(Value *mref) {
+ NamedAttribute getAffineMapAttrForMemRef(ValuePtr mref) {
assert(mref == memref());
return {Identifier::get(getMapAttrName(), getContext()),
getAffineMapAttr()};
diff --git a/mlir/include/mlir/Dialect/GPU/GPUDialect.h b/mlir/include/mlir/Dialect/GPU/GPUDialect.h
index 93c0b13ee3e..12c2aa1bbd1 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUDialect.h
+++ b/mlir/include/mlir/Dialect/GPU/GPUDialect.h
@@ -77,9 +77,9 @@ public:
/// Utility class for the GPU dialect to represent triples of `Value`s
/// accessible through `.x`, `.y`, and `.z` similarly to CUDA notation.
struct KernelDim3 {
- Value *x;
- Value *y;
- Value *z;
+ ValuePtr x;
+ ValuePtr y;
+ ValuePtr z;
};
#define GET_OP_CLASSES
diff --git a/mlir/include/mlir/Dialect/GPU/GPUOps.td b/mlir/include/mlir/Dialect/GPU/GPUOps.td
index 6751f0a3f70..def1ff2b8a1 100644
--- a/mlir/include/mlir/Dialect/GPU/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/GPUOps.td
@@ -157,7 +157,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> {
/// Returns a list of block arguments that correspond to buffers located in
/// the workgroup memory
- ArrayRef<BlockArgument *> getWorkgroupAttributions() {
+ ArrayRef<BlockArgumentPtr> getWorkgroupAttributions() {
auto begin =
std::next(getBody().front().args_begin(), getType().getNumInputs());
auto end = std::next(begin, getNumWorkgroupAttributions());
@@ -166,7 +166,7 @@ def GPU_GPUFuncOp : GPU_Op<"func", [FunctionLike, IsolatedFromAbove, Symbol]> {
/// Returns a list of block arguments that correspond to buffers located in
/// the private memory.
- ArrayRef<BlockArgument *> getPrivateAttributions() {
+ ArrayRef<BlockArgumentPtr> getPrivateAttributions() {
auto begin =
std::next(getBody().front().args_begin(),
getType().getNumInputs() + getNumWorkgroupAttributions());
@@ -282,8 +282,8 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">,
let builders = [
OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, "
- "Value *gridSizeX, Value *gridSizeY, Value *gridSizeZ, "
- "Value *blockSizeX, Value *blockSizeY, Value *blockSizeZ, "
+ "ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ, "
+ "ValuePtr blockSizeX, ValuePtr blockSizeY, ValuePtr blockSizeZ, "
"ValueRange kernelOperands">,
OpBuilder<"Builder *builder, OperationState &result, GPUFuncOp kernelFunc, "
"KernelDim3 gridSize, KernelDim3 blockSize, "
@@ -302,7 +302,7 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func">,
StringRef getKernelModuleName();
/// The i-th operand passed to the kernel function.
- Value *getKernelOperand(unsigned i);
+ ValuePtr getKernelOperand(unsigned i);
/// Get the SSA values passed as operands to specify the grid size.
KernelDim3 getGridSizeOperandValues();
@@ -415,9 +415,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [IsolatedFromAbove]>,
let skipDefaultBuilders = 1;
let builders = [
- OpBuilder<"Builder *builder, OperationState &result, Value *gridSizeX,"
- "Value *gridSizeY, Value *gridSizeZ, Value *blockSizeX,"
- "Value *blockSizeY, Value *blockSizeZ,"
+ OpBuilder<"Builder *builder, OperationState &result, ValuePtr gridSizeX,"
+ "ValuePtr gridSizeY, ValuePtr gridSizeZ, ValuePtr blockSizeX,"
+ "ValuePtr blockSizeY, ValuePtr blockSizeZ,"
"ValueRange operands">
];
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
index dae27d00e5a..a599d51b31f 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
@@ -194,9 +194,9 @@ private:
/// surrounding the insertion point of builder. Obtain the address of that
/// global and use it to compute the address of the first character in the
/// string (operations inserted at the builder insertion point).
-Value *createGlobalString(Location loc, OpBuilder &builder, StringRef name,
- StringRef value, LLVM::Linkage linkage,
- LLVM::LLVMDialect *llvmDialect);
+ValuePtr createGlobalString(Location loc, OpBuilder &builder, StringRef name,
+ StringRef value, LLVM::Linkage linkage,
+ LLVM::LLVMDialect *llvmDialect);
/// LLVM requires some operations to be inside of a Module operation. This
/// function confirms that the Operation has the desired properties.
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 00acc539dab..cfbbf7da65d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -185,8 +185,8 @@ def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>,
$res = builder.CreateICmp(getLLVMCmpPredicate($predicate), $lhs, $rhs);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, ICmpPredicate predicate, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, ICmpPredicate predicate, ValuePtr lhs, "
+ "ValuePtr rhs", [{
LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
@@ -232,8 +232,8 @@ def LLVM_FCmpOp : LLVM_OneResultOp<"fcmp", [NoSideEffect]>,
$res = builder.CreateFCmp(getLLVMCmpPredicate($predicate), $lhs, $rhs);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, FCmpPredicate predicate, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, FCmpPredicate predicate, ValuePtr lhs, "
+ "ValuePtr rhs", [{
LLVMDialect *dialect = &lhs->getType().cast<LLVMType>().getDialect();
build(b, result, LLVMType::getInt1Ty(dialect),
b->getI64IntegerAttr(static_cast<int64_t>(predicate)), lhs, rhs);
@@ -265,7 +265,7 @@ def LLVM_AllocaOp :
$res = alloca;
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Type resultType, Value *arraySize, "
+ "Builder *b, OperationState &result, Type resultType, ValuePtr arraySize, "
"unsigned alignment",
[{
if (alignment == 0)
@@ -292,7 +292,7 @@ def LLVM_GEPOp : LLVM_OneResultOp<"getelementptr", [NoSideEffect]>,
def LLVM_LoadOp : LLVM_OneResultOp<"load">, Arguments<(ins LLVM_Type:$addr)>,
LLVM_Builder<"$res = builder.CreateLoad($addr);"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *addr",
+ "Builder *b, OperationState &result, ValuePtr addr",
[{
auto type = addr->getType().cast<LLVM::LLVMType>().getPointerElementTy();
build(b, result, type, addr);
@@ -353,7 +353,7 @@ def LLVM_ExtractElementOp : LLVM_OneResultOp<"extractelement", [NoSideEffect]>,
$res = builder.CreateExtractElement($vector, $position);
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *vector, Value *position,"
+ "Builder *b, OperationState &result, ValuePtr vector, ValuePtr position,"
"ArrayRef<NamedAttribute> attrs = {}">];
let parser = [{ return parseExtractElementOp(parser, result); }];
let printer = [{ printExtractElementOp(p, *this); }];
@@ -384,7 +384,7 @@ def LLVM_InsertValueOp : LLVM_OneResultOp<"insertvalue", [NoSideEffect]>,
extractPosition($position));
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *container, Value *value, "
+ "Builder *b, OperationState &result, ValuePtr container, ValuePtr value, "
"ArrayAttr position",
[{
build(b, result, container->getType(), container, value, position);
@@ -398,7 +398,7 @@ def LLVM_ShuffleVectorOp
LLVM_Builder<
"$res = builder.CreateShuffleVector($v1, $v2, extractPosition($mask));"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *v1, Value *v2, "
+ "Builder *b, OperationState &result, ValuePtr v1, ValuePtr v2, "
"ArrayAttr mask, ArrayRef<NamedAttribute> attrs = {}">];
let verifier = [{
auto wrappedVectorType1 = v1()->getType().cast<LLVM::LLVMType>();
@@ -422,8 +422,8 @@ def LLVM_SelectOp
LLVM_Builder<
"$res = builder.CreateSelect($condition, $trueValue, $falseValue);"> {
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *condition, Value *lhs, "
- "Value *rhs", [{
+ "Builder *b, OperationState &result, ValuePtr condition, ValuePtr lhs, "
+ "ValuePtr rhs", [{
build(b, result, lhs->getType(), condition, lhs, rhs);
}]>];
let parser = [{ return parseSelectOp(parser, result); }];
diff --git a/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h b/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
index 01d3e4b239c..426708b14a8 100644
--- a/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
+++ b/mlir/include/mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h
@@ -37,15 +37,15 @@ class LinalgOp;
class Aliases {
public:
/// Returns true if v1 and v2 alias.
- bool alias(Value *v1, Value *v2) { return find(v1) == find(v2); }
+ bool alias(ValuePtr v1, ValuePtr v2) { return find(v1) == find(v2); }
private:
/// Returns the base buffer or block argument into which the view `v` aliases.
/// This lazily records the new aliases discovered while walking back the
/// use-def chain.
- Value *find(Value *v);
+ ValuePtr find(ValuePtr v);
- DenseMap<Value *, Value *> aliases;
+ DenseMap<ValuePtr, ValuePtr> aliases;
};
/// Data structure for holding a dependence graph that operates on LinalgOp and
@@ -54,7 +54,7 @@ class LinalgDependenceGraph {
public:
struct LinalgOpView {
Operation *op;
- Value *view;
+ ValuePtr view;
};
struct LinalgDependenceGraphElem {
// dependentOpView may be either:
@@ -64,7 +64,7 @@ public:
// View in the op that is used to index in the graph:
// 1. src in the case of dependencesFromDstGraphs.
// 2. dst in the case of dependencesIntoGraphs.
- Value *indexingView;
+ ValuePtr indexingView;
};
using LinalgDependences = SmallVector<LinalgDependenceGraphElem, 8>;
using DependenceGraph = DenseMap<Operation *, LinalgDependences>;
@@ -97,14 +97,14 @@ public:
/// Dependences are restricted to views aliasing `view`.
SmallVector<Operation *, 8> findCoveringReads(LinalgOp srcLinalgOp,
LinalgOp dstLinalgOp,
- Value *view) const;
+ ValuePtr view) const;
/// Returns the operations that are interleaved between `srcLinalgOp` and
/// `dstLinalgOp` and that are involved in a WAR or WAW with `srcLinalgOp`.
/// Dependences are restricted to views aliasing `view`.
SmallVector<Operation *, 8> findCoveringWrites(LinalgOp srcLinalgOp,
LinalgOp dstLinalgOp,
- Value *view) const;
+ ValuePtr view) const;
private:
// Keep dependences in both directions, this is not just a performance gain
@@ -130,7 +130,7 @@ private:
/// Implementation detail for findCoveringxxx.
SmallVector<Operation *, 8>
findOperationsWithCoveringDependences(LinalgOp srcLinalgOp,
- LinalgOp dstLinalgOp, Value *view,
+ LinalgOp dstLinalgOp, ValuePtr view,
ArrayRef<DependenceType> types) const;
Aliases &aliases;
diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
index cf6335278b7..8375e750a5c 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/Builders.h
@@ -55,34 +55,34 @@ inline StringRef toString(IterType t) {
/// makeLinalgGenericOp({A({m, n}), B({k, n})}, {C({m, n})}, ... );
/// ```
struct StructuredIndexed {
- StructuredIndexed(Value *v) : value(v) {}
+ StructuredIndexed(ValuePtr v) : value(v) {}
StructuredIndexed operator()(ArrayRef<AffineExpr> indexings) {
return StructuredIndexed(value, indexings);
}
- operator Value *() const /* implicit */ { return value; }
+ operator ValuePtr() const /* implicit */ { return value; }
ArrayRef<AffineExpr> getExprs() { return exprs; }
private:
- StructuredIndexed(Value *v, ArrayRef<AffineExpr> indexings)
+ StructuredIndexed(ValuePtr v, ArrayRef<AffineExpr> indexings)
: value(v), exprs(indexings.begin(), indexings.end()) {
assert(v->getType().isa<MemRefType>() && "MemRefType expected");
}
StructuredIndexed(ValueHandle v, ArrayRef<AffineExpr> indexings)
: StructuredIndexed(v.getValue(), indexings) {}
- Value *value;
+ ValuePtr value;
SmallVector<AffineExpr, 4> exprs;
};
-inline void defaultRegionBuilder(ArrayRef<BlockArgument *> args) {}
+inline void defaultRegionBuilder(ArrayRef<BlockArgumentPtr> args) {}
Operation *makeLinalgGenericOp(ArrayRef<IterType> iteratorTypes,
ArrayRef<StructuredIndexed> inputs,
ArrayRef<StructuredIndexed> outputs,
- function_ref<void(ArrayRef<BlockArgument *>)>
+ function_ref<void(ArrayRef<BlockArgumentPtr>)>
regionBuilder = defaultRegionBuilder,
- ArrayRef<Value *> otherValues = {},
+ ArrayRef<ValuePtr> otherValues = {},
ArrayRef<Attribute> otherAttributes = {});
namespace ops {
@@ -96,7 +96,7 @@ using edsc::intrinsics::linalg_yield;
/// Build the body of a region to compute a multiply-accumulate, under the
/// current ScopedContext, at the current insert point.
-void macRegionBuilder(ArrayRef<BlockArgument *> args);
+void macRegionBuilder(ArrayRef<BlockArgumentPtr> args);
/// TODO(ntv): In the future we should tie these implementations to something in
/// Tablegen that generates the proper interfaces and the proper sugared named
@@ -120,7 +120,7 @@ void macRegionBuilder(ArrayRef<BlockArgument *> args);
/// with in-place semantics and parallelism.
/// Unary pointwise operation (with broadcast) entry point.
-using UnaryPointwiseOpBuilder = function_ref<Value *(ValueHandle)>;
+using UnaryPointwiseOpBuilder = function_ref<ValuePtr(ValueHandle)>;
Operation *linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
StructuredIndexed I, StructuredIndexed O);
@@ -131,7 +131,7 @@ Operation *linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O);
/// Binary pointwise operation (with broadcast) entry point.
using BinaryPointwiseOpBuilder =
- function_ref<Value *(ValueHandle, ValueHandle)>;
+ function_ref<ValuePtr(ValueHandle, ValueHandle)>;
Operation *linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
StructuredIndexed I1, StructuredIndexed I2,
StructuredIndexed O);
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
index 12318a244df..18ca31cc376 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgLibraryOps.td
@@ -92,22 +92,22 @@ def LinalgLibraryInterface : OpInterface<"LinalgOp"> {
"Query the number of loops within the current operation.",
"unsigned", "getNumLoops">,
InterfaceMethod<"Query the input view at the given index.",
- "Value *", "getInput", (ins "unsigned":$i)
+ "ValuePtr ", "getInput", (ins "unsigned":$i)
>,
InterfaceMethod<"Query the output view at the given index.",
- "Value *", "getOutput", (ins "unsigned":$i)
+ "ValuePtr ", "getOutput", (ins "unsigned":$i)
>,
InterfaceMethod<[{
Query the index of the given input value, or `None` if the value is not
an input.
}],
- "Optional<unsigned>", "getIndexOfInput", (ins "Value *":$view)
+ "Optional<unsigned>", "getIndexOfInput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the index of the given view value, or `None` if the value is not
a view.
}],
- "Optional<unsigned>", "getIndexOfOutput", (ins "Value *":$view)
+ "Optional<unsigned>", "getIndexOfOutput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the type of the input view at the given index.
@@ -228,7 +228,7 @@ def CopyOp : LinalgLibrary_Op<"copy", [NInputs<1>, NOutputs<1>]> {
// TODO(ntv) this should go away once the usage of OptionalAttr triggers
// emission of builders with default arguments left unspecified.
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *input, Value *output", [{
+ "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{
return build(
builder, result, input, output, AffineMapAttr(), AffineMapAttr());
}]>];
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index b806d7548fb..5d402a9ded9 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -56,8 +56,8 @@ def Linalg_RangeOp :
````
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *min, Value *max, "
- "Value *step",
+ "Builder *builder, OperationState &result, ValuePtr min, ValuePtr max, "
+ "ValuePtr step",
[{
auto rangeType = RangeType::get(builder->getContext());
build(builder, result, rangeType, min, max, step);
@@ -112,7 +112,7 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>,
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *base, "
+ "Builder *b, OperationState &result, ValuePtr base, "
"ValueRange indexings">];
let extraClassDeclaration = [{
@@ -124,12 +124,12 @@ def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>,
MemRefType getBaseViewType() { return view()->getType().cast<MemRefType>(); }
// Get the underlying indexing at a given rank.
- Value *indexing(unsigned rank) { return *(indexings().begin() + rank); }
+ ValuePtr indexing(unsigned rank) { return *(indexings().begin() + rank); }
// Get the subset of indexings that are of RangeType.
- SmallVector<Value *, 8> getRanges() {
- SmallVector<Value *, 8> res;
- for (auto *operand : indexings())
+ SmallVector<ValuePtr, 8> getRanges() {
+ SmallVector<ValuePtr, 8> res;
+ for (auto operand : indexings())
if (!operand->getType().isa<IndexType>())
res.push_back(operand);
return res;
@@ -154,7 +154,7 @@ def Linalg_TransposeOp : Linalg_Op<"transpose", [NoSideEffect]>,
}];
let builders = [OpBuilder<
- "Builder *b, OperationState &result, Value *view, "
+ "Builder *b, OperationState &result, ValuePtr view, "
"AffineMapAttr permutation, ArrayRef<NamedAttribute> attrs = {}">];
let verifier = [{
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 75b63c93cd8..774be6616cd 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -92,22 +92,22 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
"Query the number of loops within the current operation.",
"unsigned", "getNumLoops">,
InterfaceMethod<"Query the input view at the given index.",
- "Value *", "getInput", (ins "unsigned":$i)
+ "ValuePtr ", "getInput", (ins "unsigned":$i)
>,
InterfaceMethod<"Query the output view at the given index.",
- "Value *", "getOutput", (ins "unsigned":$i)
+ "ValuePtr ", "getOutput", (ins "unsigned":$i)
>,
InterfaceMethod<[{
Query the index of the given input value, or `None` if the value is not
an input.
}],
- "llvm::Optional<unsigned>", "getIndexOfInput", (ins "Value *":$view)
+ "llvm::Optional<unsigned>", "getIndexOfInput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the index of the given view value, or `None` if the value is not
an view.
}],
- "llvm::Optional<unsigned>", "getIndexOfOutput", (ins "Value *":$view)
+ "llvm::Optional<unsigned>", "getIndexOfOutput", (ins "ValuePtr ":$view)
>,
InterfaceMethod<[{
Query the type of the input view at the given index.
@@ -228,7 +228,7 @@ def CopyOp : LinalgStructured_Op<"copy", [NInputs<1>, NOutputs<1>]> {
// TODO(ntv) this should go away once the usage of OptionalAttr triggers
// emission of builders with default arguments left unspecified.
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *input, Value *output", [{
+ "Builder *builder, OperationState &result, ValuePtr input, ValuePtr output", [{
return build(
builder, result, input, output, AffineMapAttr(), AffineMapAttr());
}]>];
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
index a24c1ca63c4..d196e6ccf94 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgTraits.h
@@ -77,13 +77,13 @@ private:
public:
/// Return the `i`-th input view.
- Value *getInput(unsigned i) {
+ ValuePtr getInput(unsigned i) {
assert(i < nInputs());
return this->getOperation()->getOperand(i);
}
/// Return the index of `view` in the list of input views if found, llvm::None
/// otherwise.
- Optional<unsigned> getIndexOfInput(Value *view) {
+ Optional<unsigned> getIndexOfInput(ValuePtr view) {
auto it = llvm::find(getInputs(), view);
if (it != getInputs().end())
return it - getInputs().begin();
@@ -99,12 +99,12 @@ public:
return {range.begin(), range.begin() + nInputs()};
}
/// Return the `i`-th output view.
- Value *getOutput(unsigned i) {
+ ValuePtr getOutput(unsigned i) {
return this->getOperation()->getOperand(nInputs() + i);
}
/// Return the index of `view` in the list of output views if found,
/// llvm::None otherwise.
- Optional<unsigned> getIndexOfOutput(Value *view) {
+ Optional<unsigned> getIndexOfOutput(ValuePtr view) {
auto it = llvm::find(getOutputs(), view);
if (it != getOutputs().end())
return it - getOutputs().begin();
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
index 415dd918f74..dbc162f4132 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransformPatterns.td
@@ -45,7 +45,7 @@ class AffineMapDomainHasDim<int n> : CPred<[{
class HasOperandsOfType<string type>: CPred<[{
llvm::any_of($0.getOperands(),
- [](Value* v) {
+ [](ValuePtr v) {
return dyn_cast_or_null<}] # type # [{>(v->getDefiningOp());
})
}]>;
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
index dfbac5ac193..a1a7458ae7f 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/LinalgTransforms.h
@@ -38,7 +38,7 @@ struct LinalgTransforms {
namespace detail {
// Implementation detail of isProducedByOpOfType avoids the need for explicit
// template instantiations.
-bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView,
+bool isProducedByOpOfTypeImpl(Operation *consumerOp, ValuePtr consumedView,
function_ref<bool(Operation *)> isaOpType);
} // namespace detail
@@ -46,7 +46,7 @@ bool isProducedByOpOfTypeImpl(Operation *consumerOp, Value *consumedView,
// an op of type `OpTy`. This is used to implement use-def type information on
// buffers.
template <typename OpTy>
-bool isProducedByOpOfType(Operation *consumerOp, Value *consumedView) {
+bool isProducedByOpOfType(Operation *consumerOp, ValuePtr consumedView) {
return detail::isProducedByOpOfTypeImpl(
consumerOp, consumedView, [](Operation *op) { return isa<OpTy>(op); });
}
diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index f8d10ecfa57..50039dd9336 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -34,7 +34,7 @@ namespace edsc {
/// A LoopRangeBuilder is a generic NestedBuilder for loop.for operations.
/// More specifically it is meant to be used as a temporary object for
-/// representing any nested MLIR construct that is "related to" an mlir::Value*
+/// representing any nested MLIR construct that is "related to" an mlir::Value
/// (for now an induction variable).
class LoopRangeBuilder : public NestedBuilder {
public:
@@ -42,7 +42,7 @@ public:
/// variable. A ValueHandle pointer is passed as the first argument and is the
/// *only* way to capture the loop induction variable.
LoopRangeBuilder(ValueHandle *iv, ValueHandle range);
- LoopRangeBuilder(ValueHandle *iv, Value *range);
+ LoopRangeBuilder(ValueHandle *iv, ValuePtr range);
LoopRangeBuilder(ValueHandle *iv, SubViewOp::Range range);
LoopRangeBuilder(const LoopRangeBuilder &) = delete;
@@ -65,7 +65,7 @@ public:
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
ArrayRef<edsc::ValueHandle> ranges);
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
- ArrayRef<Value *> ranges);
+ ArrayRef<ValuePtr> ranges);
LoopNestRangeBuilder(ArrayRef<edsc::ValueHandle *> ivs,
ArrayRef<SubViewOp::Range> ranges);
edsc::ValueHandle operator()(std::function<void(void)> fun = nullptr);
@@ -88,14 +88,14 @@ struct FusionInfo {
/// whole `consumedView`. This checks structural dominance, that the dependence
/// is a RAW without any interleaved write to any piece of `consumedView`.
bool isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
- LinalgOp consumer, Value *consumedView,
+ LinalgOp consumer, ValuePtr consumedView,
LinalgOp producer);
/// Checks whether fusing the specific `producer` of the `consumedView` is
/// feasible. This checks `producer` is the last write of `consumedView` and
/// that no interleaved dependence would be violated (RAW, WAR or WAW).
bool isFusableInto(const LinalgDependenceGraph &graph, LinalgOp consumer,
- Value *consumedView, LinalgOp producer);
+ ValuePtr consumedView, LinalgOp producer);
/// Fuses producer into consumer if the producer is structurally feasible and
/// the fusion would not violate dependencies.
@@ -111,8 +111,8 @@ Optional<FusionInfo> fuseProducerOf(OpBuilder &b, LinalgOp consumer,
/// the inverse, concatenated loopToOperandRangeMaps to this list allows the
/// derivation of loop ranges for any linalgOp.
template <typename ConcreteOp>
-SmallVector<Value *, 8> getViewSizes(ConcreteOp linalgOp) {
- SmallVector<Value *, 8> res;
+SmallVector<ValuePtr, 8> getViewSizes(ConcreteOp linalgOp) {
+ SmallVector<ValuePtr, 8> res;
for (auto v : linalgOp.getInputsAndOutputs()) {
MemRefType t = v->getType().template cast<MemRefType>();
for (unsigned i = 0; i < t.getRank(); ++i)
@@ -125,10 +125,10 @@ SmallVector<Value *, 8> getViewSizes(ConcreteOp linalgOp) {
/// When non-null, the optional pointer `folder` is used to call into the
/// `createAndFold` builder method. If `folder` is null, the regular `create`
/// method is called.
-SmallVector<Value *, 4> applyMapToValues(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> values,
- OperationFolder *folder = nullptr);
+SmallVector<ValuePtr, 4> applyMapToValues(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> values,
+ OperationFolder *folder = nullptr);
struct TiledLinalgOp {
LinalgOp op;
@@ -151,7 +151,7 @@ struct TiledLinalgOp {
/// `createAndFold` builder method. If `folder` is null, the regular `create`
/// method is called.
Optional<TiledLinalgOp> tileLinalgOp(OpBuilder &b, LinalgOp op,
- ArrayRef<Value *> tileSizes,
+ ArrayRef<ValuePtr> tileSizes,
ArrayRef<unsigned> permutation = {},
OperationFolder *folder = nullptr);
@@ -182,9 +182,9 @@ Optional<TiledLinalgOp> tileLinalgOperation(OpBuilder &b, Operation *op,
}
struct PromotionInfo {
- Value *buffer;
- Value *fullLocalView;
- Value *partialLocalView;
+ ValuePtr buffer;
+ ValuePtr fullLocalView;
+ ValuePtr partialLocalView;
};
/// Promotes the `subViews` into a new buffer allocated at the insertion point
@@ -199,13 +199,13 @@ struct PromotionInfo {
/// Returns a list of PromotionInfo which hold the promoted buffer and the
/// full and partial views indexing into the buffer.
SmallVector<PromotionInfo, 8>
-promoteSubViews(OpBuilder &b, Location loc, ArrayRef<Value *> subViews,
+promoteSubViews(OpBuilder &b, Location loc, ArrayRef<ValuePtr> subViews,
bool dynamicBuffers = false, OperationFolder *folder = nullptr);
/// Returns all the operands of `linalgOp` that are not views.
/// Asserts that these operands are value types to allow transformations like
/// tiling to just use the values when cloning `linalgOp`.
-SmallVector<Value *, 4> getAssumedNonViewOperands(LinalgOp linalgOp);
+SmallVector<ValuePtr, 4> getAssumedNonViewOperands(LinalgOp linalgOp);
/// Apply the permutation defined by `permutation` to `inVec`.
/// Element `i` in `inVec` is mapped to location `j = permutation[i]`.
@@ -226,7 +226,7 @@ void applyPermutationToVector(SmallVector<T, N> &inVec,
/// It is the entry point for declarative transformation
/// Returns the cloned `LinalgOp` with the new operands
LinalgOp promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- llvm::SetVector<Value *> subViews,
+ llvm::SetVector<ValuePtr> subViews,
bool dynamicBuffers = false,
OperationFolder *folder = nullptr);
diff --git a/mlir/include/mlir/Dialect/LoopOps/LoopOps.h b/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
index fdadf4a40dd..e7ff6f84977 100644
--- a/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
+++ b/mlir/include/mlir/Dialect/LoopOps/LoopOps.h
@@ -50,7 +50,7 @@ void ensureLoopTerminator(Region &region, Builder &builder, Location loc);
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-ForOp getForInductionVarOwner(Value *val);
+ForOp getForInductionVarOwner(ValuePtr val);
} // end namespace loop
} // end namespace mlir
diff --git a/mlir/include/mlir/Dialect/LoopOps/LoopOps.td b/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
index 5e0b8098411..e0f5b896309 100644
--- a/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
+++ b/mlir/include/mlir/Dialect/LoopOps/LoopOps.td
@@ -74,18 +74,18 @@ def ForOp : Loop_Op<"for",
let skipDefaultBuilders = 1;
let builders = [
OpBuilder<"Builder *builder, OperationState &result, "
- "Value *lowerBound, Value *upperBound, Value *step">
+ "ValuePtr lowerBound, ValuePtr upperBound, ValuePtr step">
];
let extraClassDeclaration = [{
Block *getBody() { return &region().front(); }
- Value *getInductionVar() { return getBody()->getArgument(0); }
+ ValuePtr getInductionVar() { return getBody()->getArgument(0); }
OpBuilder getBodyBuilder() {
return OpBuilder(getBody(), std::prev(getBody()->end()));
}
- void setLowerBound(Value *bound) { getOperation()->setOperand(0, bound); }
- void setUpperBound(Value *bound) { getOperation()->setOperand(1, bound); }
- void setStep(Value *step) { getOperation()->setOperand(2, step); }
+ void setLowerBound(ValuePtr bound) { getOperation()->setOperand(0, bound); }
+ void setUpperBound(ValuePtr bound) { getOperation()->setOperand(1, bound); }
+ void setStep(ValuePtr step) { getOperation()->setOperand(2, step); }
}];
}
@@ -116,7 +116,7 @@ def IfOp : Loop_Op<"if",
let skipDefaultBuilders = 1;
let builders = [
OpBuilder<"Builder *builder, OperationState &result, "
- "Value *cond, bool withElseRegion">
+ "ValuePtr cond, bool withElseRegion">
];
let extraClassDeclaration = [{
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
index d6e2e1c6fda..d19fd974684 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVCompositeOps.td
@@ -120,7 +120,7 @@ def SPV_CompositeExtractOp : SPV_Op<"CompositeExtract", [NoSideEffect]> {
let builders = [
OpBuilder<[{Builder *builder, OperationState &state,
- Value *composite, ArrayRef<int32_t> indices}]>
+ ValuePtr composite, ArrayRef<int32_t> indices}]>
];
let hasFolder = 1;
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
index 464b670dae9..32a78024560 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVControlFlowOps.td
@@ -132,7 +132,7 @@ def SPV_BranchConditionalOp : SPV_Op<"BranchConditional",
let builders = [
OpBuilder<
- "Builder *builder, OperationState &state, Value *condition, "
+ "Builder *builder, OperationState &state, ValuePtr condition, "
"Block *trueBlock, ValueRange trueArguments, "
"Block *falseBlock, ValueRange falseArguments, "
"Optional<std::pair<uint32_t, uint32_t>> weights = {}",
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
index 0c4b2902a12..e1e94bcd861 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVLogicalOps.td
@@ -858,8 +858,8 @@ def SPV_SelectOp : SPV_Op<"Select", [NoSideEffect]> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *cond, Value *trueValue,
- Value *falseValue}]>];
+ ValuePtr cond, ValuePtr trueValue,
+ ValuePtr falseValue}]>];
}
// -----
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h b/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
index f48a1d0b129..37b4ee24237 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVLowering.h
@@ -64,8 +64,8 @@ protected:
namespace spirv {
/// Returns a value that represents a builtin variable value within the SPIR-V
/// module.
-Value *getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin,
- OpBuilder &builder);
+ValuePtr getBuiltinVariableValue(Operation *op, spirv::BuiltIn builtin,
+ OpBuilder &builder);
/// Attribute name for specifying argument ABI information.
StringRef getInterfaceVarABIAttrName();
diff --git a/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td b/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
index 91ea8d7d676..777e5750486 100644
--- a/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/SPIRVOps.td
@@ -102,7 +102,7 @@ def SPV_AccessChainOp : SPV_Op<"AccessChain", [NoSideEffect]> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *basePtr, ValueRange indices}]>];
+ ValuePtr basePtr, ValueRange indices}]>];
let hasCanonicalizer = 1;
}
@@ -272,7 +272,7 @@ def SPV_LoadOp : SPV_Op<"Load", []> {
);
let builders = [OpBuilder<[{Builder *builder, OperationState &state,
- Value *basePtr, /*optional*/IntegerAttr memory_access,
+ ValuePtr basePtr, /*optional*/IntegerAttr memory_access,
/*optional*/IntegerAttr alignment}]>];
}
@@ -367,7 +367,7 @@ def SPV_StoreOp : SPV_Op<"Store", []> {
let builders = [
OpBuilder<"Builder *builder, OperationState &state, "
- "Value *ptr, Value *value, ArrayRef<NamedAttribute> namedAttrs", [{
+ "ValuePtr ptr, ValuePtr value, ArrayRef<NamedAttribute> namedAttrs", [{
state.addOperands(ptr);
state.addOperands(value);
state.addAttributes(namedAttrs);
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.h b/mlir/include/mlir/Dialect/StandardOps/Ops.h
index 1b1cf02d204..563116823d9 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.h
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.h
@@ -182,15 +182,15 @@ class DmaStartOp
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *srcMemRef,
- ValueRange srcIndices, Value *destMemRef,
- ValueRange destIndices, Value *numElements,
- Value *tagMemRef, ValueRange tagIndices,
- Value *stride = nullptr,
- Value *elementsPerStride = nullptr);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr srcMemRef, ValueRange srcIndices,
+ ValuePtr destMemRef, ValueRange destIndices,
+ ValuePtr numElements, ValuePtr tagMemRef,
+ ValueRange tagIndices, ValuePtr stride = nullptr,
+ ValuePtr elementsPerStride = nullptr);
// Returns the source MemRefType for this DMA operation.
- Value *getSrcMemRef() { return getOperand(0); }
+ ValuePtr getSrcMemRef() { return getOperand(0); }
// Returns the rank (number of indices) of the source MemRefType.
unsigned getSrcMemRefRank() {
return getSrcMemRef()->getType().cast<MemRefType>().getRank();
@@ -202,7 +202,7 @@ public:
}
// Returns the destination MemRefType for this DMA operations.
- Value *getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
+ ValuePtr getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
// Returns the rank (number of indices) of the destination MemRefType.
unsigned getDstMemRefRank() {
return getDstMemRef()->getType().cast<MemRefType>().getRank();
@@ -222,12 +222,12 @@ public:
}
// Returns the number of elements being transferred by this DMA operation.
- Value *getNumElements() {
+ ValuePtr getNumElements() {
return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank());
}
// Returns the Tag MemRef for this DMA operation.
- Value *getTagMemRef() {
+ ValuePtr getTagMemRef() {
return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1);
}
// Returns the rank (number of indices) of the tag MemRefType.
@@ -276,13 +276,13 @@ public:
1 + 1 + getTagMemRefRank();
}
- Value *getStride() {
+ ValuePtr getStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1 - 1);
}
- Value *getNumElementsPerStride() {
+ ValuePtr getNumElementsPerStride() {
if (!isStrided())
return nullptr;
return getOperand(getNumOperands() - 1);
@@ -307,13 +307,14 @@ class DmaWaitOp
public:
using Op::Op;
- static void build(Builder *builder, OperationState &result, Value *tagMemRef,
- ValueRange tagIndices, Value *numElements);
+ static void build(Builder *builder, OperationState &result,
+ ValuePtr tagMemRef, ValueRange tagIndices,
+ ValuePtr numElements);
static StringRef getOperationName() { return "std.dma_wait"; }
// Returns the Tag MemRef associated with the DMA operation being waited on.
- Value *getTagMemRef() { return getOperand(0); }
+ ValuePtr getTagMemRef() { return getOperand(0); }
// Returns the tag memref index for this DMA operation.
operand_range getTagIndices() {
@@ -327,7 +328,7 @@ public:
}
// Returns the number of elements transferred in the associated DMA operation.
- Value *getNumElements() { return getOperand(1 + getTagMemRefRank()); }
+ ValuePtr getNumElements() { return getOperand(1 + getTagMemRefRank()); }
static ParseResult parse(OpAsmParser &parser, OperationState &result);
void print(OpAsmPrinter &p);
@@ -342,7 +343,7 @@ void printDimAndSymbolList(Operation::operand_iterator begin,
/// Parses dimension and symbol list and returns true if parsing failed.
ParseResult parseDimAndSymbolList(OpAsmParser &parser,
- SmallVectorImpl<Value *> &operands,
+ SmallVectorImpl<ValuePtr> &operands,
unsigned &numDims);
raw_ostream &operator<<(raw_ostream &os, SubViewOp::Range &range);
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index c26baf6a76e..e00674708f6 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -52,7 +52,7 @@ class CastOp<string mnemonic, list<OpTrait> traits = []> :
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr source, Type destType", [{
impl::buildCastOp(builder, result, source, destType);
}]>];
@@ -191,7 +191,7 @@ def AllocOp : Std_Op<"alloc"> {
}]>,
OpBuilder<
"Builder *builder, OperationState &result, MemRefType memrefType, " #
- "ArrayRef<Value*> operands, IntegerAttr alignment = IntegerAttr()", [{
+ "ArrayRef<ValuePtr> operands, IntegerAttr alignment = IntegerAttr()", [{
result.addOperands(operands);
result.types.push_back(memrefType);
if (alignment)
@@ -330,7 +330,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
let results = (outs Variadic<AnyType>);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *callee,"
+ "Builder *, OperationState &result, ValuePtr callee,"
"ValueRange operands = {}", [{
result.operands.push_back(callee);
result.addOperands(operands);
@@ -338,7 +338,7 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
}]>];
let extraClassDeclaration = [{
- Value *getCallee() { return getOperand(0); }
+ ValuePtr getCallee() { return getOperand(0); }
/// Get the argument operands to the called function.
operand_range getArgOperands() {
@@ -395,7 +395,7 @@ def CmpFOp : Std_Op<"cmpf",
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpFPredicate predicate,"
- "Value *lhs, Value *rhs", [{
+ "ValuePtr lhs, ValuePtr rhs", [{
::buildCmpFOp(builder, result, predicate, lhs, rhs);
}]>];
@@ -463,7 +463,7 @@ def CmpIOp : Std_Op<"cmpi",
let builders = [OpBuilder<
"Builder *builder, OperationState &result, CmpIPredicate predicate,"
- "Value *lhs, Value *rhs", [{
+ "ValuePtr lhs, ValuePtr rhs", [{
::buildCmpIOp(builder, result, predicate, lhs, rhs);
}]>];
@@ -502,7 +502,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
let arguments = (ins I1:$condition, Variadic<AnyType>:$branchOperands);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *condition,"
+ "Builder *, OperationState &result, ValuePtr condition,"
"Block *trueDest, ValueRange trueOperands,"
"Block *falseDest, ValueRange falseOperands", [{
result.addOperands(condition);
@@ -518,7 +518,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
enum { trueIndex = 0, falseIndex = 1 };
// The condition operand is the first operand in the list.
- Value *getCondition() { return getOperand(0); }
+ ValuePtr getCondition() { return getOperand(0); }
/// Return the destination if the condition is true.
Block *getTrueDest() {
@@ -531,12 +531,12 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
// Accessors for operands to the 'true' destination.
- Value *getTrueOperand(unsigned idx) {
+ ValuePtr getTrueOperand(unsigned idx) {
assert(idx < getNumTrueOperands());
return getOperand(getTrueDestOperandIndex() + idx);
}
- void setTrueOperand(unsigned idx, Value *value) {
+ void setTrueOperand(unsigned idx, ValuePtr value) {
assert(idx < getNumTrueOperands());
setOperand(getTrueDestOperandIndex() + idx, value);
}
@@ -561,11 +561,11 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
// Accessors for operands to the 'false' destination.
- Value *getFalseOperand(unsigned idx) {
+ ValuePtr getFalseOperand(unsigned idx) {
assert(idx < getNumFalseOperands());
return getOperand(getFalseDestOperandIndex() + idx);
}
- void setFalseOperand(unsigned idx, Value *value) {
+ void setFalseOperand(unsigned idx, ValuePtr value) {
assert(idx < getNumFalseOperands());
setOperand(getFalseDestOperandIndex() + idx, value);
}
@@ -678,7 +678,7 @@ def DimOp : Std_Op<"dim", [NoSideEffect]> {
let results = (outs Index);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memrefOrTensor,"
+ "Builder *builder, OperationState &result, ValuePtr memrefOrTensor,"
"unsigned index", [{
auto indexType = builder->getIndexType();
auto indexAttr = builder->getIntegerAttr(indexType, index);
@@ -730,7 +730,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *aggregate,"
+ "Builder *builder, OperationState &result, ValuePtr aggregate,"
"ValueRange indices = {}", [{
auto resType = aggregate->getType().cast<ShapedType>()
.getElementType();
@@ -738,7 +738,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> {
}]>];
let extraClassDeclaration = [{
- Value *getAggregate() { return getOperand(0); }
+ ValuePtr getAggregate() { return getOperand(0); }
operand_range getIndices() {
return {operand_begin() + 1, operand_end()};
@@ -816,7 +816,7 @@ def LoadOp : Std_Op<"load"> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *memref,"
+ "Builder *, OperationState &result, ValuePtr memref,"
"ValueRange indices = {}", [{
auto memrefType = memref->getType().cast<MemRefType>();
result.addOperands(memref);
@@ -825,8 +825,8 @@ def LoadOp : Std_Op<"load"> {
}]>];
let extraClassDeclaration = [{
- Value *getMemRef() { return getOperand(0); }
- void setMemRef(Value *value) { setOperand(0, value); }
+ ValuePtr getMemRef() { return getOperand(0); }
+ void setMemRef(ValuePtr value) { setOperand(0, value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -952,8 +952,8 @@ def PrefetchOp : Std_Op<"prefetch"> {
BoolAttr:$isDataCache);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref,"
- "ArrayRef<Value *> indices, bool isWrite, unsigned hint, bool isData",
+ "Builder *builder, OperationState &result, ValuePtr memref,"
+ "ArrayRef<ValuePtr> indices, bool isWrite, unsigned hint, bool isData",
[{
auto hintAttr = builder->getI32IntegerAttr(hint);
auto isWriteAttr = builder->getBoolAttr(isWrite);
@@ -990,7 +990,7 @@ def RankOp : Std_Op<"rank", [NoSideEffect]> {
let verifier = ?;
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *tensor", [{
+ "Builder *builder, OperationState &result, ValuePtr tensor", [{
auto indexType = builder->getIndexType();
build(builder, result, indexType, tensor);
}]>];
@@ -1052,16 +1052,16 @@ def SelectOp : Std_Op<"select", [NoSideEffect, SameOperandsAndResultShape]> {
let results = (outs AnyType);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *condition,"
- "Value *trueValue, Value *falseValue", [{
+ "Builder *builder, OperationState &result, ValuePtr condition,"
+ "ValuePtr trueValue, ValuePtr falseValue", [{
result.addOperands({condition, trueValue, falseValue});
result.addTypes(trueValue->getType());
}]>];
let extraClassDeclaration = [{
- Value *getCondition() { return condition(); }
- Value *getTrueValue() { return true_value(); }
- Value *getFalseValue() { return false_value(); }
+ ValuePtr getCondition() { return condition(); }
+ ValuePtr getTrueValue() { return true_value(); }
+ ValuePtr getFalseValue() { return false_value(); }
}];
let hasFolder = 1;
@@ -1089,7 +1089,7 @@ def SignExtendIOp : Std_Op<"sexti",
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
@@ -1189,7 +1189,7 @@ def SplatOp : Std_Op<"splat", [NoSideEffect]> {
let results = (outs AnyTypeOf<[AnyVector, AnyStaticShapeTensor]>:$aggregate);
let builders =
- [OpBuilder<"Builder *builder, OperationState &result, Value *element, "
+ [OpBuilder<"Builder *builder, OperationState &result, ValuePtr element, "
"Type aggregateType",
[{ build(builder, result, aggregateType, element); }]>];
@@ -1213,16 +1213,16 @@ def StoreOp : Std_Op<"store"> {
Variadic<Index>:$indices);
let builders = [OpBuilder<
- "Builder *, OperationState &result, Value *valueToStore, Value *memref", [{
+ "Builder *, OperationState &result, ValuePtr valueToStore, ValuePtr memref", [{
result.addOperands(valueToStore);
result.addOperands(memref);
}]>];
let extraClassDeclaration = [{
- Value *getValueToStore() { return getOperand(0); }
+ ValuePtr getValueToStore() { return getOperand(0); }
- Value *getMemRef() { return getOperand(1); }
- void setMemRef(Value *value) { setOperand(1, value); }
+ ValuePtr getMemRef() { return getOperand(1); }
+ void setMemRef(ValuePtr value) { setOperand(1, value); }
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
@@ -1364,13 +1364,13 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
let builders = [
OpBuilder<
- "Builder *b, OperationState &result, Value *source, "
+ "Builder *b, OperationState &result, ValuePtr source, "
"ValueRange offsets, ValueRange sizes, "
"ValueRange strides, Type resultType = Type(), "
"ArrayRef<NamedAttribute> attrs = {}">,
OpBuilder<
"Builder *builder, OperationState &result, "
- "Type resultType, Value *source">
+ "Type resultType, ValuePtr source">
];
let extraClassDeclaration = [{
@@ -1403,7 +1403,7 @@ def SubViewOp : Std_Op<"subview", [AttrSizedOperandSegments, NoSideEffect]> {
// offset, size and stride operands of the SubViewOp into a list of triples.
// Such a list of triple is sometimes more convenient to manipulate.
struct Range {
- Value *offset, *size, *stride;
+ ValuePtr offset, size, stride;
};
SmallVector<Range, 8> getRanges();
}];
@@ -1465,7 +1465,7 @@ def TensorLoadOp : Std_Op<"tensor_load",
let verifier = ?;
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *memref", [{
+ "Builder *builder, OperationState &result, ValuePtr memref", [{
auto memrefType = memref->getType().cast<MemRefType>();
auto resultType = RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType());
@@ -1519,7 +1519,7 @@ def TruncateIOp : Std_Op<"trunci", [NoSideEffect, SameOperandsAndResultShape]> {
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
@@ -1578,7 +1578,7 @@ def ViewOp : Std_Op<"view", [NoSideEffect]> {
/// Returns the dynamic offset for this view operation if specified.
/// Returns nullptr if no dynamic offset was specified.
- Value *getDynamicOffset();
+ ValuePtr getDynamicOffset();
/// Returns the starting operand list position of the dynamic size operands.
unsigned getDynamicSizesOperandStart() {
@@ -1619,7 +1619,7 @@ def ZeroExtendIOp : Std_Op<"zexti", [NoSideEffect, SameOperandsAndResultShape]>
let results = (outs IntegerLike);
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *value, Type destType", [{
+ "Builder *builder, OperationState &result, ValuePtr value, Type destType", [{
result.addOperands(value);
result.addTypes(destType);
}]>];
diff --git a/mlir/include/mlir/Dialect/VectorOps/Utils.h b/mlir/include/mlir/Dialect/VectorOps/Utils.h
index f61a813855d..68c62cc7ec7 100644
--- a/mlir/include/mlir/Dialect/VectorOps/Utils.h
+++ b/mlir/include/mlir/Dialect/VectorOps/Utils.h
@@ -34,6 +34,9 @@ class Operation;
class Value;
class VectorType;
+// TODO(riverriddle) Remove this after Value is value-typed.
+using ValuePtr = Value *;
+
/// Computes and returns the multi-dimensional ratio of `superShape` to
/// `subShape`. This is calculated by performing a traversal from minor to major
/// dimensions (i.e. in reverse shape order). If integral division is not
@@ -122,7 +125,7 @@ Optional<SmallVector<int64_t, 4>> shapeRatio(VectorType superVectorType,
/// `%arg0[%c0, %c0]` into vector<128xf32> which needs a 1-D vector broadcast.
///
AffineMap
-makePermutationMap(Operation *op, ArrayRef<Value *> indices,
+makePermutationMap(Operation *op, ArrayRef<ValuePtr> indices,
const DenseMap<Operation *, unsigned> &loopToVectorDim);
namespace matcher {
diff --git a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
index 5fd19498350..94262e6f1ff 100644
--- a/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
+++ b/mlir/include/mlir/Dialect/VectorOps/VectorOps.td
@@ -128,8 +128,8 @@ def Vector_ContractionOp :
: vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x15x8x5xf32>
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *lhs, Value *rhs, "
- "Value *acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">];
+ "Builder *builder, OperationState &result, ValuePtr lhs, ValuePtr rhs, "
+ "ValuePtr acc, ArrayAttr indexingMaps, ArrayAttr iteratorTypes">];
let extraClassDeclaration = [{
VectorType getLhsType() {
return lhs()->getType().cast<VectorType>();
@@ -252,7 +252,8 @@ def Vector_ShuffleOp :
```
}];
- let builders = [OpBuilder<"Builder *builder, OperationState &result, Value *v1, Value *v2, ArrayRef<int64_t>">];
+ let builders = [OpBuilder<"Builder *builder, OperationState &result,"
+ "ValuePtr v1, ValuePtr v2, ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getMaskAttrName() { return "mask"; }
VectorType getV1VectorType() {
@@ -312,7 +313,8 @@ def Vector_ExtractOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, ArrayRef<int64_t>">];
+ "Builder *builder, OperationState &result, ValuePtr source,"
+ "ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
VectorType getVectorType() {
@@ -357,7 +359,7 @@ def Vector_ExtractSlicesOp :
}];
let builders = [OpBuilder<
"Builder *builder, OperationState &result, TupleType tupleType, " #
- "Value *vector, ArrayRef<int64_t> sizes, " #
+ "ValuePtr vector, ArrayRef<int64_t> sizes, " #
"ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
VectorType getSourceVectorType() {
@@ -428,8 +430,8 @@ def Vector_InsertOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, " #
- "Value *dest, ArrayRef<int64_t>">];
+ "Builder *builder, OperationState &result, ValuePtr source, " #
+ "ValuePtr dest, ArrayRef<int64_t>">];
let extraClassDeclaration = [{
static StringRef getPositionAttrName() { return "position"; }
Type getSourceType() { return source()->getType(); }
@@ -521,7 +523,7 @@ def Vector_InsertStridedSliceOp :
```
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, Value *dest, " #
+ "Builder *builder, OperationState &result, ValuePtr source, ValuePtr dest, " #
"ArrayRef<int64_t> offsets, ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
static StringRef getOffsetsAttrName() { return "offsets"; }
@@ -723,7 +725,7 @@ def Vector_StridedSliceOp :
vector<4x8x16xf32> to vector<2x4x16xf32>
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source, " #
+ "Builder *builder, OperationState &result, ValuePtr source, " #
"ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes, " #
"ArrayRef<int64_t> strides">];
let extraClassDeclaration = [{
@@ -975,7 +977,7 @@ def Vector_TypeCastOp :
}];
let builders = [OpBuilder<
- "Builder *builder, OperationState &result, Value *source">];
+ "Builder *builder, OperationState &result, ValuePtr source">];
let parser = [{
return impl::parseCastOp(parser, result);
diff --git a/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h b/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
index 2c2e4e7c4fa..b48cb51533f 100644
--- a/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
+++ b/mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h
@@ -73,8 +73,9 @@ namespace vector {
//
// This will be extended in the future to support more advanced use cases than
// simple pointwise ops.
-Value *unrollSingleResultOpMatchingType(PatternRewriter &builder, Operation *op,
- ArrayRef<int64_t> targetShape);
+ValuePtr unrollSingleResultOpMatchingType(PatternRewriter &builder,
+ Operation *op,
+ ArrayRef<int64_t> targetShape);
} // namespace vector
} // namespace mlir
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index 69c72a50870..11ee0bff342 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -152,7 +152,7 @@ private:
/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR operations.
/// More specifically it is meant to be used as a temporary object for
-/// representing any nested MLIR construct that is "related to" an mlir::Value*
+/// representing any nested MLIR construct that is "related to" an mlir::Value
/// (for now an induction variable).
/// This is extensible and will evolve in the future as MLIR evolves, hence
/// the name LoopBuilder (as opposed to say ForBuilder or AffineForBuilder).
@@ -242,7 +242,7 @@ class Append {};
/// A BlockBuilder is a NestedBuilder for mlir::Block*.
/// This exists by opposition to LoopBuilder which is not related to an
-/// mlir::Block* but to a mlir::Value*.
+/// mlir::Block* but to a mlir::Value.
/// It is meant to be used as a temporary object for representing any nested
/// MLIR construct that is "related to" an mlir::Block*.
class BlockBuilder : public NestedBuilder {
@@ -257,7 +257,7 @@ public:
///
/// Prerequisites:
/// The ValueHandle `args` are typed delayed ValueHandles; i.e. they are
- /// not yet bound to mlir::Value*.
+ /// not yet bound to mlir::Value.
BlockBuilder(BlockHandle *bh, ArrayRef<ValueHandle *> args);
/// The only purpose of this operator is to serve as a sequence point so that
@@ -291,10 +291,10 @@ protected:
/// typed "delayed" value that can be hold a Value in the future;
/// 3. constructed state,in which case it holds a Value.
///
-/// A ValueHandle is meant to capture a single Value* and should be used for
+/// A ValueHandle is meant to capture a single Value and should be used for
/// operations that have a single result. For convenience of use, we also
/// include AffineForOp in this category although it does not return a value.
-/// In the case of AffineForOp, the captured Value* is the loop induction
+/// In the case of AffineForOp, the captured Value is the loop induction
/// variable.
class ValueHandle : public CapturableHandle {
public:
@@ -304,15 +304,15 @@ public:
/// A ValueHandle that is constructed from a Type represents a typed "delayed"
/// Value. A delayed Value can only capture Values of the specified type.
/// Such a delayed value represents the declaration (in the PL sense) of a
- /// placeholder for an mlir::Value* that will be constructed and captured at
+ /// placeholder for an mlir::Value that will be constructed and captured at
/// some later point in the program.
explicit ValueHandle(Type t) : t(t), v(nullptr) {}
- /// A ValueHandle that is constructed from an mlir::Value* is an "eager"
+ /// A ValueHandle that is constructed from an mlir::Value is an "eager"
/// Value. An eager Value represents both the declaration and the definition
- /// (in the PL sense) of a placeholder for an mlir::Value* that has already
+ /// (in the PL sense) of a placeholder for an mlir::Value that has already
/// been constructed in the past and that is captured "now" in the program.
- explicit ValueHandle(Value *v) : t(v->getType()), v(v) {}
+ explicit ValueHandle(ValuePtr v) : t(v->getType()), v(v) {}
/// Builds a ConstantIndexOp of value `cst`. The constant is created at the
/// current insertion point.
@@ -336,8 +336,8 @@ public:
std::swap(v, other.v);
}
- /// Implicit conversion useful for automatic conversion to Container<Value*>.
- operator Value *() const { return getValue(); }
+ /// Implicit conversion useful for automatic conversion to Container<Value>.
+ operator ValuePtr() const { return getValue(); }
/// Generic mlir::Op create. This is the key to being extensible to the whole
/// of MLIR without duplicating the type system or the op definitions.
@@ -355,7 +355,7 @@ public:
/// Special case to build composed AffineApply operations.
// TODO: createOrFold when available and move inside of the `create` method.
static ValueHandle createComposedAffineApply(AffineMap map,
- ArrayRef<Value *> operands);
+ ArrayRef<ValuePtr> operands);
/// Generic create for a named operation producing a single value.
static ValueHandle create(StringRef name, ArrayRef<ValueHandle> operands,
@@ -363,7 +363,7 @@ public:
ArrayRef<NamedAttribute> attributes = {});
bool hasValue() const { return v != nullptr; }
- Value *getValue() const {
+ ValuePtr getValue() const {
assert(hasValue() && "Unexpected null value;");
return v;
}
@@ -380,12 +380,12 @@ protected:
ValueHandle() : t(), v(nullptr) {}
Type t;
- Value *v;
+ ValuePtr v;
};
/// An OperationHandle can be used in lieu of ValueHandle to capture the
/// operation in cases when one does not care about, or cannot extract, a
-/// unique Value* from the operation.
+/// unique Value from the operation.
/// This can be used for capturing zero result operations as well as
/// multi-result operations that are not supported by ValueHandle.
/// We do not distinguish further between zero and multi-result operations at
@@ -529,7 +529,7 @@ ValueHandle operator>=(ValueHandle lhs, ValueHandle rhs);
} // namespace op
-/// Entry point to build multiple ValueHandle from a `Container` of Value* or
+/// Entry point to build multiple ValueHandle from a `Container` of Value or
/// Type.
template <typename Container>
inline SmallVector<ValueHandle, 8> makeValueHandles(Container values) {
diff --git a/mlir/include/mlir/EDSC/Helpers.h b/mlir/include/mlir/EDSC/Helpers.h
index 423c92b2d06..c18307e7121 100644
--- a/mlir/include/mlir/EDSC/Helpers.h
+++ b/mlir/include/mlir/EDSC/Helpers.h
@@ -75,7 +75,7 @@ protected:
// TODO(ntv): Support MemRefs with layoutMaps.
class MemRefView : public View {
public:
- explicit MemRefView(Value *v);
+ explicit MemRefView(ValuePtr v);
MemRefView(const MemRefView &) = default;
MemRefView &operator=(const MemRefView &) = default;
@@ -91,7 +91,7 @@ private:
/// a MemRefView but for vectors. This exists purely for boilerplate avoidance.
class VectorView : public View {
public:
- explicit VectorView(Value *v);
+ explicit VectorView(ValuePtr v);
VectorView(const VectorView &) = default;
VectorView &operator=(const VectorView &) = default;
@@ -120,7 +120,7 @@ private:
template <typename Load, typename Store> class TemplatedIndexedValue {
public:
explicit TemplatedIndexedValue(Type t) : base(t) {}
- explicit TemplatedIndexedValue(Value *v)
+ explicit TemplatedIndexedValue(ValuePtr v)
: TemplatedIndexedValue(ValueHandle(v)) {}
explicit TemplatedIndexedValue(ValueHandle v) : base(v) {}
@@ -161,8 +161,8 @@ public:
return Load(getBase(), {indices.begin(), indices.end()});
}
- /// Emits a `load` when converting to a Value*.
- Value *operator*(void)const {
+ /// Emits a `load` when converting to a Value.
+ ValuePtr operator*(void) const {
return Load(getBase(), {indices.begin(), indices.end()}).getValue();
}
diff --git a/mlir/include/mlir/EDSC/Intrinsics.h b/mlir/include/mlir/EDSC/Intrinsics.h
index 06c75505cb7..dc0c1186c7a 100644
--- a/mlir/include/mlir/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/EDSC/Intrinsics.h
@@ -44,7 +44,7 @@ struct IndexHandle : public ValueHandle {
explicit IndexHandle()
: ValueHandle(ScopedContext::getBuilder().getIndexType()) {}
explicit IndexHandle(index_t v) : ValueHandle(v) {}
- explicit IndexHandle(Value *v) : ValueHandle(v) {
+ explicit IndexHandle(ValuePtr v) : ValueHandle(v) {
assert(v->getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
}
@@ -79,9 +79,9 @@ makeHandlePointers(MutableArrayRef<T> ivs) {
return pivs;
}
-/// Returns a vector of the underlying Value* from `ivs`.
-inline SmallVector<Value *, 8> extractValues(ArrayRef<IndexHandle> ivs) {
- SmallVector<Value *, 8> vals;
+/// Returns a vector of the underlying Value from `ivs`.
+inline SmallVector<ValuePtr, 8> extractValues(ArrayRef<IndexHandle> ivs) {
+ SmallVector<ValuePtr, 8> vals;
vals.reserve(ivs.size());
for (auto &iv : ivs) {
vals.push_back(iv.getValue());
@@ -96,7 +96,7 @@ namespace intrinsics {
namespace detail {
/// Helper structure to be used with ValueBuilder / OperationBuilder.
/// It serves the purpose of removing boilerplate specialization for the sole
-/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value*>.
+/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value>.
class ValueHandleArray {
public:
ValueHandleArray(ArrayRef<ValueHandle> vals) {
@@ -109,11 +109,11 @@ public:
SmallVector<IndexHandle, 8> tmp(vals.begin(), vals.end());
values.append(tmp.begin(), tmp.end());
}
- operator ArrayRef<Value *>() { return values; }
+ operator ArrayRef<ValuePtr>() { return values; }
private:
ValueHandleArray() = default;
- SmallVector<Value *, 8> values;
+ SmallVector<ValuePtr, 8> values;
};
template <typename T> inline T unpack(T value) { return value; }
@@ -128,8 +128,8 @@ inline detail::ValueHandleArray unpack(ArrayRef<ValueHandle> values) {
/// boilerplate or Tablegen.
/// Arguably a builder is not a ValueHandle but in practice it is only used as
/// an alias to a notional ValueHandle<Op>.
-/// Implementing it as a subclass allows it to compose all the way to Value*.
-/// Without subclassing, implicit conversion to Value* would fail when composing
+/// Implementing it as a subclass allows it to compose all the way to Value.
+/// Without subclassing, implicit conversion to Value would fail when composing
/// in patterns such as: `select(a, b, select(c, d, e))`.
template <typename Op> struct ValueBuilder : public ValueHandle {
// Builder-based
@@ -238,8 +238,8 @@ OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
///
/// Prerequisites:
/// `b` has not yet captured an mlir::Block*.
-/// No `captures` have captured any mlir::Value*.
-/// All `operands` have already captured an mlir::Value*
+/// No `captures` have captured any mlir::Value.
+/// All `operands` have already captured an mlir::Value
/// captures.size() == operands.size()
/// captures and operands are pairwise of the same type.
OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
@@ -266,8 +266,8 @@ OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
///
/// Prerequisites:
/// `trueBranch`/`falseBranch` has not yet captured an mlir::Block*.
-/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value*.
-/// All `trueOperands`/`trueOperands` have already captured an mlir::Value*
+/// No `trueCaptures`/`falseCaptures` have captured any mlir::Value.
+/// All `trueOperands`/`trueOperands` have already captured an mlir::Value
/// `trueCaptures`.size() == `trueOperands`.size()
/// `falseCaptures`.size() == `falseOperands`.size()
/// `trueCaptures` and `trueOperands` are pairwise of the same type
diff --git a/mlir/include/mlir/IR/Block.h b/mlir/include/mlir/IR/Block.h
index 6c5099b06da..87c77160e1d 100644
--- a/mlir/include/mlir/IR/Block.h
+++ b/mlir/include/mlir/IR/Block.h
@@ -72,7 +72,7 @@ public:
//===--------------------------------------------------------------------===//
// This is the list of arguments to the block.
- using BlockArgListType = ArrayRef<BlockArgument *>;
+ using BlockArgListType = ArrayRef<BlockArgumentPtr>;
BlockArgListType getArguments() { return arguments; }
@@ -86,7 +86,7 @@ public:
bool args_empty() { return arguments.empty(); }
/// Add one value to the argument list.
- BlockArgument *addArgument(Type type);
+ BlockArgumentPtr addArgument(Type type);
/// Add one argument to the argument list for each type specified in the list.
iterator_range<args_iterator> addArguments(ArrayRef<Type> types);
@@ -97,7 +97,7 @@ public:
void eraseArgument(unsigned index, bool updatePredTerms = true);
unsigned getNumArguments() { return arguments.size(); }
- BlockArgument *getArgument(unsigned i) { return arguments[i]; }
+ BlockArgumentPtr getArgument(unsigned i) { return arguments[i]; }
//===--------------------------------------------------------------------===//
// Operation list management
@@ -332,7 +332,7 @@ private:
OpListType operations;
/// This is the list of arguments to the block.
- std::vector<BlockArgument *> arguments;
+ std::vector<BlockArgumentPtr> arguments;
Block(Block &) = delete;
void operator=(Block &) = delete;
diff --git a/mlir/include/mlir/IR/BlockAndValueMapping.h b/mlir/include/mlir/IR/BlockAndValueMapping.h
index cd15d457a77..287dd508fa6 100644
--- a/mlir/include/mlir/IR/BlockAndValueMapping.h
+++ b/mlir/include/mlir/IR/BlockAndValueMapping.h
@@ -37,7 +37,7 @@ public:
/// Inserts a new mapping for 'from' to 'to'. If there is an existing mapping,
/// it is overwritten.
void map(Block *from, Block *to) { valueMap[from] = to; }
- void map(Value *from, Value *to) { valueMap[from] = to; }
+ void map(ValuePtr from, ValuePtr to) { valueMap[from] = to; }
/// Erases a mapping for 'from'.
void erase(IRObjectWithUseList *from) { valueMap.erase(from); }
@@ -52,8 +52,8 @@ public:
Block *lookupOrNull(Block *from) const {
return lookupOrValue(from, (Block *)nullptr);
}
- Value *lookupOrNull(Value *from) const {
- return lookupOrValue(from, (Value *)nullptr);
+ ValuePtr lookupOrNull(ValuePtr from) const {
+ return lookupOrValue(from, (ValuePtr) nullptr);
}
/// Lookup a mapped value within the map. If a mapping for the provided value
@@ -61,7 +61,7 @@ public:
Block *lookupOrDefault(Block *from) const {
return lookupOrValue(from, from);
}
- Value *lookupOrDefault(Value *from) const {
+ ValuePtr lookupOrDefault(ValuePtr from) const {
return lookupOrValue(from, from);
}
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 766902fabfa..c199c09feb5 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -313,7 +313,7 @@ public:
/// and immediately try to fold it. This functions populates 'results' with
/// the results after folding the operation.
template <typename OpTy, typename... Args>
- void createOrFold(SmallVectorImpl<Value *> &results, Location location,
+ void createOrFold(SmallVectorImpl<ValuePtr> &results, Location location,
Args &&... args) {
// Create the operation without using 'createOperation' as we don't want to
// insert it yet.
@@ -331,9 +331,9 @@ public:
/// Overload to create or fold a single result operation.
template <typename OpTy, typename... Args>
typename std::enable_if<OpTy::template hasTrait<OpTrait::OneResult>(),
- Value *>::type
+ ValuePtr>::type
createOrFold(Location location, Args &&... args) {
- SmallVector<Value *, 1> results;
+ SmallVector<ValuePtr, 1> results;
createOrFold<OpTy>(results, location, std::forward<Args>(args)...);
return results.front();
}
@@ -344,7 +344,7 @@ public:
OpTy>::type
createOrFold(Location location, Args &&... args) {
auto op = create<OpTy>(location, std::forward<Args>(args)...);
- SmallVector<Value *, 0> unused;
+ SmallVector<ValuePtr, 0> unused;
tryFold(op.getOperation(), unused);
// Folding cannot remove a zero-result operation, so for convenience we
@@ -355,7 +355,7 @@ public:
/// Attempts to fold the given operation and places new results within
/// 'results'. Returns success if the operation was folded, failure otherwise.
/// Note: This function does not erase the operation on a successful fold.
- LogicalResult tryFold(Operation *op, SmallVectorImpl<Value *> &results);
+ LogicalResult tryFold(Operation *op, SmallVectorImpl<ValuePtr> &results);
/// Creates a deep copy of the specified operation, remapping any operands
/// that use values outside of the operation using the map that is provided
diff --git a/mlir/include/mlir/IR/FunctionSupport.h b/mlir/include/mlir/IR/FunctionSupport.h
index b15b056a3ec..1ba85d73df9 100644
--- a/mlir/include/mlir/IR/FunctionSupport.h
+++ b/mlir/include/mlir/IR/FunctionSupport.h
@@ -183,7 +183,7 @@ public:
}
/// Gets argument.
- BlockArgument *getArgument(unsigned idx) {
+ BlockArgumentPtr getArgument(unsigned idx) {
return getBlocks().front().getArgument(idx);
}
diff --git a/mlir/include/mlir/IR/Matchers.h b/mlir/include/mlir/IR/Matchers.h
index 1261916dae2..3b36f2fb5eb 100644
--- a/mlir/include/mlir/IR/Matchers.h
+++ b/mlir/include/mlir/IR/Matchers.h
@@ -142,7 +142,7 @@ using has_operation_or_value_matcher_t =
/// Statically switch to a Value matcher.
template <typename MatcherClass>
typename std::enable_if_t<is_detected<detail::has_operation_or_value_matcher_t,
- MatcherClass, Value *>::value,
+ MatcherClass, ValuePtr>::value,
bool>
matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) {
return matcher.match(op->getOperand(idx));
@@ -161,14 +161,14 @@ matchOperandOrValueAtIndex(Operation *op, unsigned idx, MatcherClass &matcher) {
/// Terminal matcher, always returns true.
struct AnyValueMatcher {
- bool match(Value *op) const { return true; }
+ bool match(ValuePtr op) const { return true; }
};
/// Binds to a specific value and matches it.
struct PatternMatcherValue {
- PatternMatcherValue(Value *val) : value(val) {}
- bool match(Value *val) const { return val == value; }
- Value *value;
+ PatternMatcherValue(ValuePtr val) : value(val) {}
+ bool match(ValuePtr val) const { return val == value; }
+ ValuePtr value;
};
template <typename TupleT, class CallbackT, std::size_t... Is>
@@ -235,7 +235,7 @@ inline detail::constant_int_not_value_matcher<0> m_NonZero() {
/// Entry point for matching a pattern over a Value.
template <typename Pattern>
-inline bool matchPattern(Value *value, const Pattern &pattern) {
+inline bool matchPattern(ValuePtr value, const Pattern &pattern) {
// TODO: handle other cases
if (auto *op = value->getDefiningOp())
return const_cast<Pattern &>(pattern).match(op);
@@ -262,7 +262,7 @@ auto m_Op(Matchers... matchers) {
namespace matchers {
inline auto m_Any() { return detail::AnyValueMatcher(); }
-inline auto m_Val(Value *v) { return detail::PatternMatcherValue(v); }
+inline auto m_Val(ValuePtr v) { return detail::PatternMatcherValue(v); }
} // namespace matchers
} // end namespace mlir
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index c220120b337..437540117c4 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -257,8 +257,8 @@ inline bool operator!=(OpState lhs, OpState rhs) {
}
/// This class represents a single result from folding an operation.
-class OpFoldResult : public PointerUnion<Attribute, Value *> {
- using PointerUnion<Attribute, Value *>::PointerUnion;
+class OpFoldResult : public PointerUnion<Attribute, ValuePtr> {
+ using PointerUnion<Attribute, ValuePtr>::PointerUnion;
};
/// This template defines the foldHook as used by AbstractOperation.
@@ -311,8 +311,8 @@ class FoldingHook<ConcreteType, isSingleResult,
typename std::enable_if<isSingleResult>::type> {
public:
/// If the operation returns a single value, then the Op can be implicitly
- /// converted to an Value*. This yields the value of the only result.
- operator Value *() {
+ /// converted to an Value. This yields the value of the only result.
+ operator ValuePtr() {
return static_cast<ConcreteType *>(this)->getOperation()->getResult(0);
}
@@ -326,7 +326,7 @@ public:
// Check if the operation was folded in place. In this case, the operation
// returns itself.
- if (result.template dyn_cast<Value *>() != op->getResult(0))
+ if (result.template dyn_cast<ValuePtr>() != op->getResult(0))
results.push_back(result);
return success();
}
@@ -428,10 +428,12 @@ struct MultiOperandTraitBase : public TraitBase<ConcreteType, TraitType> {
unsigned getNumOperands() { return this->getOperation()->getNumOperands(); }
/// Return the operand at index 'i'.
- Value *getOperand(unsigned i) { return this->getOperation()->getOperand(i); }
+ ValuePtr getOperand(unsigned i) {
+ return this->getOperation()->getOperand(i);
+ }
/// Set the operand at index 'i' to 'value'.
- void setOperand(unsigned i, Value *value) {
+ void setOperand(unsigned i, ValuePtr value) {
this->getOperation()->setOperand(i, value);
}
@@ -475,9 +477,11 @@ private:
template <typename ConcreteType>
class OneOperand : public TraitBase<ConcreteType, OneOperand> {
public:
- Value *getOperand() { return this->getOperation()->getOperand(0); }
+ ValuePtr getOperand() { return this->getOperation()->getOperand(0); }
- void setOperand(Value *value) { this->getOperation()->setOperand(0, value); }
+ void setOperand(ValuePtr value) {
+ this->getOperation()->setOperand(0, value);
+ }
static LogicalResult verifyTrait(Operation *op) {
return impl::verifyOneOperand(op);
@@ -550,7 +554,7 @@ struct MultiResultTraitBase : public TraitBase<ConcreteType, TraitType> {
unsigned getNumResults() { return this->getOperation()->getNumResults(); }
/// Return the result at index 'i'.
- Value *getResult(unsigned i) { return this->getOperation()->getResult(i); }
+ ValuePtr getResult(unsigned i) { return this->getOperation()->getResult(i); }
/// Replace all uses of results of this operation with the provided 'values'.
/// 'values' may correspond to an existing operation, or a range of 'Value'.
@@ -586,13 +590,13 @@ struct MultiResultTraitBase : public TraitBase<ConcreteType, TraitType> {
template <typename ConcreteType>
class OneResult : public TraitBase<ConcreteType, OneResult> {
public:
- Value *getResult() { return this->getOperation()->getResult(0); }
+ ValuePtr getResult() { return this->getOperation()->getResult(0); }
Type getType() { return getResult()->getType(); }
/// Replace all uses of 'this' value with the new value, updating anything in
/// the IR that uses 'this' to use the other value instead. When this returns
/// there are zero uses of 'this'.
- void replaceAllUsesWith(Value *newValue) {
+ void replaceAllUsesWith(ValuePtr newValue) {
getResult()->replaceAllUsesWith(newValue);
}
@@ -820,10 +824,10 @@ public:
return this->getOperation()->setSuccessor(block, index);
}
- void addSuccessorOperand(unsigned index, Value *value) {
+ void addSuccessorOperand(unsigned index, ValuePtr value) {
return this->getOperation()->addSuccessorOperand(index, value);
}
- void addSuccessorOperands(unsigned index, ArrayRef<Value *> values) {
+ void addSuccessorOperands(unsigned index, ArrayRef<ValuePtr> values) {
return this->getOperation()->addSuccessorOperand(index, values);
}
};
@@ -1209,8 +1213,8 @@ namespace impl {
ParseResult parseOneResultOneOperandTypeOp(OpAsmParser &parser,
OperationState &result);
-void buildBinaryOp(Builder *builder, OperationState &result, Value *lhs,
- Value *rhs);
+void buildBinaryOp(Builder *builder, OperationState &result, ValuePtr lhs,
+ ValuePtr rhs);
ParseResult parseOneResultSameOperandTypeOp(OpAsmParser &parser,
OperationState &result);
@@ -1223,11 +1227,11 @@ void printOneResultOp(Operation *op, OpAsmPrinter &p);
// These functions are out-of-line implementations of the methods in CastOp,
// which avoids them being template instantiated/duplicated.
namespace impl {
-void buildCastOp(Builder *builder, OperationState &result, Value *source,
+void buildCastOp(Builder *builder, OperationState &result, ValuePtr source,
Type destType);
ParseResult parseCastOp(OpAsmParser &parser, OperationState &result);
void printCastOp(Operation *op, OpAsmPrinter &p);
-Value *foldCastOp(Operation *op);
+ValuePtr foldCastOp(Operation *op);
} // namespace impl
} // end namespace mlir
diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h
index 7dd11d089c2..fcadce9ab16 100644
--- a/mlir/include/mlir/IR/OpImplementation.h
+++ b/mlir/include/mlir/IR/OpImplementation.h
@@ -45,7 +45,7 @@ public:
virtual raw_ostream &getStream() const = 0;
/// Print implementations for various things an operation contains.
- virtual void printOperand(Value *value) = 0;
+ virtual void printOperand(ValuePtr value) = 0;
/// Print a comma separated list of operands.
template <typename ContainerType>
@@ -121,7 +121,7 @@ public:
void printFunctionalType(Operation *op) {
auto &os = getStream();
os << "(";
- interleaveComma(op->getNonSuccessorOperands(), os, [&](Value *operand) {
+ interleaveComma(op->getNonSuccessorOperands(), os, [&](ValuePtr operand) {
if (operand)
printType(operand->getType());
else
@@ -150,18 +150,18 @@ private:
};
// Make the implementations convenient to use.
-inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value &value) {
+inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValueRef value) {
p.printOperand(&value);
return p;
}
-inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Value *value) {
+inline OpAsmPrinter &operator<<(OpAsmPrinter &p, ValuePtr value) {
return p << *value;
}
-template <typename T,
- typename std::enable_if<std::is_convertible<T &, ValueRange>::value &&
- !std::is_convertible<T &, Value *>::value,
- T>::type * = nullptr>
+template <typename T, typename std::enable_if<
+ std::is_convertible<T &, ValueRange>::value &&
+ !std::is_convertible<T &, ValuePtr>::value,
+ T>::type * = nullptr>
inline OpAsmPrinter &operator<<(OpAsmPrinter &p, const T &values) {
p.printOperands(values);
return p;
@@ -181,8 +181,8 @@ inline OpAsmPrinter &operator<<(OpAsmPrinter &p, Attribute attr) {
// even if it isn't exactly one of them. For example, we want to print
// FunctionType with the Type version above, not have it match this.
template <typename T, typename std::enable_if<
- !std::is_convertible<T &, Value &>::value &&
- !std::is_convertible<T &, Value *>::value &&
+ !std::is_convertible<T &, ValueRef>::value &&
+ !std::is_convertible<T &, ValuePtr>::value &&
!std::is_convertible<T &, Type &>::value &&
!std::is_convertible<T &, Attribute &>::value &&
!std::is_convertible<T &, ValueRange>::value &&
@@ -467,13 +467,13 @@ public:
/// Resolve an operand to an SSA value, emitting an error on failure.
virtual ParseResult resolveOperand(const OperandType &operand, Type type,
- SmallVectorImpl<Value *> &result) = 0;
+ SmallVectorImpl<ValuePtr> &result) = 0;
/// Resolve a list of operands to SSA values, emitting an error on failure, or
/// appending the results to the list on success. This method should be used
/// when all operands have the same type.
ParseResult resolveOperands(ArrayRef<OperandType> operands, Type type,
- SmallVectorImpl<Value *> &result) {
+ SmallVectorImpl<ValuePtr> &result) {
for (auto elt : operands)
if (resolveOperand(elt, type, result))
return failure();
@@ -485,7 +485,7 @@ public:
/// to the list on success.
ParseResult resolveOperands(ArrayRef<OperandType> operands,
ArrayRef<Type> types, llvm::SMLoc loc,
- SmallVectorImpl<Value *> &result) {
+ SmallVectorImpl<ValuePtr> &result) {
if (operands.size() != types.size())
return emitError(loc)
<< operands.size() << " operands present, but expected "
@@ -556,7 +556,7 @@ public:
/// Parse a single operation successor and its operand list.
virtual ParseResult
parseSuccessorAndUseList(Block *&dest,
- SmallVectorImpl<Value *> &operands) = 0;
+ SmallVectorImpl<ValuePtr> &operands) = 0;
//===--------------------------------------------------------------------===//
// Type Parsing
@@ -634,7 +634,7 @@ private:
/// A functor used to set the name of the start of a result group of an
/// operation. See 'getAsmResultNames' below for more details.
-using OpAsmSetValueNameFn = function_ref<void(Value *, StringRef)>;
+using OpAsmSetValueNameFn = function_ref<void(ValuePtr, StringRef)>;
class OpAsmDialectInterface
: public DialectInterface::Base<OpAsmDialectInterface> {
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 2159d10fd2a..ad0dc600f8f 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -44,7 +44,7 @@ public:
/// Create a new Operation with the specific fields.
static Operation *create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<NamedAttribute> attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList);
@@ -53,7 +53,7 @@ public:
/// unnecessarily uniquing a list of attributes.
static Operation *create(Location location, OperationName name,
ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
NamedAttributeList attributes,
ArrayRef<Block *> successors, unsigned numRegions,
bool resizableOperandList);
@@ -64,7 +64,7 @@ public:
/// Create a new Operation with the specific fields.
static Operation *
create(Location location, OperationName name, ArrayRef<Type> resultTypes,
- ArrayRef<Value *> operands, NamedAttributeList attributes,
+ ArrayRef<ValuePtr> operands, NamedAttributeList attributes,
ArrayRef<Block *> successors = {}, RegionRange regions = {},
bool resizableOperandList = false);
@@ -149,7 +149,7 @@ public:
}
/// Replace any uses of 'from' with 'to' within this operation.
- void replaceUsesOfWith(Value *from, Value *to);
+ void replaceUsesOfWith(ValuePtr from, ValuePtr to);
/// Replace all uses of results of this operation with the provided 'values'.
template <typename ValuesT,
@@ -215,8 +215,8 @@ public:
unsigned getNumOperands() { return getOperandStorage().size(); }
- Value *getOperand(unsigned idx) { return getOpOperand(idx).get(); }
- void setOperand(unsigned idx, Value *value) {
+ ValuePtr getOperand(unsigned idx) { return getOpOperand(idx).get(); }
+ void setOperand(unsigned idx, ValuePtr value) {
return getOpOperand(idx).set(value);
}
@@ -227,7 +227,7 @@ public:
operand_iterator operand_begin() { return getOperands().begin(); }
operand_iterator operand_end() { return getOperands().end(); }
- /// Returns an iterator on the underlying Value's (Value *).
+  /// Returns an iterator on the underlying Value's (ValuePtr).
operand_range getOperands() { return operand_range(this); }
/// Erase the operand at position `idx`.
@@ -255,7 +255,7 @@ public:
unsigned getNumResults() { return numResults; }
- Value *getResult(unsigned idx) { return &getOpResult(idx); }
+ ValuePtr getResult(unsigned idx) { return &getOpResult(idx); }
/// Support result iteration.
using result_range = ResultRange;
@@ -399,7 +399,7 @@ public:
operand_range getSuccessorOperands(unsigned index);
- Value *getSuccessorOperand(unsigned succIndex, unsigned opIndex) {
+ ValuePtr getSuccessorOperand(unsigned succIndex, unsigned opIndex) {
assert(!isKnownNonTerminator() && "only terminators may have successors");
assert(opIndex < getNumSuccessorOperands(succIndex));
return getOperand(getSuccessorOperandIndex(succIndex) + opIndex);
@@ -441,9 +441,9 @@ public:
Optional<std::pair<unsigned, unsigned>>
decomposeSuccessorOperandIndex(unsigned operandIndex);
- /// Returns the `BlockArgument*` corresponding to operand `operandIndex` in
+ /// Returns the `BlockArgument` corresponding to operand `operandIndex` in
/// some successor, or None if `operandIndex` isn't a successor operand index.
- Optional<BlockArgument *> getSuccessorBlockArgument(unsigned operandIndex) {
+ Optional<BlockArgumentPtr> getSuccessorBlockArgument(unsigned operandIndex) {
auto decomposed = decomposeSuccessorOperandIndex(operandIndex);
if (!decomposed.hasValue())
return None;
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
index 23ef0ce5937..b7f63218ba5 100644
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -270,7 +270,7 @@ inline llvm::hash_code hash_value(OperationName arg) {
struct OperationState {
Location location;
OperationName name;
- SmallVector<Value *, 4> operands;
+ SmallVector<ValuePtr, 4> operands;
/// Types of the results of this operation.
SmallVector<Type, 4> types;
SmallVector<NamedAttribute, 4> attributes;
@@ -534,8 +534,8 @@ private:
/// This class implements iteration on the types of a given range of values.
template <typename ValueIteratorT>
class ValueTypeIterator final
- : public llvm::mapped_iterator<ValueIteratorT, Type (*)(Value *)> {
- static Type unwrap(Value *value) { return value->getType(); }
+ : public llvm::mapped_iterator<ValueIteratorT, Type (*)(ValuePtr)> {
+ static Type unwrap(ValuePtr value) { return value->getType(); }
public:
using reference = Type;
@@ -545,7 +545,8 @@ public:
/// Initializes the type iterator to the specified value iterator.
ValueTypeIterator(ValueIteratorT it)
- : llvm::mapped_iterator<ValueIteratorT, Type (*)(Value *)>(it, &unwrap) {}
+ : llvm::mapped_iterator<ValueIteratorT, Type (*)(ValuePtr)>(it, &unwrap) {
+ }
};
//===----------------------------------------------------------------------===//
@@ -554,7 +555,7 @@ public:
/// This class implements the operand iterators for the Operation class.
class OperandRange final
: public detail::indexed_accessor_range_base<OperandRange, OpOperand *,
- Value *, Value *, Value *> {
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
OperandRange(Operation *op);
@@ -569,7 +570,7 @@ private:
return object + index;
}
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(OpOperand *object, ptrdiff_t index) {
+ static ValuePtr dereference_iterator(OpOperand *object, ptrdiff_t index) {
return object[index].get();
}
@@ -582,8 +583,8 @@ private:
/// This class implements the result iterators for the Operation class.
class ResultRange final
- : public detail::indexed_accessor_range_base<ResultRange, OpResult *,
- Value *, Value *, Value *> {
+ : public detail::indexed_accessor_range_base<ResultRange, OpResultPtr,
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
ResultRange(Operation *op);
@@ -594,11 +595,11 @@ public:
private:
/// See `detail::indexed_accessor_range_base` for details.
- static OpResult *offset_base(OpResult *object, ptrdiff_t index) {
+ static OpResultPtr offset_base(OpResultPtr object, ptrdiff_t index) {
return object + index;
}
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(OpResult *object, ptrdiff_t index) {
+ static ValuePtr dereference_iterator(OpResultPtr object, ptrdiff_t index) {
return &object[index];
}
@@ -610,31 +611,31 @@ private:
// ValueRange
/// This class provides an abstraction over the different types of ranges over
-/// Value*s. In many cases, this prevents the need to explicitly materialize a
+/// Values. In many cases, this prevents the need to explicitly materialize a
/// SmallVector/std::vector. This class should be used in places that are not
/// suitable for a more derived type (e.g. ArrayRef) or a template range
/// parameter.
class ValueRange final
: public detail::indexed_accessor_range_base<
- ValueRange, PointerUnion<Value *const *, OpOperand *, OpResult *>,
- Value *, Value *, Value *> {
+ ValueRange, PointerUnion<ValuePtr const *, OpOperand *, OpResultPtr>,
+ ValuePtr, ValuePtr, ValuePtr> {
public:
using RangeBaseT::RangeBaseT;
template <typename Arg,
typename = typename std::enable_if_t<
- std::is_constructible<ArrayRef<Value *>, Arg>::value &&
- !std::is_convertible<Arg, Value *>::value>>
+ std::is_constructible<ArrayRef<ValuePtr>, Arg>::value &&
+ !std::is_convertible<Arg, ValuePtr>::value>>
ValueRange(Arg &&arg)
- : ValueRange(ArrayRef<Value *>(std::forward<Arg>(arg))) {}
- ValueRange(Value *const &value) : ValueRange(&value, /*count=*/1) {}
- ValueRange(const std::initializer_list<Value *> &values)
- : ValueRange(ArrayRef<Value *>(values)) {}
+ : ValueRange(ArrayRef<ValuePtr>(std::forward<Arg>(arg))) {}
+ ValueRange(ValuePtr const &value) : ValueRange(&value, /*count=*/1) {}
+ ValueRange(const std::initializer_list<ValuePtr> &values)
+ : ValueRange(ArrayRef<ValuePtr>(values)) {}
ValueRange(iterator_range<OperandRange::iterator> values)
: ValueRange(OperandRange(values)) {}
ValueRange(iterator_range<ResultRange::iterator> values)
: ValueRange(ResultRange(values)) {}
- ValueRange(ArrayRef<Value *> values = llvm::None);
+ ValueRange(ArrayRef<ValuePtr> values = llvm::None);
ValueRange(OperandRange values);
ValueRange(ResultRange values);
@@ -645,12 +646,12 @@ public:
private:
/// The type representing the owner of this range. This is either a list of
/// values, operands, or results.
- using OwnerT = PointerUnion<Value *const *, OpOperand *, OpResult *>;
+ using OwnerT = PointerUnion<ValuePtr const *, OpOperand *, OpResultPtr>;
/// See `detail::indexed_accessor_range_base` for details.
static OwnerT offset_base(const OwnerT &owner, ptrdiff_t index);
/// See `detail::indexed_accessor_range_base` for details.
- static Value *dereference_iterator(const OwnerT &owner, ptrdiff_t index);
+ static ValuePtr dereference_iterator(const OwnerT &owner, ptrdiff_t index);
/// Allow access to `offset_base` and `dereference_iterator`.
friend RangeBaseT;
diff --git a/mlir/include/mlir/IR/TypeUtilities.h b/mlir/include/mlir/IR/TypeUtilities.h
index 2cce4dbb6cf..af22f9c4a9f 100644
--- a/mlir/include/mlir/IR/TypeUtilities.h
+++ b/mlir/include/mlir/IR/TypeUtilities.h
@@ -41,8 +41,8 @@ Type getElementTypeOrSelf(Type type);
/// Return the element type or return the type itself.
Type getElementTypeOrSelf(Attribute attr);
-Type getElementTypeOrSelf(Value *val);
-Type getElementTypeOrSelf(Value &val);
+Type getElementTypeOrSelf(ValuePtr val);
+Type getElementTypeOrSelf(ValueRef val);
/// Get the types within a nested Tuple. A helper for the class method that
/// handles storage concerns, which is tricky to do in tablegen.
@@ -72,7 +72,7 @@ LogicalResult verifyCompatibleShape(Type type1, Type type2);
// An iterator for the element types of an op's operands of shaped types.
class OperandElementTypeIterator final
: public llvm::mapped_iterator<Operation::operand_iterator,
- Type (*)(Value *)> {
+ Type (*)(ValuePtr)> {
public:
using reference = Type;
@@ -81,7 +81,7 @@ public:
explicit OperandElementTypeIterator(Operation::operand_iterator it);
private:
- static Type unwrap(Value *value);
+ static Type unwrap(ValuePtr value);
};
using OperandElementTypeRange = iterator_range<OperandElementTypeIterator>;
@@ -89,7 +89,7 @@ using OperandElementTypeRange = iterator_range<OperandElementTypeIterator>;
// An iterator for the tensor element types of an op's results of shaped types.
class ResultElementTypeIterator final
: public llvm::mapped_iterator<Operation::result_iterator,
- Type (*)(Value *)> {
+ Type (*)(ValuePtr)> {
public:
using reference = Type;
@@ -98,7 +98,7 @@ public:
explicit ResultElementTypeIterator(Operation::result_iterator it);
private:
- static Type unwrap(Value *value);
+ static Type unwrap(ValuePtr value);
};
using ResultElementTypeRange = iterator_range<ResultElementTypeIterator>;
diff --git a/mlir/include/mlir/IR/Value.h b/mlir/include/mlir/IR/Value.h
index 34c74c888cb..11cb8cdcbc7 100644
--- a/mlir/include/mlir/IR/Value.h
+++ b/mlir/include/mlir/IR/Value.h
@@ -28,10 +28,18 @@
namespace mlir {
class Block;
+class BlockArgument;
class Operation;
+class OpResult;
class Region;
class Value;
+/// Using directives that simplify the transition of Value to being value-typed.
+using BlockArgumentPtr = BlockArgument *;
+using OpResultPtr = OpResult *;
+using ValueRef = Value &;
+using ValuePtr = Value *;
+
/// Operands contain a Value.
using OpOperand = IROperandImpl<Value>;
@@ -48,6 +56,15 @@ public:
~Value() {}
+ template <typename U> bool isa() const { return U::classof(this); }
+ template <typename U> U *dyn_cast() const {
+ return isa<U>() ? (U *)this : nullptr;
+ }
+ template <typename U> U *cast() const {
+ assert(isa<U>());
+ return (U *)this;
+ }
+
Kind getKind() const { return typeAndKind.getInt(); }
Type getType() const { return typeAndKind.getPointer(); }
@@ -66,7 +83,7 @@ public:
/// Replace all uses of 'this' value with the new value, updating anything in
/// the IR that uses 'this' to use the other value instead. When this returns
/// there are zero uses of 'this'.
- void replaceAllUsesWith(Value *newValue) {
+ void replaceAllUsesWith(ValuePtr newValue) {
IRObjectWithUseList::replaceAllUsesWith(newValue);
}
@@ -100,7 +117,7 @@ private:
llvm::PointerIntPair<Type, 1, Kind> typeAndKind;
};
-inline raw_ostream &operator<<(raw_ostream &os, Value &value) {
+inline raw_ostream &operator<<(raw_ostream &os, ValueRef value) {
value.print(os);
return os;
}
@@ -160,7 +177,6 @@ private:
/// through bitpacking shenanigans.
Operation *const owner;
};
-
} // namespace mlir
#endif
diff --git a/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h b/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
index 070b3c36e8c..202e86566fc 100644
--- a/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
+++ b/mlir/include/mlir/Quantizer/Support/ConstraintAnalysisGraph.h
@@ -163,7 +163,7 @@ public:
}
virtual Operation *getOp() const = 0;
- virtual Value *getValue() const = 0;
+ virtual ValuePtr getValue() const = 0;
static bool classof(const CAGNode *n) {
return n->getKind() >= Kind::Anchor && n->getKind() <= Kind::LastAnchor;
@@ -210,7 +210,7 @@ public:
return n->getKind() == Kind::Anchor || n->getKind() == Kind::OperandAnchor;
}
- Value *getValue() const final { return op->getOperand(operandIdx); }
+ ValuePtr getValue() const final { return op->getOperand(operandIdx); }
void printLabel(raw_ostream &os) const override;
@@ -221,7 +221,7 @@ private:
/// An anchor tied to a specific result.
/// Since a result is already anchored to its defining op, result anchors refer
-/// directly to the underlying Value*.
+/// directly to the underlying Value.
class CAGResultAnchor : public CAGAnchorNode {
public:
CAGResultAnchor(Operation *op, unsigned resultIdx);
@@ -231,12 +231,12 @@ public:
}
Operation *getOp() const final { return resultValue->getDefiningOp(); }
- Value *getValue() const final { return resultValue; }
+ ValuePtr getValue() const final { return resultValue; }
void printLabel(raw_ostream &os) const override;
private:
- Value *resultValue;
+ ValuePtr resultValue;
};
/// Base class for constraint nodes.
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
index 7adb4aac2e2..7464e2a347d 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
@@ -113,7 +113,7 @@ private:
protected:
// Mappings between original and translated values, used for lookups.
llvm::StringMap<llvm::Function *> functionMapping;
- DenseMap<Value *, llvm::Value *> valueMapping;
+ DenseMap<ValuePtr, llvm::Value *> valueMapping;
DenseMap<Block *, llvm::BasicBlock *> blockMapping;
};
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 814f2202f01..f9f1207c0a0 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -60,7 +60,7 @@ public:
/// remaps an existing signature input.
struct InputMapping {
size_t inputNo, size;
- Value *replacementValue;
+ ValuePtr replacementValue;
};
/// Return the argument types for the new signature.
@@ -90,7 +90,7 @@ public:
/// Remap an input of the original signature to another `replacement`
/// value. This drops the original argument.
- void remapInput(unsigned origInputNo, Value *replacement);
+ void remapInput(unsigned origInputNo, ValuePtr replacement);
private:
/// The remapping information for each of the original arguments.
@@ -143,7 +143,7 @@ public:
/// the conversion has finished.
virtual Operation *materializeConversion(PatternRewriter &rewriter,
Type resultType,
- ArrayRef<Value *> inputs,
+ ArrayRef<ValuePtr> inputs,
Location loc) {
llvm_unreachable("expected 'materializeConversion' to be overridden");
}
@@ -172,7 +172,7 @@ public:
/// ConversionPattern ever needs to replace an operation that does not
/// have successors. This function should not fail. If some specific cases of
/// the operation are not supported, these cases should not be matched.
- virtual void rewrite(Operation *op, ArrayRef<Value *> operands,
+ virtual void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite");
}
@@ -187,18 +187,18 @@ public:
/// terminator operation that has successors. This function should not fail
/// the pass. If some specific cases of the operation are not supported,
/// these cases should not be matched.
- virtual void rewrite(Operation *op, ArrayRef<Value *> properOperands,
+ virtual void rewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite for terminators");
}
/// Hook for derived classes to implement combined matching and rewriting.
virtual PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -208,7 +208,7 @@ public:
/// Hook for derived classes to implement combined matching and rewriting.
virtual PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -234,27 +234,27 @@ struct OpConversionPattern : public ConversionPattern {
/// Wrappers around the ConversionPattern methods that pass the derived op
/// type.
- void rewrite(Operation *op, ArrayRef<Value *> operands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
rewrite(cast<SourceOp>(op), operands, rewriter);
}
- void rewrite(Operation *op, ArrayRef<Value *> properOperands,
+ void rewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const final {
rewrite(cast<SourceOp>(op), properOperands, destinations, operands,
rewriter);
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const final {
return matchAndRewrite(cast<SourceOp>(op), properOperands, destinations,
operands, rewriter);
}
PatternMatchResult
- matchAndRewrite(Operation *op, ArrayRef<Value *> operands,
+ matchAndRewrite(Operation *op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const final {
return matchAndRewrite(cast<SourceOp>(op), operands, rewriter);
}
@@ -264,22 +264,22 @@ struct OpConversionPattern : public ConversionPattern {
/// Rewrite and Match methods that operate on the SourceOp type. These must be
/// overridden by the derived pattern class.
- virtual void rewrite(SourceOp op, ArrayRef<Value *> operands,
+ virtual void rewrite(SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("must override matchAndRewrite or a rewrite method");
}
- virtual void rewrite(SourceOp op, ArrayRef<Value *> properOperands,
+ virtual void rewrite(SourceOp op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
llvm_unreachable("unimplemented rewrite for terminators");
}
virtual PatternMatchResult
- matchAndRewrite(SourceOp op, ArrayRef<Value *> properOperands,
+ matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> properOperands,
ArrayRef<Block *> destinations,
- ArrayRef<ArrayRef<Value *>> operands,
+ ArrayRef<ArrayRef<ValuePtr>> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -288,7 +288,7 @@ struct OpConversionPattern : public ConversionPattern {
}
virtual PatternMatchResult
- matchAndRewrite(SourceOp op, ArrayRef<Value *> operands,
+ matchAndRewrite(SourceOp op, ArrayRef<ValuePtr> operands,
ConversionPatternRewriter &rewriter) const {
if (!match(op))
return matchFailure();
@@ -330,11 +330,11 @@ public:
TypeConverter::SignatureConversion &conversion);
/// Replace all the uses of the block argument `from` with value `to`.
- void replaceUsesOfBlockArgument(BlockArgument *from, Value *to);
+ void replaceUsesOfBlockArgument(BlockArgumentPtr from, ValuePtr to);
/// Return the converted value that replaces 'key'. Return 'key' if there is
/// no such a converted value.
- Value *getRemappedValue(Value *key);
+ ValuePtr getRemappedValue(ValuePtr key);
//===--------------------------------------------------------------------===//
// PatternRewriter Hooks
diff --git a/mlir/include/mlir/Transforms/FoldUtils.h b/mlir/include/mlir/Transforms/FoldUtils.h
index bdf88d3bfb2..65dd1b6df16 100644
--- a/mlir/include/mlir/Transforms/FoldUtils.h
+++ b/mlir/include/mlir/Transforms/FoldUtils.h
@@ -82,7 +82,7 @@ public:
/// and immediately try to fold it. This function populates 'results' with
/// the results after folding the operation.
template <typename OpTy, typename... Args>
- void create(OpBuilder &builder, SmallVectorImpl<Value *> &results,
+ void create(OpBuilder &builder, SmallVectorImpl<ValuePtr> &results,
Location location, Args &&... args) {
Operation *op = builder.create<OpTy>(location, std::forward<Args>(args)...);
if (failed(tryToFold(op, results)))
@@ -94,9 +94,9 @@ public:
/// Overload to create or fold a single result operation.
template <typename OpTy, typename... Args>
typename std::enable_if<OpTy::template hasTrait<OpTrait::OneResult>(),
- Value *>::type
+ ValuePtr>::type
create(OpBuilder &builder, Location location, Args &&... args) {
- SmallVector<Value *, 1> results;
+ SmallVector<ValuePtr, 1> results;
create<OpTy>(builder, results, location, std::forward<Args>(args)...);
return results.front();
}
@@ -107,7 +107,7 @@ public:
OpTy>::type
create(OpBuilder &builder, Location location, Args &&... args) {
auto op = builder.create<OpTy>(location, std::forward<Args>(args)...);
- SmallVector<Value *, 0> unused;
+ SmallVector<ValuePtr, 0> unused;
(void)tryToFold(op.getOperation(), unused);
// Folding cannot remove a zero-result operation, so for convenience we
@@ -126,7 +126,7 @@ private:
/// Tries to perform folding on the given `op`. If successful, populates
/// `results` with the results of the folding.
LogicalResult tryToFold(
- Operation *op, SmallVectorImpl<Value *> &results,
+ Operation *op, SmallVectorImpl<ValuePtr> &results,
function_ref<void(Operation *)> processGeneratedConstants = nullptr);
/// Try to get or create a new constant entry. On success this returns the
diff --git a/mlir/include/mlir/Transforms/InliningUtils.h b/mlir/include/mlir/Transforms/InliningUtils.h
index 590b46a5d12..47c4f48f468 100644
--- a/mlir/include/mlir/Transforms/InliningUtils.h
+++ b/mlir/include/mlir/Transforms/InliningUtils.h
@@ -105,7 +105,7 @@ public:
/// operation). The given 'op' will be removed by the caller, after this
/// function has been called.
virtual void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToReplace) const {
+ ArrayRef<ValuePtr> valuesToReplace) const {
llvm_unreachable(
"must implement handleTerminator in the case of one inlined block");
}
@@ -125,8 +125,8 @@ public:
/// ... = foo.call @foo(%input : i32) -> i16
///
/// NOTE: This hook may be invoked before the 'isLegal' checks above.
- virtual Operation *materializeCallConversion(OpBuilder &builder, Value *input,
- Type resultType,
+ virtual Operation *materializeCallConversion(OpBuilder &builder,
+ ValuePtr input, Type resultType,
Location conversionLoc) const {
return nullptr;
}
@@ -165,7 +165,7 @@ public:
virtual void handleTerminator(Operation *op, Block *newDest) const;
virtual void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const;
+ ArrayRef<ValuePtr> valuesToRepl) const;
};
//===----------------------------------------------------------------------===//
@@ -187,7 +187,7 @@ public:
/// be cloned into the 'inlinePoint' or spliced directly.
LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint, BlockAndValueMapping &mapper,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc = llvm::None,
bool shouldCloneInlinedRegion = true);
@@ -196,8 +196,8 @@ LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
/// in-favor of the region arguments when inlining.
LogicalResult inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
- ArrayRef<Value *> inlinedOperands,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> inlinedOperands,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc = llvm::None,
bool shouldCloneInlinedRegion = true);
diff --git a/mlir/include/mlir/Transforms/LoopLikeInterface.td b/mlir/include/mlir/Transforms/LoopLikeInterface.td
index 5c324b79f67..583cfe26d87 100644
--- a/mlir/include/mlir/Transforms/LoopLikeInterface.td
+++ b/mlir/include/mlir/Transforms/LoopLikeInterface.td
@@ -38,7 +38,7 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
explicit capture of dependencies, an implementation could check whether
the value corresponds to a captured dependency.
}],
- "bool", "isDefinedOutsideOfLoop", (ins "Value *":$value)
+ "bool", "isDefinedOutsideOfLoop", (ins "ValuePtr ":$value)
>,
InterfaceMethod<[{
Returns the region that makes up the body of the loop and should be
diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
index 5ca3f7f6510..37434ea2ea8 100644
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -85,7 +85,7 @@ void promoteSingleIterationLoops(FuncOp f);
/// expression.
void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
- SmallVectorImpl<Value *> *operands,
+ SmallVectorImpl<ValuePtr> *operands,
OpBuilder &builder);
/// Skew the operations in the body of a 'affine.for' operation with the
@@ -140,7 +140,7 @@ SmallVector<SmallVector<AffineForOp, 8>, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes,
ArrayRef<AffineForOp> targets);
SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps,
- ArrayRef<Value *> sizes,
+ ArrayRef<ValuePtr> sizes,
ArrayRef<loop::ForOp> targets);
/// Performs tiling (with interchange) by strip-mining the `forOps` by `sizes`
@@ -149,7 +149,7 @@ SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps,
/// `target`.
SmallVector<AffineForOp, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes, AffineForOp target);
-Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
+Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<ValuePtr> sizes,
loop::ForOp target);
/// Tile a nest of loop::ForOp loops rooted at `rootForOp` with the given
@@ -157,7 +157,7 @@ Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
/// runtime. If more sizes than loops are provided, discard the trailing values
/// in sizes. Assumes the loop nest is permutable.
/// Returns the newly created intra-tile loops.
-Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value *> sizes);
+Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<ValuePtr> sizes);
/// Explicit copy / DMA generation options for mlir::affineDataCopyGenerate.
struct AffineCopyOptions {
@@ -229,8 +229,8 @@ void coalesceLoops(MutableArrayRef<loop::ForOp> loops);
/// ...
/// }
/// ```
-void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<Value *> processorId,
- ArrayRef<Value *> numProcessors);
+void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<ValuePtr> processorId,
+ ArrayRef<ValuePtr> numProcessors);
} // end namespace mlir
#endif // MLIR_TRANSFORMS_LOOP_UTILS_H
diff --git a/mlir/include/mlir/Transforms/RegionUtils.h b/mlir/include/mlir/Transforms/RegionUtils.h
index 48080b26c2c..63236d6a5a0 100644
--- a/mlir/include/mlir/Transforms/RegionUtils.h
+++ b/mlir/include/mlir/Transforms/RegionUtils.h
@@ -30,14 +30,14 @@ namespace mlir {
/// of `limit`.
template <typename Range>
bool areValuesDefinedAbove(Range values, Region &limit) {
- for (Value *v : values)
+ for (ValuePtr v : values)
if (!v->getParentRegion()->isProperAncestor(&limit))
return false;
return true;
}
/// Replace all uses of `orig` within the given region with `replacement`.
-void replaceAllUsesInRegionWith(Value *orig, Value *replacement,
+void replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement,
Region &region);
/// Calls `callback` for each use of a value within `region` or its descendants
@@ -53,12 +53,12 @@ void visitUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
/// Fill `values` with a list of values defined at the ancestors of the `limit`
/// region and used within `region` or its descendants.
void getUsedValuesDefinedAbove(Region &region, Region &limit,
- llvm::SetVector<Value *> &values);
+ llvm::SetVector<ValuePtr> &values);
/// Fill `values` with a list of values used within any of the regions provided
/// but defined in one of the ancestors.
void getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
- llvm::SetVector<Value *> &values);
+ llvm::SetVector<ValuePtr> &values);
/// Run a set of structural simplifications over the given regions. This
/// includes transformations like unreachable block elimination, dead argument
diff --git a/mlir/include/mlir/Transforms/Utils.h b/mlir/include/mlir/Transforms/Utils.h
index c682b48f331..02c368ec496 100644
--- a/mlir/include/mlir/Transforms/Utils.h
+++ b/mlir/include/mlir/Transforms/Utils.h
@@ -66,22 +66,22 @@ class OpBuilder;
// extra operands, note that 'indexRemap' would just be applied to existing
// indices (%i, %j).
// TODO(bondhugula): allow extraIndices to be added at any position.
-LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- ArrayRef<Value *> extraIndices = {},
+LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef,
+ ArrayRef<ValuePtr> extraIndices = {},
AffineMap indexRemap = AffineMap(),
- ArrayRef<Value *> extraOperands = {},
- ArrayRef<Value *> symbolOperands = {},
+ ArrayRef<ValuePtr> extraOperands = {},
+ ArrayRef<ValuePtr> symbolOperands = {},
Operation *domInstFilter = nullptr,
Operation *postDomInstFilter = nullptr);
/// Performs the same replacement as the other version above but only for the
/// dereferencing uses of `oldMemRef` in `op`.
-LogicalResult replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
+LogicalResult replaceAllMemRefUsesWith(ValuePtr oldMemRef, ValuePtr newMemRef,
Operation *op,
- ArrayRef<Value *> extraIndices = {},
+ ArrayRef<ValuePtr> extraIndices = {},
AffineMap indexRemap = AffineMap(),
- ArrayRef<Value *> extraOperands = {},
- ArrayRef<Value *> symbolOperands = {});
+ ArrayRef<ValuePtr> extraOperands = {},
+ ArrayRef<ValuePtr> symbolOperands = {});
/// Rewrites the memref defined by this alloc op to have an identity layout map
/// and updates all its indexing uses. Returns failure if any of its uses
@@ -96,9 +96,9 @@ LogicalResult normalizeMemRef(AllocOp op);
/// The final results of the composed AffineApplyOp are returned in output
/// parameter 'results'. Returns the affine apply op created.
Operation *createComposedAffineApplyOp(OpBuilder &builder, Location loc,
- ArrayRef<Value *> operands,
+ ArrayRef<ValuePtr> operands,
ArrayRef<Operation *> affineApplyOps,
- SmallVectorImpl<Value *> *results);
+ SmallVectorImpl<ValuePtr> *results);
/// Given an operation, inserts one or more single result affine apply
/// operations, results of which are exclusively used by this operation.
OpenPOWER on IntegriCloud