author     River Riddle <riverriddle@google.com>             2019-12-23 14:45:01 -0800
committer  A. Unique TensorFlower <gardener@tensorflow.org>  2019-12-23 16:36:53 -0800
commit     e62a69561fb9d7b1013d2853da68d79a7907fead (patch)
tree       0dd059094cbfb8d904513abcdc1fbe8cfa89bb09 /mlir/lib/Dialect
parent     5d5bd2e1da29d976cb125dbb3cd097a5e42b2be4 (diff)
NFC: Replace ValuePtr with Value and remove it now that Value is value-typed.
ValuePtr was a temporary typedef during the transition to a value-typed Value.

PiperOrigin-RevId: 286945714
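For context: mlir::Value is a thin, value-typed handle whose only state is a pointer to the underlying IR object, so taking it by value costs the same as passing the old ValuePtr typedef. Below is a minimal standalone sketch of that idea; ValueImpl, its typeId member, and isIndexLike are illustrative stand-ins, not MLIR's actual internals.

#include <cassert>

// Minimal sketch of a value-typed handle, assuming the same idea as
// mlir::Value: the class holds a single pointer to the underlying IR
// object, so copying it costs the same as copying the old ValuePtr.
struct ValueImpl {
  int typeId = 0; // stand-in for the real type information
};

class Value {
public:
  Value(ValueImpl *impl = nullptr) : impl(impl) {}

  // Handles compare by identity of the underlying object.
  bool operator==(Value other) const { return impl == other.impl; }
  explicit operator bool() const { return impl != nullptr; }

  int getTypeId() const { return impl->typeId; }

private:
  ValueImpl *impl; // the only state: one pointer
};

// APIs now take the handle by value, mirroring signatures in the diff
// below such as `bool mlir::isTopLevelValue(Value value)`.
static bool isIndexLike(Value v) { return v && v.getTypeId() == 1; }

int main() {
  ValueImpl impl{1};
  Value v(&impl);
  assert(isIndexLike(v)); // passing v copies one pointer, nothing more
  return 0;
}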
Diffstat (limited to 'mlir/lib/Dialect')
-rw-r--r--  mlir/lib/Dialect/AffineOps/AffineOps.cpp | 124
-rw-r--r--  mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp | 64
-rw-r--r--  mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h | 6
-rw-r--r--  mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 29
-rw-r--r--  mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp | 12
-rw-r--r--  mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 30
-rw-r--r--  mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp | 8
-rw-r--r--  mlir/lib/Dialect/Linalg/EDSC/Builders.cpp | 19
-rw-r--r--  mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 4
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp | 29
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp | 46
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp | 6
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp | 25
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp | 55
-rw-r--r--  mlir/lib/Dialect/Linalg/Utils/Utils.cpp | 26
-rw-r--r--  mlir/lib/Dialect/LoopOps/LoopOps.cpp | 10
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp | 2
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp | 8
-rw-r--r--  mlir/lib/Dialect/SPIRV/SPIRVOps.cpp | 35
-rw-r--r--  mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp | 28
-rw-r--r--  mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp | 14
-rw-r--r--  mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp | 6
-rw-r--r--  mlir/lib/Dialect/StandardOps/Ops.cpp | 60
-rw-r--r--  mlir/lib/Dialect/VectorOps/VectorOps.cpp | 25
-rw-r--r--  mlir/lib/Dialect/VectorOps/VectorTransforms.cpp | 71
25 files changed, 363 insertions(+), 379 deletions(-)
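The rename also flows through container types in the hunks below (SmallVector<Value, 8>, DenseMap<Value, unsigned>, SetVector<Value>). That works because the handle compares and hashes by its wrapped pointer. A standalone sketch of the same idea using standard containers follows; the names are again illustrative, and MLIR itself provides a DenseMapInfo specialization for Value rather than a std::hash one.

#include <cstddef>
#include <functional>
#include <unordered_map>

// A value-typed handle can key a map as long as hashing and equality
// forward to the wrapped pointer; key identity is object identity.
struct ValueImpl {};

class Value {
public:
  explicit Value(ValueImpl *impl = nullptr) : impl(impl) {}
  bool operator==(Value other) const { return impl == other.impl; }
  ValueImpl *getImpl() const { return impl; }

private:
  ValueImpl *impl;
};

namespace std {
template <> struct hash<Value> {
  size_t operator()(Value v) const noexcept {
    return hash<ValueImpl *>()(v.getImpl());
  }
};
} // namespace std

int main() {
  ValueImpl a, b;
  std::unordered_map<Value, unsigned> positions;
  positions.emplace(Value(&a), 0u);
  positions.emplace(Value(&b), 1u);
  return static_cast<int>(positions.at(Value(&a))); // found by identity
}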
diff --git a/mlir/lib/Dialect/AffineOps/AffineOps.cpp b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
index d80f9865ccb..5f4cc2e1060 100644
--- a/mlir/lib/Dialect/AffineOps/AffineOps.cpp
+++ b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
@@ -106,7 +106,7 @@ static bool isFunctionRegion(Region *region) {
/// A utility function to check if a value is defined at the top level of a
/// function. A value of index type defined at the top level is always a valid
/// symbol.
-bool mlir::isTopLevelValue(ValuePtr value) {
+bool mlir::isTopLevelValue(Value value) {
if (auto arg = value.dyn_cast<BlockArgument>())
return isFunctionRegion(arg->getOwner()->getParent());
return isFunctionRegion(value->getDefiningOp()->getParentRegion());
@@ -115,7 +115,7 @@ bool mlir::isTopLevelValue(ValuePtr value) {
// Value can be used as a dimension id if it is valid as a symbol, or
// it is an induction variable, or it is a result of affine apply operation
// with dimension id arguments.
-bool mlir::isValidDim(ValuePtr value) {
+bool mlir::isValidDim(Value value) {
// The value must be an index type.
if (!value->getType().isIndex())
return false;
@@ -175,7 +175,7 @@ static bool isDimOpValidSymbol(DimOp dimOp) {
// the top level, or it is a result of affine apply operation with symbol
// arguments, or a result of the dim op on a memref satisfying certain
// constraints.
-bool mlir::isValidSymbol(ValuePtr value) {
+bool mlir::isValidSymbol(Value value) {
// The value must be an index type.
if (!value->getType().isIndex())
return false;
@@ -198,7 +198,7 @@ bool mlir::isValidSymbol(ValuePtr value) {
// Returns true if 'value' is a valid index to an affine operation (e.g.
// affine.load, affine.store, affine.dma_start, affine.dma_wait).
// Returns false otherwise.
-static bool isValidAffineIndexOperand(ValuePtr value) {
+static bool isValidAffineIndexOperand(Value value) {
return isValidDim(value) || isValidSymbol(value);
}
@@ -297,14 +297,14 @@ LogicalResult AffineApplyOp::verify() {
// its operands are valid dimension ids.
bool AffineApplyOp::isValidDim() {
return llvm::all_of(getOperands(),
- [](ValuePtr op) { return mlir::isValidDim(op); });
+ [](Value op) { return mlir::isValidDim(op); });
}
// The result of the affine apply operation can be used as a symbol if all its
// operands are symbols.
bool AffineApplyOp::isValidSymbol() {
return llvm::all_of(getOperands(),
- [](ValuePtr op) { return mlir::isValidSymbol(op); });
+ [](Value op) { return mlir::isValidSymbol(op); });
}
OpFoldResult AffineApplyOp::fold(ArrayRef<Attribute> operands) {
@@ -324,8 +324,8 @@ OpFoldResult AffineApplyOp::fold(ArrayRef<Attribute> operands) {
return result[0];
}
-AffineDimExpr AffineApplyNormalizer::renumberOneDim(ValuePtr v) {
- DenseMap<ValuePtr, unsigned>::iterator iterPos;
+AffineDimExpr AffineApplyNormalizer::renumberOneDim(Value v) {
+ DenseMap<Value, unsigned>::iterator iterPos;
bool inserted = false;
std::tie(iterPos, inserted) =
dimValueToPosition.insert(std::make_pair(v, dimValueToPosition.size()));
@@ -362,7 +362,7 @@ AffineMap AffineApplyNormalizer::renumber(const AffineApplyNormalizer &other) {
// Gather the positions of the operands that are produced by an AffineApplyOp.
static llvm::SetVector<unsigned>
-indicesFromAffineApplyOp(ArrayRef<ValuePtr> operands) {
+indicesFromAffineApplyOp(ArrayRef<Value> operands) {
llvm::SetVector<unsigned> res;
for (auto en : llvm::enumerate(operands))
if (isa_and_nonnull<AffineApplyOp>(en.value()->getDefiningOp()))
@@ -384,7 +384,7 @@ indicesFromAffineApplyOp(ArrayRef<ValuePtr> operands) {
// results in better simplifications and foldings. But we should evaluate
// whether this behavior is what we really want after using more.
static AffineMap promoteComposedSymbolsAsDims(AffineMap map,
- ArrayRef<ValuePtr> symbols) {
+ ArrayRef<Value> symbols) {
if (symbols.empty()) {
return map;
}
@@ -453,7 +453,7 @@ static AffineMap promoteComposedSymbolsAsDims(AffineMap map,
/// benefit potentially big: simpler and more maintainable code for a
/// non-trivial, recursive, procedure.
AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
- ArrayRef<ValuePtr> operands)
+ ArrayRef<Value> operands)
: AffineApplyNormalizer() {
static_assert(kMaxAffineApplyDepth > 0, "kMaxAffineApplyDepth must be > 0");
assert(map.getNumInputs() == operands.size() &&
@@ -509,7 +509,7 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
LLVM_DEBUG(affineApply.getOperation()->print(
dbgs() << "\nCompose AffineApplyOp recursively: "));
AffineMap affineApplyMap = affineApply.getAffineMap();
- SmallVector<ValuePtr, 8> affineApplyOperands(
+ SmallVector<Value, 8> affineApplyOperands(
affineApply.getOperands().begin(), affineApply.getOperands().end());
AffineApplyNormalizer normalizer(affineApplyMap, affineApplyOperands);
@@ -560,8 +560,8 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
LLVM_DEBUG(dbgs() << "\n");
}
-void AffineApplyNormalizer::normalize(
- AffineMap *otherMap, SmallVectorImpl<ValuePtr> *otherOperands) {
+void AffineApplyNormalizer::normalize(AffineMap *otherMap,
+ SmallVectorImpl<Value> *otherOperands) {
AffineApplyNormalizer other(*otherMap, *otherOperands);
*otherMap = renumber(other);
@@ -575,7 +575,7 @@ void AffineApplyNormalizer::normalize(
/// on `map` and `operands` without creating an AffineApplyOp that needs to be
/// immediately deleted.
static void composeAffineMapAndOperands(AffineMap *map,
- SmallVectorImpl<ValuePtr> *operands) {
+ SmallVectorImpl<Value> *operands) {
AffineApplyNormalizer normalizer(*map, *operands);
auto normalizedMap = normalizer.getAffineMap();
auto normalizedOperands = normalizer.getOperands();
@@ -585,9 +585,9 @@ static void composeAffineMapAndOperands(AffineMap *map,
assert(*map);
}
-void mlir::fullyComposeAffineMapAndOperands(
- AffineMap *map, SmallVectorImpl<ValuePtr> *operands) {
- while (llvm::any_of(*operands, [](ValuePtr v) {
+void mlir::fullyComposeAffineMapAndOperands(AffineMap *map,
+ SmallVectorImpl<Value> *operands) {
+ while (llvm::any_of(*operands, [](Value v) {
return isa_and_nonnull<AffineApplyOp>(v->getDefiningOp());
})) {
composeAffineMapAndOperands(map, operands);
@@ -596,9 +596,9 @@ void mlir::fullyComposeAffineMapAndOperands(
AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
AffineMap map,
- ArrayRef<ValuePtr> operands) {
+ ArrayRef<Value> operands) {
AffineMap normalizedMap = map;
- SmallVector<ValuePtr, 8> normalizedOperands(operands.begin(), operands.end());
+ SmallVector<Value, 8> normalizedOperands(operands.begin(), operands.end());
composeAffineMapAndOperands(&normalizedMap, &normalizedOperands);
assert(normalizedMap);
return b.create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
@@ -608,7 +608,7 @@ AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
// canonicalizes dims that are valid symbols into actual symbols.
template <class MapOrSet>
static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
- SmallVectorImpl<ValuePtr> *operands) {
+ SmallVectorImpl<Value> *operands) {
if (!mapOrSet || operands->empty())
return;
@@ -616,9 +616,9 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
"map/set inputs must match number of operands");
auto *context = mapOrSet->getContext();
- SmallVector<ValuePtr, 8> resultOperands;
+ SmallVector<Value, 8> resultOperands;
resultOperands.reserve(operands->size());
- SmallVector<ValuePtr, 8> remappedSymbols;
+ SmallVector<Value, 8> remappedSymbols;
remappedSymbols.reserve(operands->size());
unsigned nextDim = 0;
unsigned nextSym = 0;
@@ -650,9 +650,8 @@ static void canonicalizePromotedSymbols(MapOrSet *mapOrSet,
// Works for either an affine map or an integer set.
template <class MapOrSet>
-static void
-canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
- SmallVectorImpl<ValuePtr> *operands) {
+static void canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
+ SmallVectorImpl<Value> *operands) {
static_assert(std::is_same<MapOrSet, AffineMap>::value ||
std::is_same<MapOrSet, IntegerSet>::value,
"Argument must be either of AffineMap or IntegerSet type");
@@ -677,10 +676,10 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
auto *context = mapOrSet->getContext();
- SmallVector<ValuePtr, 8> resultOperands;
+ SmallVector<Value, 8> resultOperands;
resultOperands.reserve(operands->size());
- llvm::SmallDenseMap<ValuePtr, AffineExpr, 8> seenDims;
+ llvm::SmallDenseMap<Value, AffineExpr, 8> seenDims;
SmallVector<AffineExpr, 8> dimRemapping(mapOrSet->getNumDims());
unsigned nextDim = 0;
for (unsigned i = 0, e = mapOrSet->getNumDims(); i != e; ++i) {
@@ -696,7 +695,7 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
}
}
}
- llvm::SmallDenseMap<ValuePtr, AffineExpr, 8> seenSymbols;
+ llvm::SmallDenseMap<Value, AffineExpr, 8> seenSymbols;
SmallVector<AffineExpr, 8> symRemapping(mapOrSet->getNumSymbols());
unsigned nextSym = 0;
for (unsigned i = 0, e = mapOrSet->getNumSymbols(); i != e; ++i) {
@@ -729,12 +728,12 @@ canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
}
void mlir::canonicalizeMapAndOperands(AffineMap *map,
- SmallVectorImpl<ValuePtr> *operands) {
+ SmallVectorImpl<Value> *operands) {
canonicalizeMapOrSetAndOperands<AffineMap>(map, operands);
}
void mlir::canonicalizeSetAndOperands(IntegerSet *set,
- SmallVectorImpl<ValuePtr> *operands) {
+ SmallVectorImpl<Value> *operands) {
canonicalizeMapOrSetAndOperands<IntegerSet>(set, operands);
}
@@ -749,7 +748,7 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
/// Replace the affine op with another instance of it with the supplied
/// map and mapOperands.
void replaceAffineOp(PatternRewriter &rewriter, AffineOpTy affineOp,
- AffineMap map, ArrayRef<ValuePtr> mapOperands) const;
+ AffineMap map, ArrayRef<Value> mapOperands) const;
PatternMatchResult matchAndRewrite(AffineOpTy affineOp,
PatternRewriter &rewriter) const override {
@@ -761,7 +760,7 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
auto map = affineOp.getAffineMap();
AffineMap oldMap = map;
auto oldOperands = affineOp.getMapOperands();
- SmallVector<ValuePtr, 8> resultOperands(oldOperands);
+ SmallVector<Value, 8> resultOperands(oldOperands);
composeAffineMapAndOperands(&map, &resultOperands);
if (map == oldMap && std::equal(oldOperands.begin(), oldOperands.end(),
resultOperands.begin()))
@@ -777,14 +776,14 @@ struct SimplifyAffineOp : public OpRewritePattern<AffineOpTy> {
template <>
void SimplifyAffineOp<AffineLoadOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineLoadOp load, AffineMap map,
- ArrayRef<ValuePtr> mapOperands) const {
+ ArrayRef<Value> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineLoadOp>(load, load.getMemRef(), map,
mapOperands);
}
template <>
void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
PatternRewriter &rewriter, AffinePrefetchOp prefetch, AffineMap map,
- ArrayRef<ValuePtr> mapOperands) const {
+ ArrayRef<Value> mapOperands) const {
rewriter.replaceOpWithNewOp<AffinePrefetchOp>(
prefetch, prefetch.memref(), map, mapOperands,
prefetch.localityHint().getZExtValue(), prefetch.isWrite(),
@@ -793,14 +792,14 @@ void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
template <>
void SimplifyAffineOp<AffineStoreOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineStoreOp store, AffineMap map,
- ArrayRef<ValuePtr> mapOperands) const {
+ ArrayRef<Value> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineStoreOp>(
store, store.getValueToStore(), store.getMemRef(), map, mapOperands);
}
template <>
void SimplifyAffineOp<AffineApplyOp>::replaceAffineOp(
PatternRewriter &rewriter, AffineApplyOp apply, AffineMap map,
- ArrayRef<ValuePtr> mapOperands) const {
+ ArrayRef<Value> mapOperands) const {
rewriter.replaceOpWithNewOp<AffineApplyOp>(apply, map, mapOperands);
}
} // end anonymous namespace.
@@ -835,12 +834,12 @@ static LogicalResult foldMemRefCast(Operation *op) {
// TODO(b/133776335) Check that map operands are loop IVs or symbols.
void AffineDmaStartOp::build(Builder *builder, OperationState &result,
- ValuePtr srcMemRef, AffineMap srcMap,
- ValueRange srcIndices, ValuePtr destMemRef,
+ Value srcMemRef, AffineMap srcMap,
+ ValueRange srcIndices, Value destMemRef,
AffineMap dstMap, ValueRange destIndices,
- ValuePtr tagMemRef, AffineMap tagMap,
- ValueRange tagIndices, ValuePtr numElements,
- ValuePtr stride, ValuePtr elementsPerStride) {
+ Value tagMemRef, AffineMap tagMap,
+ ValueRange tagIndices, Value numElements,
+ Value stride, Value elementsPerStride) {
result.addOperands(srcMemRef);
result.addAttribute(getSrcMapAttrName(), AffineMapAttr::get(srcMap));
result.addOperands(srcIndices);
@@ -1004,8 +1003,8 @@ LogicalResult AffineDmaStartOp::fold(ArrayRef<Attribute> cstOperands,
// TODO(b/133776335) Check that map operands are loop IVs or symbols.
void AffineDmaWaitOp::build(Builder *builder, OperationState &result,
- ValuePtr tagMemRef, AffineMap tagMap,
- ValueRange tagIndices, ValuePtr numElements) {
+ Value tagMemRef, AffineMap tagMap,
+ ValueRange tagIndices, Value numElements) {
result.addOperands(tagMemRef);
result.addAttribute(getTagMapAttrName(), AffineMapAttr::get(tagMap));
result.addOperands(tagIndices);
@@ -1014,7 +1013,7 @@ void AffineDmaWaitOp::build(Builder *builder, OperationState &result,
void AffineDmaWaitOp::print(OpAsmPrinter &p) {
p << "affine.dma_wait " << *getTagMemRef() << '[';
- SmallVector<ValuePtr, 2> operands(getTagIndices());
+ SmallVector<Value, 2> operands(getTagIndices());
p.printAffineMapOfSSAIds(getTagMapAttr(), operands);
p << "], ";
p.printOperand(getNumElements());
@@ -1399,8 +1398,8 @@ static LogicalResult foldLoopBounds(AffineForOp forOp) {
/// Canonicalize the bounds of the given loop.
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp) {
- SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
- SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<Value, 4> ubOperands(forOp.getUpperBoundOperands());
auto lbMap = forOp.getLowerBoundMap();
auto ubMap = forOp.getUpperBoundMap();
@@ -1465,7 +1464,7 @@ void AffineForOp::setLowerBound(ValueRange lbOperands, AffineMap map) {
assert(lbOperands.size() == map.getNumInputs());
assert(map.getNumResults() >= 1 && "bound map has at least one result");
- SmallVector<ValuePtr, 4> newOperands(lbOperands.begin(), lbOperands.end());
+ SmallVector<Value, 4> newOperands(lbOperands.begin(), lbOperands.end());
auto ubOperands = getUpperBoundOperands();
newOperands.append(ubOperands.begin(), ubOperands.end());
@@ -1478,7 +1477,7 @@ void AffineForOp::setUpperBound(ValueRange ubOperands, AffineMap map) {
assert(ubOperands.size() == map.getNumInputs());
assert(map.getNumResults() >= 1 && "bound map has at least one result");
- SmallVector<ValuePtr, 4> newOperands(getLowerBoundOperands());
+ SmallVector<Value, 4> newOperands(getLowerBoundOperands());
newOperands.append(ubOperands.begin(), ubOperands.end());
getOperation()->setOperands(newOperands);
@@ -1544,7 +1543,7 @@ bool AffineForOp::matchingBoundOperandList() {
unsigned numOperands = lbMap.getNumInputs();
for (unsigned i = 0, e = lbMap.getNumInputs(); i < e; i++) {
- // Compare ValuePtr 's.
+ // Compare Value 's.
if (getOperand(i) != getOperand(numOperands + i))
return false;
}
@@ -1553,7 +1552,7 @@ bool AffineForOp::matchingBoundOperandList() {
Region &AffineForOp::getLoopBody() { return region(); }
-bool AffineForOp::isDefinedOutsideOfLoop(ValuePtr value) {
+bool AffineForOp::isDefinedOutsideOfLoop(Value value) {
return !region().isAncestor(value->getParentRegion());
}
@@ -1564,13 +1563,13 @@ LogicalResult AffineForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
}
/// Returns if the provided value is the induction variable of a AffineForOp.
-bool mlir::isForInductionVar(ValuePtr val) {
+bool mlir::isForInductionVar(Value val) {
return getForInductionVarOwner(val) != AffineForOp();
}
/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, then return nullptr.
-AffineForOp mlir::getForInductionVarOwner(ValuePtr val) {
+AffineForOp mlir::getForInductionVarOwner(Value val) {
auto ivArg = val.dyn_cast<BlockArgument>();
if (!ivArg || !ivArg->getOwner())
return AffineForOp();
@@ -1581,7 +1580,7 @@ AffineForOp mlir::getForInductionVarOwner(ValuePtr val) {
/// Extracts the induction variables from a list of AffineForOps and returns
/// them.
void mlir::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
- SmallVectorImpl<ValuePtr> *ivs) {
+ SmallVectorImpl<Value> *ivs) {
ivs->reserve(forInsts.size());
for (auto forInst : forInsts)
ivs->push_back(forInst.getInductionVar());
@@ -1720,7 +1719,7 @@ void AffineIfOp::build(Builder *builder, OperationState &result, IntegerSet set,
LogicalResult AffineIfOp::fold(ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &) {
auto set = getIntegerSet();
- SmallVector<ValuePtr, 4> operands(getOperands());
+ SmallVector<Value, 4> operands(getOperands());
canonicalizeSetAndOperands(&set, &operands);
// Any canonicalization change always leads to either a reduction in the
@@ -1749,9 +1748,8 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
result.types.push_back(memrefType.getElementType());
}
-void AffineLoadOp::build(Builder *builder, OperationState &result,
- ValuePtr memref, AffineMap map,
- ValueRange mapOperands) {
+void AffineLoadOp::build(Builder *builder, OperationState &result, Value memref,
+ AffineMap map, ValueRange mapOperands) {
assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info");
result.addOperands(memref);
result.addOperands(mapOperands);
@@ -1760,8 +1758,8 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
result.types.push_back(memrefType.getElementType());
}
-void AffineLoadOp::build(Builder *builder, OperationState &result,
- ValuePtr memref, ValueRange indices) {
+void AffineLoadOp::build(Builder *builder, OperationState &result, Value memref,
+ ValueRange indices) {
auto memrefType = memref->getType().cast<MemRefType>();
auto rank = memrefType.getRank();
// Create identity map for memrefs with at least one dimension or () -> ()
@@ -1843,7 +1841,7 @@ OpFoldResult AffineLoadOp::fold(ArrayRef<Attribute> cstOperands) {
//===----------------------------------------------------------------------===//
void AffineStoreOp::build(Builder *builder, OperationState &result,
- ValuePtr valueToStore, ValuePtr memref, AffineMap map,
+ Value valueToStore, Value memref, AffineMap map,
ValueRange mapOperands) {
assert(map.getNumInputs() == mapOperands.size() && "inconsistent index info");
result.addOperands(valueToStore);
@@ -1854,7 +1852,7 @@ void AffineStoreOp::build(Builder *builder, OperationState &result,
// Use identity map.
void AffineStoreOp::build(Builder *builder, OperationState &result,
- ValuePtr valueToStore, ValuePtr memref,
+ Value valueToStore, Value memref,
ValueRange indices) {
auto memrefType = memref->getType().cast<MemRefType>();
auto rank = memrefType.getRank();
@@ -2064,7 +2062,7 @@ void print(OpAsmPrinter &p, AffinePrefetchOp op) {
p << AffinePrefetchOp::getOperationName() << " " << *op.memref() << '[';
AffineMapAttr mapAttr = op.getAttrOfType<AffineMapAttr>(op.getMapAttrName());
if (mapAttr) {
- SmallVector<ValuePtr, 2> operands(op.getMapOperands());
+ SmallVector<Value, 2> operands(op.getMapOperands());
p.printAffineMapOfSSAIds(mapAttr, operands);
}
p << ']' << ", " << (op.isWrite() ? "write" : "read") << ", "
diff --git a/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp b/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
index 725751eb6c1..df6015de1b9 100644
--- a/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
+++ b/mlir/lib/Dialect/FxpMathOps/Transforms/LowerUniformRealMath.cpp
@@ -37,9 +37,9 @@ struct LowerUniformCastsPass : public FunctionPass<LowerUniformCastsPass> {
// Dequantize
//===----------------------------------------------------------------------===//
-static ValuePtr emitUniformPerLayerDequantize(Location loc, ValuePtr input,
- UniformQuantizedType elementType,
- PatternRewriter &rewriter) {
+static Value emitUniformPerLayerDequantize(Location loc, Value input,
+ UniformQuantizedType elementType,
+ PatternRewriter &rewriter) {
// Pre-conditions.
if (!elementType.isSigned()) {
// TODO: Support unsigned storage type.
@@ -62,7 +62,7 @@ static ValuePtr emitUniformPerLayerDequantize(Location loc, ValuePtr input,
// Apply zero-point offset.
if (elementType.getZeroPoint() != 0) {
- ValuePtr negZeroPointConst = rewriter.create<ConstantOp>(
+ Value negZeroPointConst = rewriter.create<ConstantOp>(
loc, broadcastScalarConstIntValue(intermediateType,
-elementType.getZeroPoint()));
input = rewriter.create<AddIOp>(loc, input, negZeroPointConst);
@@ -72,14 +72,14 @@ static ValuePtr emitUniformPerLayerDequantize(Location loc, ValuePtr input,
input = rewriter.create<ConvertISToFOp>(loc, realType, input);
// Mul by scale.
- ValuePtr scaleConst = rewriter.create<ConstantOp>(
+ Value scaleConst = rewriter.create<ConstantOp>(
loc, broadcastScalarConstFloatValue(realType,
APFloat(elementType.getScale())));
return rewriter.create<MulFOp>(loc, input, scaleConst);
}
-static ValuePtr
-emitUniformPerAxisDequantize(Location loc, ValuePtr input,
+static Value
+emitUniformPerAxisDequantize(Location loc, Value input,
UniformQuantizedPerAxisType elementType,
PatternRewriter &rewriter) {
// TODO: Support per-axis dequantize.
@@ -88,8 +88,8 @@ emitUniformPerAxisDequantize(Location loc, ValuePtr input,
return nullptr;
}
-static ValuePtr emitDequantize(Location loc, ValuePtr input,
- PatternRewriter &rewriter) {
+static Value emitDequantize(Location loc, Value input,
+ PatternRewriter &rewriter) {
Type inputType = input->getType();
QuantizedType qElementType =
QuantizedType::getQuantizedElementType(inputType);
@@ -124,7 +124,7 @@ struct UniformDequantizePattern : public OpRewritePattern<DequantizeCastOp> {
return matchFailure();
}
- ValuePtr dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter);
+ Value dequantizedValue = emitDequantize(op.getLoc(), op.arg(), rewriter);
if (!dequantizedValue) {
return matchFailure();
}
@@ -161,14 +161,14 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
castElementType(info.resultStorageType, intermediateElementType);
// Cast operands to storage type.
- ValuePtr lhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.lhsStorageType, info.lhs)
- .getResult();
- ValuePtr rhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.rhsStorageType, info.rhs)
- .getResult();
+ Value lhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.lhsStorageType, info.lhs)
+ .getResult();
+ Value rhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.rhsStorageType, info.rhs)
+ .getResult();
// Cast to the intermediate sized type.
lhsValue = rewriter.create<ConvertISOp>(info.op->getLoc(), intermediateType,
@@ -177,7 +177,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
rhsValue);
// Add.
- ValuePtr resultValue =
+ Value resultValue =
rewriter.create<AddIOp>(info.op->getLoc(), lhsValue, rhsValue);
// Zero point offset adjustment.
@@ -185,7 +185,7 @@ tryRewriteAffineAddEwIsomorphicSigned(const UniformBinaryOpInfo &info,
// zpOffset = -zp
int zpOffset = -1 * info.resultType.getZeroPoint();
if (zpOffset != 0) {
- ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
+ Value zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(),
broadcastScalarConstIntValue(intermediateType, zpOffset));
resultValue =
@@ -237,14 +237,14 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
castElementType(info.resultStorageType, intermediateElementType);
// Cast operands to storage type.
- ValuePtr lhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.lhsStorageType, info.lhs)
- .getResult();
- ValuePtr rhsValue = rewriter
- .create<StorageCastOp>(info.op->getLoc(),
- info.rhsStorageType, info.rhs)
- .getResult();
+ Value lhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.lhsStorageType, info.lhs)
+ .getResult();
+ Value rhsValue = rewriter
+ .create<StorageCastOp>(info.op->getLoc(),
+ info.rhsStorageType, info.rhs)
+ .getResult();
// Cast to the intermediate sized type.
lhsValue = rewriter.create<ConvertISOp>(info.op->getLoc(), intermediateType,
@@ -254,7 +254,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
// Apply argument zeroPoints.
if (info.lhsType.getZeroPoint() != 0) {
- ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
+ Value zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(), broadcastScalarConstIntValue(
intermediateType, -info.lhsType.getZeroPoint()));
lhsValue =
@@ -262,7 +262,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
}
if (info.rhsType.getZeroPoint() != 0) {
- ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
+ Value zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(), broadcastScalarConstIntValue(
intermediateType, -info.rhsType.getZeroPoint()));
rhsValue =
@@ -270,7 +270,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
}
// Mul.
- ValuePtr resultValue =
+ Value resultValue =
rewriter.create<MulIOp>(info.op->getLoc(), lhsValue, rhsValue);
// Scale output.
@@ -284,7 +284,7 @@ tryRewriteAffineMulEwSigned(const UniformBinaryOpInfo &info,
// Zero point offset adjustment.
if (info.resultType.getZeroPoint() != 0) {
- ValuePtr zpOffsetConst = rewriter.create<ConstantOp>(
+ Value zpOffsetConst = rewriter.create<ConstantOp>(
info.op->getLoc(),
broadcastScalarConstIntValue(intermediateType,
info.resultType.getZeroPoint()));
diff --git a/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h b/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
index bce5285a8b0..8cea97c693c 100644
--- a/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
+++ b/mlir/lib/Dialect/FxpMathOps/Transforms/UniformKernelUtils.h
@@ -50,7 +50,7 @@ template <typename F> bool integralLog2(F x, int &log2Result) {
/// Helper class for operating on binary operations where all operands
/// and the result are a UniformQuantizedType.
struct UniformBinaryOpInfo {
- UniformBinaryOpInfo(Operation *op, ValuePtr lhs, ValuePtr rhs,
+ UniformBinaryOpInfo(Operation *op, Value lhs, Value rhs,
Optional<APFloat> clampMin, Optional<APFloat> clampMax)
: op(op), lhs(lhs), rhs(rhs), clampMin(clampMin), clampMax(clampMax),
lhsType(getUniformElementType(lhs->getType())),
@@ -119,8 +119,8 @@ struct UniformBinaryOpInfo {
}
Operation *op;
- ValuePtr lhs;
- ValuePtr rhs;
+ Value lhs;
+ Value rhs;
Optional<APFloat> clampMin;
Optional<APFloat> clampMax;
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 422597fe90d..bda8032fc21 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -204,15 +204,14 @@ static ParseResult parseShuffleOp(OpAsmParser &parser, OperationState &state) {
static SmallVector<Type, 4> getValueTypes(ValueRange values) {
SmallVector<Type, 4> types;
types.reserve(values.size());
- for (ValuePtr v : values)
+ for (Value v : values)
types.push_back(v->getType());
return types;
}
-void LaunchOp::build(Builder *builder, OperationState &result,
- ValuePtr gridSizeX, ValuePtr gridSizeY, ValuePtr gridSizeZ,
- ValuePtr blockSizeX, ValuePtr blockSizeY,
- ValuePtr blockSizeZ, ValueRange operands) {
+void LaunchOp::build(Builder *builder, OperationState &result, Value gridSizeX,
+ Value gridSizeY, Value gridSizeZ, Value blockSizeX,
+ Value blockSizeY, Value blockSizeZ, ValueRange operands) {
// Add grid and block sizes as op operands, followed by the data operands.
result.addOperands(
{gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
@@ -519,10 +518,9 @@ void LaunchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
//===----------------------------------------------------------------------===//
void LaunchFuncOp::build(Builder *builder, OperationState &result,
- GPUFuncOp kernelFunc, ValuePtr gridSizeX,
- ValuePtr gridSizeY, ValuePtr gridSizeZ,
- ValuePtr blockSizeX, ValuePtr blockSizeY,
- ValuePtr blockSizeZ, ValueRange kernelOperands) {
+ GPUFuncOp kernelFunc, Value gridSizeX, Value gridSizeY,
+ Value gridSizeZ, Value blockSizeX, Value blockSizeY,
+ Value blockSizeZ, ValueRange kernelOperands) {
// Add grid and block sizes as op operands, followed by the data operands.
result.addOperands(
{gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
@@ -555,7 +553,7 @@ StringRef LaunchFuncOp::getKernelModuleName() {
.getRootReference();
}
-ValuePtr LaunchFuncOp::getKernelOperand(unsigned i) {
+Value LaunchFuncOp::getKernelOperand(unsigned i) {
return getOperation()->getOperand(i + kNumConfigOperands);
}
@@ -718,14 +716,13 @@ static ParseResult parseGPUFuncOp(OpAsmParser &parser, OperationState &result) {
}
static void printAttributions(OpAsmPrinter &p, StringRef keyword,
- ArrayRef<BlockArgumentPtr> values) {
+ ArrayRef<BlockArgument> values) {
if (values.empty())
return;
p << ' ' << keyword << '(';
- interleaveComma(values, p, [&p](BlockArgumentPtr v) {
- p << *v << " : " << v->getType();
- });
+ interleaveComma(values, p,
+ [&p](BlockArgument v) { p << *v << " : " << v->getType(); });
p << ')';
}
@@ -772,9 +769,9 @@ LogicalResult GPUFuncOp::verifyType() {
}
static LogicalResult verifyAttributions(Operation *op,
- ArrayRef<BlockArgumentPtr> attributions,
+ ArrayRef<BlockArgument> attributions,
unsigned memorySpace) {
- for (ValuePtr v : attributions) {
+ for (Value v : attributions) {
auto type = v->getType().dyn_cast<MemRefType>();
if (!type)
return op->emitOpError() << "expected memref type in attribution";
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 6a7cd290dd2..2d00ac03d33 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -22,10 +22,10 @@ using namespace mlir;
template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
- SmallVectorImpl<ValuePtr> &values) {
+ SmallVectorImpl<Value> &values) {
for (StringRef dim : {"x", "y", "z"}) {
- ValuePtr v = builder.create<OpTy>(loc, builder.getIndexType(),
- builder.getStringAttr(dim));
+ Value v = builder.create<OpTy>(loc, builder.getIndexType(),
+ builder.getStringAttr(dim));
values.push_back(v);
}
}
@@ -37,7 +37,7 @@ static void injectGpuIndexOperations(Location loc, Region &body) {
OpBuilder builder(loc->getContext());
Block &firstBlock = body.front();
builder.setInsertionPointToStart(&firstBlock);
- SmallVector<ValuePtr, 12> indexOps;
+ SmallVector<Value, 12> indexOps;
createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
@@ -60,7 +60,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc,
gpu::LaunchFuncOp launch) {
OpBuilder kernelBuilder(kernelFunc.getBody());
auto &firstBlock = kernelFunc.getBody().front();
- SmallVector<ValuePtr, 8> newLaunchArgs;
+ SmallVector<Value, 8> newLaunchArgs;
BlockAndValueMapping map;
for (int i = 0, e = launch.getNumKernelOperands(); i < e; ++i) {
map.map(launch.getKernelOperand(i), kernelFunc.getArgument(i));
@@ -73,7 +73,7 @@ static gpu::LaunchFuncOp inlineBeneficiaryOps(gpu::GPUFuncOp kernelFunc,
}
// Only inline operations that do not create new arguments.
if (!llvm::all_of(operandOp->getOperands(),
- [map](ValuePtr value) { return map.contains(value); })) {
+ [map](Value value) { return map.contains(value); })) {
continue;
}
auto clone = kernelBuilder.clone(*operandOp, map);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index b8d2d242657..71b7064ac63 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -406,7 +406,7 @@ static ParseResult parseCallOp(OpAsmParser &parser, OperationState &result) {
// Expects vector to be of wrapped LLVM vector type and position to be of
// wrapped LLVM i32 type.
void LLVM::ExtractElementOp::build(Builder *b, OperationState &result,
- ValuePtr vector, ValuePtr position,
+ Value vector, Value position,
ArrayRef<NamedAttribute> attrs) {
auto wrappedVectorType = vector->getType().cast<LLVM::LLVMType>();
auto llvmType = wrappedVectorType.getVectorElementType();
@@ -672,7 +672,7 @@ static void printBrOp(OpAsmPrinter &p, BrOp &op) {
// attribute-dict?
static ParseResult parseBrOp(OpAsmParser &parser, OperationState &result) {
Block *dest;
- SmallVector<ValuePtr, 4> operands;
+ SmallVector<Value, 4> operands;
if (parser.parseSuccessorAndUseList(dest, operands) ||
parser.parseOptionalAttrDict(result.attributes))
return failure();
@@ -699,8 +699,8 @@ static void printCondBrOp(OpAsmPrinter &p, CondBrOp &op) {
static ParseResult parseCondBrOp(OpAsmParser &parser, OperationState &result) {
Block *trueDest;
Block *falseDest;
- SmallVector<ValuePtr, 4> trueOperands;
- SmallVector<ValuePtr, 4> falseOperands;
+ SmallVector<Value, 4> trueOperands;
+ SmallVector<Value, 4> falseOperands;
OpAsmParser::OperandType condition;
Builder &builder = parser.getBuilder();
@@ -1057,8 +1057,8 @@ static LogicalResult verify(GlobalOp op) {
//===----------------------------------------------------------------------===//
// Expects vector to be of wrapped LLVM vector type and position to be of
// wrapped LLVM i32 type.
-void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result,
- ValuePtr v1, ValuePtr v2, ArrayAttr mask,
+void LLVM::ShuffleVectorOp::build(Builder *b, OperationState &result, Value v1,
+ Value v2, ArrayAttr mask,
ArrayRef<NamedAttribute> attrs) {
auto wrappedContainerType1 = v1->getType().cast<LLVM::LLVMType>();
auto vType = LLVMType::getVectorTy(
@@ -1655,10 +1655,10 @@ LLVMType LLVMType::getVoidTy(LLVMDialect *dialect) {
// Utility functions.
//===----------------------------------------------------------------------===//
-ValuePtr mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
- StringRef name, StringRef value,
- LLVM::Linkage linkage,
- LLVM::LLVMDialect *llvmDialect) {
+Value mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
+ StringRef name, StringRef value,
+ LLVM::Linkage linkage,
+ LLVM::LLVMDialect *llvmDialect) {
assert(builder.getInsertionBlock() &&
builder.getInsertionBlock()->getParentOp() &&
"expected builder to point to a block constrained in an op");
@@ -1675,13 +1675,13 @@ ValuePtr mlir::LLVM::createGlobalString(Location loc, OpBuilder &builder,
builder.getStringAttr(value));
// Get the pointer to the first character in the global string.
- ValuePtr globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
- ValuePtr cst0 = builder.create<LLVM::ConstantOp>(
+ Value globalPtr = builder.create<LLVM::AddressOfOp>(loc, global);
+ Value cst0 = builder.create<LLVM::ConstantOp>(
loc, LLVM::LLVMType::getInt64Ty(llvmDialect),
builder.getIntegerAttr(builder.getIndexType(), 0));
- return builder.create<LLVM::GEPOp>(
- loc, LLVM::LLVMType::getInt8PtrTy(llvmDialect), globalPtr,
- ArrayRef<ValuePtr>({cst0, cst0}));
+ return builder.create<LLVM::GEPOp>(loc,
+ LLVM::LLVMType::getInt8PtrTy(llvmDialect),
+ globalPtr, ArrayRef<Value>({cst0, cst0}));
}
bool mlir::LLVM::satisfiesLLVMModule(Operation *op) {
diff --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
index be90b1ce5a6..e8667f07822 100644
--- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
+++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
@@ -40,7 +40,7 @@ static StringRef toStringRef(LinalgDependenceGraph::DependenceType dt) {
llvm_unreachable("Unexpected DependenceType");
}
-ValuePtr Aliases::find(ValuePtr v) {
+Value Aliases::find(Value v) {
if (v.isa<BlockArgument>())
return v;
@@ -185,14 +185,14 @@ LinalgDependenceGraph::findCoveringDependences(LinalgOp srcLinalgOp,
}
SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringWrites(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const {
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value view) const {
return findOperationsWithCoveringDependences(
srcLinalgOp, dstLinalgOp, view,
{DependenceType::WAW, DependenceType::WAR});
}
SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringReads(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view) const {
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value view) const {
return findOperationsWithCoveringDependences(
srcLinalgOp, dstLinalgOp, view,
{DependenceType::RAR, DependenceType::RAW});
@@ -200,7 +200,7 @@ SmallVector<Operation *, 8> LinalgDependenceGraph::findCoveringReads(
SmallVector<Operation *, 8>
LinalgDependenceGraph::findOperationsWithCoveringDependences(
- LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, ValuePtr view,
+ LinalgOp srcLinalgOp, LinalgOp dstLinalgOp, Value view,
ArrayRef<DependenceType> types) const {
auto *src = srcLinalgOp.getOperation();
auto *dst = dstLinalgOp.getOperation();
diff --git a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
index af5e576b290..37c63b74f14 100644
--- a/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
+++ b/mlir/lib/Dialect/Linalg/EDSC/Builders.cpp
@@ -35,8 +35,8 @@ static void getMaxDimIndex(ArrayRef<StructuredIndexed> structuredIndices,
Operation *mlir::edsc::makeLinalgGenericOp(
ArrayRef<IterType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
ArrayRef<StructuredIndexed> outputs,
- function_ref<void(ArrayRef<BlockArgumentPtr>)> regionBuilder,
- ArrayRef<ValuePtr> otherValues, ArrayRef<Attribute> otherAttributes) {
+ function_ref<void(ArrayRef<BlockArgument>)> regionBuilder,
+ ArrayRef<Value> otherValues, ArrayRef<Attribute> otherAttributes) {
auto &builder = edsc::ScopedContext::getBuilder();
auto *ctx = builder.getContext();
unsigned nInputs = inputs.size();
@@ -57,7 +57,7 @@ Operation *mlir::edsc::makeLinalgGenericOp(
AffineMap::get(/*dimCount=*/nDims, /*symbolCount=*/0, out.getExprs()));
unsigned nViews = nInputs + nOutputs;
- SmallVector<ValuePtr, 4> values;
+ SmallVector<Value, 4> values;
values.reserve(nViews);
values.append(inputs.begin(), inputs.end());
values.append(outputs.begin(), outputs.end());
@@ -100,7 +100,7 @@ Operation *mlir::edsc::makeLinalgGenericOp(
return op;
}
-void mlir::edsc::ops::macRegionBuilder(ArrayRef<BlockArgumentPtr> args) {
+void mlir::edsc::ops::macRegionBuilder(ArrayRef<BlockArgument> args) {
using edsc::op::operator+;
using edsc::op::operator*;
assert(args.size() == 3 && "expected 3 block arguments");
@@ -113,7 +113,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
StructuredIndexed O) {
SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
edsc::IterType::Parallel);
- auto fun = [&unaryOp](ArrayRef<BlockArgumentPtr> args) {
+ auto fun = [&unaryOp](ArrayRef<BlockArgument> args) {
assert(args.size() == 2 && "expected 2 block arguments");
ValueHandle a(args[0]);
linalg_yield(unaryOp(a));
@@ -125,8 +125,7 @@ Operation *mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I,
StructuredIndexed O) {
;
using edsc::intrinsics::tanh;
- UnaryPointwiseOpBuilder unOp(
- [](ValueHandle a) -> ValuePtr { return tanh(a); });
+ UnaryPointwiseOpBuilder unOp([](ValueHandle a) -> Value { return tanh(a); });
return linalg_pointwise(unOp, I, O);
}
@@ -137,7 +136,7 @@ Operation *mlir::edsc::ops::linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
StructuredIndexed O) {
SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
edsc::IterType::Parallel);
- auto fun = [&binaryOp](ArrayRef<BlockArgumentPtr> args) {
+ auto fun = [&binaryOp](ArrayRef<BlockArgument> args) {
assert(args.size() == 3 && "expected 3 block arguments");
ValueHandle a(args[0]), b(args[1]);
linalg_yield(binaryOp(a, b));
@@ -150,14 +149,14 @@ Operation *mlir::edsc::ops::linalg_pointwise_add(StructuredIndexed I1,
StructuredIndexed O) {
using edsc::op::operator+;
BinaryPointwiseOpBuilder binOp(
- [](ValueHandle a, ValueHandle b) -> ValuePtr { return a + b; });
+ [](ValueHandle a, ValueHandle b) -> Value { return a + b; });
return linalg_pointwise(binOp, I1, I2, O);
}
Operation *mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1,
StructuredIndexed I2,
StructuredIndexed O) {
- BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> ValuePtr {
+ BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> Value {
using edsc::intrinsics::select;
using edsc::op::operator>;
return select(a > b, a, b).getValue();
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 10c37c0ec43..0f9f8f8d51f 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -309,7 +309,7 @@ static ParseResult parseRangeOp(OpAsmParser &parser, OperationState &result) {
// SliceOp
//===----------------------------------------------------------------------===//
void mlir::linalg::SliceOp::build(Builder *b, OperationState &result,
- ValuePtr base, ValueRange indexings) {
+ Value base, ValueRange indexings) {
result.addOperands(base);
result.addOperands(indexings);
@@ -385,7 +385,7 @@ static LogicalResult verify(SliceOp op) {
// TransposeOp
//===----------------------------------------------------------------------===//
void mlir::linalg::TransposeOp::build(Builder *b, OperationState &result,
- ValuePtr view, AffineMapAttr permutation,
+ Value view, AffineMapAttr permutation,
ArrayRef<NamedAttribute> attrs) {
auto permutationMap = permutation.getValue();
assert(permutationMap);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 27dcf663d23..9df7bce0879 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -68,16 +68,16 @@ static llvm::cl::list<unsigned> clTileSizes(
static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
ArrayRef<SubViewOp::Range> loopRanges) {
auto maps = loopToOperandRangesMaps(op);
- SmallVector<ValuePtr, 8> clonedViews;
+ SmallVector<Value, 8> clonedViews;
clonedViews.reserve(op.getNumInputsAndOutputs());
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
+ SmallVector<Value, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "map: " << map << "\n");
- ValuePtr view = en.value();
+ Value view = en.value();
SmallVector<SubViewOp::Range, 4> viewRanges(map.getNumResults());
for (auto en2 : llvm::enumerate(map.getResults())) {
unsigned d = en2.index();
@@ -90,7 +90,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
// Construct a new subview for the tile.
unsigned rank = viewRanges.size();
- SmallVector<ValuePtr, 4> offsets, sizes, strides;
+ SmallVector<Value, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -108,7 +108,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
struct ViewDimension {
- ValuePtr view;
+ Value view;
unsigned dimension;
};
@@ -121,14 +121,14 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
auto maps = loopToOperandRangesMaps(op);
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
+ SmallVector<Value, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n");
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n");
- ValuePtr view = en.value();
- SmallVector<ValuePtr, 8> viewRanges(map.getNumResults(), nullptr);
+ Value view = en.value();
+ SmallVector<Value, 8> viewRanges(map.getNumResults(), nullptr);
for (auto en2 : llvm::enumerate(map.getResults())) {
if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
@@ -142,9 +142,9 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
llvm_unreachable("Expect to be able to extract a view defining loop range");
}
-static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
- LinalgOp consumer, unsigned consumerIdx,
- unsigned producerIdx, OperationFolder *folder) {
+static LinalgOp fuse(Value producedView, LinalgOp producer, LinalgOp consumer,
+ unsigned consumerIdx, unsigned producerIdx,
+ OperationFolder *folder) {
auto subView = dyn_cast_or_null<SubViewOp>(
consumer.getInput(consumerIdx)->getDefiningOp());
auto slice = dyn_cast_or_null<SliceOp>(
@@ -196,8 +196,7 @@ static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
-static bool isStructurallyFusableProducer(LinalgOp producer,
- ValuePtr consumedView,
+static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
LinalgOp consumer) {
if (producer.getNumOutputs() != 1) {
LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)");
@@ -217,7 +216,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer,
bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
LinalgOp consumer,
- ValuePtr consumedView,
+ Value consumedView,
LinalgOp producer) {
// Make some simple structural checks that alleviate the need for more
// complex analyses.
@@ -236,7 +235,7 @@ bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
}
bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
- LinalgOp consumer, ValuePtr consumedView,
+ LinalgOp consumer, Value consumedView,
LinalgOp producer) {
if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
return false;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
index 0f333791dd7..d7cc4a86d21 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -40,7 +40,7 @@ using edsc::op::operator==;
static SmallVector<ValueHandle, 8>
makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<ValuePtr> vals) {
+ ArrayRef<Value> vals) {
assert(map.getNumSymbols() == 0);
assert(map.getNumInputs() == vals.size());
SmallVector<ValueHandle, 8> res;
@@ -48,35 +48,34 @@ makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
auto dims = map.getNumDims();
for (auto e : map.getResults()) {
auto exprMap = AffineMap::get(dims, 0, e);
- SmallVector<ValuePtr, 4> operands(vals.begin(), vals.end());
+ SmallVector<Value, 4> operands(vals.begin(), vals.end());
canonicalizeMapAndOperands(&exprMap, &operands);
res.push_back(affine_apply(exprMap, operands));
}
return res;
}
-static SmallVector<ValuePtr, 4> permuteIvs(ArrayRef<ValuePtr> ivs,
- Optional<AffineMap> permutation) {
+static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
+ Optional<AffineMap> permutation) {
return permutation ? applyMapToValues(ScopedContext::getBuilder(),
ScopedContext::getLocation(),
permutation.getValue(), ivs)
- : SmallVector<ValuePtr, 4>(ivs.begin(), ivs.end());
+ : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<ValuePtr> allViewSizes);
-SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<ValuePtr> allViewSizes) {
+static SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<Value> allViewSizes);
+SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc, AffineMap map,
+ ArrayRef<Value> allViewSizes) {
// Apply `map` to get view sizes in loop order.
auto sizes = applyMapToValues(b, loc, map, allViewSizes);
// Create a new range with the applied tile sizes.
ScopedContext scope(b, loc);
- SmallVector<ValuePtr, 4> res;
+ SmallVector<Value, 4> res;
for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
res.push_back(range(constant_index(0), sizes[idx], constant_index(1)));
}
@@ -89,8 +88,7 @@ class LinalgScopedEmitter {};
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- CopyOp copyOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
auto nPar = copyOp.getNumParallelLoops();
assert(nPar == allIvs.size());
auto inputIvs =
@@ -112,8 +110,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- FillOp fillOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
auto nPar = fillOp.getNumParallelLoops();
assert(nPar == allIvs.size());
auto ivs =
@@ -129,7 +126,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs, DotOp dotOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, DotOp dotOp) {
assert(allIvs.size() == 1);
IndexHandle r_i(allIvs[0]);
IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
@@ -142,7 +139,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
MatvecOp matvecOp) {
assert(allIvs.size() == 2);
IndexHandle i(allIvs[0]), r_j(allIvs[1]);
@@ -156,7 +153,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
MatmulOp matmulOp) {
assert(allIvs.size() == 3);
IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
@@ -170,8 +167,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- ConvOp convOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
auto maps = loopToOperandRangesMaps(convOp);
@@ -220,14 +216,14 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, GenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
GenericOp genericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
using edsc::intrinsics::detail::ValueHandleArray;
unsigned nInputs = genericOp.getNumInputs();
unsigned nOutputs = genericOp.getNumOutputs();
- SmallVector<ValuePtr, 4> indexedValues(nInputs + nOutputs);
+ SmallVector<Value, 4> indexedValues(nInputs + nOutputs);
// 1.a. Emit std_load from input views.
for (unsigned i = 0; i < nInputs; ++i) {
@@ -315,7 +311,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
IndexedGenericOp indexedGenericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -323,7 +319,7 @@ public:
unsigned nInputs = indexedGenericOp.getNumInputs();
unsigned nOutputs = indexedGenericOp.getNumOutputs();
unsigned nLoops = allIvs.size();
- SmallVector<ValuePtr, 4> indexedValues(nLoops + nInputs + nOutputs);
+ SmallVector<Value, 4> indexedValues(nLoops + nInputs + nOutputs);
for (unsigned i = 0; i < nLoops; ++i) {
indexedValues[i] = allIvs[i];
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
index 451803797f4..eb23a8ceb1a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
@@ -90,7 +90,7 @@ LogicalResult mlir::linalg::tileAndFuseLinalgOpAndSetMarker(
}
bool mlir::linalg::detail::isProducedByOpOfTypeImpl(
- Operation *consumerOp, ValuePtr consumedView,
+ Operation *consumerOp, Value consumedView,
function_ref<bool(Operation *)> isaOpType) {
LinalgOp consumer = dyn_cast<LinalgOp>(consumerOp);
if (!consumer)
@@ -166,7 +166,7 @@ LogicalResult mlir::linalg::vectorizeGenericOp(PatternRewriter &rewriter,
return failure();
// TODO(ntv): non-identity layout.
- auto isStaticMemRefWithIdentityLayout = [](ValuePtr v) {
+ auto isStaticMemRefWithIdentityLayout = [](Value v) {
auto m = v->getType().dyn_cast<MemRefType>();
if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty())
return false;
@@ -226,7 +226,7 @@ mlir::linalg::permuteGenericLinalgOp(PatternRewriter &rewriter, Operation *op,
LogicalResult mlir::linalg::linalgOpPromoteSubviews(PatternRewriter &rewriter,
Operation *op) {
LinalgOp linOp = dyn_cast<LinalgOp>(op);
- SetVector<ValuePtr> subViews;
+ SetVector<Value> subViews;
for (auto it : linOp.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
subViews.insert(sv);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 08bc1518a19..b8b27958ff5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -46,15 +46,14 @@ static llvm::cl::opt<bool> clPromoteDynamic(
llvm::cl::desc("Test generation of dynamic promoted buffers"),
llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
-static ValuePtr allocBuffer(Type elementType, ValuePtr size,
- bool dynamicBuffers) {
+static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
auto *ctx = size->getContext();
auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
if (!dynamicBuffers)
if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size->getDefiningOp()))
return alloc(
MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
- ValuePtr mul = muli(constant_index(width), size);
+ Value mul = muli(constant_index(width), size);
return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
}
@@ -84,14 +83,14 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
auto viewType = subView.getType();
auto rank = viewType.getRank();
- ValuePtr allocSize = one;
- SmallVector<ValuePtr, 8> fullRanges, partialRanges;
+ Value allocSize = one;
+ SmallVector<Value, 8> fullRanges, partialRanges;
fullRanges.reserve(rank);
partialRanges.reserve(rank);
for (auto en : llvm::enumerate(subView.getRanges())) {
auto rank = en.index();
auto rangeValue = en.value();
- ValuePtr d = rangeValue.size;
+ Value d = rangeValue.size;
allocSize = muli(folder, allocSize, d).getValue();
fullRanges.push_back(d);
partialRanges.push_back(range(folder, zero, dim(subView, rank), one));
@@ -107,7 +106,7 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
SmallVector<PromotionInfo, 8>
mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
- ArrayRef<ValuePtr> subViews, bool dynamicBuffers,
+ ArrayRef<Value> subViews, bool dynamicBuffers,
OperationFolder *folder) {
if (subViews.empty())
return {};
@@ -115,7 +114,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
ScopedContext scope(b, loc);
SmallVector<PromotionInfo, 8> res;
res.reserve(subViews.size());
- DenseMap<ValuePtr, PromotionInfo> promotionInfoMap;
+ DenseMap<Value, PromotionInfo> promotionInfoMap;
for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto viewType = subView.getType();
@@ -136,7 +135,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
// TODO(ntv): value to fill with should be related to the operation.
// For now, just use APFloat(0.0f).
auto t = subView.getType().getElementType().cast<FloatType>();
- ValuePtr fillVal = constant_float(folder, APFloat(0.0f), t);
+ Value fillVal = constant_float(folder, APFloat(0.0f), t);
// TODO(ntv): fill is only necessary if `promotionInfo` has a full local
// view that is different from the partial local view and we are on the
// boundary.
@@ -153,16 +152,16 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
}
LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- SetVector<ValuePtr> subViews,
+ SetVector<Value> subViews,
bool dynamicBuffers,
OperationFolder *folder) {
// 1. Promote the specified views and use them in the new op.
ScopedContext scope(b, op.getLoc());
auto promotedBufferAndViews = promoteSubViews(
b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder);
- SmallVector<ValuePtr, 8> opViews;
+ SmallVector<Value, 8> opViews;
opViews.reserve(op.getNumInputsAndOutputs());
- SmallVector<std::pair<ValuePtr, ValuePtr>, 8> writebackViews;
+ SmallVector<std::pair<Value, Value>, 8> writebackViews;
writebackViews.reserve(subViews.size());
unsigned promotedIdx = 0;
for (auto view : op.getInputsAndOutputs()) {
@@ -206,7 +205,7 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) {
// TODO(ntv) some heuristic here to decide what to promote. Atm it is all or
// nothing.
- SetVector<ValuePtr> subViews;
+ SetVector<Value> subViews;
OpBuilder b(op);
for (auto it : op.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
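Promotion keys its bookkeeping on the subviews themselves via `DenseMap<Value, PromotionInfo>`. A simplified, hypothetical sketch of the operand-rewiring step (the real map stores `PromotionInfo`, not a bare value):

#include "mlir/IR/Value.h"
#include "llvm/ADT/DenseMap.h"

using namespace mlir;

// Substitute a promoted buffer view where one exists; views that were not
// promoted pass through unchanged.
static Value remapView(const DenseMap<Value, Value> &promoted, Value view) {
  auto it = promoted.find(view);
  return it == promoted.end() ? view : it->second;
}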
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 99645a23100..964f540c099 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -44,7 +44,7 @@ static llvm::cl::list<unsigned>
llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
llvm::cl::cat(clOptionsCategory));
-static bool isZero(ValuePtr v) {
+static bool isZero(Value v) {
return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
}
@@ -62,12 +62,12 @@ using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// indices of newly created loops.
static std::tuple<SmallVector<SubViewOp::Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<ValuePtr> allViewSizes,
- ArrayRef<ValuePtr> allTileSizes, OperationFolder *folder) {
+ ArrayRef<Value> allViewSizes, ArrayRef<Value> allTileSizes,
+ OperationFolder *folder) {
assert(allTileSizes.size() == map.getNumResults());
// Apply `map` to get view sizes in loop order.
auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder);
- SmallVector<ValuePtr, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
+ SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
  // Traverse the tile sizes, which are in loop order, and erase zeros everywhere.
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
@@ -101,8 +101,7 @@ namespace {
// `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
- TileCheck(ArrayRef<ValuePtr> tileSizes)
- : isTiled(false), tileSizes(tileSizes) {}
+ TileCheck(ArrayRef<Value> tileSizes) : isTiled(false), tileSizes(tileSizes) {}
void visitDimExpr(AffineDimExpr expr) {
isTiled |= !isZero(tileSizes[expr.getPosition()]);
@@ -115,7 +114,7 @@ struct TileCheck : public AffineExprVisitor<TileCheck> {
"nonpositive multiplying coefficient");
}
bool isTiled;
- ArrayRef<ValuePtr> tileSizes;
+ ArrayRef<Value> tileSizes;
};
} // namespace
@@ -197,11 +196,11 @@ void transformIndexedGenericOpIndices(
auto rangeIndex = loopIndexToRangeIndex.find(i);
if (rangeIndex == loopIndexToRangeIndex.end())
continue;
- ValuePtr oldIndex = block.getArgument(i);
+ Value oldIndex = block.getArgument(i);
// Offset the index argument `i` by the value of the corresponding induction
// variable and replace all uses of the previous value.
- ValuePtr newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
- pivs[rangeIndex->second]->getValue());
+ Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+ pivs[rangeIndex->second]->getValue());
for (auto &use : oldIndex->getUses()) {
if (use.getOwner() == newIndex->getDefiningOp())
continue;
@@ -210,7 +209,7 @@ void transformIndexedGenericOpIndices(
}
}
-static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
+static bool isTiled(AffineExpr expr, ArrayRef<Value> tileSizes) {
if (!expr)
return false;
TileCheck t(tileSizes);
@@ -220,7 +219,7 @@ static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
// Checks whether the view with index `viewIndex` within `linalgOp` varies with
// respect to a non-zero `tileSize`.
-static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
+static bool isTiled(AffineMap map, ArrayRef<Value> tileSizes) {
if (!map)
return false;
for (unsigned r = 0; r < map.getNumResults(); ++r)
@@ -229,13 +228,13 @@ static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
return false;
}
-static SmallVector<ValuePtr, 4>
+static SmallVector<Value, 4>
makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
- ArrayRef<ValuePtr> ivs, ArrayRef<ValuePtr> tileSizes,
- ArrayRef<ValuePtr> viewSizes, OperationFolder *folder) {
+ ArrayRef<Value> ivs, ArrayRef<Value> tileSizes,
+ ArrayRef<Value> viewSizes, OperationFolder *folder) {
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
llvm::make_range(tileSizes.begin(), tileSizes.end()),
- [](ValuePtr v) { return !isZero(v); })) &&
+ [](Value v) { return !isZero(v); })) &&
"expected as many ivs as non-zero sizes");
using edsc::intrinsics::select;
@@ -244,22 +243,21 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Construct (potentially temporary) mins and maxes on which to apply maps
// that define tile subviews.
- SmallVector<ValuePtr, 8> lbs, subViewSizes;
+ SmallVector<Value, 8> lbs, subViewSizes;
for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
bool isTiled = !isZero(tileSizes[idx]);
- lbs.push_back(isTiled ? ivs[idxIvs++]
- : (ValuePtr)constant_index(folder, 0));
+ lbs.push_back(isTiled ? ivs[idxIvs++] : (Value)constant_index(folder, 0));
subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
}
auto *op = linalgOp.getOperation();
- SmallVector<ValuePtr, 4> res;
+ SmallVector<Value, 4> res;
res.reserve(op->getNumOperands());
auto viewIteratorBegin = linalgOp.getInputsAndOutputs().begin();
for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
++viewIndex) {
- ValuePtr view = *(viewIteratorBegin + viewIndex);
+ Value view = *(viewIteratorBegin + viewIndex);
unsigned rank = view->getType().cast<MemRefType>().getRank();
auto map = loopToOperandRangesMaps(linalgOp)[viewIndex];
// If the view is not tiled, we can use it as is.
@@ -269,7 +267,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
// Construct a new subview for the tile.
- SmallVector<ValuePtr, 4> offsets, sizes, strides;
+ SmallVector<Value, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -300,16 +298,17 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// This is a special type of folding that we only apply when `folder` is
// defined.
if (folder)
- for (auto v : llvm::concat<ValuePtr>(lbs, subViewSizes))
+ for (auto v : llvm::concat<Value>(lbs, subViewSizes))
if (v->use_empty())
v->getDefiningOp()->erase();
return res;
}
-Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
- OpBuilder &b, LinalgOp op, ArrayRef<ValuePtr> tileSizes,
- ArrayRef<unsigned> permutation, OperationFolder *folder) {
+Optional<TiledLinalgOp>
+mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
+ ArrayRef<unsigned> permutation,
+ OperationFolder *folder) {
// 1. Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
// adjusting affine maps to account for missing dimensions.
@@ -352,7 +351,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
LoopNestRangeBuilder(pivs, loopRanges)([&] {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- SmallVector<ValuePtr, 4> ivValues(ivs.begin(), ivs.end());
+ SmallVector<Value, 4> ivValues(ivs.begin(), ivs.end());
// If we have to apply a permutation to the tiled loop nest, we have to
// reorder the induction variables This permutation is the right one
@@ -403,7 +402,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
ScopedContext scope(b, op.getLoc());
  // Materialize concrete tile size values to pass to the generic tiling function.
- SmallVector<ValuePtr, 8> tileSizeValues;
+ SmallVector<Value, 8> tileSizeValues;
tileSizeValues.reserve(tileSizes.size());
for (auto ts : tileSizes)
tileSizeValues.push_back(constant_index(folder, ts));
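Tiling treats a zero tile size as "do not tile this loop", which is why the assertion earlier in this file counts non-zero sizes against the number of induction variables. A standalone restatement of that check (helper names hypothetical):

#include "mlir/Dialect/StandardOps/Ops.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;

// A tile size is "zero" when it is produced by a constant index 0.
static bool isZeroIndexConstant(Value v) {
  auto cst = dyn_cast_or_null<ConstantIndexOp>(v->getDefiningOp());
  return cst && cst.getValue() == 0;
}

// The tiled loop nest needs exactly one induction variable per non-zero
// tile size.
static unsigned countTiledLoops(ArrayRef<Value> tileSizes) {
  return llvm::count_if(
      tileSizes, [](Value v) { return !isZeroIndexConstant(v); });
}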
diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index ae02af0ecc8..560a0235a38 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -83,7 +83,7 @@ mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
}
mlir::edsc::LoopNestRangeBuilder::LoopNestRangeBuilder(
- ArrayRef<ValueHandle *> ivs, ArrayRef<ValuePtr> ranges)
+ ArrayRef<ValueHandle *> ivs, ArrayRef<Value> ranges)
: LoopNestRangeBuilder(
ivs, SmallVector<ValueHandle, 4>(ranges.begin(), ranges.end())) {}
@@ -97,22 +97,22 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
return ValueHandle::null();
}
-static ValuePtr emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<ValuePtr> operandsRef,
- OperationFolder *folder) {
- SmallVector<ValuePtr, 4> operands(operandsRef.begin(), operandsRef.end());
+static Value emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<Value> operandsRef,
+ OperationFolder *folder) {
+ SmallVector<Value, 4> operands(operandsRef.begin(), operandsRef.end());
fullyComposeAffineMapAndOperands(&map, &operands);
canonicalizeMapAndOperands(&map, &operands);
return folder ? folder->create<AffineApplyOp>(b, loc, map, operands)
: b.create<AffineApplyOp>(loc, map, operands);
}
-SmallVector<ValuePtr, 4>
-mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<ValuePtr> values,
- OperationFolder *folder) {
- SmallVector<ValuePtr, 4> res;
+SmallVector<Value, 4> mlir::linalg::applyMapToValues(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<Value> values,
+ OperationFolder *folder) {
+ SmallVector<Value, 4> res;
res.reserve(map.getNumResults());
unsigned numDims = map.getNumDims();
// For each `expr` in `map`, applies the `expr` to the values extracted from
@@ -128,12 +128,12 @@ mlir::linalg::applyMapToValues(OpBuilder &b, Location loc, AffineMap map,
/// Returns all the operands of `linalgOp` that are not views.
/// Asserts that these operands are value types to allow transformations like
/// tiling to just use the values when cloning `linalgOp`.
-SmallVector<ValuePtr, 4>
+SmallVector<Value, 4>
mlir::linalg::getAssumedNonViewOperands(LinalgOp linalgOp) {
auto *op = linalgOp.getOperation();
unsigned numViews = linalgOp.getNumInputsAndOutputs();
unsigned nOperands = op->getNumOperands() - numViews;
- SmallVector<ValuePtr, 4> res;
+ SmallVector<Value, 4> res;
res.reserve(nOperands);
for (unsigned i = 0; i < nOperands; ++i) {
res.push_back(op->getOperand(numViews + i));
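A hedged usage sketch of `applyMapToValues` as changed above (wrapper name hypothetical): with a multi-dimensional identity map it re-emits each value through an `affine.apply`, and passing an `OperationFolder` would fold the trivial applies away.

#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// One affine.apply is created per map result, i.e. one per input value here.
static SmallVector<Value, 4> reemitValues(OpBuilder &b, Location loc,
                                          ArrayRef<Value> values) {
  auto map = AffineMap::getMultiDimIdentityMap(values.size(), b.getContext());
  return linalg::applyMapToValues(b, loc, map, values, /*folder=*/nullptr);
}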
diff --git a/mlir/lib/Dialect/LoopOps/LoopOps.cpp b/mlir/lib/Dialect/LoopOps/LoopOps.cpp
index 8e19eba911a..acbab01df79 100644
--- a/mlir/lib/Dialect/LoopOps/LoopOps.cpp
+++ b/mlir/lib/Dialect/LoopOps/LoopOps.cpp
@@ -60,8 +60,8 @@ LoopOpsDialect::LoopOpsDialect(MLIRContext *context)
// ForOp
//===----------------------------------------------------------------------===//
-void ForOp::build(Builder *builder, OperationState &result, ValuePtr lb,
- ValuePtr ub, ValuePtr step) {
+void ForOp::build(Builder *builder, OperationState &result, Value lb, Value ub,
+ Value step) {
result.addOperands({lb, ub, step});
Region *bodyRegion = result.addRegion();
ForOp::ensureTerminator(*bodyRegion, *builder, result.location);
@@ -125,7 +125,7 @@ static ParseResult parseForOp(OpAsmParser &parser, OperationState &result) {
Region &ForOp::getLoopBody() { return region(); }
-bool ForOp::isDefinedOutsideOfLoop(ValuePtr value) {
+bool ForOp::isDefinedOutsideOfLoop(Value value) {
return !region().isAncestor(value->getParentRegion());
}
@@ -135,7 +135,7 @@ LogicalResult ForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
return success();
}
-ForOp mlir::loop::getForInductionVarOwner(ValuePtr val) {
+ForOp mlir::loop::getForInductionVarOwner(Value val) {
auto ivArg = val.dyn_cast<BlockArgument>();
if (!ivArg)
return ForOp();
@@ -148,7 +148,7 @@ ForOp mlir::loop::getForInductionVarOwner(ValuePtr val) {
// IfOp
//===----------------------------------------------------------------------===//
-void IfOp::build(Builder *builder, OperationState &result, ValuePtr cond,
+void IfOp::build(Builder *builder, OperationState &result, Value cond,
bool withElseRegion) {
result.addOperands(cond);
Region *thenRegion = result.addRegion();
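A hedged usage sketch of the loop helper changed above (wrapper name hypothetical): `getForInductionVarOwner` returns a null op for values that are not induction variables, so it doubles as a classifier.

#include "mlir/Dialect/LoopOps/LoopOps.h"

using namespace mlir;

// True iff `val` is the induction variable of some loop.for.
static bool isForInductionVar(Value val) {
  return static_cast<bool>(loop::getForInductionVarOwner(val));
}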
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
index 4416e1e6b04..144252bb272 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVDialect.cpp
@@ -94,7 +94,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<ValuePtr> valuesToRepl) const final {
+ ArrayRef<Value> valuesToRepl) const final {
// Only spv.ReturnValue needs to be handled here.
auto retValOp = dyn_cast<spirv::ReturnValueOp>(op);
if (!retValOp)
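A minimal sketch of the interesting case inside the hook above (helper name hypothetical, accessor name as in the SPIR-V dialect's ODS): once the terminator is known to be spv.ReturnValue, its operand replaces the single inlined result.

#include "mlir/Dialect/SPIRV/SPIRVOps.h"
#include <cassert>

using namespace mlir;

// Forward the returned value to the value being replaced at the call site.
static void forwardReturnValue(spirv::ReturnValueOp retValOp,
                               ArrayRef<Value> valuesToRepl) {
  assert(valuesToRepl.size() == 1 && "spv.ReturnValue forwards one value");
  Value repl = valuesToRepl.front();
  repl->replaceAllUsesWith(retValOp.value());
}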
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp b/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
index 7b6c013f9ed..0d2348c2626 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVLowering.cpp
@@ -220,9 +220,9 @@ getOrInsertBuiltinVariable(spirv::ModuleOp &moduleOp, Location loc,
 /// Gets the global variable associated with a builtin and adds
 /// it if it doesn't exist.
-ValuePtr mlir::spirv::getBuiltinVariableValue(Operation *op,
- spirv::BuiltIn builtin,
- OpBuilder &builder) {
+Value mlir::spirv::getBuiltinVariableValue(Operation *op,
+ spirv::BuiltIn builtin,
+ OpBuilder &builder) {
auto moduleOp = op->getParentOfType<spirv::ModuleOp>();
if (!moduleOp) {
op->emitError("expected operation to be within a SPIR-V module");
@@ -230,7 +230,7 @@ ValuePtr mlir::spirv::getBuiltinVariableValue(Operation *op,
}
spirv::GlobalVariableOp varOp =
getOrInsertBuiltinVariable(moduleOp, op->getLoc(), builtin, builder);
- ValuePtr ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
+ Value ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
return builder.create<spirv::LoadOp>(op->getLoc(), ptr,
/*memory_access =*/nullptr,
/*alignment =*/nullptr);
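A condensed restatement of the address-then-load idiom above (helper name hypothetical): a module-scope global becomes an SSA value by taking its address and loading through the resulting pointer.

#include "mlir/Dialect/SPIRV/SPIRVOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

static Value loadGlobal(OpBuilder &builder, Location loc,
                        spirv::GlobalVariableOp varOp) {
  // spv._address_of yields a pointer to the global variable.
  Value ptr = builder.create<spirv::AddressOfOp>(loc, varOp);
  // spv.Load turns the pointer into a plain SSA value.
  return builder.create<spirv::LoadOp>(loc, ptr,
                                       /*memory_access=*/nullptr,
                                       /*alignment=*/nullptr);
}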
diff --git a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
index e42dc10f55d..f42c077f77e 100644
--- a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp
@@ -264,8 +264,8 @@ static LogicalResult verifyMemorySemantics(BarrierOp op) {
}
template <typename LoadStoreOpTy>
-static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op,
- ValuePtr ptr, ValuePtr val) {
+static LogicalResult verifyLoadStorePtrAndValTypes(LoadStoreOpTy op, Value ptr,
+ Value val) {
// ODS already checks ptr is spirv::PointerType. Just check that the pointee
// type of the pointer and the type of the value are the same
//
@@ -655,8 +655,8 @@ static ParseResult parseShiftOp(OpAsmParser &parser, OperationState &state) {
}
static void printShiftOp(Operation *op, OpAsmPrinter &printer) {
- ValuePtr base = op->getOperand(0);
- ValuePtr shift = op->getOperand(1);
+ Value base = op->getOperand(0);
+ Value shift = op->getOperand(1);
printer << op->getName() << ' ' << *base << ", " << *shift << " : "
<< base->getType() << ", " << shift->getType();
}
@@ -733,7 +733,7 @@ static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) {
}
void spirv::AccessChainOp::build(Builder *builder, OperationState &state,
- ValuePtr basePtr, ValueRange indices) {
+ Value basePtr, ValueRange indices) {
auto type = getElementPtrType(basePtr->getType(), indices, state.location);
assert(type && "Unable to deduce return type based on basePtr and indices");
build(builder, state, type, basePtr, indices);
@@ -773,8 +773,8 @@ static void print(spirv::AccessChainOp op, OpAsmPrinter &printer) {
}
static LogicalResult verify(spirv::AccessChainOp accessChainOp) {
- SmallVector<ValuePtr, 4> indices(accessChainOp.indices().begin(),
- accessChainOp.indices().end());
+ SmallVector<Value, 4> indices(accessChainOp.indices().begin(),
+ accessChainOp.indices().end());
auto resultType = getElementPtrType(accessChainOp.base_ptr()->getType(),
indices, accessChainOp.getLoc());
if (!resultType) {
@@ -815,7 +815,7 @@ struct CombineChainedAccessChain
}
// Combine indices.
- SmallVector<ValuePtr, 4> indices(parentAccessChainOp.indices());
+ SmallVector<Value, 4> indices(parentAccessChainOp.indices());
indices.append(accessChainOp.indices().begin(),
accessChainOp.indices().end());
@@ -1051,7 +1051,7 @@ static LogicalResult verify(spirv::BitFieldInsertOp bitFieldOp) {
static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &state) {
Block *dest;
- SmallVector<ValuePtr, 4> destOperands;
+ SmallVector<Value, 4> destOperands;
if (parser.parseSuccessorAndUseList(dest, destOperands))
return failure();
state.addSuccessor(dest, destOperands);
@@ -1080,7 +1080,7 @@ static ParseResult parseBranchConditionalOp(OpAsmParser &parser,
auto &builder = parser.getBuilder();
OpAsmParser::OperandType condInfo;
Block *dest;
- SmallVector<ValuePtr, 4> destOperands;
+ SmallVector<Value, 4> destOperands;
// Parse the condition.
Type boolTy = builder.getI1Type();
@@ -1205,7 +1205,7 @@ static void print(spirv::CompositeConstructOp compositeConstructOp,
static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) {
auto cType = compositeConstructOp.getType().cast<spirv::CompositeType>();
- SmallVector<ValuePtr, 4> constituents(compositeConstructOp.constituents());
+ SmallVector<Value, 4> constituents(compositeConstructOp.constituents());
if (constituents.size() != cType.getNumElements()) {
return compositeConstructOp.emitError(
"has incorrect number of operands: expected ")
@@ -1230,7 +1230,7 @@ static LogicalResult verify(spirv::CompositeConstructOp compositeConstructOp) {
//===----------------------------------------------------------------------===//
void spirv::CompositeExtractOp::build(Builder *builder, OperationState &state,
- ValuePtr composite,
+ Value composite,
ArrayRef<int32_t> indices) {
auto indexAttr = builder->getI32ArrayAttr(indices);
auto elementType =
@@ -1954,7 +1954,7 @@ OpFoldResult spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
//===----------------------------------------------------------------------===//
void spirv::LoadOp::build(Builder *builder, OperationState &state,
- ValuePtr basePtr, IntegerAttr memory_access,
+ Value basePtr, IntegerAttr memory_access,
IntegerAttr alignment) {
auto ptrType = basePtr->getType().cast<spirv::PointerType>();
build(builder, state, ptrType.getPointeeType(), basePtr, memory_access,
@@ -2487,9 +2487,8 @@ static LogicalResult verify(spirv::ReturnValueOp retValOp) {
// spv.Select
//===----------------------------------------------------------------------===//
-void spirv::SelectOp::build(Builder *builder, OperationState &state,
- ValuePtr cond, ValuePtr trueValue,
- ValuePtr falseValue) {
+void spirv::SelectOp::build(Builder *builder, OperationState &state, Value cond,
+ Value trueValue, Value falseValue) {
build(builder, state, trueValue->getType(), cond, trueValue, falseValue);
}
@@ -2739,13 +2738,13 @@ private:
}
   // Returns a source value for the given block.
- ValuePtr getSrcValue(Block *block) const {
+ Value getSrcValue(Block *block) const {
auto storeOp = cast<spirv::StoreOp>(block->front());
return storeOp.value();
}
   // Returns the destination pointer for the given block.
- ValuePtr getDstPtr(Block *block) const {
+ Value getDstPtr(Block *block) const {
auto storeOp = cast<spirv::StoreOp>(block->front());
return storeOp.ptr();
}
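The builders above share one convention: result types are inferred from operand types instead of being passed in. A hedged sketch for the spv.Load case (helper name hypothetical):

#include "mlir/Dialect/SPIRV/SPIRVTypes.h"
#include "mlir/IR/Value.h"

using namespace mlir;

// The result of a load is the pointee type of its base pointer.
static Type inferLoadResultType(Value basePtr) {
  return basePtr->getType().cast<spirv::PointerType>().getPointeeType();
}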
diff --git a/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp b/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
index 9e820c6f42b..17ddc48573a 100644
--- a/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
+++ b/mlir/lib/Dialect/SPIRV/Serialization/Deserializer.cpp
@@ -318,7 +318,7 @@ private:
/// This method materializes normal constants and inserts "casting" ops
   /// (`spv._address_of` and `spv._reference_of`) to turn a symbol into an SSA
/// value for handling uses of module scope constants/variables in functions.
- ValuePtr getValue(uint32_t id);
+ Value getValue(uint32_t id);
/// Slices the first instruction out of `binary` and returns its opcode and
/// operands via `opcode` and `operands` respectively. Returns failure if
@@ -437,7 +437,7 @@ private:
DenseMap<Block *, BlockPhiInfo> blockPhiInfo;
// Result <id> to value mapping.
- DenseMap<uint32_t, ValuePtr> valueMap;
+ DenseMap<uint32_t, Value> valueMap;
// Mapping from result <id> to undef value of a type.
DenseMap<uint32_t, Type> undefMap;
@@ -1522,8 +1522,8 @@ Deserializer::processBranchConditional(ArrayRef<uint32_t> operands) {
opBuilder.create<spirv::BranchConditionalOp>(
unknownLoc, condition, trueBlock,
- /*trueArguments=*/ArrayRef<ValuePtr>(), falseBlock,
- /*falseArguments=*/ArrayRef<ValuePtr>(), weights);
+ /*trueArguments=*/ArrayRef<Value>(), falseBlock,
+ /*falseArguments=*/ArrayRef<Value>(), weights);
return success();
}
@@ -1617,7 +1617,7 @@ LogicalResult Deserializer::processPhi(ArrayRef<uint32_t> operands) {
// Create a block argument for this OpPhi instruction.
Type blockArgType = getType(operands[0]);
- BlockArgumentPtr blockArg = curBlock->addArgument(blockArgType);
+ BlockArgument blockArg = curBlock->addArgument(blockArgType);
valueMap[operands[1]] = blockArg;
LLVM_DEBUG(llvm::dbgs() << "[phi] created block argument " << blockArg
<< " id = " << operands[1] << " of type "
@@ -1774,7 +1774,7 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
LLVM_DEBUG(llvm::dbgs() << "[cf] cloned block " << newBlock
<< " from block " << block << "\n");
if (!isFnEntryBlock(block)) {
- for (BlockArgumentPtr blockArg : block->getArguments()) {
+ for (BlockArgument blockArg : block->getArguments()) {
auto newArg = newBlock->addArgument(blockArg->getType());
mapper.map(blockArg, newArg);
LLVM_DEBUG(llvm::dbgs() << "[cf] remapped block argument " << blockArg
@@ -1815,13 +1815,13 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
// we place the selection/loop op inside the old merge block, we need to
// make sure the old merge block has the same block argument list.
assert(mergeBlock->args_empty() && "OpPhi in loop merge block unsupported");
- for (BlockArgumentPtr blockArg : headerBlock->getArguments()) {
+ for (BlockArgument blockArg : headerBlock->getArguments()) {
mergeBlock->addArgument(blockArg->getType());
}
// If the loop header block has block arguments, make sure the spv.branch op
// matches.
- SmallVector<ValuePtr, 4> blockArgs;
+ SmallVector<Value, 4> blockArgs;
if (!headerBlock->args_empty())
blockArgs = {mergeBlock->args_begin(), mergeBlock->args_end()};
@@ -1829,7 +1829,7 @@ LogicalResult ControlFlowStructurizer::structurizeImpl() {
// loop header block.
builder.setInsertionPointToEnd(&body.front());
builder.create<spirv::BranchOp>(location, mapper.lookupOrNull(headerBlock),
- ArrayRef<ValuePtr>(blockArgs));
+ ArrayRef<Value>(blockArgs));
}
// All the blocks cloned into the SelectionOp/LoopOp's region can now be
@@ -1915,10 +1915,10 @@ LogicalResult Deserializer::wireUpBlockArgument() {
auto *op = block->getTerminator();
opBuilder.setInsertionPoint(op);
- SmallVector<ValuePtr, 4> blockArgs;
+ SmallVector<Value, 4> blockArgs;
blockArgs.reserve(phiInfo.size());
for (uint32_t valueId : phiInfo) {
- if (ValuePtr value = getValue(valueId)) {
+ if (Value value = getValue(valueId)) {
blockArgs.push_back(value);
LLVM_DEBUG(llvm::dbgs() << "[phi] block argument " << value
<< " id = " << valueId << '\n');
@@ -1987,7 +1987,7 @@ LogicalResult Deserializer::structurizeControlFlow() {
// Instruction
//===----------------------------------------------------------------------===//
-ValuePtr Deserializer::getValue(uint32_t id) {
+Value Deserializer::getValue(uint32_t id) {
if (auto constInfo = getConstant(id)) {
// Materialize a `spv.constant` op at every use site.
return opBuilder.create<spirv::ConstantOp>(unknownLoc, constInfo->second,
@@ -2183,7 +2183,7 @@ LogicalResult Deserializer::processBitcast(ArrayRef<uint32_t> words) {
}
}
valueID = words[wordIndex++];
- SmallVector<ValuePtr, 4> operands;
+ SmallVector<Value, 4> operands;
SmallVector<NamedAttribute, 4> attributes;
if (wordIndex < words.size()) {
auto arg = getValue(words[wordIndex]);
@@ -2357,7 +2357,7 @@ Deserializer::processOp<spirv::FunctionCallOp>(ArrayRef<uint32_t> operands) {
auto functionName = getFunctionSymbol(functionID);
- SmallVector<ValuePtr, 4> arguments;
+ SmallVector<Value, 4> arguments;
for (auto operand : llvm::drop_begin(operands, 3)) {
auto value = getValue(operand);
if (!value) {
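A minimal sketch of the deserializer's result-<id> bookkeeping shown above (struct name hypothetical): ids resolve to SSA values through a map, and a null `Value` signals an unknown id.

#include "mlir/IR/Value.h"
#include "llvm/ADT/DenseMap.h"
#include <cstdint>

using namespace mlir;

struct ValueTable {
  DenseMap<uint32_t, Value> valueMap;

  void define(uint32_t id, Value value) { valueMap[id] = value; }
  // DenseMap::lookup returns a default-constructed (null) Value when absent.
  Value lookup(uint32_t id) const { return valueMap.lookup(id); }
};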
diff --git a/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
index 424c2e0427e..0cdcc25b77d 100644
--- a/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Dialect/SPIRV/Serialization/Serializer.cpp
@@ -314,7 +314,7 @@ private:
uint32_t opcode,
ArrayRef<uint32_t> operands);
- uint32_t getValueID(ValuePtr val) const { return valueIDMap.lookup(val); }
+ uint32_t getValueID(Value val) const { return valueIDMap.lookup(val); }
LogicalResult processAddressOfOp(spirv::AddressOfOp addressOfOp);
@@ -405,7 +405,7 @@ private:
DenseMap<Type, uint32_t> undefValIDMap;
/// Map from results of normal operations to their <id>s.
- DenseMap<ValuePtr, uint32_t> valueIDMap;
+ DenseMap<Value, uint32_t> valueIDMap;
/// Map from extended instruction set name to <id>s.
llvm::StringMap<uint32_t> extendedInstSetIDMap;
@@ -448,7 +448,7 @@ private:
/// placed inside `functions`) here. And then after emitting all blocks, we
/// replace the dummy <id> 0 with the real result <id> by overwriting
/// `functions[offset]`.
- DenseMap<ValuePtr, SmallVector<size_t, 1>> deferredPhiValues;
+ DenseMap<Value, SmallVector<size_t, 1>> deferredPhiValues;
};
} // namespace
@@ -504,7 +504,7 @@ void Serializer::collect(SmallVectorImpl<uint32_t> &binary) {
void Serializer::printValueIDMap(raw_ostream &os) {
os << "\n= Value <id> Map =\n\n";
for (auto valueIDPair : valueIDMap) {
- ValuePtr val = valueIDPair.first;
+ Value val = valueIDPair.first;
os << " " << val << " "
<< "id = " << valueIDPair.second << ' ';
if (auto *op = val->getDefiningOp()) {
@@ -743,7 +743,7 @@ LogicalResult Serializer::processFuncOp(FuncOp op) {
   // There might be OpPhi instructions whose value references need fixing.
for (auto deferredValue : deferredPhiValues) {
- ValuePtr value = deferredValue.first;
+ Value value = deferredValue.first;
uint32_t id = getValueID(value);
LLVM_DEBUG(llvm::dbgs() << "[phi] fix reference of value " << value
<< " to id = " << id << '\n');
@@ -1393,7 +1393,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) {
// Then create OpPhi instruction for each of the block argument.
for (auto argIndex : llvm::seq<unsigned>(0, block->getNumArguments())) {
- BlockArgumentPtr arg = block->getArgument(argIndex);
+ BlockArgument arg = block->getArgument(argIndex);
// Get the type <id> and result <id> for this OpPhi instruction.
uint32_t phiTypeID = 0;
@@ -1409,7 +1409,7 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) {
phiArgs.push_back(phiID);
for (auto predIndex : llvm::seq<unsigned>(0, predecessors.size())) {
- ValuePtr value = *(predecessors[predIndex].second + argIndex);
+ Value value = *(predecessors[predIndex].second + argIndex);
uint32_t predBlockId = getOrCreateBlockID(predecessors[predIndex].first);
LLVM_DEBUG(llvm::dbgs() << "[phi] use predecessor (id = " << predBlockId
<< ") value " << value << ' ');
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index 0be24bf169c..d7194da0778 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -131,7 +131,7 @@ class FuncOpLowering final : public SPIRVOpLowering<FuncOp> {
public:
using SPIRVOpLowering<FuncOp>::SPIRVOpLowering;
PatternMatchResult
- matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
+ matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override;
};
@@ -144,7 +144,7 @@ private:
} // namespace
PatternMatchResult
-FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
+FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
if (!funcOp.getAttrOfType<spirv::EntryPointABIAttr>(
spirv::getEntryPointABIAttrName())) {
@@ -174,7 +174,7 @@ FuncOpLowering::matchAndRewrite(FuncOp funcOp, ArrayRef<ValuePtr> operands,
OpBuilder::InsertionGuard funcInsertionGuard(rewriter);
rewriter.setInsertionPointToStart(&funcOp.front());
// Insert spirv::AddressOf and spirv::AccessChain operations.
- ValuePtr replacement =
+ Value replacement =
rewriter.create<spirv::AddressOfOp>(funcOp.getLoc(), var);
// Check if the arg is a scalar or vector type. In that case, the value
// needs to be loaded into registers.
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index 55da59a0c74..831c78a4521 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -72,7 +72,7 @@ struct StdInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<ValuePtr> valuesToRepl) const final {
+ ArrayRef<Value> valuesToRepl) const final {
// Only "std.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -175,7 +175,7 @@ void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
// dimension operands parsed.
// Returns 'false' on success and 'true' on error.
ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
- SmallVectorImpl<ValuePtr> &operands,
+ SmallVectorImpl<Value> &operands,
unsigned &numDims) {
SmallVector<OpAsmParser::OperandType, 8> opInfos;
if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
@@ -316,7 +316,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
PatternRewriter &rewriter) const override {
     // Check to see if any dimension operands are constants. If so, we can
// substitute and drop them.
- if (llvm::none_of(alloc.getOperands(), [](ValuePtr operand) {
+ if (llvm::none_of(alloc.getOperands(), [](Value operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -327,8 +327,8 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
// and keep track of the resultant memref type to build.
SmallVector<int64_t, 4> newShapeConstants;
newShapeConstants.reserve(memrefType.getRank());
- SmallVector<ValuePtr, 4> newOperands;
- SmallVector<ValuePtr, 4> droppedOperands;
+ SmallVector<Value, 4> newOperands;
+ SmallVector<Value, 4> droppedOperands;
unsigned dynamicDimPos = 0;
for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
@@ -420,7 +420,7 @@ struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &result) {
Block *dest;
- SmallVector<ValuePtr, 4> destOperands;
+ SmallVector<Value, 4> destOperands;
if (parser.parseSuccessorAndUseList(dest, destOperands))
return failure();
result.addSuccessor(dest, destOperands);
@@ -614,7 +614,7 @@ static Type getI1SameShape(Builder *build, Type type) {
//===----------------------------------------------------------------------===//
static void buildCmpIOp(Builder *build, OperationState &result,
- CmpIPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
+ CmpIPredicate predicate, Value lhs, Value rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -768,7 +768,7 @@ CmpFPredicate CmpFOp::getPredicateByName(StringRef name) {
}
static void buildCmpFOp(Builder *build, OperationState &result,
- CmpFPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
+ CmpFPredicate predicate, Value lhs, Value rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -937,7 +937,7 @@ struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
static ParseResult parseCondBranchOp(OpAsmParser &parser,
OperationState &result) {
- SmallVector<ValuePtr, 4> destOperands;
+ SmallVector<Value, 4> destOperands;
Block *dest;
OpAsmParser::OperandType condInfo;
@@ -1079,7 +1079,7 @@ OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
}
void ConstantOp::getAsmResultNames(
- function_ref<void(ValuePtr, StringRef)> setNameFn) {
+ function_ref<void(Value, StringRef)> setNameFn) {
Type type = getType();
if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
IntegerType intTy = type.dyn_cast<IntegerType>();
@@ -1174,7 +1174,7 @@ struct SimplifyDeadDealloc : public OpRewritePattern<DeallocOp> {
PatternMatchResult matchAndRewrite(DeallocOp dealloc,
PatternRewriter &rewriter) const override {
// Check that the memref operand's defining operation is an AllocOp.
- ValuePtr memref = dealloc.memref();
+ Value memref = dealloc.memref();
if (!isa_and_nonnull<AllocOp>(memref->getDefiningOp()))
return matchFailure();
@@ -1353,11 +1353,10 @@ OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
// ---------------------------------------------------------------------------
void DmaStartOp::build(Builder *builder, OperationState &result,
- ValuePtr srcMemRef, ValueRange srcIndices,
- ValuePtr destMemRef, ValueRange destIndices,
- ValuePtr numElements, ValuePtr tagMemRef,
- ValueRange tagIndices, ValuePtr stride,
- ValuePtr elementsPerStride) {
+ Value srcMemRef, ValueRange srcIndices, Value destMemRef,
+ ValueRange destIndices, Value numElements,
+ Value tagMemRef, ValueRange tagIndices, Value stride,
+ Value elementsPerStride) {
result.addOperands(srcMemRef);
result.addOperands(srcIndices);
result.addOperands(destMemRef);
@@ -1497,9 +1496,8 @@ LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
// DmaWaitOp
// ---------------------------------------------------------------------------
-void DmaWaitOp::build(Builder *builder, OperationState &result,
- ValuePtr tagMemRef, ValueRange tagIndices,
- ValuePtr numElements) {
+void DmaWaitOp::build(Builder *builder, OperationState &result, Value tagMemRef,
+ ValueRange tagIndices, Value numElements) {
result.addOperands(tagMemRef);
result.addOperands(tagIndices);
result.addOperands(numElements);
@@ -2356,7 +2354,7 @@ static void print(OpAsmPrinter &p, ViewOp op) {
p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
}
-ValuePtr ViewOp::getDynamicOffset() {
+Value ViewOp::getDynamicOffset() {
int64_t offset;
SmallVector<int64_t, 4> strides;
auto result =
@@ -2431,7 +2429,7 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
PatternMatchResult matchAndRewrite(ViewOp viewOp,
PatternRewriter &rewriter) const override {
// Return if none of the operands are constants.
- if (llvm::none_of(viewOp.getOperands(), [](ValuePtr operand) {
+ if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -2448,8 +2446,8 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
return matchFailure();
- SmallVector<ValuePtr, 4> newOperands;
- SmallVector<ValuePtr, 4> droppedOperands;
+ SmallVector<Value, 4> newOperands;
+ SmallVector<Value, 4> droppedOperands;
// Fold dynamic offset operand if it is produced by a constant.
auto dynamicOffset = viewOp.getDynamicOffset();
@@ -2567,7 +2565,7 @@ static Type inferSubViewResultType(MemRefType memRefType) {
memRefType.getMemorySpace());
}
-void mlir::SubViewOp::build(Builder *b, OperationState &result, ValuePtr source,
+void mlir::SubViewOp::build(Builder *b, OperationState &result, Value source,
ValueRange offsets, ValueRange sizes,
ValueRange strides, Type resultType,
ArrayRef<NamedAttribute> attrs) {
@@ -2581,7 +2579,7 @@ void mlir::SubViewOp::build(Builder *b, OperationState &result, ValuePtr source,
}
void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType,
- ValuePtr source) {
+ Value source) {
build(b, result, source, /*offsets=*/{}, /*sizes=*/{}, /*strides=*/{},
resultType);
}
@@ -2817,7 +2815,7 @@ public:
// Follow all or nothing approach for shapes for now. If all the operands
// for sizes are constants then fold it into the type of the result memref.
if (subViewType.hasStaticShape() ||
- llvm::any_of(subViewOp.sizes(), [](ValuePtr operand) {
+ llvm::any_of(subViewOp.sizes(), [](Value operand) {
return !matchPattern(operand, m_ConstantIndex());
})) {
return matchFailure();
@@ -2833,7 +2831,7 @@ public:
subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- ArrayRef<ValuePtr>(), subViewOp.strides(), newMemRefType);
+ ArrayRef<Value>(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.sizes(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2862,7 +2860,7 @@ public:
failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) ||
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
- llvm::any_of(subViewOp.strides(), [](ValuePtr stride) {
+ llvm::any_of(subViewOp.strides(), [](Value stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2883,7 +2881,7 @@ public:
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- subViewOp.sizes(), ArrayRef<ValuePtr>(), newMemRefType);
+ subViewOp.sizes(), ArrayRef<Value>(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.strides(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2913,7 +2911,7 @@ public:
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
baseOffset == MemRefType::getDynamicStrideOrOffset() ||
- llvm::any_of(subViewOp.offsets(), [](ValuePtr stride) {
+ llvm::any_of(subViewOp.offsets(), [](Value stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2934,7 +2932,7 @@ public:
MemRefType::get(subViewType.getShape(), subViewType.getElementType(),
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
- subViewOp.getLoc(), subViewOp.source(), ArrayRef<ValuePtr>(),
+ subViewOp.getLoc(), subViewOp.source(), ArrayRef<Value>(),
subViewOp.sizes(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
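The canonicalization patterns above all gate on the same question before folding: is some operand a constant index? A hedged restatement of the predicate (helper name hypothetical):

#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Matchers.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;

static bool anyConstantIndexOperand(Operation *op) {
  return llvm::any_of(op->getOperands(), [](Value operand) {
    return matchPattern(operand, m_ConstantIndex());
  });
}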
diff --git a/mlir/lib/Dialect/VectorOps/VectorOps.cpp b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
index 8ceff014029..a3904ef97a2 100644
--- a/mlir/lib/Dialect/VectorOps/VectorOps.cpp
+++ b/mlir/lib/Dialect/VectorOps/VectorOps.cpp
@@ -63,7 +63,7 @@ ArrayAttr vector::getVectorSubscriptAttr(Builder &builder,
//===----------------------------------------------------------------------===//
void vector::ContractionOp::build(Builder *builder, OperationState &result,
- ValuePtr lhs, ValuePtr rhs, ValuePtr acc,
+ Value lhs, Value rhs, Value acc,
ArrayAttr indexingMaps,
ArrayAttr iteratorTypes) {
result.addOperands({lhs, rhs, acc});
@@ -395,7 +395,7 @@ static Type inferExtractOpResultType(VectorType vectorType,
}
void vector::ExtractOp::build(Builder *builder, OperationState &result,
- ValuePtr source, ArrayRef<int64_t> position) {
+ Value source, ArrayRef<int64_t> position) {
result.addOperands(source);
auto positionAttr = getVectorSubscriptAttr(*builder, position);
result.addTypes(inferExtractOpResultType(source->getType().cast<VectorType>(),
@@ -462,7 +462,7 @@ static LogicalResult verify(vector::ExtractOp op) {
//===----------------------------------------------------------------------===//
void ExtractSlicesOp::build(Builder *builder, OperationState &result,
- TupleType tupleType, ValuePtr vector,
+ TupleType tupleType, Value vector,
ArrayRef<int64_t> sizes,
ArrayRef<int64_t> strides) {
result.addOperands(vector);
@@ -638,8 +638,8 @@ static ParseResult parseBroadcastOp(OpAsmParser &parser,
// ShuffleOp
//===----------------------------------------------------------------------===//
-void ShuffleOp::build(Builder *builder, OperationState &result, ValuePtr v1,
- ValuePtr v2, ArrayRef<int64_t> mask) {
+void ShuffleOp::build(Builder *builder, OperationState &result, Value v1,
+ Value v2, ArrayRef<int64_t> mask) {
result.addOperands({v1, v2});
auto maskAttr = getVectorSubscriptAttr(*builder, mask);
result.addTypes(v1->getType());
@@ -762,8 +762,8 @@ static LogicalResult verify(InsertElementOp op) {
// InsertOp
//===----------------------------------------------------------------------===//
-void InsertOp::build(Builder *builder, OperationState &result, ValuePtr source,
- ValuePtr dest, ArrayRef<int64_t> position) {
+void InsertOp::build(Builder *builder, OperationState &result, Value source,
+ Value dest, ArrayRef<int64_t> position) {
result.addOperands({source, dest});
auto positionAttr = getVectorSubscriptAttr(*builder, position);
result.addTypes(dest->getType());
@@ -884,7 +884,7 @@ void InsertSlicesOp::getStrides(SmallVectorImpl<int64_t> &results) {
//===----------------------------------------------------------------------===//
void InsertStridedSliceOp::build(Builder *builder, OperationState &result,
- ValuePtr source, ValuePtr dest,
+ Value source, Value dest,
ArrayRef<int64_t> offsets,
ArrayRef<int64_t> strides) {
result.addOperands({source, dest});
@@ -1192,7 +1192,7 @@ static LogicalResult verify(ReshapeOp op) {
// If all shape operands are produced by constant ops, verify that product
// of dimensions for input/output shape match.
- auto isDefByConstant = [](ValuePtr operand) {
+ auto isDefByConstant = [](Value operand) {
return isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
};
if (llvm::all_of(op.input_shape(), isDefByConstant) &&
@@ -1238,7 +1238,7 @@ static Type inferStridedSliceOpResultType(VectorType vectorType,
}
void StridedSliceOp::build(Builder *builder, OperationState &result,
- ValuePtr source, ArrayRef<int64_t> offsets,
+ Value source, ArrayRef<int64_t> offsets,
ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides) {
result.addOperands(source);
auto offsetsAttr = getVectorSubscriptAttr(*builder, offsets);
@@ -1593,8 +1593,7 @@ static MemRefType inferVectorTypeCastResultType(MemRefType t) {
return MemRefType::get({}, VectorType::get(t.getShape(), t.getElementType()));
}
-void TypeCastOp::build(Builder *builder, OperationState &result,
- ValuePtr source) {
+void TypeCastOp::build(Builder *builder, OperationState &result, Value source) {
result.addOperands(source);
result.addTypes(
inferVectorTypeCastResultType(source->getType().cast<MemRefType>()));
@@ -1784,7 +1783,7 @@ public:
PatternMatchResult matchAndRewrite(CreateMaskOp createMaskOp,
PatternRewriter &rewriter) const override {
// Return if any of 'createMaskOp' operands are not defined by a constant.
- auto is_not_def_by_constant = [](ValuePtr operand) {
+ auto is_not_def_by_constant = [](Value operand) {
return !isa_and_nonnull<ConstantIndexOp>(operand->getDefiningOp());
};
if (llvm::any_of(createMaskOp.operands(), is_not_def_by_constant))
diff --git a/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp b/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
index 927aeda4ecd..28b803f7cde 100644
--- a/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/VectorOps/VectorTransforms.cpp
@@ -97,17 +97,17 @@ static SmallVector<int64_t, 8> delinearize(int64_t linearIndex,
// `resultTypes`.
static Operation *cloneOpWithOperandsAndTypes(PatternRewriter &builder,
Location loc, Operation *op,
- ArrayRef<ValuePtr> operands,
+ ArrayRef<Value> operands,
ArrayRef<Type> resultTypes) {
OperationState res(loc, op->getName().getStringRef(), operands, resultTypes,
op->getAttrs());
return builder.createOperation(res);
}
-static ValuePtr makeSplatZero(Location loc, PatternRewriter &rewriter,
- VectorType vt) {
+static Value makeSplatZero(Location loc, PatternRewriter &rewriter,
+ VectorType vt) {
auto t = vt.getElementType();
- ValuePtr f = nullptr;
+ Value f = nullptr;
if (t.isBF16() || t.isF16())
f = rewriter.create<ConstantOp>(loc, t, rewriter.getF64FloatAttr(0.0f));
else if (t.isF32())
@@ -181,12 +181,12 @@ struct UnrolledVectorState {
SmallVector<int64_t, 4> unrollFactors;
SmallVector<int64_t, 8> basis;
int64_t numInstances;
- ValuePtr slicesTuple;
+ Value slicesTuple;
};
// Populates 'state' with unrolled shape, unroll factors, basis and
// num unrolled instances for 'vectorType'.
-static void initUnrolledVectorState(VectorType vectorType, ValuePtr initValue,
+static void initUnrolledVectorState(VectorType vectorType, Value initValue,
const DenseMap<int64_t, int64_t> &indexMap,
ArrayRef<int64_t> targetShape,
UnrolledVectorState &state,
@@ -230,11 +230,10 @@ getUnrolledVectorLinearIndex(UnrolledVectorState &state,
// Returns an unrolled vector at 'vectorOffsets' within the vector
// represented by 'state'. The vector is created from a slice of 'initValue'
// if not present in 'cache'.
-static ValuePtr getOrCreateUnrolledVectorSlice(
+static Value getOrCreateUnrolledVectorSlice(
Location loc, UnrolledVectorState &state, ArrayRef<int64_t> vectorOffsets,
ArrayRef<int64_t> offsets, DenseMap<int64_t, int64_t> &indexMap,
- ValuePtr initValue, SmallVectorImpl<ValuePtr> &cache,
- PatternRewriter &builder) {
+ Value initValue, SmallVectorImpl<Value> &cache, PatternRewriter &builder) {
// Compute slice offsets.
SmallVector<int64_t, 4> sliceOffsets(state.unrolledShape.size());
getMappedElements(indexMap, offsets, sliceOffsets);
@@ -321,10 +320,12 @@ struct VectorState {
// TODO(andydavis) Generalize this to support structured ops beyond
// vector ContractionOp, and merge it with 'unrollSingleResultOpMatchingType'
-static ValuePtr unrollSingleResultStructuredOp(
- Operation *op, ArrayRef<int64_t> iterationBounds,
- std::vector<VectorState> &vectors, unsigned resultIndex,
- ArrayRef<int64_t> targetShape, PatternRewriter &builder) {
+static Value unrollSingleResultStructuredOp(Operation *op,
+ ArrayRef<int64_t> iterationBounds,
+ std::vector<VectorState> &vectors,
+ unsigned resultIndex,
+ ArrayRef<int64_t> targetShape,
+ PatternRewriter &builder) {
auto shapedType = op->getResult(0)->getType().dyn_cast_or_null<ShapedType>();
if (!shapedType || !shapedType.hasStaticShape())
assert(false && "Expected a statically shaped result type");
@@ -353,7 +354,7 @@ static ValuePtr unrollSingleResultStructuredOp(
shapedType.getElementType());
// Initialize caches for intermediate vector results.
- std::vector<SmallVector<ValuePtr, 4>> caches(numVectors);
+ std::vector<SmallVector<Value, 4>> caches(numVectors);
for (unsigned i = 0; i < numVectors; ++i)
caches[i].resize(unrolledVectorState[i].numInstances);
@@ -365,7 +366,7 @@ static ValuePtr unrollSingleResultStructuredOp(
auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; },
vectorOffsets, targetShape);
// Get cached slice (or create slice) for each operand at 'offsets'.
- SmallVector<ValuePtr, 3> operands;
+ SmallVector<Value, 3> operands;
operands.resize(op->getNumOperands());
for (unsigned i = 0; i < numVectors; ++i) {
int64_t operandIndex = vectors[i].operandIndex;
@@ -391,21 +392,21 @@ static ValuePtr unrollSingleResultStructuredOp(
// Create TupleOp of unrolled result vectors.
SmallVector<Type, 4> vectorTupleTypes(resultValueState.numInstances);
- SmallVector<ValuePtr, 4> vectorTupleValues(resultValueState.numInstances);
+ SmallVector<Value, 4> vectorTupleValues(resultValueState.numInstances);
for (unsigned i = 0; i < resultValueState.numInstances; ++i) {
vectorTupleTypes[i] = caches[resultIndex][i]->getType().cast<VectorType>();
vectorTupleValues[i] = caches[resultIndex][i];
}
TupleType tupleType = builder.getTupleType(vectorTupleTypes);
- ValuePtr tupleOp = builder.create<vector::TupleOp>(op->getLoc(), tupleType,
- vectorTupleValues);
+ Value tupleOp = builder.create<vector::TupleOp>(op->getLoc(), tupleType,
+ vectorTupleValues);
// Create InsertSlicesOp(Tuple(result_vectors)).
auto resultVectorType = op->getResult(0)->getType().cast<VectorType>();
SmallVector<int64_t, 4> sizes(resultValueState.unrolledShape);
SmallVector<int64_t, 4> strides(resultValueState.unrollFactors.size(), 1);
- ValuePtr insertSlicesOp = builder.create<vector::InsertSlicesOp>(
+ Value insertSlicesOp = builder.create<vector::InsertSlicesOp>(
op->getLoc(), resultVectorType, tupleOp, builder.getI64ArrayAttr(sizes),
builder.getI64ArrayAttr(strides));
return insertSlicesOp;
@@ -476,7 +477,7 @@ getVectorElementwiseOpUnrollState(Operation *op, ArrayRef<int64_t> targetShape,
}
// Entry point for unrolling declarative pattern rewrites.
-ValuePtr mlir::vector::unrollSingleResultOpMatchingType(
+Value mlir::vector::unrollSingleResultOpMatchingType(
PatternRewriter &builder, Operation *op, ArrayRef<int64_t> targetShape) {
assert(op->getNumResults() == 1 && "Expected single result operation");
@@ -505,8 +506,8 @@ ValuePtr mlir::vector::unrollSingleResultOpMatchingType(
static void
generateTransferOpSlices(VectorType vectorType, TupleType tupleType,
ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides,
- ArrayRef<ValuePtr> indices, PatternRewriter &rewriter,
- function_ref<void(unsigned, ArrayRef<ValuePtr>)> fn) {
+ ArrayRef<Value> indices, PatternRewriter &rewriter,
+ function_ref<void(unsigned, ArrayRef<Value>)> fn) {
// Compute strides w.r.t. to slice counts in each dimension.
auto maybeDimSliceCounts = shapeRatio(vectorType.getShape(), sizes);
assert(maybeDimSliceCounts.hasValue());
@@ -523,13 +524,13 @@ generateTransferOpSlices(VectorType vectorType, TupleType tupleType,
auto offsets = zipMap([](int64_t v1, int64_t v2) { return v1 * v2; },
vectorOffsets, sizes);
// Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
- SmallVector<ValuePtr, 4> sliceIndices(numSliceIndices);
+ SmallVector<Value, 4> sliceIndices(numSliceIndices);
for (auto it : llvm::enumerate(indices)) {
auto expr = getAffineDimExpr(0, ctx) +
getAffineConstantExpr(offsets[it.index()], ctx);
auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr);
sliceIndices[it.index()] = rewriter.create<AffineApplyOp>(
- it.value()->getLoc(), map, ArrayRef<ValuePtr>(it.value()));
+ it.value()->getLoc(), map, ArrayRef<Value>(it.value()));
}
// Call 'fn' to generate slice 'i' at 'sliceIndices'.
fn(i, sliceIndices);
@@ -548,7 +549,7 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
if (!xferReadOp.permutation_map().isIdentity())
return matchFailure();
// Return unless the unique 'xferReadOp' user is an ExtractSlicesOp.
- ValuePtr xferReadResult = xferReadOp.getResult();
+ Value xferReadResult = xferReadOp.getResult();
auto extractSlicesOp =
dyn_cast<vector::ExtractSlicesOp>(*xferReadResult->getUsers().begin());
if (!xferReadResult->hasOneUse() || !extractSlicesOp)
@@ -565,10 +566,10 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
Location loc = xferReadOp.getLoc();
int64_t numSlices = resultTupleType.size();
- SmallVector<ValuePtr, 4> vectorTupleValues(numSlices);
- SmallVector<ValuePtr, 4> indices(xferReadOp.indices().begin(),
- xferReadOp.indices().end());
- auto createSlice = [&](unsigned index, ArrayRef<ValuePtr> sliceIndices) {
+ SmallVector<Value, 4> vectorTupleValues(numSlices);
+ SmallVector<Value, 4> indices(xferReadOp.indices().begin(),
+ xferReadOp.indices().end());
+ auto createSlice = [&](unsigned index, ArrayRef<Value> sliceIndices) {
// Get VectorType for slice 'i'.
auto sliceVectorType = resultTupleType.getType(index);
// Create split TransferReadOp for 'sliceUser'.
@@ -580,8 +581,8 @@ struct SplitTransferReadOp : public OpRewritePattern<vector::TransferReadOp> {
indices, rewriter, createSlice);
     // Create tuple of split xfer read operations.
- ValuePtr tupleOp = rewriter.create<vector::TupleOp>(loc, resultTupleType,
- vectorTupleValues);
+ Value tupleOp = rewriter.create<vector::TupleOp>(loc, resultTupleType,
+ vectorTupleValues);
// Replace 'xferReadOp' with result 'insertSlicesResult'.
rewriter.replaceOpWithNewOp<vector::InsertSlicesOp>(
xferReadOp, sourceVectorType, tupleOp, extractSlicesOp.sizes(),
@@ -621,9 +622,9 @@ struct SplitTransferWriteOp : public OpRewritePattern<vector::TransferWriteOp> {
insertSlicesOp.getStrides(strides);
Location loc = xferWriteOp.getLoc();
- SmallVector<ValuePtr, 4> indices(xferWriteOp.indices().begin(),
- xferWriteOp.indices().end());
- auto createSlice = [&](unsigned index, ArrayRef<ValuePtr> sliceIndices) {
+ SmallVector<Value, 4> indices(xferWriteOp.indices().begin(),
+ xferWriteOp.indices().end());
+ auto createSlice = [&](unsigned index, ArrayRef<Value> sliceIndices) {
// Create split TransferWriteOp for source vector 'tupleOp.operand[i]'.
rewriter.create<vector::TransferWriteOp>(
loc, tupleOp.getOperand(index), xferWriteOp.memref(), sliceIndices,
@@ -665,7 +666,7 @@ struct TupleGetFolderOp : public OpRewritePattern<vector::TupleGetOp> {
return matchFailure();
// Forward Value from 'tupleOp' at 'tupleGetOp.index'.
- ValuePtr tupleValue = tupleOp.getOperand(tupleGetOp.getIndex());
+ Value tupleValue = tupleOp.getOperand(tupleGetOp.getIndex());
rewriter.replaceOp(tupleGetOp, tupleValue);
return matchSuccess();
}
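A standalone restatement of the offset arithmetic the slice loops above use (helper name hypothetical): element offsets are the elementwise product of slice coordinates and the slice shape.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdint>

static llvm::SmallVector<int64_t, 4>
sliceElementOffsets(llvm::ArrayRef<int64_t> vectorOffsets,
                    llvm::ArrayRef<int64_t> sizes) {
  llvm::SmallVector<int64_t, 4> offsets;
  offsets.reserve(vectorOffsets.size());
  for (unsigned i = 0, e = vectorOffsets.size(); i < e; ++i)
    offsets.push_back(vectorOffsets[i] * sizes[i]);
  return offsets;
}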