summary | refs | log | tree | commit | diff | stats
path: root/mlir/lib/Dialect/Linalg/Transforms
diff options
context:
space:
mode:
author	River Riddle <riverriddle@google.com>	2019-12-23 14:45:01 -0800
committer	A. Unique TensorFlower <gardener@tensorflow.org>	2019-12-23 16:36:53 -0800
commite62a69561fb9d7b1013d2853da68d79a7907fead (patch)
tree0dd059094cbfb8d904513abcdc1fbe8cfa89bb09 /mlir/lib/Dialect/Linalg/Transforms
parent5d5bd2e1da29d976cb125dbb3cd097a5e42b2be4 (diff)
downloadbcm5719-llvm-e62a69561fb9d7b1013d2853da68d79a7907fead.tar.gz
bcm5719-llvm-e62a69561fb9d7b1013d2853da68d79a7907fead.zip
NFC: Replace ValuePtr with Value and remove it now that Value is value-typed.
ValuePtr was a temporary typedef during the transition to a value-typed Value. PiperOrigin-RevId: 286945714
Diffstat (limited to 'mlir/lib/Dialect/Linalg/Transforms')
-rw-r--r--	mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp	29
-rw-r--r--	mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp	46
-rw-r--r--	mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp	6
-rw-r--r--	mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp	25
-rw-r--r--	mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp	55
5 files changed, 77 insertions(+), 84 deletions(-)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 27dcf663d23..9df7bce0879 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -68,16 +68,16 @@ static llvm::cl::list<unsigned> clTileSizes(
static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
ArrayRef<SubViewOp::Range> loopRanges) {
auto maps = loopToOperandRangesMaps(op);
- SmallVector<ValuePtr, 8> clonedViews;
+ SmallVector<Value, 8> clonedViews;
clonedViews.reserve(op.getNumInputsAndOutputs());
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
+ SmallVector<Value, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "map: " << map << "\n");
- ValuePtr view = en.value();
+ Value view = en.value();
SmallVector<SubViewOp::Range, 4> viewRanges(map.getNumResults());
for (auto en2 : llvm::enumerate(map.getResults())) {
unsigned d = en2.index();
@@ -90,7 +90,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
// Construct a new subview for the tile.
unsigned rank = viewRanges.size();
- SmallVector<ValuePtr, 4> offsets, sizes, strides;
+ SmallVector<Value, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -108,7 +108,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
struct ViewDimension {
- ValuePtr view;
+ Value view;
unsigned dimension;
};
@@ -121,14 +121,14 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
auto maps = loopToOperandRangesMaps(op);
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
+ SmallVector<Value, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n");
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n");
- ValuePtr view = en.value();
- SmallVector<ValuePtr, 8> viewRanges(map.getNumResults(), nullptr);
+ Value view = en.value();
+ SmallVector<Value, 8> viewRanges(map.getNumResults(), nullptr);
for (auto en2 : llvm::enumerate(map.getResults())) {
if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
@@ -142,9 +142,9 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
llvm_unreachable("Expect to be able to extract a view defining loop range");
}
-static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
- LinalgOp consumer, unsigned consumerIdx,
- unsigned producerIdx, OperationFolder *folder) {
+static LinalgOp fuse(Value producedView, LinalgOp producer, LinalgOp consumer,
+ unsigned consumerIdx, unsigned producerIdx,
+ OperationFolder *folder) {
auto subView = dyn_cast_or_null<SubViewOp>(
consumer.getInput(consumerIdx)->getDefiningOp());
auto slice = dyn_cast_or_null<SliceOp>(
@@ -196,8 +196,7 @@ static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
-static bool isStructurallyFusableProducer(LinalgOp producer,
- ValuePtr consumedView,
+static bool isStructurallyFusableProducer(LinalgOp producer, Value consumedView,
LinalgOp consumer) {
if (producer.getNumOutputs() != 1) {
LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)");
@@ -217,7 +216,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer,
bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
LinalgOp consumer,
- ValuePtr consumedView,
+ Value consumedView,
LinalgOp producer) {
// Make some simple structural checks that alleviate the need for more
// complex analyses.
@@ -236,7 +235,7 @@ bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
}
bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
- LinalgOp consumer, ValuePtr consumedView,
+ LinalgOp consumer, Value consumedView,
LinalgOp producer) {
if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
return false;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
index 0f333791dd7..d7cc4a86d21 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -40,7 +40,7 @@ using edsc::op::operator==;
static SmallVector<ValueHandle, 8>
makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<ValuePtr> vals) {
+ ArrayRef<Value> vals) {
assert(map.getNumSymbols() == 0);
assert(map.getNumInputs() == vals.size());
SmallVector<ValueHandle, 8> res;
@@ -48,35 +48,34 @@ makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
auto dims = map.getNumDims();
for (auto e : map.getResults()) {
auto exprMap = AffineMap::get(dims, 0, e);
- SmallVector<ValuePtr, 4> operands(vals.begin(), vals.end());
+ SmallVector<Value, 4> operands(vals.begin(), vals.end());
canonicalizeMapAndOperands(&exprMap, &operands);
res.push_back(affine_apply(exprMap, operands));
}
return res;
}
-static SmallVector<ValuePtr, 4> permuteIvs(ArrayRef<ValuePtr> ivs,
- Optional<AffineMap> permutation) {
+static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
+ Optional<AffineMap> permutation) {
return permutation ? applyMapToValues(ScopedContext::getBuilder(),
ScopedContext::getLocation(),
permutation.getValue(), ivs)
- : SmallVector<ValuePtr, 4>(ivs.begin(), ivs.end());
+ : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<ValuePtr> allViewSizes);
-SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<ValuePtr> allViewSizes) {
+static SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<Value> allViewSizes);
+SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc, AffineMap map,
+ ArrayRef<Value> allViewSizes) {
// Apply `map` to get view sizes in loop order.
auto sizes = applyMapToValues(b, loc, map, allViewSizes);
// Create a new range with the applied tile sizes.
ScopedContext scope(b, loc);
- SmallVector<ValuePtr, 4> res;
+ SmallVector<Value, 4> res;
for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
res.push_back(range(constant_index(0), sizes[idx], constant_index(1)));
}
@@ -89,8 +88,7 @@ class LinalgScopedEmitter {};
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- CopyOp copyOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
auto nPar = copyOp.getNumParallelLoops();
assert(nPar == allIvs.size());
auto inputIvs =
@@ -112,8 +110,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- FillOp fillOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
auto nPar = fillOp.getNumParallelLoops();
assert(nPar == allIvs.size());
auto ivs =
@@ -129,7 +126,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs, DotOp dotOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, DotOp dotOp) {
assert(allIvs.size() == 1);
IndexHandle r_i(allIvs[0]);
IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
@@ -142,7 +139,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
MatvecOp matvecOp) {
assert(allIvs.size() == 2);
IndexHandle i(allIvs[0]), r_j(allIvs[1]);
@@ -156,7 +153,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
MatmulOp matmulOp) {
assert(allIvs.size() == 3);
IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
@@ -170,8 +167,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
- ConvOp convOp) {
+ static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
auto maps = loopToOperandRangesMaps(convOp);
@@ -220,14 +216,14 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, GenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
GenericOp genericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
using edsc::intrinsics::detail::ValueHandleArray;
unsigned nInputs = genericOp.getNumInputs();
unsigned nOutputs = genericOp.getNumOutputs();
- SmallVector<ValuePtr, 4> indexedValues(nInputs + nOutputs);
+ SmallVector<Value, 4> indexedValues(nInputs + nOutputs);
// 1.a. Emit std_load from input views.
for (unsigned i = 0; i < nInputs; ++i) {
@@ -315,7 +311,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
+ static void emitScalarImplementation(ArrayRef<Value> allIvs,
IndexedGenericOp indexedGenericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -323,7 +319,7 @@ public:
unsigned nInputs = indexedGenericOp.getNumInputs();
unsigned nOutputs = indexedGenericOp.getNumOutputs();
unsigned nLoops = allIvs.size();
- SmallVector<ValuePtr, 4> indexedValues(nLoops + nInputs + nOutputs);
+ SmallVector<Value, 4> indexedValues(nLoops + nInputs + nOutputs);
for (unsigned i = 0; i < nLoops; ++i) {
indexedValues[i] = allIvs[i];
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
index 451803797f4..eb23a8ceb1a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
@@ -90,7 +90,7 @@ LogicalResult mlir::linalg::tileAndFuseLinalgOpAndSetMarker(
}
bool mlir::linalg::detail::isProducedByOpOfTypeImpl(
- Operation *consumerOp, ValuePtr consumedView,
+ Operation *consumerOp, Value consumedView,
function_ref<bool(Operation *)> isaOpType) {
LinalgOp consumer = dyn_cast<LinalgOp>(consumerOp);
if (!consumer)
@@ -166,7 +166,7 @@ LogicalResult mlir::linalg::vectorizeGenericOp(PatternRewriter &rewriter,
return failure();
// TODO(ntv): non-identity layout.
- auto isStaticMemRefWithIdentityLayout = [](ValuePtr v) {
+ auto isStaticMemRefWithIdentityLayout = [](Value v) {
auto m = v->getType().dyn_cast<MemRefType>();
if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty())
return false;
@@ -226,7 +226,7 @@ mlir::linalg::permuteGenericLinalgOp(PatternRewriter &rewriter, Operation *op,
LogicalResult mlir::linalg::linalgOpPromoteSubviews(PatternRewriter &rewriter,
Operation *op) {
LinalgOp linOp = dyn_cast<LinalgOp>(op);
- SetVector<ValuePtr> subViews;
+ SetVector<Value> subViews;
for (auto it : linOp.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
subViews.insert(sv);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index 08bc1518a19..b8b27958ff5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -46,15 +46,14 @@ static llvm::cl::opt<bool> clPromoteDynamic(
llvm::cl::desc("Test generation of dynamic promoted buffers"),
llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
-static ValuePtr allocBuffer(Type elementType, ValuePtr size,
- bool dynamicBuffers) {
+static Value allocBuffer(Type elementType, Value size, bool dynamicBuffers) {
auto *ctx = size->getContext();
auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
if (!dynamicBuffers)
if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size->getDefiningOp()))
return alloc(
MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
- ValuePtr mul = muli(constant_index(width), size);
+ Value mul = muli(constant_index(width), size);
return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
}
@@ -84,14 +83,14 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
auto viewType = subView.getType();
auto rank = viewType.getRank();
- ValuePtr allocSize = one;
- SmallVector<ValuePtr, 8> fullRanges, partialRanges;
+ Value allocSize = one;
+ SmallVector<Value, 8> fullRanges, partialRanges;
fullRanges.reserve(rank);
partialRanges.reserve(rank);
for (auto en : llvm::enumerate(subView.getRanges())) {
auto rank = en.index();
auto rangeValue = en.value();
- ValuePtr d = rangeValue.size;
+ Value d = rangeValue.size;
allocSize = muli(folder, allocSize, d).getValue();
fullRanges.push_back(d);
partialRanges.push_back(range(folder, zero, dim(subView, rank), one));
@@ -107,7 +106,7 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
SmallVector<PromotionInfo, 8>
mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
- ArrayRef<ValuePtr> subViews, bool dynamicBuffers,
+ ArrayRef<Value> subViews, bool dynamicBuffers,
OperationFolder *folder) {
if (subViews.empty())
return {};
@@ -115,7 +114,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
ScopedContext scope(b, loc);
SmallVector<PromotionInfo, 8> res;
res.reserve(subViews.size());
- DenseMap<ValuePtr, PromotionInfo> promotionInfoMap;
+ DenseMap<Value, PromotionInfo> promotionInfoMap;
for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto viewType = subView.getType();
@@ -136,7 +135,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
// TODO(ntv): value to fill with should be related to the operation.
// For now, just use APFloat(0.0f).
auto t = subView.getType().getElementType().cast<FloatType>();
- ValuePtr fillVal = constant_float(folder, APFloat(0.0f), t);
+ Value fillVal = constant_float(folder, APFloat(0.0f), t);
// TODO(ntv): fill is only necessary if `promotionInfo` has a full local
// view that is different from the partial local view and we are on the
// boundary.
@@ -153,16 +152,16 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
}
LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- SetVector<ValuePtr> subViews,
+ SetVector<Value> subViews,
bool dynamicBuffers,
OperationFolder *folder) {
// 1. Promote the specified views and use them in the new op.
ScopedContext scope(b, op.getLoc());
auto promotedBufferAndViews = promoteSubViews(
b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder);
- SmallVector<ValuePtr, 8> opViews;
+ SmallVector<Value, 8> opViews;
opViews.reserve(op.getNumInputsAndOutputs());
- SmallVector<std::pair<ValuePtr, ValuePtr>, 8> writebackViews;
+ SmallVector<std::pair<Value, Value>, 8> writebackViews;
writebackViews.reserve(subViews.size());
unsigned promotedIdx = 0;
for (auto view : op.getInputsAndOutputs()) {
@@ -206,7 +205,7 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) {
// TODO(ntv) some heuristic here to decide what to promote. Atm it is all or
// nothing.
- SetVector<ValuePtr> subViews;
+ SetVector<Value> subViews;
OpBuilder b(op);
for (auto it : op.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 99645a23100..964f540c099 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -44,7 +44,7 @@ static llvm::cl::list<unsigned>
llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
llvm::cl::cat(clOptionsCategory));
-static bool isZero(ValuePtr v) {
+static bool isZero(Value v) {
return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
}
@@ -62,12 +62,12 @@ using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// indices of newly created loops.
static std::tuple<SmallVector<SubViewOp::Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<ValuePtr> allViewSizes,
- ArrayRef<ValuePtr> allTileSizes, OperationFolder *folder) {
+ ArrayRef<Value> allViewSizes, ArrayRef<Value> allTileSizes,
+ OperationFolder *folder) {
assert(allTileSizes.size() == map.getNumResults());
// Apply `map` to get view sizes in loop order.
auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder);
- SmallVector<ValuePtr, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
+ SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
// Traverse the tile sizes, which are in loop order, erase zeros everywhere.
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
@@ -101,8 +101,7 @@ namespace {
// `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
- TileCheck(ArrayRef<ValuePtr> tileSizes)
- : isTiled(false), tileSizes(tileSizes) {}
+ TileCheck(ArrayRef<Value> tileSizes) : isTiled(false), tileSizes(tileSizes) {}
void visitDimExpr(AffineDimExpr expr) {
isTiled |= !isZero(tileSizes[expr.getPosition()]);
@@ -115,7 +114,7 @@ struct TileCheck : public AffineExprVisitor<TileCheck> {
"nonpositive multiplying coefficient");
}
bool isTiled;
- ArrayRef<ValuePtr> tileSizes;
+ ArrayRef<Value> tileSizes;
};
} // namespace
@@ -197,11 +196,11 @@ void transformIndexedGenericOpIndices(
auto rangeIndex = loopIndexToRangeIndex.find(i);
if (rangeIndex == loopIndexToRangeIndex.end())
continue;
- ValuePtr oldIndex = block.getArgument(i);
+ Value oldIndex = block.getArgument(i);
// Offset the index argument `i` by the value of the corresponding induction
// variable and replace all uses of the previous value.
- ValuePtr newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
- pivs[rangeIndex->second]->getValue());
+ Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+ pivs[rangeIndex->second]->getValue());
for (auto &use : oldIndex->getUses()) {
if (use.getOwner() == newIndex->getDefiningOp())
continue;
@@ -210,7 +209,7 @@ void transformIndexedGenericOpIndices(
}
}
-static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
+static bool isTiled(AffineExpr expr, ArrayRef<Value> tileSizes) {
if (!expr)
return false;
TileCheck t(tileSizes);
@@ -220,7 +219,7 @@ static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
// Checks whether the view with index `viewIndex` within `linalgOp` varies with
// respect to a non-zero `tileSize`.
-static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
+static bool isTiled(AffineMap map, ArrayRef<Value> tileSizes) {
if (!map)
return false;
for (unsigned r = 0; r < map.getNumResults(); ++r)
@@ -229,13 +228,13 @@ static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
return false;
}
-static SmallVector<ValuePtr, 4>
+static SmallVector<Value, 4>
makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
- ArrayRef<ValuePtr> ivs, ArrayRef<ValuePtr> tileSizes,
- ArrayRef<ValuePtr> viewSizes, OperationFolder *folder) {
+ ArrayRef<Value> ivs, ArrayRef<Value> tileSizes,
+ ArrayRef<Value> viewSizes, OperationFolder *folder) {
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
llvm::make_range(tileSizes.begin(), tileSizes.end()),
- [](ValuePtr v) { return !isZero(v); })) &&
+ [](Value v) { return !isZero(v); })) &&
"expected as many ivs as non-zero sizes");
using edsc::intrinsics::select;
@@ -244,22 +243,21 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Construct (potentially temporary) mins and maxes on which to apply maps
// that define tile subviews.
- SmallVector<ValuePtr, 8> lbs, subViewSizes;
+ SmallVector<Value, 8> lbs, subViewSizes;
for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
bool isTiled = !isZero(tileSizes[idx]);
- lbs.push_back(isTiled ? ivs[idxIvs++]
- : (ValuePtr)constant_index(folder, 0));
+ lbs.push_back(isTiled ? ivs[idxIvs++] : (Value)constant_index(folder, 0));
subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
}
auto *op = linalgOp.getOperation();
- SmallVector<ValuePtr, 4> res;
+ SmallVector<Value, 4> res;
res.reserve(op->getNumOperands());
auto viewIteratorBegin = linalgOp.getInputsAndOutputs().begin();
for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
++viewIndex) {
- ValuePtr view = *(viewIteratorBegin + viewIndex);
+ Value view = *(viewIteratorBegin + viewIndex);
unsigned rank = view->getType().cast<MemRefType>().getRank();
auto map = loopToOperandRangesMaps(linalgOp)[viewIndex];
// If the view is not tiled, we can use it as is.
@@ -269,7 +267,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
// Construct a new subview for the tile.
- SmallVector<ValuePtr, 4> offsets, sizes, strides;
+ SmallVector<Value, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -300,16 +298,17 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// This is a special type of folding that we only apply when `folder` is
// defined.
if (folder)
- for (auto v : llvm::concat<ValuePtr>(lbs, subViewSizes))
+ for (auto v : llvm::concat<Value>(lbs, subViewSizes))
if (v->use_empty())
v->getDefiningOp()->erase();
return res;
}
-Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
- OpBuilder &b, LinalgOp op, ArrayRef<ValuePtr> tileSizes,
- ArrayRef<unsigned> permutation, OperationFolder *folder) {
+Optional<TiledLinalgOp>
+mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
+ ArrayRef<unsigned> permutation,
+ OperationFolder *folder) {
// 1. Enforce the convention that "tiling by zero" skips tiling a particular
// dimension. This convention is significantly simpler to handle instead of
// adjusting affine maps to account for missing dimensions.
@@ -352,7 +351,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
LoopNestRangeBuilder(pivs, loopRanges)([&] {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- SmallVector<ValuePtr, 4> ivValues(ivs.begin(), ivs.end());
+ SmallVector<Value, 4> ivValues(ivs.begin(), ivs.end());
// If we have to apply a permutation to the tiled loop nest, we have to
// reorder the induction variables This permutation is the right one
@@ -403,7 +402,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
ScopedContext scope(b, op.getLoc());
// Materialize concrete tile size values to pass the generic tiling function.
- SmallVector<ValuePtr, 8> tileSizeValues;
+ SmallVector<Value, 8> tileSizeValues;
tileSizeValues.reserve(tileSizes.size());
for (auto ts : tileSizes)
tileSizeValues.push_back(constant_index(folder, ts));
OpenPOWER on IntegriCloud