author River Riddle <riverriddle@google.com> 2019-12-22 21:59:55 -0800
committer A. Unique TensorFlower <gardener@tensorflow.org> 2019-12-22 22:00:23 -0800
commit 35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch)
tree d083d37d993a774239081509a50e3e6c65366421 /mlir/lib/Dialect/Linalg/Transforms
parent 22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff)
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step to refactoring the representation of OpResult as proposed in:
https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ

This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics.

PiperOrigin-RevId: 286844725
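At this stage the typedef is a plain alias for the existing pointer representation; the hunks below only re-spell Value * at call sites. A minimal sketch of the aliases, assuming they sit alongside the Value class (the declaration itself is outside this diff):

    // Sketch under that assumption: ValuePtr/ValueRef alias the current
    // pointer-based Value so call sites can migrate incrementally before
    // Value itself becomes value-typed.
    using ValuePtr = Value *;
    using ValueRef = Value &;

With the alias in place, a declaration such as SmallVector<Value *, 8> becomes SmallVector<ValuePtr, 8> with no behavioral change, which is exactly the mechanical rewrite applied throughout the diff below.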
Diffstat (limited to 'mlir/lib/Dialect/Linalg/Transforms')
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp           | 32
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp    | 44
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp |  6
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp        | 35
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp           | 53
5 files changed, 86 insertions(+), 84 deletions(-)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 453daba204c..49cea7e4170 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -77,16 +77,16 @@ static llvm::cl::list<unsigned> clTileSizes(
static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
ArrayRef<SubViewOp::Range> loopRanges) {
auto maps = loopToOperandRangesMaps(op);
- SmallVector<Value *, 8> clonedViews;
+ SmallVector<ValuePtr, 8> clonedViews;
clonedViews.reserve(op.getNumInputsAndOutputs());
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<Value *, 8> ios(op.getInputsAndOutputs());
+ SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "map: " << map << "\n");
- Value *view = en.value();
+ ValuePtr view = en.value();
SmallVector<SubViewOp::Range, 4> viewRanges(map.getNumResults());
for (auto en2 : llvm::enumerate(map.getResults())) {
unsigned d = en2.index();
@@ -99,7 +99,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
// Construct a new subview for the tile.
unsigned rank = viewRanges.size();
- SmallVector<Value *, 4> offsets, sizes, strides;
+ SmallVector<ValuePtr, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -117,7 +117,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
}
struct ViewDimension {
- Value *view;
+ ValuePtr view;
unsigned dimension;
};
@@ -130,14 +130,14 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
auto maps = loopToOperandRangesMaps(op);
// Iterate over the inputs and outputs in order.
// Extract the subranges from the linearized ranges.
- SmallVector<Value *, 8> ios(op.getInputsAndOutputs());
+ SmallVector<ValuePtr, 8> ios(op.getInputsAndOutputs());
for (auto en : llvm::enumerate(ios)) {
unsigned idx = en.index();
auto map = maps[idx];
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange I/O idx: " << idx << "\n");
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange map: " << map << "\n");
- Value *view = en.value();
- SmallVector<Value *, 8> viewRanges(map.getNumResults(), nullptr);
+ ValuePtr view = en.value();
+ SmallVector<ValuePtr, 8> viewRanges(map.getNumResults(), nullptr);
for (auto en2 : llvm::enumerate(map.getResults())) {
if (loopDepth == en2.value().cast<AffineDimExpr>().getPosition()) {
LLVM_DEBUG(dbgs() << "getViewDefiningLoopRange loopDepth: " << loopDepth
@@ -151,9 +151,9 @@ static ViewDimension getViewDefiningLoopRange(LinalgOp op, unsigned loopDepth) {
llvm_unreachable("Expect to be able to extract a view defining loop range");
}
-static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer,
- unsigned consumerIdx, unsigned producerIdx,
- OperationFolder *folder) {
+static LinalgOp fuse(ValuePtr producedView, LinalgOp producer,
+ LinalgOp consumer, unsigned consumerIdx,
+ unsigned producerIdx, OperationFolder *folder) {
auto subView = dyn_cast_or_null<SubViewOp>(
consumer.getInput(consumerIdx)->getDefiningOp());
auto slice = dyn_cast_or_null<SliceOp>(
@@ -206,7 +206,7 @@ static LinalgOp fuse(Value *producedView, LinalgOp producer, LinalgOp consumer,
// Encode structural fusion safety preconditions.
// Some of these will be lifted in the future with better analysis.
static bool isStructurallyFusableProducer(LinalgOp producer,
- Value *consumedView,
+ ValuePtr consumedView,
LinalgOp consumer) {
if (producer.getNumOutputs() != 1) {
LLVM_DEBUG(dbgs() << "\nNot structurally fusable (multi-output)");
@@ -226,7 +226,7 @@ static bool isStructurallyFusableProducer(LinalgOp producer,
bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
LinalgOp consumer,
- Value *consumedView,
+ ValuePtr consumedView,
LinalgOp producer) {
// Make some simple structural checks that alleviate the need for more
// complex analyses.
@@ -245,7 +245,7 @@ bool mlir::linalg::isProducerLastWriteOfView(const LinalgDependenceGraph &graph,
}
bool mlir::linalg::isFusableInto(const LinalgDependenceGraph &graph,
- LinalgOp consumer, Value *consumedView,
+ LinalgOp consumer, ValuePtr consumedView,
LinalgOp producer) {
if (!isProducerLastWriteOfView(graph, consumer, consumedView, producer))
return false;
@@ -272,13 +272,13 @@ Optional<FusionInfo> mlir::linalg::fuseProducerOf(
auto producer = cast<LinalgOp>(dependence.dependentOpView.op);
// Check that the dependence is indeed on the input `consumerIdx` view.
- auto *consumedView = dependence.indexingView;
+ auto consumedView = dependence.indexingView;
if (consumer.getInput(consumerIdx) != consumedView)
continue;
// Consumer consumes this view, `isStructurallyFusableProducer` also checks
// whether it is a strict subview of the producer view.
- auto *producedView = dependence.dependentOpView.view;
+ auto producedView = dependence.dependentOpView.view;
auto producerIdx = producer.getIndexOfOutput(producedView).getValue();
// `consumerIdx` and `producerIdx` exist by construction.
LLVM_DEBUG(dbgs() << "\nRAW producer: " << *producer.getOperation()
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
index c50c495750f..e468c19a0b4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgToLoops.cpp
@@ -49,7 +49,7 @@ using edsc::op::operator==;
static SmallVector<ValueHandle, 8>
makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> vals) {
+ ArrayRef<ValuePtr> vals) {
assert(map.getNumSymbols() == 0);
assert(map.getNumInputs() == vals.size());
SmallVector<ValueHandle, 8> res;
@@ -57,35 +57,35 @@ makeCanonicalAffineApplies(OpBuilder &b, Location loc, AffineMap map,
auto dims = map.getNumDims();
for (auto e : map.getResults()) {
auto exprMap = AffineMap::get(dims, 0, e);
- SmallVector<Value *, 4> operands(vals.begin(), vals.end());
+ SmallVector<ValuePtr, 4> operands(vals.begin(), vals.end());
canonicalizeMapAndOperands(&exprMap, &operands);
res.push_back(affine_apply(exprMap, operands));
}
return res;
}
-static SmallVector<Value *, 4> permuteIvs(ArrayRef<Value *> ivs,
- Optional<AffineMap> permutation) {
+static SmallVector<ValuePtr, 4> permuteIvs(ArrayRef<ValuePtr> ivs,
+ Optional<AffineMap> permutation) {
return permutation ? applyMapToValues(ScopedContext::getBuilder(),
ScopedContext::getLocation(),
permutation.getValue(), ivs)
- : SmallVector<Value *, 4>(ivs.begin(), ivs.end());
+ : SmallVector<ValuePtr, 4>(ivs.begin(), ivs.end());
}
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<Value *, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> allViewSizes);
-SmallVector<Value *, 4> emitLoopRanges(OpBuilder &b, Location loc,
- AffineMap map,
- ArrayRef<Value *> allViewSizes) {
+static SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> allViewSizes);
+SmallVector<ValuePtr, 4> emitLoopRanges(OpBuilder &b, Location loc,
+ AffineMap map,
+ ArrayRef<ValuePtr> allViewSizes) {
// Apply `map` to get view sizes in loop order.
auto sizes = applyMapToValues(b, loc, map, allViewSizes);
// Create a new range with the applied tile sizes.
ScopedContext scope(b, loc);
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
res.push_back(range(constant_index(0), sizes[idx], constant_index(1)));
}
@@ -98,7 +98,7 @@ class LinalgScopedEmitter {};
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
CopyOp copyOp) {
auto nPar = copyOp.getNumParallelLoops();
assert(nPar == allIvs.size());
@@ -121,7 +121,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
FillOp fillOp) {
auto nPar = fillOp.getNumParallelLoops();
assert(nPar == allIvs.size());
@@ -138,7 +138,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs, DotOp dotOp) {
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs, DotOp dotOp) {
assert(allIvs.size() == 1);
IndexHandle r_i(allIvs[0]);
IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
@@ -151,7 +151,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
MatvecOp matvecOp) {
assert(allIvs.size() == 2);
IndexHandle i(allIvs[0]), r_j(allIvs[1]);
@@ -165,7 +165,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
MatmulOp matmulOp) {
assert(allIvs.size() == 3);
IndexHandle i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
@@ -179,7 +179,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
ConvOp convOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -229,14 +229,14 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, GenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
GenericOp genericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
using edsc::intrinsics::detail::ValueHandleArray;
unsigned nInputs = genericOp.getNumInputs();
unsigned nOutputs = genericOp.getNumOutputs();
- SmallVector<Value *, 4> indexedValues(nInputs + nOutputs);
+ SmallVector<ValuePtr, 4> indexedValues(nInputs + nOutputs);
// 1.a. Emit std_load from input views.
for (unsigned i = 0; i < nInputs; ++i) {
@@ -324,7 +324,7 @@ public:
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
- static void emitScalarImplementation(ArrayRef<Value *> allIvs,
+ static void emitScalarImplementation(ArrayRef<ValuePtr> allIvs,
IndexedGenericOp indexedGenericOp) {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
@@ -332,7 +332,7 @@ public:
unsigned nInputs = indexedGenericOp.getNumInputs();
unsigned nOutputs = indexedGenericOp.getNumOutputs();
unsigned nLoops = allIvs.size();
- SmallVector<Value *, 4> indexedValues(nLoops + nInputs + nOutputs);
+ SmallVector<ValuePtr, 4> indexedValues(nLoops + nInputs + nOutputs);
for (unsigned i = 0; i < nLoops; ++i) {
indexedValues[i] = allIvs[i];
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
index f4364928af8..999406e05cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgTransforms.cpp
@@ -99,7 +99,7 @@ LogicalResult mlir::linalg::tileAndFuseLinalgOpAndSetMarker(
}
bool mlir::linalg::detail::isProducedByOpOfTypeImpl(
- Operation *consumerOp, Value *consumedView,
+ Operation *consumerOp, ValuePtr consumedView,
function_ref<bool(Operation *)> isaOpType) {
LinalgOp consumer = dyn_cast<LinalgOp>(consumerOp);
if (!consumer)
@@ -175,7 +175,7 @@ LogicalResult mlir::linalg::vectorizeGenericOp(PatternRewriter &rewriter,
return failure();
// TODO(ntv): non-identity layout.
- auto isStaticMemRefWithIdentityLayout = [](Value *v) {
+ auto isStaticMemRefWithIdentityLayout = [](ValuePtr v) {
auto m = v->getType().dyn_cast<MemRefType>();
if (!m || !m.hasStaticShape() || !m.getAffineMaps().empty())
return false;
@@ -235,7 +235,7 @@ mlir::linalg::permuteGenericLinalgOp(PatternRewriter &rewriter, Operation *op,
LogicalResult mlir::linalg::linalgOpPromoteSubviews(PatternRewriter &rewriter,
Operation *op) {
LinalgOp linOp = dyn_cast<LinalgOp>(op);
- SetVector<Value *> subViews;
+ SetVector<ValuePtr> subViews;
for (auto it : linOp.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
subViews.insert(sv);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index c7fbebce383..b1dae455194 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -55,14 +55,15 @@ static llvm::cl::opt<bool> clPromoteDynamic(
llvm::cl::desc("Test generation of dynamic promoted buffers"),
llvm::cl::cat(clOptionsCategory), llvm::cl::init(false));
-static Value *allocBuffer(Type elementType, Value *size, bool dynamicBuffers) {
+static ValuePtr allocBuffer(Type elementType, ValuePtr size,
+ bool dynamicBuffers) {
auto *ctx = size->getContext();
auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
if (!dynamicBuffers)
if (auto cst = dyn_cast_or_null<ConstantIndexOp>(size->getDefiningOp()))
return alloc(
MemRefType::get(width * cst.getValue(), IntegerType::get(8, ctx)));
- Value *mul = muli(constant_index(width), size);
+ ValuePtr mul = muli(constant_index(width), size);
return alloc(MemRefType::get(-1, IntegerType::get(8, ctx)), mul);
}
@@ -92,20 +93,20 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
auto viewType = subView.getType();
auto rank = viewType.getRank();
- Value *allocSize = one;
- SmallVector<Value *, 8> fullRanges, partialRanges;
+ ValuePtr allocSize = one;
+ SmallVector<ValuePtr, 8> fullRanges, partialRanges;
fullRanges.reserve(rank);
partialRanges.reserve(rank);
for (auto en : llvm::enumerate(subView.getRanges())) {
auto rank = en.index();
auto rangeValue = en.value();
- Value *d = rangeValue.size;
+ ValuePtr d = rangeValue.size;
allocSize = muli(folder, allocSize, d).getValue();
fullRanges.push_back(d);
partialRanges.push_back(range(folder, zero, dim(subView, rank), one));
}
SmallVector<int64_t, 4> dynSizes(fullRanges.size(), -1);
- auto *buffer =
+ auto buffer =
allocBuffer(viewType.getElementType(), allocSize, dynamicBuffers);
auto fullLocalView = view(
MemRefType::get(dynSizes, viewType.getElementType()), buffer, fullRanges);
@@ -115,7 +116,7 @@ static PromotionInfo promoteFullTileBuffer(OpBuilder &b, Location loc,
SmallVector<PromotionInfo, 8>
mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
- ArrayRef<Value *> subViews, bool dynamicBuffers,
+ ArrayRef<ValuePtr> subViews, bool dynamicBuffers,
OperationFolder *folder) {
if (subViews.empty())
return {};
@@ -123,8 +124,8 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
ScopedContext scope(b, loc);
SmallVector<PromotionInfo, 8> res;
res.reserve(subViews.size());
- DenseMap<Value *, PromotionInfo> promotionInfoMap;
- for (auto *v : subViews) {
+ DenseMap<ValuePtr, PromotionInfo> promotionInfoMap;
+ for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto viewType = subView.getType();
// TODO(ntv): support more cases than just float.
@@ -136,7 +137,7 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
res.push_back(promotionInfo);
}
- for (auto *v : subViews) {
+ for (auto v : subViews) {
SubViewOp subView = cast<SubViewOp>(v->getDefiningOp());
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
@@ -144,14 +145,14 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
// TODO(ntv): value to fill with should be related to the operation.
// For now, just use APFloat(0.0f).
auto t = subView.getType().getElementType().cast<FloatType>();
- Value *fillVal = constant_float(folder, APFloat(0.0f), t);
+ ValuePtr fillVal = constant_float(folder, APFloat(0.0f), t);
// TODO(ntv): fill is only necessary if `promotionInfo` has a full local
// view that is different from the partial local view and we are on the
// boundary.
fill(info->second.fullLocalView, fillVal);
}
- for (auto *v : subViews) {
+ for (auto v : subViews) {
auto info = promotionInfoMap.find(v);
if (info == promotionInfoMap.end())
continue;
@@ -161,19 +162,19 @@ mlir::linalg::promoteSubViews(OpBuilder &b, Location loc,
}
LinalgOp mlir::linalg::promoteSubViewOperands(OpBuilder &b, LinalgOp op,
- SetVector<Value *> subViews,
+ SetVector<ValuePtr> subViews,
bool dynamicBuffers,
OperationFolder *folder) {
// 1. Promote the specified views and use them in the new op.
ScopedContext scope(b, op.getLoc());
auto promotedBufferAndViews = promoteSubViews(
b, op.getLoc(), subViews.getArrayRef(), dynamicBuffers, folder);
- SmallVector<Value *, 8> opViews;
+ SmallVector<ValuePtr, 8> opViews;
opViews.reserve(op.getNumInputsAndOutputs());
- SmallVector<std::pair<Value *, Value *>, 8> writebackViews;
+ SmallVector<std::pair<ValuePtr, ValuePtr>, 8> writebackViews;
writebackViews.reserve(subViews.size());
unsigned promotedIdx = 0;
- for (auto *view : op.getInputsAndOutputs()) {
+ for (auto view : op.getInputsAndOutputs()) {
if (subViews.count(view) != 0) {
opViews.push_back(promotedBufferAndViews[promotedIdx].fullLocalView);
writebackViews.emplace_back(std::make_pair(
@@ -214,7 +215,7 @@ static void promoteSubViews(FuncOp f, bool dynamicBuffers) {
f.walk([dynamicBuffers, &folder, &toErase](LinalgOp op) {
// TODO(ntv) some heuristic here to decide what to promote. Atm it is all or
// nothing.
- SetVector<Value *> subViews;
+ SetVector<ValuePtr> subViews;
OpBuilder b(op);
for (auto it : op.getInputsAndOutputs())
if (auto sv = dyn_cast_or_null<SubViewOp>(it->getDefiningOp()))
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 4d8a24cb6cb..07d559918cf 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -53,7 +53,7 @@ static llvm::cl::list<unsigned>
llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
llvm::cl::cat(clOptionsCategory));
-static bool isZero(Value *v) {
+static bool isZero(ValuePtr v) {
return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
}
@@ -71,12 +71,12 @@ using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// indices of newly created loops.
static std::tuple<SmallVector<SubViewOp::Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
- ArrayRef<Value *> allViewSizes,
- ArrayRef<Value *> allTileSizes, OperationFolder *folder) {
+ ArrayRef<ValuePtr> allViewSizes,
+ ArrayRef<ValuePtr> allTileSizes, OperationFolder *folder) {
assert(allTileSizes.size() == map.getNumResults());
// Apply `map` to get view sizes in loop order.
auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder);
- SmallVector<Value *, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
+ SmallVector<ValuePtr, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());
// Traverse the tile sizes, which are in loop order, erase zeros everywhere.
LoopIndexToRangeIndexMap loopIndexToRangeIndex;
@@ -110,7 +110,7 @@ namespace {
// `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
- TileCheck(ArrayRef<Value *> tileSizes)
+ TileCheck(ArrayRef<ValuePtr> tileSizes)
: isTiled(false), tileSizes(tileSizes) {}
void visitDimExpr(AffineDimExpr expr) {
@@ -124,7 +124,7 @@ struct TileCheck : public AffineExprVisitor<TileCheck> {
"nonpositive multiplying coefficient");
}
bool isTiled;
- ArrayRef<Value *> tileSizes;
+ ArrayRef<ValuePtr> tileSizes;
};
} // namespace
@@ -206,11 +206,11 @@ void transformIndexedGenericOpIndices(
auto rangeIndex = loopIndexToRangeIndex.find(i);
if (rangeIndex == loopIndexToRangeIndex.end())
continue;
- Value *oldIndex = block.getArgument(i);
+ ValuePtr oldIndex = block.getArgument(i);
// Offset the index argument `i` by the value of the corresponding induction
// variable and replace all uses of the previous value.
- Value *newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
- pivs[rangeIndex->second]->getValue());
+ ValuePtr newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+ pivs[rangeIndex->second]->getValue());
for (auto &use : oldIndex->getUses()) {
if (use.getOwner() == newIndex->getDefiningOp())
continue;
@@ -219,7 +219,7 @@ void transformIndexedGenericOpIndices(
}
}
-static bool isTiled(AffineExpr expr, ArrayRef<Value *> tileSizes) {
+static bool isTiled(AffineExpr expr, ArrayRef<ValuePtr> tileSizes) {
if (!expr)
return false;
TileCheck t(tileSizes);
@@ -229,7 +229,7 @@ static bool isTiled(AffineExpr expr, ArrayRef<Value *> tileSizes) {
// Checks whether the view with index `viewIndex` within `linalgOp` varies with
// respect to a non-zero `tileSize`.
-static bool isTiled(AffineMap map, ArrayRef<Value *> tileSizes) {
+static bool isTiled(AffineMap map, ArrayRef<ValuePtr> tileSizes) {
if (!map)
return false;
for (unsigned r = 0; r < map.getNumResults(); ++r)
@@ -238,13 +238,13 @@ static bool isTiled(AffineMap map, ArrayRef<Value *> tileSizes) {
return false;
}
-static SmallVector<Value *, 4>
+static SmallVector<ValuePtr, 4>
makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
- ArrayRef<Value *> ivs, ArrayRef<Value *> tileSizes,
- ArrayRef<Value *> viewSizes, OperationFolder *folder) {
+ ArrayRef<ValuePtr> ivs, ArrayRef<ValuePtr> tileSizes,
+ ArrayRef<ValuePtr> viewSizes, OperationFolder *folder) {
assert(ivs.size() == static_cast<size_t>(llvm::count_if(
llvm::make_range(tileSizes.begin(), tileSizes.end()),
- [](Value *v) { return !isZero(v); })) &&
+ [](ValuePtr v) { return !isZero(v); })) &&
"expected as many ivs as non-zero sizes");
using edsc::intrinsics::select;
@@ -253,21 +253,22 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Construct (potentially temporary) mins and maxes on which to apply maps
// that define tile subviews.
- SmallVector<Value *, 8> lbs, subViewSizes;
+ SmallVector<ValuePtr, 8> lbs, subViewSizes;
for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
bool isTiled = !isZero(tileSizes[idx]);
- lbs.push_back(isTiled ? ivs[idxIvs++] : (Value *)constant_index(folder, 0));
+ lbs.push_back(isTiled ? ivs[idxIvs++]
+ : (ValuePtr)constant_index(folder, 0));
subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
}
auto *op = linalgOp.getOperation();
- SmallVector<Value *, 4> res;
+ SmallVector<ValuePtr, 4> res;
res.reserve(op->getNumOperands());
auto viewIteratorBegin = linalgOp.getInputsAndOutputs().begin();
for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
++viewIndex) {
- Value *view = *(viewIteratorBegin + viewIndex);
+ ValuePtr view = *(viewIteratorBegin + viewIndex);
unsigned rank = view->getType().cast<MemRefType>().getRank();
auto map = loopToOperandRangesMaps(linalgOp)[viewIndex];
// If the view is not tiled, we can use it as is.
@@ -277,7 +278,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
// Construct a new subview for the tile.
- SmallVector<Value *, 4> offsets, sizes, strides;
+ SmallVector<ValuePtr, 4> offsets, sizes, strides;
offsets.reserve(rank);
sizes.reserve(rank);
strides.reserve(rank);
@@ -292,9 +293,9 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// Tiling creates a new slice at the proper index, the slice step is 1
// (i.e. the slice view does not subsample, stepping occurs in the loop).
auto m = map.getSubMap({r});
- auto *offset = applyMapToValues(b, loc, m, lbs, folder).front();
+ auto offset = applyMapToValues(b, loc, m, lbs, folder).front();
offsets.push_back(offset);
- auto *size = applyMapToValues(b, loc, m, subViewSizes, folder).front();
+ auto size = applyMapToValues(b, loc, m, subViewSizes, folder).front();
sizes.push_back(size);
strides.push_back(constant_index(folder, 1));
}
@@ -308,7 +309,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
// This is a special type of folding that we only apply when `folder` is
// defined.
if (folder)
- for (auto *v : llvm::concat<Value *>(lbs, subViewSizes))
+ for (auto v : llvm::concat<ValuePtr>(lbs, subViewSizes))
if (v->use_empty())
v->getDefiningOp()->erase();
@@ -316,7 +317,7 @@ makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
}
Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
- OpBuilder &b, LinalgOp op, ArrayRef<Value *> tileSizes,
+ OpBuilder &b, LinalgOp op, ArrayRef<ValuePtr> tileSizes,
ArrayRef<unsigned> permutation, OperationFolder *folder) {
// 1. Enforce the convention that "tiling by zero" skips tiling a particular
// dimension. This convention is significantly simpler to handle instead of
@@ -360,7 +361,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
LoopNestRangeBuilder(pivs, loopRanges)([&] {
auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- SmallVector<Value *, 4> ivValues(ivs.begin(), ivs.end());
+ SmallVector<ValuePtr, 4> ivValues(ivs.begin(), ivs.end());
// If we have to apply a permutation to the tiled loop nest, we have to
// reorder the induction variables This permutation is the right one
@@ -411,7 +412,7 @@ Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
ScopedContext scope(b, op.getLoc());
// Materialize concrete tile size values to pass the generic tiling function.
- SmallVector<Value *, 8> tileSizeValues;
+ SmallVector<ValuePtr, 8> tileSizeValues;
tileSizeValues.reserve(tileSizes.size());
for (auto ts : tileSizes)
tileSizeValues.push_back(constant_index(folder, ts));