author     Nicolas Vasilache <ntv@google.com>    2019-06-20 15:10:35 -0700
committer  jpienaar <jpienaar@google.com>        2019-06-22 09:14:49 -0700
commit     0804750c9b529f93764a0fcbb3557064dfeda72a (patch)
tree       c918f92bd227180fb1d4a79af603da651d4dcc21
parent     e19a0857a2925fc9aada9293206f7e9ad93ad8c5 (diff)
Uniformize usage of OpBuilder& (NFC)
Historically, the pointer-based version of builders (OpBuilder *) was used. This CL uniformizes usage to OpBuilder &.

PiperOrigin-RevId: 254280885
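For illustration, a minimal before/after sketch of the calling convention this CL adopts. The helper name `emitZero` and the include paths are hypothetical (not part of this commit); the OpBuilder and ConstantIndexOp usage mirrors the hunks below.

```
// Hypothetical helper illustrating the migration; not part of this commit.
#include "mlir/IR/Builders.h"      // OpBuilder (assumed include path)
#include "mlir/StandardOps/Ops.h"  // ConstantIndexOp (assumed include path)

using namespace mlir;

// Old style: the builder travels by pointer and is dereferenced with '->'.
static Value *emitZeroOld(OpBuilder *b, Location loc) {
  return b->create<ConstantIndexOp>(loc, 0).getResult();
}

// New style after this CL: the builder travels by reference and uses '.'.
static Value *emitZero(OpBuilder &b, Location loc) {
  return b.create<ConstantIndexOp>(loc, 0).getResult();
}

// Call sites simply drop the address-of operator:
//   emitZeroOld(&builder, loc);  // before
//   emitZero(builder, loc);      // after
```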
-rw-r--r--  mlir/examples/Linalg/Linalg1/lib/Utils.cpp                |  2
-rw-r--r--  mlir/examples/Linalg/Linalg3/lib/Transforms.cpp           |  4
-rw-r--r--  mlir/examples/Linalg/Linalg4/lib/Transforms.cpp           |  2
-rw-r--r--  mlir/g3doc/Tutorials/Toy/Ch-2.md                          |  2
-rw-r--r--  mlir/g3doc/Tutorials/Toy/Ch-3.md                          |  2
-rw-r--r--  mlir/include/mlir/AffineOps/AffineOps.h                   |  2
-rw-r--r--  mlir/include/mlir/EDSC/Builders.h                         | 10
-rw-r--r--  mlir/include/mlir/EDSC/Intrinsics.h                       |  8
-rw-r--r--  mlir/include/mlir/Linalg/Utils/Utils.h                    |  2
-rw-r--r--  mlir/include/mlir/Transforms/LoopUtils.h                  |  2
-rw-r--r--  mlir/include/mlir/Transforms/Utils.h                      |  2
-rw-r--r--  mlir/lib/AffineOps/AffineOps.cpp                          |  4
-rw-r--r--  mlir/lib/EDSC/Builders.cpp                                | 30
-rw-r--r--  mlir/lib/Linalg/Transforms/Fusion.cpp                     |  8
-rw-r--r--  mlir/lib/Linalg/Transforms/LowerToLoops.cpp               |  8
-rw-r--r--  mlir/lib/Linalg/Transforms/Tiling.cpp                     | 24
-rw-r--r--  mlir/lib/Linalg/Utils/Utils.cpp                           |  6
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp                     | 22
-rw-r--r--  mlir/lib/Transforms/LoopUnrollAndJam.cpp                  |  2
-rw-r--r--  mlir/lib/Transforms/LowerAffine.cpp                       | 19
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp                | 38
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp                   | 38
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp  |  2
23 files changed, 118 insertions, 121 deletions
diff --git a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
index 05070a9de30..b26b24214cb 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
@@ -46,6 +46,6 @@ ViewOp linalg::emitAndReturnViewOpFromMemRef(Value *memRef) {
for (unsigned i = 0; i < v.rank(); ++i) {
indices[i] = range(v.lb(i), v.ub(i), constant_index(v.step(i)));
}
- return ScopedContext::getBuilder()->create<ViewOp>(
+ return ScopedContext::getBuilder().create<ViewOp>(
ScopedContext::getLocation(), memRef, indices);
}
diff --git a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
index 5b16ce0eda5..d5c8641acbe 100644
--- a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
@@ -78,9 +78,9 @@ Value *linalg::makeFoldedComposedAffineApply(AffineMap map,
if (auto *v = tryFold(map, operands)) {
return v;
}
- auto *b = ScopedContext::getBuilder();
+ auto &b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- return b->create<AffineApplyOp>(loc, map, operands).getResult();
+ return b.create<AffineApplyOp>(loc, map, operands).getResult();
}
linalg::RangeParts::RangeParts(unsigned reserved) {
diff --git a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
index 11cd6e5cd9a..1a308df1313 100644
--- a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
@@ -161,7 +161,7 @@ writeContractionAsTiledViews(TensorContractionBase<ConcreteOp> &contraction,
[&contraction, &tileSizes, &ivs]() {
SmallVector<Value *, 4> ivValues(ivs.begin(), ivs.end());
auto views = makeTiledViews(contraction, ivValues, tileSizes);
- ScopedContext::getBuilder()->create<ConcreteOp>(
+ ScopedContext::getBuilder().create<ConcreteOp>(
ScopedContext::getLocation(), views);
});
// clang-format on
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-2.md b/mlir/g3doc/Tutorials/Toy/Ch-2.md
index 9b07385bb89..37a43acfdbc 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-2.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-2.md
@@ -123,7 +123,7 @@ generation through a simple depth-first search traversal of the Toy AST. Here is
how we create a `toy.transpose` operation:
```
-mlir::Operation *createTransposeOp(OpBuilder *builder,
+mlir::Operation *createTransposeOp(OpBuilder &builder,
mlir::Value *input_array) {
// We bundle our custom type in a `toy` dialect.
auto toyDialect = mlir::Identifier::get("toy", builder->getContext());
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-3.md b/mlir/g3doc/Tutorials/Toy/Ch-3.md
index 9ff6c401b55..a8a7ca8b3b9 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-3.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-3.md
@@ -206,7 +206,7 @@ class GenericCallOp
/// This method populate the `state` that MLIR use to create operations.
/// The `toy.generic_call` operation accepts a callee name and a list of
/// arguments for the call.
- static void build(mlir::OpBuilder *builder, mlir::OperationState *state,
+ static void build(mlir::OpBuilder &builder, mlir::OperationState *state,
llvm::StringRef callee,
llvm::ArrayRef<mlir::Value *> arguments);
diff --git a/mlir/include/mlir/AffineOps/AffineOps.h b/mlir/include/mlir/AffineOps/AffineOps.h
index 8fcd0abe920..4e048eba9d0 100644
--- a/mlir/include/mlir/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/AffineOps/AffineOps.h
@@ -361,7 +361,7 @@ void canonicalizeMapAndOperands(AffineMap *map,
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the resulting
/// AffineApplyOp do not change the length of AffineApplyOp chains.
-AffineApplyOp makeComposedAffineApply(OpBuilder *b, Location loc, AffineMap map,
+AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
llvm::ArrayRef<Value *> operands);
/// Given an affine map `map` and its input `operands`, this method composes
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index aa5c321627d..4f3177daafa 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -60,7 +60,7 @@ public:
~ScopedContext();
static MLIRContext *getContext();
- static OpBuilder *getBuilder();
+ static OpBuilder &getBuilder();
static Location getLocation();
private:
@@ -122,7 +122,7 @@ protected:
/// point, which is useful for non-empty blocks.
void enter(mlir::Block *block, int prev = 0) {
bodyScope = new ScopedContext(
- *ScopedContext::getBuilder(),
+ ScopedContext::getBuilder(),
OpBuilder::InsertPoint(block, std::prev(block->end(), prev)),
ScopedContext::getLocation());
bodyScope->nestedBuilder = this;
@@ -432,7 +432,7 @@ private:
template <typename Op, typename... Args>
OperationHandle OperationHandle::create(Args... args) {
return OperationHandle(ScopedContext::getBuilder()
- ->create<Op>(ScopedContext::getLocation(), args...)
+ .create<Op>(ScopedContext::getLocation(), args...)
.getOperation());
}
@@ -440,7 +440,7 @@ template <typename Op, typename... Args>
Op OperationHandle::createOp(Args... args) {
return cast<Op>(
OperationHandle(ScopedContext::getBuilder()
- ->create<Op>(ScopedContext::getLocation(), args...)
+ .create<Op>(ScopedContext::getLocation(), args...)
.getOperation())
.getOperation());
}
@@ -448,7 +448,7 @@ Op OperationHandle::createOp(Args... args) {
template <typename Op, typename... Args>
ValueHandle ValueHandle::create(Args... args) {
Operation *op = ScopedContext::getBuilder()
- ->create<Op>(ScopedContext::getLocation(), args...)
+ .create<Op>(ScopedContext::getLocation(), args...)
.getOperation();
if (op->getNumResults() == 1) {
return ValueHandle(op->getResult(0));
diff --git a/mlir/include/mlir/EDSC/Intrinsics.h b/mlir/include/mlir/EDSC/Intrinsics.h
index 05d69438d23..9b5c9d6c594 100644
--- a/mlir/include/mlir/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/EDSC/Intrinsics.h
@@ -42,18 +42,18 @@ namespace edsc {
/// ```
struct IndexHandle : public ValueHandle {
explicit IndexHandle()
- : ValueHandle(ScopedContext::getBuilder()->getIndexType()) {}
+ : ValueHandle(ScopedContext::getBuilder().getIndexType()) {}
explicit IndexHandle(index_t v) : ValueHandle(v) {}
explicit IndexHandle(Value *v) : ValueHandle(v) {
- assert(v->getType() == ScopedContext::getBuilder()->getIndexType() &&
+ assert(v->getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
}
explicit IndexHandle(ValueHandle v) : ValueHandle(v) {
- assert(v.getType() == ScopedContext::getBuilder()->getIndexType() &&
+ assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
}
IndexHandle &operator=(const ValueHandle &v) {
- assert(v.getType() == ScopedContext::getBuilder()->getIndexType() &&
+ assert(v.getType() == ScopedContext::getBuilder().getIndexType() &&
"Expected index type");
/// Creating a new IndexHandle(v) and then std::swap rightly complains the
/// binding has already occurred and that we should use another name.
diff --git a/mlir/include/mlir/Linalg/Utils/Utils.h b/mlir/include/mlir/Linalg/Utils/Utils.h
index e5a93770a43..6a50fb80d04 100644
--- a/mlir/include/mlir/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Linalg/Utils/Utils.h
@@ -76,7 +76,7 @@ SmallVector<Value *, 8> getViewSizes(LinalgOp &linalgOp);
/// Returns the values obtained by applying `map` to the list of values.
/// Performs simplifications and foldings where possible.
-SmallVector<Value *, 4> applyMapToValues(OpBuilder *b, Location loc,
+SmallVector<Value *, 4> applyMapToValues(OpBuilder &b, Location loc,
AffineMap map,
ArrayRef<Value *> values,
OperationFolder &state);
diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
index 8a255228893..c243a3d6fb7 100644
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -80,7 +80,7 @@ void promoteSingleIterationLoops(Function *f);
void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
- OpBuilder *builder);
+ OpBuilder &builder);
/// Skew the operations in the body of a 'affine.for' operation with the
/// specified operation-wise shifts. The shifts are with respect to the
diff --git a/mlir/include/mlir/Transforms/Utils.h b/mlir/include/mlir/Transforms/Utils.h
index 1b32a98206c..89c79a615d1 100644
--- a/mlir/include/mlir/Transforms/Utils.h
+++ b/mlir/include/mlir/Transforms/Utils.h
@@ -81,7 +81,7 @@ bool replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
/// these will also be collected into a single (multi-result) affine apply op.
/// The final results of the composed AffineApplyOp are returned in output
/// parameter 'results'. Returns the affine apply op created.
-Operation *createComposedAffineApplyOp(OpBuilder *builder, Location loc,
+Operation *createComposedAffineApplyOp(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands,
ArrayRef<Operation *> affineApplyOps,
SmallVectorImpl<Value *> *results);
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 26a35ad844b..9869a3a777e 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -553,14 +553,14 @@ void mlir::fullyComposeAffineMapAndOperands(
}
}
-AffineApplyOp mlir::makeComposedAffineApply(OpBuilder *b, Location loc,
+AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
AffineMap map,
ArrayRef<Value *> operands) {
AffineMap normalizedMap = map;
SmallVector<Value *, 8> normalizedOperands(operands.begin(), operands.end());
composeAffineMapAndOperands(&normalizedMap, &normalizedOperands);
assert(normalizedMap);
- return b->create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
+ return b.create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
}
// A symbol may appear as a dim in affine.apply operations. This function
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 6f6363ffcc5..af59d3a8dde 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -58,10 +58,10 @@ ScopedContext *&mlir::edsc::ScopedContext::getCurrentScopedContext() {
return context;
}
-OpBuilder *mlir::edsc::ScopedContext::getBuilder() {
+OpBuilder &mlir::edsc::ScopedContext::getBuilder() {
assert(ScopedContext::getCurrentScopedContext() &&
"Unexpected Null ScopedContext");
- return &ScopedContext::getCurrentScopedContext()->builder;
+ return ScopedContext::getCurrentScopedContext()->builder;
}
Location mlir::edsc::ScopedContext::getLocation() {
@@ -71,14 +71,13 @@ Location mlir::edsc::ScopedContext::getLocation() {
}
MLIRContext *mlir::edsc::ScopedContext::getContext() {
- assert(getBuilder() && "Unexpected null builder");
- return getBuilder()->getContext();
+ return getBuilder().getContext();
}
mlir::edsc::ValueHandle::ValueHandle(index_t cst) {
- auto *b = ScopedContext::getBuilder();
+ auto &b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
- v = b->create<ConstantIndexOp>(loc, cst.v).getResult();
+ v = b.create<ConstantIndexOp>(loc, cst.v).getResult();
t = v->getType();
}
@@ -92,7 +91,6 @@ ValueHandle &mlir::edsc::ValueHandle::operator=(const ValueHandle &other) {
ValueHandle
mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map,
ArrayRef<Value *> operands) {
- assert(ScopedContext::getBuilder() && "Unexpected null builder");
Operation *op =
makeComposedAffineApply(ScopedContext::getBuilder(),
ScopedContext::getLocation(), map, operands)
@@ -127,18 +125,18 @@ OperationHandle OperationHandle::create(StringRef name,
for (const auto &attr : attributes) {
state.addAttribute(attr.first, attr.second);
}
- return OperationHandle(ScopedContext::getBuilder()->createOperation(state));
+ return OperationHandle(ScopedContext::getBuilder().createOperation(state));
}
BlockHandle mlir::edsc::BlockHandle::create(ArrayRef<Type> argTypes) {
- auto *currentB = ScopedContext::getBuilder();
- auto *ib = currentB->getInsertionBlock();
- auto ip = currentB->getInsertionPoint();
+ auto &currentB = ScopedContext::getBuilder();
+ auto *ib = currentB.getInsertionBlock();
+ auto ip = currentB.getInsertionPoint();
BlockHandle res;
- res.block = ScopedContext::getBuilder()->createBlock();
+ res.block = ScopedContext::getBuilder().createBlock();
// createBlock sets the insertion point inside the block.
// We do not want this behavior when using declarative builders with nesting.
- currentB->setInsertionPoint(ib, ip);
+ currentB.setInsertionPoint(ib, ip);
for (auto t : argTypes) {
res.block->addArgument(t);
}
@@ -175,8 +173,8 @@ mlir::edsc::LoopBuilder::LoopBuilder(ValueHandle *iv,
SmallVector<Value *, 4> lbs(lbHandles.begin(), lbHandles.end());
SmallVector<Value *, 4> ubs(ubHandles.begin(), ubHandles.end());
*iv = ValueHandle::create<AffineForOp>(
- lbs, ScopedContext::getBuilder()->getMultiDimIdentityMap(lbs.size()),
- ubs, ScopedContext::getBuilder()->getMultiDimIdentityMap(ubs.size()),
+ lbs, ScopedContext::getBuilder().getMultiDimIdentityMap(lbs.size()),
+ ubs, ScopedContext::getBuilder().getMultiDimIdentityMap(ubs.size()),
step);
}
auto *body = getForInductionVarOwner(iv->getValue()).getBody();
@@ -402,7 +400,7 @@ static ValueHandle createComparisonExpr(CmpIPredicate predicate,
assert((lhsType.isa<IndexType>() || lhsType.isa<IntegerType>()) &&
"only integer comparisons are supported");
- auto op = ScopedContext::getBuilder()->create<CmpIOp>(
+ auto op = ScopedContext::getBuilder().create<CmpIOp>(
ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
return ValueHandle(op.getResult());
}
diff --git a/mlir/lib/Linalg/Transforms/Fusion.cpp b/mlir/lib/Linalg/Transforms/Fusion.cpp
index 12bf536e6d4..ad5bf550b58 100644
--- a/mlir/lib/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Linalg/Transforms/Fusion.cpp
@@ -76,10 +76,10 @@ static llvm::cl::list<unsigned> clTileSizes(
// a subset of the original loop ranges of `op`.
// This is achieved by applying the `loopToOperandRangesMaps` permutation maps
// to the `loopRanges` in order to obtain view ranges.
-static LinalgOp cloneWithLoopRanges(OpBuilder *b, Location loc, LinalgOp op,
+static LinalgOp cloneWithLoopRanges(OpBuilder &b, Location loc, LinalgOp op,
ArrayRef<Value *> loopRanges,
OperationFolder &state) {
- ScopedContext scope(*b, loc);
+ ScopedContext scope(b, loc);
auto maps = loopToOperandRangesMaps(op);
SmallVector<Value *, 8> clonedViews;
@@ -105,7 +105,7 @@ static LinalgOp cloneWithLoopRanges(OpBuilder *b, Location loc, LinalgOp op,
// IR (i.e. if dim folds away and the ranges are the same).
clonedViews.push_back(slice(view, viewRanges));
}
- return op.create(*b, loc, clonedViews);
+ return op.create(b, loc, clonedViews);
}
struct ViewDimension {
@@ -192,7 +192,7 @@ static Optional<LinalgOp> fuse(Value *producedView, LinalgOp producer,
}
}
- return cloneWithLoopRanges(&b, loc, producer, loopRanges, state);
+ return cloneWithLoopRanges(b, loc, producer, loopRanges, state);
}
// Encode structural fusion safety preconditions.
diff --git a/mlir/lib/Linalg/Transforms/LowerToLoops.cpp b/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
index 5f5d12bb386..12bb07c5533 100644
--- a/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
+++ b/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
@@ -37,7 +37,7 @@ using namespace mlir::linalg;
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<Value *, 4> emitLoopRanges(OpBuilder *b, Location loc,
+static SmallVector<Value *, 4> emitLoopRanges(OpBuilder &b, Location loc,
AffineMap map,
ArrayRef<Value *> allViewSizes,
OperationFolder &state) {
@@ -46,9 +46,9 @@ static SmallVector<Value *, 4> emitLoopRanges(OpBuilder *b, Location loc,
// Create a new range with the applied tile sizes.
SmallVector<Value *, 4> res;
for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
- res.push_back(b->create<RangeOp>(
- loc, state.create<ConstantIndexOp>(*b, loc, 0), sizes[idx],
- state.create<ConstantIndexOp>(*b, loc, 1)));
+ res.push_back(b.create<RangeOp>(
+ loc, state.create<ConstantIndexOp>(b, loc, 0), sizes[idx],
+ state.create<ConstantIndexOp>(b, loc, 1)));
}
return res;
}
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 5718e8eeb15..0530a8147c9 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -63,7 +63,7 @@ static bool isZero(Value *v) {
// The returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created.
static SmallVector<Value *, 4>
-makeTiledLoopRanges(OpBuilder *b, Location loc, AffineMap map,
+makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
ArrayRef<Value *> allViewSizes,
ArrayRef<Value *> allTileSizes, OperationFolder &state) {
assert(allTileSizes.size() == map.getNumResults());
@@ -82,9 +82,9 @@ makeTiledLoopRanges(OpBuilder *b, Location loc, AffineMap map,
// Create a new range with the applied tile sizes.
SmallVector<Value *, 4> res;
for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx) {
- res.push_back(b->create<RangeOp>(loc,
- state.create<ConstantIndexOp>(*b, loc, 0),
- viewSizes[idx], tileSizes[idx]));
+ res.push_back(b.create<RangeOp>(loc,
+ state.create<ConstantIndexOp>(b, loc, 0),
+ viewSizes[idx], tileSizes[idx]));
}
return res;
}
@@ -133,7 +133,7 @@ static Value *foldRange(Value *view, unsigned dim) {
return nullptr;
}
-static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
+static SmallVector<Value *, 4> makeTiledViews(OpBuilder &b, Location loc,
LinalgOp linalgOp,
ArrayRef<Value *> ivs,
ArrayRef<Value *> tileSizes,
@@ -173,9 +173,9 @@ static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
auto *foldedRange = foldRange(view, r);
foldedRange ? newRanges.push_back(foldedRange)
: newRanges.push_back(
- range(state.create<ConstantIndexOp>(*b, loc, 0),
+ range(state.create<ConstantIndexOp>(b, loc, 0),
linalg::intrinsics::dim(view, r),
- state.create<ConstantIndexOp>(*b, loc, 1)));
+ state.create<ConstantIndexOp>(b, loc, 1)));
continue;
}
@@ -186,7 +186,7 @@ static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
[](Value *v) { return isZero(v); });
auto iv = ivs[pos - count];
- ScopedContext scope(*b, loc);
+ ScopedContext scope(b, loc);
// TODO(ntv): lb = iv is a poor man's folding of max(0, i) == i which is
// generally wrong but correct in the specific case of tiling linalg ops.
// Tie this loose end in the future.
@@ -198,10 +198,10 @@ static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
// Tiling creates a new slice at the proper index, the slice step is 1
// (i.e. the slice view does not subsample, stepping occurs in the loop).
newRanges.push_back(
- range(lb, ub, state.create<ConstantIndexOp>(*b, loc, 1)));
+ range(lb, ub, state.create<ConstantIndexOp>(b, loc, 1)));
}
// res.push_back(createOrReturnView(b, loc, viewDefiningOp, newRanges));
- res.push_back(b->create<SliceOp>(loc, view, newRanges));
+ res.push_back(b.create<SliceOp>(loc, view, newRanges));
}
return res;
}
@@ -230,14 +230,14 @@ mlir::linalg::tileLinalgOp(LinalgOp op, ArrayRef<Value *> tileSizes,
SmallVector<IndexHandle, 4> ivs(loopRanges.size());
auto pivs = IndexHandle::makeIndexHandlePointers(ivs);
LoopNestRangeBuilder(pivs, loopRanges)([&op, &tileSizes, &ivs, &res, &state] {
- auto *b = ScopedContext::getBuilder();
+ auto b = ScopedContext::getBuilder();
auto loc = ScopedContext::getLocation();
SmallVector<Value *, 4> ivValues(ivs.begin(), ivs.end());
// If/when the assertion below becomes false, we will have to templatize
// `makeTiledViews`.
assert(op.getNumInputsAndOutputs() == op.getOperation()->getNumOperands());
auto views = makeTiledViews(b, loc, op, ivValues, tileSizes, state);
- res = op.create(*b, loc, views);
+ res = op.create(b, loc, views);
});
SmallVector<ForOp, 8> loops;
diff --git a/mlir/lib/Linalg/Utils/Utils.cpp b/mlir/lib/Linalg/Utils/Utils.cpp
index 8a210082522..f00e7d8206c 100644
--- a/mlir/lib/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Linalg/Utils/Utils.cpp
@@ -95,16 +95,16 @@ SmallVector<Value *, 8> mlir::linalg::getViewSizes(LinalgOp &linalgOp) {
return res;
}
-static Value *emitOrFoldComposedAffineApply(OpBuilder *b, Location loc,
+static Value *emitOrFoldComposedAffineApply(OpBuilder &b, Location loc,
AffineMap map,
ArrayRef<Value *> operandsRef,
OperationFolder &state) {
SmallVector<Value *, 4> operands(operandsRef.begin(), operandsRef.end());
fullyComposeAffineMapAndOperands(&map, &operands);
- return state.create<AffineApplyOp>(*b, loc, map, operands);
+ return state.create<AffineApplyOp>(b, loc, map, operands);
}
-SmallVector<Value *, 4> mlir::linalg::applyMapToValues(OpBuilder *b,
+SmallVector<Value *, 4> mlir::linalg::applyMapToValues(OpBuilder &b,
Location loc,
AffineMap map,
ArrayRef<Value *> values,
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 7c745aa1a0b..5a926ceaa92 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -243,7 +243,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
OpBuilder prologue(block, begin);
// DMAs for write regions are going to be inserted just after the for loop.
OpBuilder epilogue(block, end);
- OpBuilder *b = region.isWrite() ? &epilogue : &prologue;
+ OpBuilder &b = region.isWrite() ? epilogue : prologue;
// Builder to create constants at the top level.
auto *func = block->getFunction();
@@ -327,7 +327,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
// corresponding dimension on the memory region (stored in 'offset').
auto map = top.getAffineMap(
cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset);
- memIndices.push_back(b->create<AffineApplyOp>(loc, map, regionSymbols));
+ memIndices.push_back(b.create<AffineApplyOp>(loc, map, regionSymbols));
}
// The fast buffer is DMAed into at location zero; addressing is relative.
bufIndices.push_back(zeroIndex);
@@ -395,21 +395,21 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
if (!region.isWrite()) {
// DMA non-blocking read from original buffer to fast buffer.
- b->create<DmaStartOp>(loc, memref, memIndices, fastMemRef, bufIndices,
- numElementsSSA, tagMemRef, zeroIndex, stride,
- numEltPerStride);
+ b.create<DmaStartOp>(loc, memref, memIndices, fastMemRef, bufIndices,
+ numElementsSSA, tagMemRef, zeroIndex, stride,
+ numEltPerStride);
} else {
// DMA non-blocking write from fast buffer to the original memref.
- auto op = b->create<DmaStartOp>(loc, fastMemRef, bufIndices, memref,
- memIndices, numElementsSSA, tagMemRef,
- zeroIndex, stride, numEltPerStride);
+ auto op = b.create<DmaStartOp>(loc, fastMemRef, bufIndices, memref,
+ memIndices, numElementsSSA, tagMemRef,
+ zeroIndex, stride, numEltPerStride);
// Since new ops are being appended (for outgoing DMAs), adjust the end to
// mark end of range of the original.
*nEnd = Block::iterator(op.getOperation());
}
// Matching DMA wait to block on completion; tag always has a 0 index.
- b->create<DmaWaitOp>(loc, tagMemRef, zeroIndex, numElementsSSA);
+ b.create<DmaWaitOp>(loc, tagMemRef, zeroIndex, numElementsSSA);
// Generate dealloc for the tag.
auto tagDeallocOp = epilogue.create<DeallocOp>(loc, tagMemRef);
@@ -435,10 +435,10 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
// The starting operands of indexRemap will be regionSymbols (the symbols on
// which the memref region is parametric); then those corresponding to
// the memref's original indices follow.
- auto dimExpr = b->getAffineDimExpr(regionSymbols.size() + i);
+ auto dimExpr = b.getAffineDimExpr(regionSymbols.size() + i);
remapExprs.push_back(dimExpr - offsets[i]);
}
- auto indexRemap = b->getAffineMap(regionSymbols.size() + rank, 0, remapExprs);
+ auto indexRemap = b.getAffineMap(regionSymbols.size() + rank, 0, remapExprs);
// Record the begin since it may be invalidated by memref replacement.
Block::iterator prev;
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 409eb397df4..7650db1ce27 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -192,7 +192,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollJamFactor, &cleanupMap,
- &cleanupOperands, &builder);
+ &cleanupOperands, builder);
cleanupAffineForOp.setLowerBound(cleanupOperands, cleanupMap);
// Promote the cleanup loop if it has turned into a single iteration loop.
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 0560002724d..6c93e2b562a 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -44,9 +44,9 @@ class AffineApplyExpander
public:
// This internal class expects arguments to be non-null, checks must be
// performed at the call site.
- AffineApplyExpander(OpBuilder *builder, ArrayRef<Value *> dimValues,
+ AffineApplyExpander(OpBuilder &builder, ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues, Location loc)
- : builder(*builder), dimValues(dimValues), symbolValues(symbolValues),
+ : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
loc(loc) {}
template <typename OpTy> Value *buildBinaryExpr(AffineBinaryOpExpr expr) {
@@ -223,19 +223,18 @@ mlir::Value *mlir::expandAffineExpr(OpBuilder &builder, Location loc,
AffineExpr expr,
ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues) {
- return AffineApplyExpander(&builder, dimValues, symbolValues, loc)
- .visit(expr);
+ return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
}
// Create a sequence of operations that implement the `affineMap` applied to
// the given `operands` (as it it were an AffineApplyOp).
Optional<SmallVector<Value *, 8>> static expandAffineMap(
- OpBuilder *builder, Location loc, AffineMap affineMap,
+ OpBuilder &builder, Location loc, AffineMap affineMap,
ArrayRef<Value *> operands) {
auto numDims = affineMap.getNumDims();
auto expanded = functional::map(
- [numDims, builder, loc, operands](AffineExpr expr) {
- return expandAffineExpr(*builder, loc, expr,
+ [numDims, &builder, loc, operands](AffineExpr expr) {
+ return expandAffineExpr(builder, loc, expr,
operands.take_front(numDims),
operands.drop_front(numDims));
},
@@ -276,7 +275,7 @@ static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
// the results.
Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
SmallVector<Value *, 8> boundOperands(op.getLowerBoundOperands());
- auto lbValues = expandAffineMap(&builder, op.getLoc(), op.getLowerBoundMap(),
+ auto lbValues = expandAffineMap(builder, op.getLoc(), op.getLowerBoundMap(),
boundOperands);
if (!lbValues)
return nullptr;
@@ -289,7 +288,7 @@ Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
// the results.
Value *mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) {
SmallVector<Value *, 8> boundOperands(op.getUpperBoundOperands());
- auto ubValues = expandAffineMap(&builder, op.getLoc(), op.getUpperBoundMap(),
+ auto ubValues = expandAffineMap(builder, op.getLoc(), op.getUpperBoundMap(),
boundOperands);
if (!ubValues)
return nullptr;
@@ -597,7 +596,7 @@ public:
PatternRewriter &rewriter) const override {
auto affineApplyOp = cast<AffineApplyOp>(op);
auto maybeExpandedMap = expandAffineMap(
- &rewriter, op->getLoc(), affineApplyOp.getAffineMap(), operands);
+ rewriter, op->getLoc(), affineApplyOp.getAffineMap(), operands);
if (!maybeExpandedMap)
return matchFailure();
rewriter.replaceOp(op, *maybeExpandedMap);
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 2204c42dec1..842feef8e8b 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -238,7 +238,7 @@ static SmallVector<unsigned, 8> delinearize(unsigned linearIndex,
return res;
}
-static Operation *instantiate(OpBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap);
@@ -258,7 +258,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
auto *opInst = v->getDefiningOp();
if (isa<ConstantOp>(opInst)) {
OpBuilder b(opInst);
- auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
+ auto *op = instantiate(b, opInst, hwVectorType, substitutionsMap);
auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
assert(res.second && "Insertion failed");
return res.first->second;
@@ -331,7 +331,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
/// TODO(ntv): these implementation details should be captured in a
/// vectorization trait at the op level directly.
static SmallVector<mlir::Value *, 8>
-reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
+reindexAffineIndices(OpBuilder b, VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
ArrayRef<Value *> memrefIndices) {
auto vectorShape = hwVectorType.getShape();
@@ -347,14 +347,14 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
// The first numMemRefIndices correspond to AffineForOp that have not been
// vectorized, the transformation is the identity on those.
for (i = 0; i < numMemRefIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
affineExprs.push_back(d_i);
}
// The next numVectorIndices correspond to super-vector dimensions that
// do not have a hardware vector dimension counterpart. For those we only
// need to increment the index by the corresponding hwVectorInstance.
for (i = numMemRefIndices; i < numMemRefIndices + numVectorIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
auto offset = hwVectorInstance[i - numMemRefIndices];
affineExprs.push_back(d_i + offset);
}
@@ -363,7 +363,7 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
// index by "hwVectorInstance" multiples of the corresponding hardware
// vector size.
for (; i < numIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
auto offset = hwVectorInstance[i - numMemRefIndices];
auto stride = vectorShape[i - numMemRefIndices - numVectorIndices];
affineExprs.push_back(d_i + offset * stride);
@@ -374,7 +374,7 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
res.reserve(affineExprs.size());
for (auto expr : affineExprs) {
auto map = AffineMap::get(numIndices, 0, expr);
- res.push_back(makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(),
+ res.push_back(makeComposedAffineApply(b, b.getInsertionPoint()->getLoc(),
map, memrefIndices));
}
return res;
@@ -404,7 +404,7 @@ materializeAttributes(Operation *opInst, VectorType hwVectorType) {
/// substitutionsMap.
///
/// If the underlying substitution fails, this fails too and returns nullptr.
-static Operation *instantiate(OpBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap) {
assert(!isa<VectorTransferReadOp>(opInst) &&
@@ -428,10 +428,10 @@ static Operation *instantiate(OpBuilder *b, Operation *opInst,
auto attrs = materializeAttributes(opInst, hwVectorType);
- OperationState state(b->getContext(), opInst->getLoc(),
+ OperationState state(b.getContext(), opInst->getLoc(),
opInst->getName().getStringRef(), operands,
{hwVectorType}, attrs);
- return b->createOperation(state);
+ return b.createOperation(state);
}
/// Computes the permutationMap required for a VectorTransferOp from the memref
@@ -481,7 +481,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy transfer,
/// `hwVectorType` int the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
+static Operation *instantiate(OpBuilder b, VectorTransferReadOp read,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -493,9 +493,9 @@ static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
if (!map) {
return nullptr;
}
- auto cloned = b->create<VectorTransferReadOp>(read.getLoc(), hwVectorType,
- read.getMemRef(), affineIndices,
- map, read.getPaddingValue());
+ auto cloned = b.create<VectorTransferReadOp>(read.getLoc(), hwVectorType,
+ read.getMemRef(), affineIndices,
+ map, read.getPaddingValue());
return cloned.getOperation();
}
@@ -505,7 +505,7 @@ static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
/// `hwVectorType` int the covering of th3e super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
+static Operation *instantiate(OpBuilder b, VectorTransferWriteOp write,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -513,7 +513,7 @@ static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
map(makePtrDynCaster<Value>(), write.getIndices());
auto affineIndices =
reindexAffineIndices(b, hwVectorType, hwVectorInstance, indices);
- auto cloned = b->create<VectorTransferWriteOp>(
+ auto cloned = b.create<VectorTransferWriteOp>(
write.getLoc(),
substitute(write.getVector(), hwVectorType, substitutionsMap),
write.getMemRef(), affineIndices,
@@ -557,12 +557,12 @@ static bool instantiateMaterialization(Operation *op,
return op->emitError("NYI path Op with region"), true;
if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
- auto *clone = instantiate(&b, write, state->hwVectorType,
+ auto *clone = instantiate(b, write, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
return clone == nullptr;
}
if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
- auto *clone = instantiate(&b, read, state->hwVectorType,
+ auto *clone = instantiate(b, read, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
if (!clone) {
return true;
@@ -581,7 +581,7 @@ static bool instantiateMaterialization(Operation *op,
return op->emitError("Op does not return a supervector."), true;
}
auto *clone =
- instantiate(&b, op, state->hwVectorType, state->substitutionsMap);
+ instantiate(b, op, state->hwVectorType, state->substitutionsMap);
if (!clone) {
return true;
}
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 23375e7b472..728123f71a5 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -46,7 +46,7 @@ using namespace mlir;
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
- OpBuilder *b) {
+ OpBuilder &b) {
auto lbMap = forOp.getLowerBoundMap();
// Single result lower bound map only.
@@ -68,7 +68,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
unsigned step = forOp.getStep();
SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- auto lb = b->create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
+ auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
// For each upper bound expr, get the range.
// Eg: affine.for %i = lb to min (ub1, ub2),
@@ -80,20 +80,20 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
auto tripCountExpr = tripCountMap.getResult(i);
bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
- auto bumpMap = b->getAffineMap(tripCountMap.getNumDims(),
- tripCountMap.getNumSymbols(), bumpExprs[i]);
+ auto bumpMap = b.getAffineMap(tripCountMap.getNumDims(),
+ tripCountMap.getNumSymbols(), bumpExprs[i]);
bumpValues[i] =
- b->create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
+ b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
}
SmallVector<AffineExpr, 4> newUbExprs(tripCountMap.getNumResults());
for (unsigned i = 0, e = bumpExprs.size(); i < e; i++)
- newUbExprs[i] = b->getAffineDimExpr(0) + b->getAffineDimExpr(i + 1);
+ newUbExprs[i] = b.getAffineDimExpr(0) + b.getAffineDimExpr(i + 1);
operands->clear();
operands->push_back(lb);
operands->append(bumpValues.begin(), bumpValues.end());
- *map = b->getAffineMap(1 + tripCountMap.getNumResults(), 0, newUbExprs);
+ *map = b.getAffineMap(1 + tripCountMap.getNumResults(), 0, newUbExprs);
// Simplify the map + operands.
fullyComposeAffineMapAndOperands(map, operands);
*map = simplifyAffineMap(*map);
@@ -172,7 +172,7 @@ static AffineForOp
generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
- unsigned offset, AffineForOp srcForInst, OpBuilder *b) {
+ unsigned offset, AffineForOp srcForInst, OpBuilder b) {
SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
@@ -180,8 +180,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
assert(ubMap.getNumInputs() == ubOperands.size());
auto loopChunk =
- b->create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
- ubMap, srcForInst.getStep());
+ b.create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
+ ubMap, srcForInst.getStep());
auto *loopChunkIV = loopChunk.getInductionVar();
auto *srcIV = srcForInst.getInductionVar();
@@ -306,14 +306,14 @@ LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
res = generateLoop(
b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, lbShift + tripCount * step),
- instGroupQueue, 0, forOp, &b);
+ instGroupQueue, 0, forOp, b);
// Entire loop for the queued op groups generated, empty it.
instGroupQueue.clear();
lbShift += tripCount * step;
} else {
res = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, d), instGroupQueue,
- 0, forOp, &b);
+ 0, forOp, b);
lbShift = d * step;
}
if (!prologue && res)
@@ -333,7 +333,7 @@ LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
uint64_t ubShift = (instGroupQueue[i].first + tripCount) * step;
epilogue = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, ubShift),
- instGroupQueue, i, forOp, &b);
+ instGroupQueue, i, forOp, b);
lbShift = ubShift;
if (!prologue)
prologue = epilogue;
@@ -428,7 +428,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands,
- &builder);
+ builder);
assert(cleanupMap &&
"cleanup loop lower bound map for single result lower bound maps "
"can always be determined");
@@ -646,13 +646,13 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
// ...
// }
// ```
-static void augmentMapAndBounds(OpBuilder *b, Value *iv, AffineMap *map,
+static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
SmallVector<Value *, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
- bounds.push_back(b->getAffineDimExpr(map->getNumDims()) + offset);
+ bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
operands->insert(operands->begin() + map->getNumDims(), iv);
- *map = b->getAffineMap(map->getNumDims() + 1, map->getNumSymbols(), bounds);
+ *map = b.getAffineMap(map->getNumDims() + 1, map->getNumSymbols(), bounds);
canonicalizeMapAndOperands(map, operands);
}
@@ -708,12 +708,12 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- augmentMapAndBounds(&b, forOp.getInductionVar(), &lbMap, &lbOperands);
+ augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);
// Upper-bound map creation.
auto ubMap = forOp.getUpperBoundMap();
SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
- augmentMapAndBounds(&b, forOp.getInductionVar(), &ubMap, &ubOperands,
+ augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
/*offset=*/scaledStep);
SmallVector<AffineForOp, 8> innerLoops;
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 9220b7bb751..6636453c5e9 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -242,7 +242,7 @@ void VectorizerTestPass::testNormalizeMaps() {
auto app = cast<AffineApplyOp>(m.getMatchedOperation());
OpBuilder b(m.getMatchedOperation());
SmallVector<Value *, 8> operands(app.getOperands());
- makeComposedAffineApply(&b, app.getLoc(), app.getAffineMap(), operands);
+ makeComposedAffineApply(b, app.getLoc(), app.getAffineMap(), operands);
}
}
// We should now be able to erase everything in reverse order in this test.