summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Transforms
diff options
context:
space:
mode:
authorNicolas Vasilache <ntv@google.com>2019-06-20 15:10:35 -0700
committerjpienaar <jpienaar@google.com>2019-06-22 09:14:49 -0700
commit0804750c9b529f93764a0fcbb3557064dfeda72a (patch)
treec918f92bd227180fb1d4a79af603da651d4dcc21 /mlir/lib/Transforms
parente19a0857a2925fc9aada9293206f7e9ad93ad8c5 (diff)
downloadbcm5719-llvm-0804750c9b529f93764a0fcbb3557064dfeda72a.tar.gz
bcm5719-llvm-0804750c9b529f93764a0fcbb3557064dfeda72a.zip
Uniformize usage of OpBuilder& (NFC)
Historically the pointer-based version of builders was used. This CL uniformizes to OpBuilder & PiperOrigin-RevId: 254280885
Diffstat (limited to 'mlir/lib/Transforms')
-rw-r--r--mlir/lib/Transforms/DmaGeneration.cpp22
-rw-r--r--mlir/lib/Transforms/LoopUnrollAndJam.cpp2
-rw-r--r--mlir/lib/Transforms/LowerAffine.cpp19
-rw-r--r--mlir/lib/Transforms/MaterializeVectors.cpp38
-rw-r--r--mlir/lib/Transforms/Utils/LoopUtils.cpp38
-rw-r--r--mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp2
6 files changed, 60 insertions, 61 deletions
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 7c745aa1a0b..5a926ceaa92 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -243,7 +243,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
OpBuilder prologue(block, begin);
// DMAs for write regions are going to be inserted just after the for loop.
OpBuilder epilogue(block, end);
- OpBuilder *b = region.isWrite() ? &epilogue : &prologue;
+ OpBuilder &b = region.isWrite() ? epilogue : prologue;
// Builder to create constants at the top level.
auto *func = block->getFunction();
@@ -327,7 +327,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
// corresponding dimension on the memory region (stored in 'offset').
auto map = top.getAffineMap(
cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset);
- memIndices.push_back(b->create<AffineApplyOp>(loc, map, regionSymbols));
+ memIndices.push_back(b.create<AffineApplyOp>(loc, map, regionSymbols));
}
// The fast buffer is DMAed into at location zero; addressing is relative.
bufIndices.push_back(zeroIndex);
@@ -395,21 +395,21 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
if (!region.isWrite()) {
// DMA non-blocking read from original buffer to fast buffer.
- b->create<DmaStartOp>(loc, memref, memIndices, fastMemRef, bufIndices,
- numElementsSSA, tagMemRef, zeroIndex, stride,
- numEltPerStride);
+ b.create<DmaStartOp>(loc, memref, memIndices, fastMemRef, bufIndices,
+ numElementsSSA, tagMemRef, zeroIndex, stride,
+ numEltPerStride);
} else {
// DMA non-blocking write from fast buffer to the original memref.
- auto op = b->create<DmaStartOp>(loc, fastMemRef, bufIndices, memref,
- memIndices, numElementsSSA, tagMemRef,
- zeroIndex, stride, numEltPerStride);
+ auto op = b.create<DmaStartOp>(loc, fastMemRef, bufIndices, memref,
+ memIndices, numElementsSSA, tagMemRef,
+ zeroIndex, stride, numEltPerStride);
// Since new ops are being appended (for outgoing DMAs), adjust the end to
// mark end of range of the original.
*nEnd = Block::iterator(op.getOperation());
}
// Matching DMA wait to block on completion; tag always has a 0 index.
- b->create<DmaWaitOp>(loc, tagMemRef, zeroIndex, numElementsSSA);
+ b.create<DmaWaitOp>(loc, tagMemRef, zeroIndex, numElementsSSA);
// Generate dealloc for the tag.
auto tagDeallocOp = epilogue.create<DeallocOp>(loc, tagMemRef);
@@ -435,10 +435,10 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
// The starting operands of indexRemap will be regionSymbols (the symbols on
// which the memref region is parametric); then those corresponding to
// the memref's original indices follow.
- auto dimExpr = b->getAffineDimExpr(regionSymbols.size() + i);
+ auto dimExpr = b.getAffineDimExpr(regionSymbols.size() + i);
remapExprs.push_back(dimExpr - offsets[i]);
}
- auto indexRemap = b->getAffineMap(regionSymbols.size() + rank, 0, remapExprs);
+ auto indexRemap = b.getAffineMap(regionSymbols.size() + rank, 0, remapExprs);
// Record the begin since it may be invalidated by memref replacement.
Block::iterator prev;
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 409eb397df4..7650db1ce27 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -192,7 +192,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollJamFactor, &cleanupMap,
- &cleanupOperands, &builder);
+ &cleanupOperands, builder);
cleanupAffineForOp.setLowerBound(cleanupOperands, cleanupMap);
// Promote the cleanup loop if it has turned into a single iteration loop.
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 0560002724d..6c93e2b562a 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -44,9 +44,9 @@ class AffineApplyExpander
public:
// This internal class expects arguments to be non-null, checks must be
// performed at the call site.
- AffineApplyExpander(OpBuilder *builder, ArrayRef<Value *> dimValues,
+ AffineApplyExpander(OpBuilder &builder, ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues, Location loc)
- : builder(*builder), dimValues(dimValues), symbolValues(symbolValues),
+ : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
loc(loc) {}
template <typename OpTy> Value *buildBinaryExpr(AffineBinaryOpExpr expr) {
@@ -223,19 +223,18 @@ mlir::Value *mlir::expandAffineExpr(OpBuilder &builder, Location loc,
AffineExpr expr,
ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues) {
- return AffineApplyExpander(&builder, dimValues, symbolValues, loc)
- .visit(expr);
+ return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
}
// Create a sequence of operations that implement the `affineMap` applied to
// the given `operands` (as if it were an AffineApplyOp).
Optional<SmallVector<Value *, 8>> static expandAffineMap(
- OpBuilder *builder, Location loc, AffineMap affineMap,
+ OpBuilder &builder, Location loc, AffineMap affineMap,
ArrayRef<Value *> operands) {
auto numDims = affineMap.getNumDims();
auto expanded = functional::map(
- [numDims, builder, loc, operands](AffineExpr expr) {
- return expandAffineExpr(*builder, loc, expr,
+ [numDims, &builder, loc, operands](AffineExpr expr) {
+ return expandAffineExpr(builder, loc, expr,
operands.take_front(numDims),
operands.drop_front(numDims));
},
@@ -276,7 +275,7 @@ static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
// the results.
Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
SmallVector<Value *, 8> boundOperands(op.getLowerBoundOperands());
- auto lbValues = expandAffineMap(&builder, op.getLoc(), op.getLowerBoundMap(),
+ auto lbValues = expandAffineMap(builder, op.getLoc(), op.getLowerBoundMap(),
boundOperands);
if (!lbValues)
return nullptr;
@@ -289,7 +288,7 @@ Value *mlir::lowerAffineLowerBound(AffineForOp op, OpBuilder &builder) {
// the results.
Value *mlir::lowerAffineUpperBound(AffineForOp op, OpBuilder &builder) {
SmallVector<Value *, 8> boundOperands(op.getUpperBoundOperands());
- auto ubValues = expandAffineMap(&builder, op.getLoc(), op.getUpperBoundMap(),
+ auto ubValues = expandAffineMap(builder, op.getLoc(), op.getUpperBoundMap(),
boundOperands);
if (!ubValues)
return nullptr;
@@ -597,7 +596,7 @@ public:
PatternRewriter &rewriter) const override {
auto affineApplyOp = cast<AffineApplyOp>(op);
auto maybeExpandedMap = expandAffineMap(
- &rewriter, op->getLoc(), affineApplyOp.getAffineMap(), operands);
+ rewriter, op->getLoc(), affineApplyOp.getAffineMap(), operands);
if (!maybeExpandedMap)
return matchFailure();
rewriter.replaceOp(op, *maybeExpandedMap);
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 2204c42dec1..842feef8e8b 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -238,7 +238,7 @@ static SmallVector<unsigned, 8> delinearize(unsigned linearIndex,
return res;
}
-static Operation *instantiate(OpBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap);
@@ -258,7 +258,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
auto *opInst = v->getDefiningOp();
if (isa<ConstantOp>(opInst)) {
OpBuilder b(opInst);
- auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
+ auto *op = instantiate(b, opInst, hwVectorType, substitutionsMap);
auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
assert(res.second && "Insertion failed");
return res.first->second;
@@ -331,7 +331,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
/// TODO(ntv): these implementation details should be captured in a
/// vectorization trait at the op level directly.
static SmallVector<mlir::Value *, 8>
-reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
+reindexAffineIndices(OpBuilder b, VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
ArrayRef<Value *> memrefIndices) {
auto vectorShape = hwVectorType.getShape();
@@ -347,14 +347,14 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
// The first numMemRefIndices correspond to AffineForOp that have not been
// vectorized, the transformation is the identity on those.
for (i = 0; i < numMemRefIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
affineExprs.push_back(d_i);
}
// The next numVectorIndices correspond to super-vector dimensions that
// do not have a hardware vector dimension counterpart. For those we only
// need to increment the index by the corresponding hwVectorInstance.
for (i = numMemRefIndices; i < numMemRefIndices + numVectorIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
auto offset = hwVectorInstance[i - numMemRefIndices];
affineExprs.push_back(d_i + offset);
}
@@ -363,7 +363,7 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
// index by "hwVectorInstance" multiples of the corresponding hardware
// vector size.
for (; i < numIndices; ++i) {
- auto d_i = b->getAffineDimExpr(i);
+ auto d_i = b.getAffineDimExpr(i);
auto offset = hwVectorInstance[i - numMemRefIndices];
auto stride = vectorShape[i - numMemRefIndices - numVectorIndices];
affineExprs.push_back(d_i + offset * stride);
@@ -374,7 +374,7 @@ reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
res.reserve(affineExprs.size());
for (auto expr : affineExprs) {
auto map = AffineMap::get(numIndices, 0, expr);
- res.push_back(makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(),
+ res.push_back(makeComposedAffineApply(b, b.getInsertionPoint()->getLoc(),
map, memrefIndices));
}
return res;
@@ -404,7 +404,7 @@ materializeAttributes(Operation *opInst, VectorType hwVectorType) {
/// substitutionsMap.
///
/// If the underlying substitution fails, this fails too and returns nullptr.
-static Operation *instantiate(OpBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap) {
assert(!isa<VectorTransferReadOp>(opInst) &&
@@ -428,10 +428,10 @@ static Operation *instantiate(OpBuilder *b, Operation *opInst,
auto attrs = materializeAttributes(opInst, hwVectorType);
- OperationState state(b->getContext(), opInst->getLoc(),
+ OperationState state(b.getContext(), opInst->getLoc(),
opInst->getName().getStringRef(), operands,
{hwVectorType}, attrs);
- return b->createOperation(state);
+ return b.createOperation(state);
}
/// Computes the permutationMap required for a VectorTransferOp from the memref
@@ -481,7 +481,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy transfer,
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
+static Operation *instantiate(OpBuilder b, VectorTransferReadOp read,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -493,9 +493,9 @@ static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
if (!map) {
return nullptr;
}
- auto cloned = b->create<VectorTransferReadOp>(read.getLoc(), hwVectorType,
- read.getMemRef(), affineIndices,
- map, read.getPaddingValue());
+ auto cloned = b.create<VectorTransferReadOp>(read.getLoc(), hwVectorType,
+ read.getMemRef(), affineIndices,
+ map, read.getPaddingValue());
return cloned.getOperation();
}
@@ -505,7 +505,7 @@ static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
+static Operation *instantiate(OpBuilder b, VectorTransferWriteOp write,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -513,7 +513,7 @@ static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
map(makePtrDynCaster<Value>(), write.getIndices());
auto affineIndices =
reindexAffineIndices(b, hwVectorType, hwVectorInstance, indices);
- auto cloned = b->create<VectorTransferWriteOp>(
+ auto cloned = b.create<VectorTransferWriteOp>(
write.getLoc(),
substitute(write.getVector(), hwVectorType, substitutionsMap),
write.getMemRef(), affineIndices,
@@ -557,12 +557,12 @@ static bool instantiateMaterialization(Operation *op,
return op->emitError("NYI path Op with region"), true;
if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
- auto *clone = instantiate(&b, write, state->hwVectorType,
+ auto *clone = instantiate(b, write, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
return clone == nullptr;
}
if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
- auto *clone = instantiate(&b, read, state->hwVectorType,
+ auto *clone = instantiate(b, read, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
if (!clone) {
return true;
@@ -581,7 +581,7 @@ static bool instantiateMaterialization(Operation *op,
return op->emitError("Op does not return a supervector."), true;
}
auto *clone =
- instantiate(&b, op, state->hwVectorType, state->substitutionsMap);
+ instantiate(b, op, state->hwVectorType, state->substitutionsMap);
if (!clone) {
return true;
}
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 23375e7b472..728123f71a5 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -46,7 +46,7 @@ using namespace mlir;
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
- OpBuilder *b) {
+ OpBuilder &b) {
auto lbMap = forOp.getLowerBoundMap();
// Single result lower bound map only.
@@ -68,7 +68,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
unsigned step = forOp.getStep();
SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- auto lb = b->create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
+ auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
// For each upper bound expr, get the range.
// Eg: affine.for %i = lb to min (ub1, ub2),
@@ -80,20 +80,20 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
auto tripCountExpr = tripCountMap.getResult(i);
bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
- auto bumpMap = b->getAffineMap(tripCountMap.getNumDims(),
- tripCountMap.getNumSymbols(), bumpExprs[i]);
+ auto bumpMap = b.getAffineMap(tripCountMap.getNumDims(),
+ tripCountMap.getNumSymbols(), bumpExprs[i]);
bumpValues[i] =
- b->create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
+ b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
}
SmallVector<AffineExpr, 4> newUbExprs(tripCountMap.getNumResults());
for (unsigned i = 0, e = bumpExprs.size(); i < e; i++)
- newUbExprs[i] = b->getAffineDimExpr(0) + b->getAffineDimExpr(i + 1);
+ newUbExprs[i] = b.getAffineDimExpr(0) + b.getAffineDimExpr(i + 1);
operands->clear();
operands->push_back(lb);
operands->append(bumpValues.begin(), bumpValues.end());
- *map = b->getAffineMap(1 + tripCountMap.getNumResults(), 0, newUbExprs);
+ *map = b.getAffineMap(1 + tripCountMap.getNumResults(), 0, newUbExprs);
// Simplify the map + operands.
fullyComposeAffineMapAndOperands(map, operands);
*map = simplifyAffineMap(*map);
@@ -172,7 +172,7 @@ static AffineForOp
generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
- unsigned offset, AffineForOp srcForInst, OpBuilder *b) {
+ unsigned offset, AffineForOp srcForInst, OpBuilder b) {
SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
@@ -180,8 +180,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
assert(ubMap.getNumInputs() == ubOperands.size());
auto loopChunk =
- b->create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
- ubMap, srcForInst.getStep());
+ b.create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
+ ubMap, srcForInst.getStep());
auto *loopChunkIV = loopChunk.getInductionVar();
auto *srcIV = srcForInst.getInductionVar();
@@ -306,14 +306,14 @@ LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
res = generateLoop(
b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, lbShift + tripCount * step),
- instGroupQueue, 0, forOp, &b);
+ instGroupQueue, 0, forOp, b);
// Entire loop for the queued op groups generated, empty it.
instGroupQueue.clear();
lbShift += tripCount * step;
} else {
res = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, d), instGroupQueue,
- 0, forOp, &b);
+ 0, forOp, b);
lbShift = d * step;
}
if (!prologue && res)
@@ -333,7 +333,7 @@ LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
uint64_t ubShift = (instGroupQueue[i].first + tripCount) * step;
epilogue = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift),
b.getShiftedAffineMap(origLbMap, ubShift),
- instGroupQueue, i, forOp, &b);
+ instGroupQueue, i, forOp, b);
lbShift = ubShift;
if (!prologue)
prologue = epilogue;
@@ -428,7 +428,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands,
- &builder);
+ builder);
assert(cleanupMap &&
"cleanup loop lower bound map for single result lower bound maps "
"can always be determined");
@@ -646,13 +646,13 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
// ...
// }
// ```
-static void augmentMapAndBounds(OpBuilder *b, Value *iv, AffineMap *map,
+static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
SmallVector<Value *, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
- bounds.push_back(b->getAffineDimExpr(map->getNumDims()) + offset);
+ bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
operands->insert(operands->begin() + map->getNumDims(), iv);
- *map = b->getAffineMap(map->getNumDims() + 1, map->getNumSymbols(), bounds);
+ *map = b.getAffineMap(map->getNumDims() + 1, map->getNumSymbols(), bounds);
canonicalizeMapAndOperands(map, operands);
}
@@ -708,12 +708,12 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
- augmentMapAndBounds(&b, forOp.getInductionVar(), &lbMap, &lbOperands);
+ augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);
// Upper-bound map creation.
auto ubMap = forOp.getUpperBoundMap();
SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
- augmentMapAndBounds(&b, forOp.getInductionVar(), &ubMap, &ubOperands,
+ augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
/*offset=*/scaledStep);
SmallVector<AffineForOp, 8> innerLoops;
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 9220b7bb751..6636453c5e9 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -242,7 +242,7 @@ void VectorizerTestPass::testNormalizeMaps() {
auto app = cast<AffineApplyOp>(m.getMatchedOperation());
OpBuilder b(m.getMatchedOperation());
SmallVector<Value *, 8> operands(app.getOperands());
- makeComposedAffineApply(&b, app.getLoc(), app.getAffineMap(), operands);
+ makeComposedAffineApply(b, app.getLoc(), app.getAffineMap(), operands);
}
}
// We should now be able to erase everything in reverse order in this test.
OpenPOWER on IntegriCloud