author     River Riddle <riverriddle@google.com>            2019-12-22 21:59:55 -0800
committer  A. Unique TensorFlower <gardener@tensorflow.org> 2019-12-22 22:00:23 -0800
commit     35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch)
tree       d083d37d993a774239081509a50e3e6c65366421 /mlir/lib/Transforms/Utils
parent     22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff)
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step to refactoring the representation of OpResult as proposed in: https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ

This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics.

PiperOrigin-RevId: 286844725
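The typedefs themselves live outside this directory (in the IR headers), so they do not appear in the diff below. As a rough, hypothetical sketch of the idea — assuming definitions along the lines of `using ValuePtr = Value *;` and `using ValueRef = Value &;`, which this patch does not show — code written against the alias compiles today, while Value is still pointer-like, and keeps compiling once Value becomes value-typed:

// Hypothetical sketch only; the real definitions are added elsewhere in the
// commit and are not part of the diff shown here.
namespace mlir {
class Value;               // still a pointer-like class at this stage
using ValuePtr = Value *;  // assumed alias for the current pointer form
using ValueRef = Value &;  // assumed alias for the current reference form
} // namespace mlir

// A call site migrated by this patch:
//   before:  SmallVector<Value *, 8> results;
//   after:   SmallVector<ValuePtr, 8> results;
// Once Value becomes value-typed, ValuePtr can be redefined as plain Value
// and code written against the alias keeps compiling, which is what makes
// the transition incremental.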
Diffstat (limited to 'mlir/lib/Transforms/Utils')
-rw-r--r--  mlir/lib/Transforms/Utils/FoldUtils.cpp                    |   8
-rw-r--r--  mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp   |   8
-rw-r--r--  mlir/lib/Transforms/Utils/InliningUtils.cpp                |  36
-rw-r--r--  mlir/lib/Transforms/Utils/LoopFusionUtils.cpp              |  16
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp                    | 169
-rw-r--r--  mlir/lib/Transforms/Utils/RegionUtils.cpp                  |  24
-rw-r--r--  mlir/lib/Transforms/Utils/Utils.cpp                        |  57
7 files changed, 158 insertions(+), 160 deletions(-)
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index d4b7caae527..85d1f21305e 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -90,7 +90,7 @@ LogicalResult OperationFolder::tryToFold(
return failure();
// Try to fold the operation.
- SmallVector<Value *, 8> results;
+ SmallVector<ValuePtr, 8> results;
if (failed(tryToFold(op, results, processGeneratedConstants)))
return failure();
@@ -138,7 +138,7 @@ void OperationFolder::notifyRemoval(Operation *op) {
/// Tries to perform folding on the given `op`. If successful, populates
/// `results` with the results of the folding.
LogicalResult OperationFolder::tryToFold(
- Operation *op, SmallVectorImpl<Value *> &results,
+ Operation *op, SmallVectorImpl<ValuePtr> &results,
function_ref<void(Operation *)> processGeneratedConstants) {
SmallVector<Attribute, 8> operandConstants;
SmallVector<OpFoldResult, 8> foldResults;
@@ -181,13 +181,13 @@ LogicalResult OperationFolder::tryToFold(
assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
// Check if the result was an SSA value.
- if (auto *repl = foldResults[i].dyn_cast<Value *>()) {
+ if (auto repl = foldResults[i].dyn_cast<ValuePtr>()) {
results.emplace_back(repl);
continue;
}
// Check to see if there is a canonicalized version of this constant.
- auto *res = op->getResult(i);
+ auto res = op->getResult(i);
Attribute attrRepl = foldResults[i].get<Attribute>();
if (auto *constOp =
tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl,
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index e2ca3f8fc5e..fe4a6f9f9e0 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -107,7 +107,7 @@ protected:
// simplifications to its users - make sure to add them to the worklist
// before the root is changed.
void notifyRootReplaced(Operation *op) override {
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
for (auto *user : result->getUsers())
addToWorklist(user);
}
@@ -118,7 +118,7 @@ private:
// operation is modified or removed, as it may trigger further
// simplifications.
template <typename Operands> void addToWorklist(Operands &&operands) {
- for (Value *operand : operands) {
+ for (ValuePtr operand : operands) {
// If the use count of this operand is now < 2, we re-add the defining
// operation to the worklist.
// TODO(riverriddle) This is based on the fact that zero use operations
@@ -160,7 +160,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions,
region.walk(collectOps);
// These are scratch vectors used in the folding loop below.
- SmallVector<Value *, 8> originalOperands, resultValues;
+ SmallVector<ValuePtr, 8> originalOperands, resultValues;
changed = false;
while (!worklist.empty()) {
@@ -189,7 +189,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions,
// Add all the users of the result to the worklist so we make sure
// to revisit them.
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
for (auto *operand : result->getUsers())
addToWorklist(operand);
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index e8466aa3fd6..048130c0d3a 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -55,7 +55,7 @@ static void remapInlinedOperands(iterator_range<Region::iterator> inlinedBlocks,
BlockAndValueMapping &mapper) {
auto remapOperands = [&](Operation *op) {
for (auto &operand : op->getOpOperands())
- if (auto *mappedOp = mapper.lookupOrNull(operand.get()))
+ if (auto mappedOp = mapper.lookupOrNull(operand.get()))
operand.set(mappedOp);
};
for (auto &block : inlinedBlocks)
@@ -98,7 +98,7 @@ void InlinerInterface::handleTerminator(Operation *op, Block *newDest) const {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void InlinerInterface::handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const {
+ ArrayRef<ValuePtr> valuesToRepl) const {
auto *handler = getInterfaceFor(op);
assert(handler && "expected valid dialect handler");
handler->handleTerminator(op, valuesToRepl);
@@ -137,7 +137,7 @@ static bool isLegalToInline(InlinerInterface &interface, Region *src,
LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
BlockAndValueMapping &mapper,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
@@ -147,7 +147,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
// Check that all of the region arguments have been mapped.
auto *srcEntryBlock = &src->front();
if (llvm::any_of(srcEntryBlock->getArguments(),
- [&](BlockArgument *arg) { return !mapper.contains(arg); }))
+ [&](BlockArgumentPtr arg) { return !mapper.contains(arg); }))
return failure();
// The insertion point must be within a block.
@@ -207,7 +207,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
} else {
// Otherwise, there were multiple blocks inlined. Add arguments to the post
// insertion block to represent the results to replace.
- for (Value *resultToRepl : resultsToReplace) {
+ for (ValuePtr resultToRepl : resultsToReplace) {
resultToRepl->replaceAllUsesWith(
postInsertBlock->addArgument(resultToRepl->getType()));
}
@@ -229,8 +229,8 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
/// in-favor of the region arguments when inlining.
LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
- ArrayRef<Value *> inlinedOperands,
- ArrayRef<Value *> resultsToReplace,
+ ArrayRef<ValuePtr> inlinedOperands,
+ ArrayRef<ValuePtr> resultsToReplace,
Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
@@ -246,7 +246,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
for (unsigned i = 0, e = inlinedOperands.size(); i != e; ++i) {
// Verify that the types of the provided values match the function argument
// types.
- BlockArgument *regionArg = entryBlock->getArgument(i);
+ BlockArgumentPtr regionArg = entryBlock->getArgument(i);
if (inlinedOperands[i]->getType() != regionArg->getType())
return failure();
mapper.map(regionArg, inlinedOperands[i]);
@@ -259,10 +259,10 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
/// Utility function used to generate a cast operation from the given interface,
/// or return nullptr if a cast could not be generated.
-static Value *materializeConversion(const DialectInlinerInterface *interface,
- SmallVectorImpl<Operation *> &castOps,
- OpBuilder &castBuilder, Value *arg,
- Type type, Location conversionLoc) {
+static ValuePtr materializeConversion(const DialectInlinerInterface *interface,
+ SmallVectorImpl<Operation *> &castOps,
+ OpBuilder &castBuilder, ValuePtr arg,
+ Type type, Location conversionLoc) {
if (!interface)
return nullptr;
@@ -297,8 +297,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Make sure that the number of arguments and results matchup between the call
// and the region.
- SmallVector<Value *, 8> callOperands(call.getArgOperands());
- SmallVector<Value *, 8> callResults(call.getOperation()->getResults());
+ SmallVector<ValuePtr, 8> callOperands(call.getArgOperands());
+ SmallVector<ValuePtr, 8> callResults(call.getOperation()->getResults());
if (callOperands.size() != entryBlock->getNumArguments() ||
callResults.size() != callableResultTypes.size())
return failure();
@@ -325,8 +325,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Map the provided call operands to the arguments of the region.
BlockAndValueMapping mapper;
for (unsigned i = 0, e = callOperands.size(); i != e; ++i) {
- BlockArgument *regionArg = entryBlock->getArgument(i);
- Value *operand = callOperands[i];
+ BlockArgumentPtr regionArg = entryBlock->getArgument(i);
+ ValuePtr operand = callOperands[i];
// If the call operand doesn't match the expected region argument, try to
// generate a cast.
@@ -342,13 +342,13 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface,
// Ensure that the resultant values of the call, match the callable.
castBuilder.setInsertionPointAfter(call);
for (unsigned i = 0, e = callResults.size(); i != e; ++i) {
- Value *callResult = callResults[i];
+ ValuePtr callResult = callResults[i];
if (callResult->getType() == callableResultTypes[i])
continue;
// Generate a conversion that will produce the original type, so that the IR
// is still valid after the original call gets replaced.
- Value *castResult =
+ ValuePtr castResult =
materializeConversion(callInterface, castOps, castBuilder, callResult,
callResult->getType(), castLoc);
if (!castResult)
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index fd803390ce7..d5cda3265de 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -45,7 +45,7 @@ using namespace mlir;
// Gathers all load and store memref accesses in 'opA' into 'values', where
// 'values[memref] == true' for each store operation.
static void getLoadAndStoreMemRefAccesses(Operation *opA,
- DenseMap<Value *, bool> &values) {
+ DenseMap<ValuePtr, bool> &values) {
opA->walk([&](Operation *op) {
if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
if (values.count(loadOp.getMemRef()) == 0)
@@ -60,7 +60,7 @@ static void getLoadAndStoreMemRefAccesses(Operation *opA,
// accessed 'values' and at least one of the access is a store operation.
// Returns false otherwise.
static bool isDependentLoadOrStoreOp(Operation *op,
- DenseMap<Value *, bool> &values) {
+ DenseMap<ValuePtr, bool> &values) {
if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
return values.count(loadOp.getMemRef()) > 0 &&
values[loadOp.getMemRef()] == true;
@@ -75,7 +75,7 @@ static bool isDependentLoadOrStoreOp(Operation *op,
static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) {
// Record memref values from all loads/store in loop nest rooted at 'opA'.
// Map from memref value to bool which is true if store, false otherwise.
- DenseMap<Value *, bool> values;
+ DenseMap<ValuePtr, bool> values;
getLoadAndStoreMemRefAccesses(opA, values);
// For each 'opX' in block in range ('opA', 'opB'), check if there is a data
@@ -101,7 +101,7 @@ static Operation *getFirstDependentOpInRange(Operation *opA, Operation *opB) {
static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
// Record memref values from all loads/store in loop nest rooted at 'opB'.
// Map from memref value to bool which is true if store, false otherwise.
- DenseMap<Value *, bool> values;
+ DenseMap<ValuePtr, bool> values;
getLoadAndStoreMemRefAccesses(opB, values);
// For each 'opX' in block in range ('opA', 'opB') in reverse order,
@@ -121,8 +121,8 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
}
return WalkResult::advance();
}
- for (auto *value : op->getResults()) {
- for (auto *user : value->getUsers()) {
+ for (auto value : op->getResults()) {
+ for (auto user : value->getUsers()) {
SmallVector<AffineForOp, 4> loops;
// Check if any loop in loop nest surrounding 'user' is 'opB'.
getLoopIVs(*user, &loops);
@@ -443,7 +443,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
// Subtract from operation count the loads/store we expect load/store
// forwarding to remove.
unsigned storeCount = 0;
- llvm::SmallDenseSet<Value *, 4> storeMemrefs;
+ llvm::SmallDenseSet<ValuePtr, 4> storeMemrefs;
srcForOp.walk([&](Operation *op) {
if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
storeMemrefs.insert(storeOp.getMemRef());
@@ -455,7 +455,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
computeCostMap[insertPointParent] = -storeCount;
// Subtract out any load users of 'storeMemrefs' nested below
// 'insertPointParent'.
- for (auto *value : storeMemrefs) {
+ for (auto value : storeMemrefs) {
for (auto *user : value->getUsers()) {
if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
SmallVector<AffineForOp, 4> loops;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 3691aee4870..bc1ced408a9 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -52,7 +52,7 @@ using llvm::SmallMapVector;
/// expression.
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
- SmallVectorImpl<Value *> *operands,
+ SmallVectorImpl<ValuePtr> *operands,
OpBuilder &b) {
auto lbMap = forOp.getLowerBoundMap();
@@ -63,7 +63,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
}
AffineMap tripCountMap;
- SmallVector<Value *, 4> tripCountOperands;
+ SmallVector<ValuePtr, 4> tripCountOperands;
buildTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);
// Sometimes the trip count cannot be expressed as an affine expression.
@@ -82,7 +82,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
// lb + tr1 - tr1 % ufactor, lb + tr2 - tr2 % ufactor; the results of all
// these affine.apply's make up the cleanup loop lower bound.
SmallVector<AffineExpr, 4> bumpExprs(tripCountMap.getNumResults());
- SmallVector<Value *, 4> bumpValues(tripCountMap.getNumResults());
+ SmallVector<ValuePtr, 4> bumpValues(tripCountMap.getNumResults());
for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
auto tripCountExpr = tripCountMap.getResult(i);
bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
@@ -105,7 +105,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
*map = simplifyAffineMap(*map);
canonicalizeMapAndOperands(map, operands);
// Remove any affine.apply's that became dead from the simplification above.
- for (auto *v : bumpValues) {
+ for (auto v : bumpValues) {
if (v->use_empty()) {
v->getDefiningOp()->erase();
}
@@ -127,7 +127,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
return failure();
// Replaces all IV uses to its single iteration value.
- auto *iv = forOp.getInductionVar();
+ auto iv = forOp.getInductionVar();
Operation *op = forOp.getOperation();
if (!iv->use_empty()) {
if (forOp.hasConstantLowerBound()) {
@@ -137,7 +137,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
iv->replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
- SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
+ SmallVector<ValuePtr, 4> lbOperands(lb.operand_begin(), lb.operand_end());
OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
@@ -178,8 +178,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
unsigned offset, AffineForOp srcForInst, OpBuilder b) {
- SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
- SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(srcForInst.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(srcForInst.getUpperBoundOperands());
assert(lbMap.getNumInputs() == lbOperands.size());
assert(ubMap.getNumInputs() == ubOperands.size());
@@ -187,8 +187,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
auto loopChunk =
b.create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
ubMap, srcForInst.getStep());
- auto *loopChunkIV = loopChunk.getInductionVar();
- auto *srcIV = srcForInst.getInductionVar();
+ auto loopChunkIV = loopChunk.getInductionVar();
+ auto srcIV = srcForInst.getInductionVar();
BlockAndValueMapping operandMap;
@@ -449,7 +449,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
OpBuilder builder(op->getBlock(), ++Block::iterator(op));
auto cleanupForInst = cast<AffineForOp>(builder.clone(*op));
AffineMap cleanupMap;
- SmallVector<Value *, 4> cleanupOperands;
+ SmallVector<ValuePtr, 4> cleanupOperands;
getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands,
builder);
assert(cleanupMap &&
@@ -477,7 +477,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
Block::iterator srcBlockEnd = std::prev(forOp.getBody()->end(), 2);
// Unroll the contents of 'forOp' (append unrollFactor-1 additional copies).
- auto *forOpIV = forOp.getInductionVar();
+ auto forOpIV = forOp.getInductionVar();
for (unsigned i = 1; i < unrollFactor; i++) {
BlockAndValueMapping operandMap;
@@ -669,8 +669,8 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
// ...
// }
// ```
-static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
- SmallVector<Value *, 4> *operands,
+static void augmentMapAndBounds(OpBuilder &b, ValuePtr iv, AffineMap *map,
+ SmallVector<ValuePtr, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
@@ -699,16 +699,16 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
- SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
+ SmallVector<ValuePtr, 4> lbOperands(forOp.getLowerBoundOperands());
augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);
// Upper-bound map creation.
auto ubMap = forOp.getUpperBoundMap();
- SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
+ SmallVector<ValuePtr, 4> ubOperands(forOp.getUpperBoundOperands());
augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
/*offset=*/scaledStep);
- auto *iv = forOp.getInductionVar();
+ auto iv = forOp.getInductionVar();
SmallVector<AffineForOp, 8> innerLoops;
for (auto t : targets) {
// Insert newForOp before the terminator of `t`.
@@ -729,10 +729,10 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
return innerLoops;
}
-static Loops stripmineSink(loop::ForOp forOp, Value *factor,
+static Loops stripmineSink(loop::ForOp forOp, ValuePtr factor,
ArrayRef<loop::ForOp> targets) {
- auto *originalStep = forOp.step();
- auto *iv = forOp.getInductionVar();
+ auto originalStep = forOp.step();
+ auto iv = forOp.getInductionVar();
OpBuilder b(forOp);
forOp.setStep(b.create<MulIOp>(forOp.getLoc(), originalStep, factor));
@@ -745,10 +745,10 @@ static Loops stripmineSink(loop::ForOp forOp, Value *factor,
// Insert newForOp before the terminator of `t`.
OpBuilder b(t.getBodyBuilder());
- Value *stepped = b.create<AddIOp>(t.getLoc(), iv, forOp.step());
- Value *less = b.create<CmpIOp>(t.getLoc(), CmpIPredicate::slt,
- forOp.upperBound(), stepped);
- Value *ub =
+ ValuePtr stepped = b.create<AddIOp>(t.getLoc(), iv, forOp.step());
+ ValuePtr less = b.create<CmpIOp>(t.getLoc(), CmpIPredicate::slt,
+ forOp.upperBound(), stepped);
+ ValuePtr ub =
b.create<SelectOp>(t.getLoc(), less, forOp.upperBound(), stepped);
// Splice [begin, begin + nOps - 1) into `newForOp` and replace uses.
@@ -799,7 +799,7 @@ mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
}
SmallVector<Loops, 8> mlir::tile(ArrayRef<loop::ForOp> forOps,
- ArrayRef<Value *> sizes,
+ ArrayRef<ValuePtr> sizes,
ArrayRef<loop::ForOp> targets) {
return tileImpl(forOps, sizes, targets);
}
@@ -821,13 +821,13 @@ SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
return tileImpl(forOps, sizes, target);
}
-Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
+Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<ValuePtr> sizes,
loop::ForOp target) {
return tileImpl(forOps, sizes, target);
}
Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp,
- ArrayRef<Value *> sizes) {
+ ArrayRef<ValuePtr> sizes) {
// Collect perfectly nested loops. If more size values provided than nested
// loops available, truncate `sizes`.
SmallVector<loop::ForOp, 4> forOps;
@@ -842,14 +842,15 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp,
// Build the IR that performs ceil division of a positive value by a constant:
// ceildiv(a, B) = divis(a + (B-1), B)
// where divis is rounding-to-zero division.
-static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
- int64_t divisor) {
+static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc,
+ ValuePtr dividend, int64_t divisor) {
assert(divisor > 0 && "expected positive divisor");
assert(dividend->getType().isIndex() && "expected index-typed value");
- Value *divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
- Value *divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
- Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
+ ValuePtr divisorMinusOneCst =
+ builder.create<ConstantIndexOp>(loc, divisor - 1);
+ ValuePtr divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
+ ValuePtr sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
return builder.create<SignedDivIOp>(loc, sum, divisorCst);
}
@@ -857,13 +858,13 @@ static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
// positive value:
// ceildiv(a, b) = divis(a + (b - 1), b)
// where divis is rounding-to-zero division.
-static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
- Value *divisor) {
+static ValuePtr ceilDivPositive(OpBuilder &builder, Location loc,
+ ValuePtr dividend, ValuePtr divisor) {
assert(dividend->getType().isIndex() && "expected index-typed value");
- Value *cstOne = builder.create<ConstantIndexOp>(loc, 1);
- Value *divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
- Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
+ ValuePtr cstOne = builder.create<ConstantIndexOp>(loc, 1);
+ ValuePtr divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
+ ValuePtr sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
return builder.create<SignedDivIOp>(loc, sum, divisor);
}
@@ -945,7 +946,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
// iterations. Given that the loop current executes
// numIterations = ceildiv((upperBound - lowerBound), step)
// iterations, we need to tile with size ceildiv(numIterations, size[i]).
- SmallVector<Value *, 4> tileSizes;
+ SmallVector<ValuePtr, 4> tileSizes;
tileSizes.reserve(sizes.size());
for (unsigned i = 0, e = sizes.size(); i < e; ++i) {
assert(sizes[i] > 0 && "expected strictly positive size for strip-mining");
@@ -953,10 +954,10 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
auto forOp = forOps[i];
OpBuilder builder(forOp);
auto loc = forOp.getLoc();
- Value *diff =
+ ValuePtr diff =
builder.create<SubIOp>(loc, forOp.upperBound(), forOp.lowerBound());
- Value *numIterations = ceilDivPositive(builder, loc, diff, forOp.step());
- Value *iterationsPerBlock =
+ ValuePtr numIterations = ceilDivPositive(builder, loc, diff, forOp.step());
+ ValuePtr iterationsPerBlock =
ceilDivPositive(builder, loc, numIterations, sizes[i]);
tileSizes.push_back(iterationsPerBlock);
}
@@ -976,7 +977,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
// Replaces all uses of `orig` with `replacement` except if the user is listed
// in `exceptions`.
static void
-replaceAllUsesExcept(Value *orig, Value *replacement,
+replaceAllUsesExcept(ValuePtr orig, ValuePtr replacement,
const SmallPtrSetImpl<Operation *> &exceptions) {
for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
if (exceptions.count(use.getOwner()) == 0)
@@ -1018,30 +1019,30 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
// of the loop to go from 0 to the number of iterations, if necessary.
// TODO(zinenko): introduce support for negative steps or emit dynamic asserts
// on step positivity, whatever gets implemented first.
- Value *diff =
+ ValuePtr diff =
builder.create<SubIOp>(loc, loop.upperBound(), loop.lowerBound());
- Value *numIterations = ceilDivPositive(builder, loc, diff, loop.step());
+ ValuePtr numIterations = ceilDivPositive(builder, loc, diff, loop.step());
loop.setUpperBound(numIterations);
- Value *lb = loop.lowerBound();
+ ValuePtr lb = loop.lowerBound();
if (!isZeroBased) {
- Value *cst0 = builder.create<ConstantIndexOp>(loc, 0);
+ ValuePtr cst0 = builder.create<ConstantIndexOp>(loc, 0);
loop.setLowerBound(cst0);
}
- Value *step = loop.step();
+ ValuePtr step = loop.step();
if (!isStepOne) {
- Value *cst1 = builder.create<ConstantIndexOp>(loc, 1);
+ ValuePtr cst1 = builder.create<ConstantIndexOp>(loc, 1);
loop.setStep(cst1);
}
// Insert code computing the value of the original loop induction variable
// from the "normalized" one.
builder.setInsertionPointToStart(inner.getBody());
- Value *scaled =
+ ValuePtr scaled =
isStepOne ? loop.getInductionVar()
: builder.create<MulIOp>(loc, loop.getInductionVar(), step);
- Value *shifted =
+ ValuePtr shifted =
isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb);
SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(),
@@ -1065,7 +1066,7 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
// of the number of iterations of all loops.
OpBuilder builder(outermost);
Location loc = outermost.getLoc();
- Value *upperBound = outermost.upperBound();
+ ValuePtr upperBound = outermost.upperBound();
for (auto loop : loops.drop_front())
upperBound = builder.create<MulIOp>(loc, upperBound, loop.upperBound());
outermost.setUpperBound(upperBound);
@@ -1080,16 +1081,16 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
// iv_i = floordiv(iv_linear, product-of-loop-ranges-until-i) mod range_i.
// Compute these iteratively from the innermost loop by creating a "running
// quotient" of division by the range.
- Value *previous = outermost.getInductionVar();
+ ValuePtr previous = outermost.getInductionVar();
for (unsigned i = 0, e = loops.size(); i < e; ++i) {
unsigned idx = loops.size() - i - 1;
if (i != 0)
previous = builder.create<SignedDivIOp>(loc, previous,
loops[idx + 1].upperBound());
- Value *iv = (i == e - 1) ? previous
- : builder.create<SignedRemIOp>(
- loc, previous, loops[idx].upperBound());
+ ValuePtr iv = (i == e - 1) ? previous
+ : builder.create<SignedRemIOp>(
+ loc, previous, loops[idx].upperBound());
replaceAllUsesInRegionWith(loops[idx].getInductionVar(), iv,
loops.back().region());
}
@@ -1105,24 +1106,24 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
}
void mlir::mapLoopToProcessorIds(loop::ForOp forOp,
- ArrayRef<Value *> processorId,
- ArrayRef<Value *> numProcessors) {
+ ArrayRef<ValuePtr> processorId,
+ ArrayRef<ValuePtr> numProcessors) {
assert(processorId.size() == numProcessors.size());
if (processorId.empty())
return;
OpBuilder b(forOp);
Location loc(forOp.getLoc());
- Value *mul = processorId.front();
+ ValuePtr mul = processorId.front();
for (unsigned i = 1, e = processorId.size(); i < e; ++i)
mul = b.create<AddIOp>(loc, b.create<MulIOp>(loc, mul, numProcessors[i]),
processorId[i]);
- Value *lb = b.create<AddIOp>(loc, forOp.lowerBound(),
- b.create<MulIOp>(loc, forOp.step(), mul));
+ ValuePtr lb = b.create<AddIOp>(loc, forOp.lowerBound(),
+ b.create<MulIOp>(loc, forOp.step(), mul));
forOp.setLowerBound(lb);
- Value *step = forOp.step();
- for (auto *numProcs : numProcessors)
+ ValuePtr step = forOp.step();
+ for (auto numProcs : numProcessors)
step = b.create<MulIOp>(loc, step, numProcs);
forOp.setStep(step);
}
@@ -1139,7 +1140,7 @@ findHighestBlockForPlacement(const MemRefRegion &region, Block &block,
Block::iterator *copyInPlacementStart,
Block::iterator *copyOutPlacementStart) {
const auto *cst = region.getConstraints();
- SmallVector<Value *, 4> symbols;
+ SmallVector<ValuePtr, 4> symbols;
cst->getIdValues(cst->getNumDimIds(), cst->getNumDimAndSymbolIds(), &symbols);
SmallVector<AffineForOp, 4> enclosingFors;
@@ -1202,10 +1203,10 @@ static void getMultiLevelStrides(const MemRefRegion &region,
/// returns the outermost AffineForOp of the copy loop nest. `memIndicesStart'
/// holds the lower coordinates of the region in the original memref to copy
/// in/out. If `copyOut' is true, generates a copy-out; otherwise a copy-in.
-static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
- Value *fastMemRef,
+static AffineForOp generatePointWiseCopy(Location loc, ValuePtr memref,
+ ValuePtr fastMemRef,
AffineMap memAffineMap,
- ArrayRef<Value *> memIndicesStart,
+ ArrayRef<ValuePtr> memIndicesStart,
ArrayRef<int64_t> fastBufferShape,
bool isCopyOut, OpBuilder b) {
assert(!memIndicesStart.empty() && "only 1-d or more memrefs");
@@ -1215,7 +1216,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
// for y = ...
// fast_buf[x][y] = buf[mem_x + x][mem_y + y]
- SmallVector<Value *, 4> fastBufIndices, memIndices;
+ SmallVector<ValuePtr, 4> fastBufIndices, memIndices;
AffineForOp copyNestRoot;
for (unsigned d = 0, e = fastBufferShape.size(); d < e; ++d) {
auto forOp = b.create<AffineForOp>(loc, 0, fastBufferShape[d]);
@@ -1224,7 +1225,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
b = forOp.getBodyBuilder();
fastBufIndices.push_back(forOp.getInductionVar());
- Value *memBase =
+ ValuePtr memBase =
(memAffineMap == b.getMultiDimIdentityMap(memAffineMap.getNumDims()))
? memIndicesStart[d]
: b.create<AffineApplyOp>(
@@ -1277,7 +1278,7 @@ static LogicalResult generateCopy(
const MemRefRegion &region, Block *block, Block::iterator begin,
Block::iterator end, Block *copyPlacementBlock,
Block::iterator copyInPlacementStart, Block::iterator copyOutPlacementStart,
- AffineCopyOptions copyOptions, DenseMap<Value *, Value *> &fastBufferMap,
+ AffineCopyOptions copyOptions, DenseMap<ValuePtr, ValuePtr> &fastBufferMap,
DenseSet<Operation *> &copyNests, uint64_t *sizeInBytes,
Block::iterator *nBegin, Block::iterator *nEnd) {
*nBegin = begin;
@@ -1285,7 +1286,7 @@ static LogicalResult generateCopy(
FuncOp f = begin->getParentOfType<FuncOp>();
OpBuilder topBuilder(f.getBody());
- Value *zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
+ ValuePtr zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
if (begin == end)
return success();
@@ -1305,7 +1306,7 @@ static LogicalResult generateCopy(
OpBuilder top(func.getBody());
auto loc = region.loc;
- auto *memref = region.memref;
+ auto memref = region.memref;
auto memRefType = memref->getType().cast<MemRefType>();
auto layoutMaps = memRefType.getAffineMaps();
@@ -1317,9 +1318,9 @@ static LogicalResult generateCopy(
// Indices to use for the copying.
// Indices for the original memref being copied from/to.
- SmallVector<Value *, 4> memIndices;
+ SmallVector<ValuePtr, 4> memIndices;
// Indices for the faster buffer being copied into/from.
- SmallVector<Value *, 4> bufIndices;
+ SmallVector<ValuePtr, 4> bufIndices;
unsigned rank = memRefType.getRank();
SmallVector<int64_t, 4> fastBufferShape;
@@ -1345,7 +1346,7 @@ static LogicalResult generateCopy(
// 'regionSymbols' hold values that this memory region is symbolic/parametric
// on; these typically include loop IVs surrounding the level at which the
// copy generation is being done or other valid symbols in MLIR.
- SmallVector<Value *, 8> regionSymbols;
+ SmallVector<ValuePtr, 8> regionSymbols;
cst->getIdValues(rank, cst->getNumIds(), &regionSymbols);
// Construct the index expressions for the fast memory buffer. The index
@@ -1393,7 +1394,7 @@ static LogicalResult generateCopy(
}
// The faster memory space buffer.
- Value *fastMemRef;
+ ValuePtr fastMemRef;
// Check if a buffer was already created.
bool existingBuf = fastBufferMap.count(memref) > 0;
@@ -1433,8 +1434,8 @@ static LogicalResult generateCopy(
return failure();
}
- Value *stride = nullptr;
- Value *numEltPerStride = nullptr;
+ ValuePtr stride = nullptr;
+ ValuePtr numEltPerStride = nullptr;
if (!strideInfos.empty()) {
stride = top.create<ConstantIndexOp>(loc, strideInfos[0].stride);
numEltPerStride =
@@ -1473,7 +1474,7 @@ static LogicalResult generateCopy(
copyOptions.tagMemorySpace);
auto tagMemRef = prologue.create<AllocOp>(loc, tagMemRefType);
- SmallVector<Value *, 4> tagIndices({zeroIndex});
+ SmallVector<ValuePtr, 4> tagIndices({zeroIndex});
auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
if (!region.isWrite()) {
@@ -1582,7 +1583,7 @@ static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
SmallVector<AffineForOp, 4> ivs;
getLoopIVs(*opInst, &ivs);
ivs.resize(numParamLoopIVs);
- SmallVector<Value *, 4> symbols;
+ SmallVector<ValuePtr, 4> symbols;
extractForInductionVars(ivs, &symbols);
regionCst->reset(rank, numParamLoopIVs, 0);
regionCst->setIdValues(rank, rank + numParamLoopIVs, symbols);
@@ -1629,12 +1630,12 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// List of memory regions to copy for. We need a map vector to have a
// guaranteed iteration order to write test cases. CHECK-DAG doesn't help here
// since the alloc's for example are identical except for the SSA id.
- SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> readRegions;
- SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> writeRegions;
+ SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4> readRegions;
+ SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4> writeRegions;
// Map from original memref's to the fast buffers that their accesses are
// replaced with.
- DenseMap<Value *, Value *> fastBufferMap;
+ DenseMap<ValuePtr, ValuePtr> fastBufferMap;
// To check for errors when walking the block.
bool error = false;
@@ -1684,7 +1685,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
// Attempts to update; returns true if 'region' exists in targetRegions.
auto updateRegion =
- [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
+ [&](const SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4>
&targetRegions) {
auto it = targetRegions.find(region->memref);
if (it == targetRegions.end())
@@ -1736,7 +1737,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
uint64_t totalCopyBuffersSizeInBytes = 0;
bool ret = true;
auto processRegions =
- [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
+ [&](const SmallMapVector<ValuePtr, std::unique_ptr<MemRefRegion>, 4>
&regions) {
for (const auto &regionEntry : regions) {
// For each region, hoist copy in/out past all hoistable
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index b91b189b381..749d5bf1dd0 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -27,9 +27,9 @@
using namespace mlir;
-void mlir::replaceAllUsesInRegionWith(Value *orig, Value *replacement,
+void mlir::replaceAllUsesInRegionWith(ValuePtr orig, ValuePtr replacement,
Region &region) {
- for (IROperand &use : llvm::make_early_inc_range(orig->getUses())) {
+ for (auto &use : llvm::make_early_inc_range(orig->getUses())) {
if (region.isAncestor(use.getOwner()->getParentRegion()))
use.set(replacement);
}
@@ -63,14 +63,14 @@ void mlir::visitUsedValuesDefinedAbove(
}
void mlir::getUsedValuesDefinedAbove(Region &region, Region &limit,
- llvm::SetVector<Value *> &values) {
+ llvm::SetVector<ValuePtr> &values) {
visitUsedValuesDefinedAbove(region, limit, [&](OpOperand *operand) {
values.insert(operand->get());
});
}
void mlir::getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
- llvm::SetVector<Value *> &values) {
+ llvm::SetVector<ValuePtr> &values) {
for (Region &region : regions)
getUsedValuesDefinedAbove(region, region, values);
}
@@ -146,8 +146,8 @@ namespace {
class LiveMap {
public:
/// Value methods.
- bool wasProvenLive(Value *value) { return liveValues.count(value); }
- void setProvedLive(Value *value) {
+ bool wasProvenLive(ValuePtr value) { return liveValues.count(value); }
+ void setProvedLive(ValuePtr value) {
changed |= liveValues.insert(value).second;
}
@@ -161,7 +161,7 @@ public:
private:
bool changed = false;
- DenseSet<Value *> liveValues;
+ DenseSet<ValuePtr> liveValues;
DenseSet<Operation *> liveOps;
};
} // namespace
@@ -188,7 +188,7 @@ static bool isUseSpeciallyKnownDead(OpOperand &use, LiveMap &liveMap) {
return false;
}
-static void processValue(Value *value, LiveMap &liveMap) {
+static void processValue(ValuePtr value, LiveMap &liveMap) {
bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) {
if (isUseSpeciallyKnownDead(use, liveMap))
return false;
@@ -222,9 +222,9 @@ static void propagateLiveness(Operation *op, LiveMap &liveMap) {
liveMap.setProvedLive(op);
return;
}
- for (Value *value : op->getResults())
+ for (ValuePtr value : op->getResults())
processValue(value, liveMap);
- bool provedLive = llvm::any_of(op->getResults(), [&](Value *value) {
+ bool provedLive = llvm::any_of(op->getResults(), [&](ValuePtr value) {
return liveMap.wasProvenLive(value);
});
if (provedLive)
@@ -240,7 +240,7 @@ static void propagateLiveness(Region &region, LiveMap &liveMap) {
// faster convergence to a fixed point (we try to visit uses before defs).
for (Operation &op : llvm::reverse(block->getOperations()))
propagateLiveness(&op, liveMap);
- for (Value *value : block->getArguments())
+ for (ValuePtr value : block->getArguments())
processValue(value, liveMap);
}
}
@@ -259,7 +259,7 @@ static void eraseTerminatorSuccessorOperands(Operation *terminator,
// Iterating args in reverse is needed for correctness, to avoid
// shifting later args when earlier args are erased.
unsigned arg = argE - argI - 1;
- Value *value = terminator->getSuccessor(succ)->getArgument(arg);
+ ValuePtr value = terminator->getSuccessor(succ)->getArgument(arg);
if (!liveMap.wasProvenLive(value)) {
terminator->eraseSuccessorOperand(succ, arg);
}
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 57a92531163..96a6cdc544f 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -47,7 +47,8 @@ static bool isMemRefDereferencingOp(Operation &op) {
}
/// Return the AffineMapAttr associated with memory 'op' on 'memref'.
-static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) {
+static NamedAttribute getAffineMapAttrForMemRef(Operation *op,
+ ValuePtr memref) {
return TypeSwitch<Operation *, NamedAttribute>(op)
.Case<AffineDmaStartOp, AffineLoadOp, AffinePrefetchOp, AffineStoreOp,
AffineDmaWaitOp>(
@@ -55,12 +56,10 @@ static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) {
}
// Perform the replacement in `op`.
-LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- Operation *op,
- ArrayRef<Value *> extraIndices,
- AffineMap indexRemap,
- ArrayRef<Value *> extraOperands,
- ArrayRef<Value *> symbolOperands) {
+LogicalResult mlir::replaceAllMemRefUsesWith(
+ ValuePtr oldMemRef, ValuePtr newMemRef, Operation *op,
+ ArrayRef<ValuePtr> extraIndices, AffineMap indexRemap,
+ ArrayRef<ValuePtr> extraOperands, ArrayRef<ValuePtr> symbolOperands) {
unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
@@ -106,13 +105,13 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
NamedAttribute oldMapAttrPair = getAffineMapAttrForMemRef(op, oldMemRef);
AffineMap oldMap = oldMapAttrPair.second.cast<AffineMapAttr>().getValue();
unsigned oldMapNumInputs = oldMap.getNumInputs();
- SmallVector<Value *, 4> oldMapOperands(
+ SmallVector<ValuePtr, 4> oldMapOperands(
op->operand_begin() + memRefOperandPos + 1,
op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
// Apply 'oldMemRefOperands = oldMap(oldMapOperands)'.
- SmallVector<Value *, 4> oldMemRefOperands;
- SmallVector<Value *, 4> affineApplyOps;
+ SmallVector<ValuePtr, 4> oldMemRefOperands;
+ SmallVector<ValuePtr, 4> affineApplyOps;
oldMemRefOperands.reserve(oldMemRefRank);
if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
for (auto resultExpr : oldMap.getResults()) {
@@ -130,14 +129,14 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
// Construct new indices as a remap of the old ones if a remapping has been
// provided. The indices of a memref come right after it, i.e.,
// at position memRefOperandPos + 1.
- SmallVector<Value *, 4> remapOperands;
+ SmallVector<ValuePtr, 4> remapOperands;
remapOperands.reserve(extraOperands.size() + oldMemRefRank +
symbolOperands.size());
remapOperands.append(extraOperands.begin(), extraOperands.end());
remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
remapOperands.append(symbolOperands.begin(), symbolOperands.end());
- SmallVector<Value *, 4> remapOutputs;
+ SmallVector<ValuePtr, 4> remapOutputs;
remapOutputs.reserve(oldMemRefRank);
if (indexRemap &&
@@ -156,11 +155,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
remapOutputs.append(remapOperands.begin(), remapOperands.end());
}
- SmallVector<Value *, 4> newMapOperands;
+ SmallVector<ValuePtr, 4> newMapOperands;
newMapOperands.reserve(newMemRefRank);
// Prepend 'extraIndices' in 'newMapOperands'.
- for (auto *extraIndex : extraIndices) {
+ for (auto extraIndex : extraIndices) {
assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
@@ -179,7 +178,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
newMap = simplifyAffineMap(newMap);
canonicalizeMapAndOperands(&newMap, &newMapOperands);
// Remove any affine.apply's that became dead as a result of composition.
- for (auto *value : affineApplyOps)
+ for (auto value : affineApplyOps)
if (value->use_empty())
value->getDefiningOp()->erase();
@@ -203,7 +202,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
// Result types don't change. Both memref's are of the same elemental type.
state.types.reserve(op->getNumResults());
- for (auto *result : op->getResults())
+ for (auto result : op->getResults())
state.types.push_back(result->getType());
// Add attribute for 'newMap', other Attributes do not change.
@@ -224,13 +223,11 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
return success();
}
-LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
- ArrayRef<Value *> extraIndices,
- AffineMap indexRemap,
- ArrayRef<Value *> extraOperands,
- ArrayRef<Value *> symbolOperands,
- Operation *domInstFilter,
- Operation *postDomInstFilter) {
+LogicalResult mlir::replaceAllMemRefUsesWith(
+ ValuePtr oldMemRef, ValuePtr newMemRef, ArrayRef<ValuePtr> extraIndices,
+ AffineMap indexRemap, ArrayRef<ValuePtr> extraOperands,
+ ArrayRef<ValuePtr> symbolOperands, Operation *domInstFilter,
+ Operation *postDomInstFilter) {
unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank();
@@ -331,9 +328,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
void mlir::createAffineComputationSlice(
Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
// Collect all operands that are results of affine apply ops.
- SmallVector<Value *, 4> subOperands;
+ SmallVector<ValuePtr, 4> subOperands;
subOperands.reserve(opInst->getNumOperands());
- for (auto *operand : opInst->getOperands())
+ for (auto operand : opInst->getOperands())
if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
subOperands.push_back(operand);
@@ -348,7 +345,7 @@ void mlir::createAffineComputationSlice(
// which case there would be nothing to do.
bool localized = true;
for (auto *op : affineApplyOps) {
- for (auto *result : op->getResults()) {
+ for (auto result : op->getResults()) {
for (auto *user : result->getUsers()) {
if (user != opInst) {
localized = false;
@@ -361,7 +358,7 @@ void mlir::createAffineComputationSlice(
return;
OpBuilder builder(opInst);
- SmallVector<Value *, 4> composedOpOperands(subOperands);
+ SmallVector<ValuePtr, 4> composedOpOperands(subOperands);
auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);
@@ -378,7 +375,7 @@ void mlir::createAffineComputationSlice(
// affine apply op above instead of existing ones (subOperands). So, they
// differ from opInst's operands only for those operands in 'subOperands', for
// which they will be replaced by the corresponding one from 'sliceOps'.
- SmallVector<Value *, 4> newOperands(opInst->getOperands());
+ SmallVector<ValuePtr, 4> newOperands(opInst->getOperands());
for (unsigned i = 0, e = newOperands.size(); i < e; i++) {
// Replace the subOperands from among the new operands.
unsigned j, f;
@@ -451,8 +448,8 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
newShape[d] = ubConst.getValue() + 1;
}
- auto *oldMemRef = allocOp.getResult();
- SmallVector<Value *, 4> symbolOperands(allocOp.getSymbolicOperands());
+ auto oldMemRef = allocOp.getResult();
+ SmallVector<ValuePtr, 4> symbolOperands(allocOp.getSymbolicOperands());
auto newMemRefType = MemRefType::get(newShape, memrefType.getElementType(),
b.getMultiDimIdentityMap(newRank));