| author    | Uday Bondhugula <bondhugula@google.com> | 2019-01-07 15:06:32 -0800 |
|-----------|------------------------------------------|----------------------------|
| committer | jpienaar <jpienaar@google.com>           | 2019-03-29 15:02:41 -0700  |
| commit    | 56b3640b945c38c1a761a8811f30c04deabb5e67 |                            |
| tree      | 06fe4344f1afed476cb0abe030bff12536e82530 |                            |
| parent    | 2cdb59f38d74998c2153d2656317ccb4016621fd |                            |
Misc readability and doc / code comment related improvements - NFC
- when SSAValue/MLValue existed, code at several places was forced to create
  additional aggregate temporaries of SmallVector<SSAValue/MLValue> to handle
  the conversion; get rid of such redundant code
- use filling ctors instead of explicit loops
- for SmallVectors, change insert(list.end(), ...) to append(...) (both of
  these idioms are sketched right after this message)
- improve comments at various places
- turn getMemRefAccess into a MemRefAccess ctor and drop the duplicated
  getMemRefAccess (a simplified sketch of this pattern follows the file list
  below). In the next CL, provide getAccess() accessors on load, store, and
  DMA ops to return a MemRefAccess.
PiperOrigin-RevId: 228243638
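
To make the two SmallVector bullets above concrete, here is a minimal, self-contained sketch of the idioms the commit switches to: append(begin, end) in place of insert(list.end(), begin, end), and the filling constructor in place of an explicit push_back loop. It is illustrative only (not code from this commit) and assumes LLVM's ADT and Support headers are available.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallVector<int, 8> src = {1, 2, 3, 4};

  // Before: appending a range by passing the end() iterator to insert().
  llvm::SmallVector<int, 8> viaInsert;
  viaInsert.insert(viaInsert.end(), src.begin(), src.end());

  // After: the equivalent, more direct append().
  llvm::SmallVector<int, 8> viaAppend;
  viaAppend.append(src.begin(), src.end());

  // Before: an explicit loop to build N zero-valued elements.
  llvm::SmallVector<int, 8> viaLoop;
  for (unsigned i = 0; i < 4; ++i)
    viaLoop.push_back(0);

  // After: the filling constructor expresses the same thing in one line.
  llvm::SmallVector<int, 8> viaFillCtor(4, 0);

  llvm::errs() << viaAppend.size() << " " << viaFillCtor.size() << "\n";
  return 0;
}
```

Both forms are equivalent; append() and the filling ctor simply state the intent directly and avoid repeating the end() iterator or the loop boilerplate.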
| mode       | file                                          | lines changed |
|------------|-----------------------------------------------|---------------|
| -rw-r--r-- | mlir/include/mlir/Analysis/AffineAnalysis.h   | 9             |
| -rw-r--r-- | mlir/include/mlir/Analysis/AffineStructures.h | 18            |
| -rw-r--r-- | mlir/lib/Analysis/AffineAnalysis.cpp          | 99            |
| -rw-r--r-- | mlir/lib/Analysis/AffineStructures.cpp        | 8             |
| -rw-r--r-- | mlir/lib/Analysis/MemRefDependenceCheck.cpp   | 33            |
| -rw-r--r-- | mlir/lib/Analysis/Utils.cpp                   | 29            |
| -rw-r--r-- | mlir/lib/EDSC/Types.cpp                       | 4             |
| -rw-r--r-- | mlir/lib/Parser/Parser.cpp                    | 2             |
| -rw-r--r-- | mlir/lib/Transforms/LoopFusion.cpp            | 9             |
| -rw-r--r-- | mlir/lib/Transforms/LoopTiling.cpp            | 6             |
| -rw-r--r-- | mlir/lib/Transforms/MemRefDataFlowOpt.cpp     | 5             |
| -rw-r--r-- | mlir/lib/Transforms/Utils/Utils.cpp           | 32            |
12 files changed, 125 insertions, 129 deletions
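
The last commit-message bullet, and the MemRefDependenceCheck.cpp, Utils.cpp, LoopFusion.cpp, and MemRefDataFlowOpt.cpp hunks in the diff below, replace a getMemRefAccess() helper that filled an out-parameter with a MemRefAccess constructor. The following is a minimal, self-contained sketch of that refactoring pattern under simplified assumptions: Value, OperationInst, and the members used here are hypothetical stand-ins for illustration, not MLIR's actual classes.

```cpp
#include <cassert>
#include <vector>

// Hypothetical stand-ins so the pattern compiles on its own; they are not
// MLIR's real Value/OperationInst types.
struct Value {};
struct OperationInst {
  bool isLoad;                       // pretend load/store discriminator
  Value *memrefOperand;              // pretend memref operand
  std::vector<Value *> indexOperands; // pretend access indices
};

// After the change: the access is populated by a constructor instead of a
// free getMemRefAccess(opInst, &access) helper writing through a pointer.
struct MemRefAccess {
  const Value *memref = nullptr;
  const OperationInst *opInst = nullptr;
  std::vector<Value *> indices;

  explicit MemRefAccess(OperationInst *loadOrStoreOpInst)
      : memref(loadOrStoreOpInst->memrefOperand), opInst(loadOrStoreOpInst),
        indices(loadOrStoreOpInst->indexOperands) {}
};

int main() {
  Value m, i0, i1;
  OperationInst load{/*isLoad=*/true, &m, {&i0, &i1}};

  // A call site shrinks from a declaration plus a helper call to one line,
  // mirroring "MemRefAccess srcAccess(srcOpInst);" in the hunks below.
  MemRefAccess access(&load);
  assert(access.indices.size() == 2);
  return 0;
}
```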
diff --git a/mlir/include/mlir/Analysis/AffineAnalysis.h b/mlir/include/mlir/Analysis/AffineAnalysis.h index b769841b451..588be4ea351 100644 --- a/mlir/include/mlir/Analysis/AffineAnalysis.h +++ b/mlir/include/mlir/Analysis/AffineAnalysis.h @@ -121,11 +121,18 @@ bool getFlattenedAffineExprs( bool getIndexSet(llvm::ArrayRef<ForInst *> forInsts, FlatAffineConstraints *domain); +/// Encapsulates a memref load or store access information. struct MemRefAccess { const Value *memref; const OperationInst *opInst; llvm::SmallVector<Value *, 4> indices; - // Populates 'accessMap' with composition of AffineApplyOps reachable from + + /// Constructs a MemRefAccess from a load or store operation instruction. + // TODO(b/119949820): add accessors to standard op's load, store, DMA op's to + // return MemRefAccess, i.e., loadOp->getAccess(), dmaOp->getRead/WriteAccess. + explicit MemRefAccess(OperationInst *opInst); + + /// Populates 'accessMap' with composition of AffineApplyOps reachable from // 'indices'. void getAccessMap(AffineValueMap *accessMap) const; }; diff --git a/mlir/include/mlir/Analysis/AffineStructures.h b/mlir/include/mlir/Analysis/AffineStructures.h index b1133520d74..ae8cda997a1 100644 --- a/mlir/include/mlir/Analysis/AffineStructures.h +++ b/mlir/include/mlir/Analysis/AffineStructures.h @@ -233,10 +233,16 @@ private: /// /// The identifiers x_0, x_1, ... appear in the order: dimensional identifiers, /// symbolic identifiers, and local identifiers. The local identifiers -/// correspond to local/internal variables created temporarily when converting -/// from tree AffineExpr's that have mod's and div's and are thus needed -/// to increase representational power. -// +/// correspond to local/internal variables created when converting from +/// AffineExpr's containing mod's and div's; they are thus needed to increase +/// representational power. Each local identifier is always (by construction) a +/// floordiv of a pure add/mul affine function of dimensional, symbolic, and +/// other local identifiers, in a non-mutually recursive way. Hence, every local +/// identifier can ultimately always be recovered as an affine function of +/// dimensional and symbolic identifiers (involving floordiv's); note however +/// that some floordiv combinations are converted to mod's by AffineExpr +/// construction. +/// class FlatAffineConstraints { public: enum IdKind { Dimension, Symbol, Local }; @@ -259,7 +265,7 @@ public: if (idArgs.empty()) ids.resize(numIds, None); else - ids.insert(ids.end(), idArgs.begin(), idArgs.end()); + ids.append(idArgs.begin(), idArgs.end()); } /// Constructs a constraint system with the specified number of @@ -276,7 +282,7 @@ public: if (idArgs.empty()) ids.resize(numIds, None); else - ids.insert(ids.end(), idArgs.begin(), idArgs.end()); + ids.append(idArgs.begin(), idArgs.end()); } explicit FlatAffineConstraints(const HyperRectangularSet &set); diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp index 89148139fb4..4485326c897 100644 --- a/mlir/lib/Analysis/AffineAnalysis.cpp +++ b/mlir/lib/Analysis/AffineAnalysis.cpp @@ -81,47 +81,59 @@ namespace { // This class is used to flatten a pure affine expression (AffineExpr, // which is in a tree form) into a sum of products (w.r.t constants) when -// possible, and in that process simplifying the expression. 
The simplification -// performed includes the accumulation of contributions for each dimensional and -// symbolic identifier together, the simplification of floordiv/ceildiv/mod -// expressions and other simplifications that in turn happen as a result. A -// simplification that this flattening naturally performs is of simplifying the -// numerator and denominator of floordiv/ceildiv, and folding a modulo -// expression to a zero, if possible. Three examples are below: +// possible, and in that process simplifying the expression. For a modulo, +// floordiv, or a ceildiv expression, an additional identifier, called a local +// identifier, is introduced to rewrite the expression as a sum of product +// affine expression. Each local identifier is always and by construction a +// floordiv of a pure add/mul affine function of dimensional, symbolic, and +// other local identifiers, in a non-mutually recursive way. Hence, every local +// identifier can ultimately always be recovered as an affine function of +// dimensional and symbolic identifiers (involving floordiv's); note however +// that by AffineExpr construction, some floordiv combinations are converted to +// mod's. The result of the flattening is a flattened expression and a set of +// constraints involving just the local variables. // -// (d0 + 3 * d1) + d0) - 2 * d1) - d0 simplified to d0 + d1 -// (d0 - d0 mod 4 + 4) mod 4 simplified to 0. -// (3*d0 + 2*d1 + d0) floordiv 2 + d1 simplified to 2*d0 + 2*d1 +// d2 + (d0 + d1) floordiv 4 is flattened to d2 + q where 'q' is the local +// variable introduced, with localVarCst containing 4*q <= d0 + d1 <= 4*q + 3. // -// For a modulo, floordiv, or a ceildiv expression, an additional identifier -// (called a local identifier) is introduced to rewrite it as a sum of products -// (w.r.t constants). For example, for the second example above, d0 % 4 is +// The simplification performed includes the accumulation of contributions for +// each dimensional and symbolic identifier together, the simplification of +// floordiv/ceildiv/mod expressions and other simplifications that in turn +// happen as a result. A simplification that this flattening naturally performs +// is of simplifying the numerator and denominator of floordiv/ceildiv, and +// folding a modulo expression to a zero, if possible. Three examples are below: +// +// (d0 + 3 * d1) + d0) - 2 * d1) - d0 simplified to d0 + d1 +// (d0 - d0 mod 4 + 4) mod 4 simplified to 0 +// (3*d0 + 2*d1 + d0) floordiv 2 + d1 simplified to 2*d0 + 2*d1 +// +// The way the flattening works for the second example is as follows: d0 % 4 is // replaced by d0 - 4*q with q being introduced: the expression then simplifies // to: (d0 - (d0 - 4q) + 4) = 4q + 4, modulo of which w.r.t 4 simplifies to -// zero. Note that an affine expression may not always be expressible in a sum -// of products form involving just the original dimensional and symbolic -// identifiers, due to the presence of modulo/floordiv/ceildiv expressions -// that may not be eliminated after simplification; in such cases, the final +// zero. 
Note that an affine expression may not always be expressible purely as +// a sum of products involving just the original dimensional and symbolic +// identifiers due to the presence of modulo/floordiv/ceildiv expressions that +// may not be eliminated after simplification; in such cases, the final // expression can be reconstructed by replacing the local identifiers with their -// corresponding explicit form stored in 'localExprs' (note that the explicit -// form itself would have been simplified). +// corresponding explicit form stored in 'localExprs' (note that each of the +// explicit forms itself would have been simplified). // -// This is a linear time post order walk for an affine expression that attempts -// the above simplifications through visit methods, with partial results being -// stored in 'operandExprStack'. When a parent expr is visited, the flattened -// expressions corresponding to its two operands would already be on the stack - -// the parent expression looks at the two flattened expressions and combines the -// two. It pops off the operand expressions and pushes the combined result -// (although this is done in-place on its LHS operand expr). When the walk is -// completed, the flattened form of the top-level expression would be left on -// the stack. +// The expression walk method here performs a linear time post order walk that +// performs the above simplifications through visit methods, with partial +// results being stored in 'operandExprStack'. When a parent expr is visited, +// the flattened expressions corresponding to its two operands would already be +// on the stack - the parent expression looks at the two flattened expressions +// and combines the two. It pops off the operand expressions and pushes the +// combined result (although this is done in-place on its LHS operand expr). +// When the walk is completed, the flattened form of the top-level expression +// would be left on the stack. // // A flattener can be repeatedly used for multiple affine expressions that bind // to the same operands, for example, for all result expressions of an // AffineMap or AffineValueMap. In such cases, using it for multiple expressions // is more efficient than creating a new flattener for each expression since // common idenical div and mod expressions appearing across different -// expressions are mapped to the local identifier (same column position in +// expressions are mapped to the same local identifier (same column position in // 'localVarCst'). struct AffineExprFlattener : public AffineExprVisitor<AffineExprFlattener> { public: @@ -143,11 +155,11 @@ public: unsigned numLocals; // AffineExpr's corresponding to the floordiv/ceildiv/mod expressions for // which new identifiers were introduced; if the latter do not get canceled - // out, these expressions are needed to reconstruct the AffineExpr / tree - // form. Note that these expressions themselves would have been simplified - // (recursively) by this pass. Eg. d0 + (d0 + 2*d1 + d0) ceildiv 4 will be - // simplified to d0 + q, where q = (d0 + d1) ceildiv 2. (d0 + d1) ceildiv 2 - // would be the local expression stored for q. + // out, these expressions can be readily used to reconstruct the AffineExpr + // (tree) form. Note that these expressions themselves would have been + // simplified (recursively) by this pass. Eg. d0 + (d0 + 2*d1 + d0) ceildiv 4 + // will be simplified to d0 + q, where q = (d0 + d1) ceildiv 2. (d0 + d1) + // ceildiv 2 would be the local expression stored for q. 
SmallVector<AffineExpr, 4> localExprs; MLIRContext *context; @@ -186,6 +198,12 @@ public: operandExprStack.pop_back(); } + // + // t = expr mod c <=> t = expr - c*q and c*q <= expr <= c*q + c - 1 + // + // A mod expression "expr mod c" is thus flattened by introducing a new local + // variable q (= expr floordiv c), such that expr mod c is replaced with + // 'expr - c * q' and c * q <= expr <= c * q + c - 1 are added to localVarCst. void visitModExpr(AffineBinaryOpExpr expr) { assert(operandExprStack.size() >= 2); // This is a pure affine expr; the RHS will be a constant. @@ -231,18 +249,21 @@ public: void visitFloorDivExpr(AffineBinaryOpExpr expr) { visitDivExpr(expr, /*isCeil=*/false); } + void visitDimExpr(AffineDimExpr expr) { operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0)); auto &eq = operandExprStack.back(); assert(expr.getPosition() < numDims && "Inconsistent number of dims"); eq[getDimStartIndex() + expr.getPosition()] = 1; } + void visitSymbolExpr(AffineSymbolExpr expr) { operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0)); auto &eq = operandExprStack.back(); assert(expr.getPosition() < numSymbols && "inconsistent number of symbols"); eq[getSymbolStartIndex() + expr.getPosition()] = 1; } + void visitConstantExpr(AffineConstantExpr expr) { operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0)); auto &eq = operandExprStack.back(); @@ -250,9 +271,19 @@ public: } private: + // t = expr floordiv c <=> t = q, c * q <= expr <= c * q + c - 1 + // A floordiv is thus flattened by introducing a new local variable q, and + // replacing that expression with 'q' while adding the constraints + // c * q <= expr <= c * q + c - 1 to localVarCst. + // + // A ceildiv is similarly flattened: + // t = expr ceildiv c <=> t = q, c * q - (c - 1) <= expr <= c * q + // Note that although t = expr ceildiv c, it is equivalent to + // (expr + c - 1) floordiv c. void visitDivExpr(AffineBinaryOpExpr expr, bool isCeil) { assert(operandExprStack.size() >= 2); assert(expr.getRHS().isa<AffineConstantExpr>()); + // This is a pure affine expr; the RHS is a positive constant. 
auto rhsConst = operandExprStack.back()[getConstantIndex()]; // TODO(bondhugula): handle division by zero at the same time the issue is diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp index 3dbbfa7a49d..f4f525bc470 100644 --- a/mlir/lib/Analysis/AffineStructures.cpp +++ b/mlir/lib/Analysis/AffineStructures.cpp @@ -484,7 +484,7 @@ FlatAffineConstraints::FlatAffineConstraints( auto otherIds = other.getIds(); ids.reserve(numReservedCols); - ids.insert(ids.end(), otherIds.begin(), otherIds.end()); + ids.append(otherIds.begin(), otherIds.end()); unsigned numReservedEqualities = other.getNumReservedEqualities(); unsigned numReservedInequalities = other.getNumReservedInequalities(); @@ -562,7 +562,7 @@ void FlatAffineConstraints::reset(unsigned numReservedInequalities, ids.resize(numIds, None); } else { ids.reserve(idArgs.size()); - ids.insert(ids.end(), idArgs.begin(), idArgs.end()); + ids.append(idArgs.begin(), idArgs.end()); } } @@ -1817,8 +1817,8 @@ void FlatAffineConstraints::FourierMotzkinEliminate( SmallVector<Optional<Value *>, 8> newIds; newIds.reserve(numIds - 1); - newIds.insert(newIds.end(), ids.begin(), ids.begin() + pos); - newIds.insert(newIds.end(), ids.begin() + pos + 1, ids.end()); + newIds.append(ids.begin(), ids.begin() + pos); + newIds.append(ids.begin() + pos + 1, ids.end()); /// Create the new system which has one identifier less. FlatAffineConstraints newFac( diff --git a/mlir/lib/Analysis/MemRefDependenceCheck.cpp b/mlir/lib/Analysis/MemRefDependenceCheck.cpp index c7bf2abd8d6..043d62d0cc9 100644 --- a/mlir/lib/Analysis/MemRefDependenceCheck.cpp +++ b/mlir/lib/Analysis/MemRefDependenceCheck.cpp @@ -62,33 +62,6 @@ FunctionPass *mlir::createMemRefDependenceCheckPass() { return new MemRefDependenceCheck(); } -// Adds memref access indices 'opIndices' from 'memrefType' to 'access'. -static void addMemRefAccessIndices( - llvm::iterator_range<OperationInst::const_operand_iterator> opIndices, - MemRefType memrefType, MemRefAccess *access) { - access->indices.reserve(memrefType.getRank()); - for (auto *index : opIndices) { - access->indices.push_back(const_cast<mlir::Value *>(index)); - } -} - -// Populates 'access' with memref, indices and opinst from 'loadOrStoreOpInst'. -static void getMemRefAccess(const OperationInst *loadOrStoreOpInst, - MemRefAccess *access) { - access->opInst = loadOrStoreOpInst; - if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) { - access->memref = loadOp->getMemRef(); - addMemRefAccessIndices(loadOp->getIndices(), loadOp->getMemRefType(), - access); - } else { - assert(loadOrStoreOpInst->isa<StoreOp>()); - auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>(); - access->memref = storeOp->getMemRef(); - addMemRefAccessIndices(storeOp->getIndices(), storeOp->getMemRefType(), - access); - } -} - // Returns a result string which represents the direction vector (if there was // a dependence), returns the string "false" otherwise. 
static string @@ -118,12 +91,10 @@ getDirectionVectorStr(bool ret, unsigned numCommonLoops, unsigned loopNestDepth, static void checkDependences(ArrayRef<OperationInst *> loadsAndStores) { for (unsigned i = 0, e = loadsAndStores.size(); i < e; ++i) { auto *srcOpInst = loadsAndStores[i]; - MemRefAccess srcAccess; - getMemRefAccess(srcOpInst, &srcAccess); + MemRefAccess srcAccess(srcOpInst); for (unsigned j = 0; j < e; ++j) { auto *dstOpInst = loadsAndStores[j]; - MemRefAccess dstAccess; - getMemRefAccess(dstOpInst, &dstAccess); + MemRefAccess dstAccess(dstOpInst); unsigned numCommonLoops = getNumCommonSurroundingLoops(*srcOpInst, *dstOpInst); diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp index d94e0967dcd..9d89f04d41d 100644 --- a/mlir/lib/Analysis/Utils.cpp +++ b/mlir/lib/Analysis/Utils.cpp @@ -119,16 +119,12 @@ bool mlir::getMemRefRegion(OperationInst *opInst, unsigned loopDepth, if ((loadOp = opInst->dyn_cast<LoadOp>())) { rank = loadOp->getMemRefType().getRank(); - for (auto *index : loadOp->getIndices()) { - indices.push_back(index); - } + indices.append(loadOp->getIndices().begin(), loadOp->getIndices().end()); region->memref = loadOp->getMemRef(); region->setWrite(false); } else if ((storeOp = opInst->dyn_cast<StoreOp>())) { rank = storeOp->getMemRefType().getRank(); - for (auto *index : storeOp->getIndices()) { - indices.push_back(index); - } + indices.append(storeOp->getIndices().begin(), storeOp->getIndices().end()); region->memref = storeOp->getMemRef(); region->setWrite(true); } else { @@ -442,25 +438,26 @@ ForInst *mlir::insertBackwardComputationSlice(MemRefAccess *srcAccess, return sliceLoopNest; } -void mlir::getMemRefAccess(OperationInst *loadOrStoreOpInst, - MemRefAccess *access) { +// Constructs MemRefAccess populating it with the memref, its indices and +// opinst from 'loadOrStoreOpInst'. 
+MemRefAccess::MemRefAccess(OperationInst *loadOrStoreOpInst) { if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) { - access->memref = loadOp->getMemRef(); - access->opInst = loadOrStoreOpInst; + memref = loadOp->getMemRef(); + opInst = loadOrStoreOpInst; auto loadMemrefType = loadOp->getMemRefType(); - access->indices.reserve(loadMemrefType.getRank()); + indices.reserve(loadMemrefType.getRank()); for (auto *index : loadOp->getIndices()) { - access->indices.push_back(index); + indices.push_back(index); } } else { assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected"); auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>(); - access->opInst = loadOrStoreOpInst; - access->memref = storeOp->getMemRef(); + opInst = loadOrStoreOpInst; + memref = storeOp->getMemRef(); auto storeMemrefType = storeOp->getMemRefType(); - access->indices.reserve(storeMemrefType.getRank()); + indices.reserve(storeMemrefType.getRank()); for (auto *index : storeOp->getIndices()) { - access->indices.push_back(index); + indices.push_back(index); } } } diff --git a/mlir/lib/EDSC/Types.cpp b/mlir/lib/EDSC/Types.cpp index d762e3f732e..30ae5ab00ff 100644 --- a/mlir/lib/EDSC/Types.cpp +++ b/mlir/lib/EDSC/Types.cpp @@ -178,7 +178,7 @@ Stmt ForNest(MutableArrayRef<Bindable> indices, ArrayRef<Expr> lbs, Expr load(Expr m, llvm::ArrayRef<Expr> indices) { SmallVector<Expr, 8> exprs; exprs.push_back(m); - exprs.insert(exprs.end(), indices.begin(), indices.end()); + exprs.append(indices.begin(), indices.end()); return VariadicExpr(ExprKind::Load, exprs); } @@ -186,7 +186,7 @@ Expr store(Expr val, Expr m, llvm::ArrayRef<Expr> indices) { SmallVector<Expr, 8> exprs; exprs.push_back(val); exprs.push_back(m); - exprs.insert(exprs.end(), indices.begin(), indices.end()); + exprs.append(indices.begin(), indices.end()); return VariadicExpr(ExprKind::Store, exprs); } diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp index c90a0d40056..5790b1ad938 100644 --- a/mlir/lib/Parser/Parser.cpp +++ b/mlir/lib/Parser/Parser.cpp @@ -798,7 +798,7 @@ ParseResult TensorLiteralParser::parseList(llvm::SmallVectorImpl<int> &dims) { // Return the sublists' dimensions with 'size' prepended. dims.clear(); dims.push_back(size); - dims.insert(dims.end(), newDims.begin(), newDims.end()); + dims.append(newDims.begin(), newDims.end()); return ParseSuccess; } diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp index 31b59d85e14..2a004492d84 100644 --- a/mlir/lib/Transforms/LoopFusion.cpp +++ b/mlir/lib/Transforms/LoopFusion.cpp @@ -87,16 +87,13 @@ struct FusionCandidate { MemRefAccess srcAccess; // Load or store access within dst loop nest. MemRefAccess dstAccess; + explicit FusionCandidate(OperationInst *src, OperationInst *dst) + : srcAccess(MemRefAccess(src)), dstAccess(MemRefAccess(dst)) {} }; static FusionCandidate buildFusionCandidate(OperationInst *srcStoreOpInst, OperationInst *dstLoadOpInst) { - FusionCandidate candidate; - // Get store access for src loop nest. - getMemRefAccess(srcStoreOpInst, &candidate.srcAccess); - // Get load access for dst loop nest. 
- getMemRefAccess(dstLoadOpInst, &candidate.dstAccess); - return candidate; + return FusionCandidate(srcStoreOpInst, dstLoadOpInst); } namespace { diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp index 085a9c0b0fe..ee66c9b17b1 100644 --- a/mlir/lib/Transforms/LoopTiling.cpp +++ b/mlir/lib/Transforms/LoopTiling.cpp @@ -86,8 +86,8 @@ static void constructTiledIndexSetHyperRect(ArrayRef<ForInst *> origLoops, for (unsigned i = 0; i < width; i++) { auto lbOperands = origLoops[i]->getLowerBoundOperands(); auto ubOperands = origLoops[i]->getUpperBoundOperands(); - SmallVector<Value *, 4> newLbOperands(lbOperands.begin(), lbOperands.end()); - SmallVector<Value *, 4> newUbOperands(ubOperands.begin(), ubOperands.end()); + SmallVector<Value *, 4> newLbOperands(lbOperands); + SmallVector<Value *, 4> newUbOperands(ubOperands); newLoops[i]->setLowerBound(newLbOperands, origLoops[i]->getLowerBoundMap()); newLoops[i]->setUpperBound(newUbOperands, origLoops[i]->getUpperBoundMap()); newLoops[i]->setStep(tileSizes[i]); @@ -121,7 +121,7 @@ static void constructTiledIndexSetHyperRect(ArrayRef<ForInst *> origLoops, // The new upper bound map is the original one with an additional // expression i + tileSize appended. boundExprs.push_back(dim + tileSizes[i]); - boundExprs.insert(boundExprs.end(), origUbMap.getResults().begin(), + boundExprs.append(origUbMap.getResults().begin(), origUbMap.getResults().end()); auto ubMap = b.getAffineMap(origUbMap.getNumInputs() + 1, 0, boundExprs, {}); diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp index 49b33b0596b..adf91b76276 100644 --- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp +++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp @@ -128,9 +128,8 @@ void MemRefDataFlowOpt::visitOperationInst(OperationInst *opInst) { // post-dominance on these. 'fwdingCandidates' are a subset of depSrcStores. SmallVector<OperationInst *, 8> depSrcStores; for (auto *storeOpInst : storeOps) { - MemRefAccess srcAccess, destAccess; - getMemRefAccess(storeOpInst, &srcAccess); - getMemRefAccess(loadOpInst, &destAccess); + MemRefAccess srcAccess(storeOpInst); + MemRefAccess destAccess(loadOpInst); FlatAffineConstraints dependenceConstraints; unsigned nsLoops = getNumCommonSurroundingLoops(*loadOpInst, *storeOpInst); // Dependences at loop depth <= minSurroundingLoops do NOT matter. diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp index 4af9436b44d..cf9da344b82 100644 --- a/mlir/lib/Transforms/Utils/Utils.cpp +++ b/mlir/lib/Transforms/Utils/Utils.cpp @@ -117,7 +117,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef, opInst->getName()); state.operands.reserve(opInst->getNumOperands() + extraIndices.size()); // Insert the non-memref operands. - state.operands.insert(state.operands.end(), opInst->operand_begin(), + state.operands.append(opInst->operand_begin(), opInst->operand_begin() + memRefOperandPos); state.operands.push_back(newMemRef); @@ -138,11 +138,10 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef, // at position memRefOperandPos + 1. 
SmallVector<Value *, 4> remapOperands; remapOperands.reserve(oldMemRefRank + extraOperands.size()); - remapOperands.insert(remapOperands.end(), extraOperands.begin(), - extraOperands.end()); - remapOperands.insert( - remapOperands.end(), opInst->operand_begin() + memRefOperandPos + 1, - opInst->operand_begin() + memRefOperandPos + 1 + oldMemRefRank); + remapOperands.append(extraOperands.begin(), extraOperands.end()); + remapOperands.append(opInst->operand_begin() + memRefOperandPos + 1, + opInst->operand_begin() + memRefOperandPos + 1 + + oldMemRefRank); if (indexRemap) { auto remapOp = builder.create<AffineApplyOp>(opInst->getLoc(), indexRemap, remapOperands); @@ -156,8 +155,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef, } // Insert the remaining operands unmodified. - state.operands.insert(state.operands.end(), - opInst->operand_begin() + memRefOperandPos + 1 + + state.operands.append(opInst->operand_begin() + memRefOperandPos + 1 + oldMemRefRank, opInst->operand_end()); @@ -167,7 +165,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef, state.types.push_back(result->getType()); // Attributes also do not change. - state.attributes.insert(state.attributes.end(), opInst->getAttrs().begin(), + state.attributes.append(opInst->getAttrs().begin(), opInst->getAttrs().end()); // Create the new operation. @@ -206,14 +204,9 @@ mlir::createComposedAffineApplyOp(FuncBuilder *builder, Location loc, } // Compose affine maps from all ancestor AffineApplyOps. // Create new AffineApplyOp from 'valueMap'. - unsigned numOperands = valueMap.getNumOperands(); - SmallVector<Value *, 4> outOperands(numOperands); - for (unsigned i = 0; i < numOperands; ++i) { - outOperands[i] = valueMap.getOperand(i); - } // Create new AffineApplyOp based on 'valueMap'. - auto affineApplyOp = - builder->create<AffineApplyOp>(loc, valueMap.getAffineMap(), outOperands); + auto affineApplyOp = builder->create<AffineApplyOp>( + loc, valueMap.getAffineMap(), valueMap.getOperands()); results->resize(operands.size()); for (unsigned i = 0, e = operands.size(); i < e; ++i) { (*results)[i] = affineApplyOp->getResult(i); @@ -340,13 +333,8 @@ void mlir::forwardSubstitute(OpPointer<AffineApplyOp> affineApplyOp) { valueMap.forwardSubstituteSingle(*affineApplyOp, resultIndex); // Create new AffineApplyOp from 'valueMap'. - unsigned numOperands = valueMap.getNumOperands(); - SmallVector<Value *, 4> operands(numOperands); - for (unsigned i = 0; i < numOperands; ++i) { - operands[i] = valueMap.getOperand(i); - } auto newAffineApplyOp = builder.create<AffineApplyOp>( - useOpInst->getLoc(), valueMap.getAffineMap(), operands); + useOpInst->getLoc(), valueMap.getAffineMap(), valueMap.getOperands()); // Update all uses to use results from 'newAffineApplyOp'. for (unsigned i = 0, e = useOpInst->getNumResults(); i < e; ++i) { |
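
For reference, the AffineExprFlattener comments in the AffineAnalysis.cpp hunk above introduce a local variable q for each mod/floordiv. Written out as math, and only restating the comment's own d0 mod 4 example, the rewrite and the simplification it enables are:

\[
q = d_0\ \mathrm{floordiv}\ 4 \;\Longrightarrow\; d_0 \bmod 4 = d_0 - 4q, \qquad 4q \le d_0 \le 4q + 3
\]
\[
(d_0 - d_0 \bmod 4 + 4) \bmod 4 \;=\; \bigl(d_0 - (d_0 - 4q) + 4\bigr) \bmod 4 \;=\; (4q + 4) \bmod 4 \;=\; 0
\]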