summaryrefslogtreecommitdiffstats
path: root/mlir/lib
diff options
context:
space:
mode:
authorUday Bondhugula <bondhugula@google.com>2019-02-22 16:51:08 -0800
committerjpienaar <jpienaar@google.com>2019-03-29 16:39:32 -0700
commitdfe07b7bf6077040cbb2b4392cbd81dc443570b2 (patch)
tree42a76ed1a4f1007a0725c0290a0a17d5b48655d3 /mlir/lib
parentc81b16e27979f319e6f7969ac9ed220bc216c1e0 (diff)
downloadbcm5719-llvm-dfe07b7bf6077040cbb2b4392cbd81dc443570b2.tar.gz
bcm5719-llvm-dfe07b7bf6077040cbb2b4392cbd81dc443570b2.zip
Refactor AffineExprFlattener and move FlatAffineConstraints out of IR into
Analysis - NFC - refactor AffineExprFlattener (-> SimpleAffineExprFlattener) so that it doesn't depend on FlatAffineConstraints, and so that FlatAffineConstraints could be moved out of IR/; the simplification that the IR needs for AffineExpr's doesn't depend on FlatAffineConstraints - have AffineExprFlattener derive from SimpleAffineExprFlattener to use for all Analysis/Transforms purposes; override addLocalFloorDivId in the derived class - turn addAffineForOpDomain into a method on FlatAffineConstraints - turn AffineForOp::getAsValueMap into an AffineValueMap ctor PiperOrigin-RevId: 235283610
Diffstat (limited to 'mlir/lib')
-rw-r--r--mlir/lib/AffineOps/AffineOps.cpp113
-rw-r--r--mlir/lib/Analysis/AffineAnalysis.cpp4
-rw-r--r--mlir/lib/Analysis/AffineStructures.cpp (renamed from mlir/lib/IR/AffineStructures.cpp)220
-rw-r--r--mlir/lib/Analysis/LoopAnalysis.cpp5
-rw-r--r--mlir/lib/Analysis/MemRefBoundCheck.cpp2
-rw-r--r--mlir/lib/Analysis/MemRefDependenceCheck.cpp2
-rw-r--r--mlir/lib/Analysis/Utils.cpp4
-rw-r--r--mlir/lib/IR/AffineExpr.cpp484
-rw-r--r--mlir/lib/Transforms/DmaGeneration.cpp2
-rw-r--r--mlir/lib/Transforms/LoopFusion.cpp2
-rw-r--r--mlir/lib/Transforms/LoopTiling.cpp2
-rw-r--r--mlir/lib/Transforms/SimplifyAffineStructures.cpp2
-rw-r--r--mlir/lib/Transforms/Utils/LoopUtils.cpp2
-rw-r--r--mlir/lib/Transforms/Utils/Utils.cpp2
14 files changed, 419 insertions, 427 deletions
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 0b82df271f5..bfed6f8a645 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -16,7 +16,6 @@
// =============================================================================
#include "mlir/AffineOps/AffineOps.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
@@ -187,12 +186,6 @@ bool AffineApplyOp::verify() const {
return false;
}
-/// Returns an AffineValueMap representing this affine apply.
-AffineValueMap AffineApplyOp::getAsAffineValueMap() {
- SmallVector<Value *, 8> operands(getOperands());
- return AffineValueMap(getAffineMap(), operands, getResult());
-}
-
// The result of the affine apply operation can be used as a dimension id if it
// is a CFG value or if it is an Value, and all the operands are valid
// dimension ids.
@@ -1033,112 +1026,6 @@ void mlir::extractForInductionVars(ArrayRef<OpPointer<AffineForOp>> forInsts,
ivs->push_back(forInst->getInductionVar());
}
-bool mlir::addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp,
- FlatAffineConstraints *constraints) {
- unsigned pos;
- // Pre-condition for this method.
- if (!constraints->findId(*forOp->getInductionVar(), &pos)) {
- assert(0 && "Value not found");
- return false;
- }
-
- if (forOp->getStep() != 1)
- LLVM_DEBUG(llvm::dbgs()
- << "Domain conservative: non-unit stride not handled\n");
-
- int64_t step = forOp->getStep();
-
- // Adds a lower or upper bound when the bounds aren't constant.
- auto addLowerOrUpperBound = [&](bool lower) -> bool {
- auto operands =
- lower ? forOp->getLowerBoundOperands() : forOp->getUpperBoundOperands();
- for (const auto &operand : operands) {
- unsigned pos;
- if (!constraints->findId(*operand, &pos)) {
- if (isValidSymbol(operand)) {
- constraints->addSymbolId(constraints->getNumSymbolIds(),
- const_cast<Value *>(operand));
- pos = constraints->getNumDimAndSymbolIds() - 1;
- // Check if the symbol is a constant.
- if (auto *opInst = operand->getDefiningInst()) {
- if (auto constOp = opInst->dyn_cast<ConstantIndexOp>()) {
- constraints->setIdToConstant(*operand, constOp->getValue());
- }
- }
- } else {
- constraints->addDimId(constraints->getNumDimIds(),
- const_cast<Value *>(operand));
- pos = constraints->getNumDimIds() - 1;
- if (auto loop = getForInductionVarOwner(operand)) {
- // Outer loop IVs could be used in forOp's bounds.
- if (!addAffineForOpDomain(loop, constraints))
- return false;
- }
- }
- }
- }
- // Record positions of the operands in the constraint system.
- SmallVector<unsigned, 8> positions;
- for (const auto &operand : operands) {
- unsigned pos;
- if (!constraints->findId(*operand, &pos))
- assert(0 && "expected to be found");
- positions.push_back(pos);
- }
-
- auto boundMap =
- lower ? forOp->getLowerBoundMap() : forOp->getUpperBoundMap();
-
- FlatAffineConstraints localVarCst;
- std::vector<SmallVector<int64_t, 8>> flatExprs;
- if (!getFlattenedAffineExprs(boundMap, &flatExprs, &localVarCst)) {
- LLVM_DEBUG(llvm::dbgs() << "semi-affine expressions not yet supported\n");
- return false;
- }
- if (localVarCst.getNumLocalIds() > 0) {
- LLVM_DEBUG(llvm::dbgs()
- << "loop bounds with mod/floordiv expr's not yet supported\n");
- return false;
- }
-
- for (const auto &flatExpr : flatExprs) {
- SmallVector<int64_t, 4> ineq(constraints->getNumCols(), 0);
- ineq[pos] = lower ? 1 : -1;
- for (unsigned j = 0, e = boundMap.getNumInputs(); j < e; j++) {
- ineq[positions[j]] = lower ? -flatExpr[j] : flatExpr[j];
- }
- // Constant term.
- ineq[constraints->getNumCols() - 1] =
- lower ? -flatExpr[flatExpr.size() - 1]
- // Upper bound in flattenedExpr is an exclusive one.
- : flatExpr[flatExpr.size() - 1] - step;
- constraints->addInequality(ineq);
- }
- return true;
- };
-
- if (forOp->hasConstantLowerBound()) {
- constraints->addConstantLowerBound(pos, forOp->getConstantLowerBound());
- } else {
- // Non-constant lower bound case.
- if (!addLowerOrUpperBound(/*lower=*/true))
- return false;
- }
-
- if (forOp->hasConstantUpperBound()) {
- constraints->addConstantUpperBound(pos,
- forOp->getConstantUpperBound() - step);
- return true;
- }
- // Non-constant upper bound case.
- return addLowerOrUpperBound(/*lower=*/false);
-}
-
-/// Returns an AffineValueMap representing this bound.
-AffineValueMap AffineBound::getAsAffineValueMap() {
- SmallVector<Value *, 8> operands(getOperands());
- return AffineValueMap(getMap(), operands);
-}
//===----------------------------------------------------------------------===//
// AffineIfOp
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index c60e11dbe0f..a2e679b182d 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -22,9 +22,9 @@
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/AffineOps/AffineOps.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/IR/AffineExprVisitor.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Instruction.h"
@@ -107,7 +107,7 @@ bool mlir::getIndexSet(MutableArrayRef<OpPointer<AffineForOp>> forOps,
domain->reset(forOps.size(), /*numSymbols=*/0, /*numLocals=*/0, indices);
for (auto forOp : forOps) {
// Add constraints from forOp's bounds.
- if (!addAffineForOpDomain(forOp, domain))
+ if (!domain->addAffineForOpDomain(forOp))
return false;
}
return true;
diff --git a/mlir/lib/IR/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index 5114f56bcfc..d1f05be9cc4 100644
--- a/mlir/lib/IR/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -19,7 +19,8 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/IR/AffineStructures.h"
+#include "mlir/Analysis/AffineStructures.h"
+#include "mlir/AffineOps/AffineOps.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinOps.h"
@@ -35,6 +36,115 @@
using namespace mlir;
using namespace llvm;
+namespace {
+
+// See comments for SimpleAffineExprFlattener.
+// An AffineExprFlattener extends a SimpleAffineExprFlattener by recording
+// constraint information associated with mod's, floordiv's, and ceildiv's
+// in localVarCst.
+struct AffineExprFlattener : public SimpleAffineExprFlattener {
+public:
+ // Constraints connecting newly introduced local variables (for mod's and
+ // div's) to existing (dimensional and symbolic) ones. These are always
+ // inequalities.
+ FlatAffineConstraints localVarCst;
+
+ AffineExprFlattener(unsigned nDims, unsigned nSymbols, MLIRContext *ctx)
+ : SimpleAffineExprFlattener(nDims, nSymbols, ctx) {
+ localVarCst.reset(nDims, nSymbols, /*numLocals=*/0);
+ }
+
+private:
+ // Add a local identifier (needed to flatten a mod, floordiv, ceildiv expr).
+ // The local identifier added is always a floordiv of a pure add/mul affine
+ // function of other identifiers, coefficients of which are specified in
+ // dividend and with respect to a positive constant divisor. localExpr is the
+ // simplified tree expression (AffineExpr) corresponding to the quantifier.
+ void addLocalFloorDivId(ArrayRef<int64_t> dividend, int64_t divisor,
+ AffineExpr localExpr) override {
+ SimpleAffineExprFlattener::addLocalFloorDivId(dividend, divisor, localExpr);
+ // Update localVarCst.
+ localVarCst.addLocalFloorDiv(dividend, divisor);
+ }
+};
+
+} // end anonymous namespace
+
+// Flattens the expressions in map. Returns true on success or false
+// if 'expr' was unable to be flattened (i.e., semi-affine expressions not
+// handled yet).
+static bool getFlattenedAffineExprs(
+ ArrayRef<AffineExpr> exprs, unsigned numDims, unsigned numSymbols,
+ std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
+ FlatAffineConstraints *localVarCst) {
+ if (exprs.empty()) {
+ localVarCst->reset(numDims, numSymbols);
+ return true;
+ }
+
+ AffineExprFlattener flattener(numDims, numSymbols, exprs[0].getContext());
+ // Use the same flattener to simplify each expression successively. This way
+ // local identifiers / expressions are shared.
+ for (auto expr : exprs) {
+ if (!expr.isPureAffine())
+ return false;
+
+ flattener.walkPostOrder(expr);
+ }
+
+ assert(flattener.operandExprStack.size() == exprs.size());
+ flattenedExprs->clear();
+ flattenedExprs->assign(flattener.operandExprStack.begin(),
+ flattener.operandExprStack.end());
+
+ if (localVarCst) {
+ localVarCst->clearAndCopyFrom(flattener.localVarCst);
+ }
+
+ return true;
+}
+
+// Flattens 'expr' into 'flattenedExpr'. Returns true on success or false
+// if 'expr' was unable to be flattened (semi-affine expressions not handled
+// yet).
+bool mlir::getFlattenedAffineExpr(AffineExpr expr, unsigned numDims,
+ unsigned numSymbols,
+ llvm::SmallVectorImpl<int64_t> *flattenedExpr,
+ FlatAffineConstraints *localVarCst) {
+ std::vector<SmallVector<int64_t, 8>> flattenedExprs;
+ bool ret = ::getFlattenedAffineExprs({expr}, numDims, numSymbols,
+ &flattenedExprs, localVarCst);
+ *flattenedExpr = flattenedExprs[0];
+ return ret;
+}
+
+/// Flattens the expressions in map. Returns true on success or false
+/// if 'expr' was unable to be flattened (i.e., semi-affine expressions not
+/// handled yet).
+bool mlir::getFlattenedAffineExprs(
+ AffineMap map, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
+ FlatAffineConstraints *localVarCst) {
+ if (map.getNumResults() == 0) {
+ localVarCst->reset(map.getNumDims(), map.getNumSymbols());
+ return true;
+ }
+ return ::getFlattenedAffineExprs(map.getResults(), map.getNumDims(),
+ map.getNumSymbols(), flattenedExprs,
+ localVarCst);
+}
+
+bool mlir::getFlattenedAffineExprs(
+ IntegerSet set, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
+ FlatAffineConstraints *localVarCst) {
+ if (set.getNumConstraints() == 0) {
+ localVarCst->reset(set.getNumDims(), set.getNumSymbols());
+ return true;
+ }
+ return ::getFlattenedAffineExprs(set.getConstraints(), set.getNumDims(),
+ set.getNumSymbols(), flattenedExprs,
+ localVarCst);
+}
+
//===----------------------------------------------------------------------===//
// MutableAffineMap.
//===----------------------------------------------------------------------===//
@@ -105,6 +215,16 @@ AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<Value *> operands,
: map(map), operands(operands.begin(), operands.end()),
results(results.begin(), results.end()) {}
+AffineValueMap::AffineValueMap(OpPointer<AffineApplyOp> applyOp)
+ : map(applyOp->getAffineMap()),
+ operands(applyOp->operand_begin(), applyOp->operand_end()) {
+ results.push_back(applyOp->getResult());
+}
+
+AffineValueMap::AffineValueMap(AffineBound bound)
+ : map(bound.getMap()),
+ operands(bound.operand_begin(), bound.operand_end()) {}
+
void AffineValueMap::reset(AffineMap map, ArrayRef<Value *> operands,
ArrayRef<Value *> results) {
this->map.reset(map);
@@ -461,6 +581,104 @@ bool FlatAffineConstraints::composeMap(AffineValueMap *vMap) {
return true;
}
+bool FlatAffineConstraints::addAffineForOpDomain(
+ ConstOpPointer<AffineForOp> forOp) {
+ unsigned pos;
+ // Pre-condition for this method.
+ if (!findId(*forOp->getInductionVar(), &pos)) {
+ assert(0 && "Value not found");
+ return false;
+ }
+
+ if (forOp->getStep() != 1)
+ LLVM_DEBUG(llvm::dbgs()
+ << "Domain conservative: non-unit stride not handled\n");
+
+ int64_t step = forOp->getStep();
+
+ // Adds a lower or upper bound when the bounds aren't constant.
+ auto addLowerOrUpperBound = [&](bool lower) -> bool {
+ auto operands =
+ lower ? forOp->getLowerBoundOperands() : forOp->getUpperBoundOperands();
+ for (const auto &operand : operands) {
+ unsigned pos;
+ if (!findId(*operand, &pos)) {
+ if (isValidSymbol(operand)) {
+ addSymbolId(getNumSymbolIds(), const_cast<Value *>(operand));
+ pos = getNumDimAndSymbolIds() - 1;
+ // Check if the symbol is a constant.
+ if (auto *opInst = operand->getDefiningInst()) {
+ if (auto constOp = opInst->dyn_cast<ConstantIndexOp>()) {
+ setIdToConstant(*operand, constOp->getValue());
+ }
+ }
+ } else {
+ addDimId(getNumDimIds(), const_cast<Value *>(operand));
+ pos = getNumDimIds() - 1;
+ if (auto loop = getForInductionVarOwner(operand)) {
+ // Outer loop IVs could be used in forOp's bounds.
+ if (!this->addAffineForOpDomain(loop))
+ return false;
+ }
+ }
+ }
+ }
+ // Record positions of the operands in the constraint system.
+ SmallVector<unsigned, 8> positions;
+ for (const auto &operand : operands) {
+ unsigned pos;
+ if (!findId(*operand, &pos))
+ assert(0 && "expected to be found");
+ positions.push_back(pos);
+ }
+
+ auto boundMap =
+ lower ? forOp->getLowerBoundMap() : forOp->getUpperBoundMap();
+
+ FlatAffineConstraints localVarCst;
+ std::vector<SmallVector<int64_t, 8>> flatExprs;
+ if (!getFlattenedAffineExprs(boundMap, &flatExprs, &localVarCst)) {
+ LLVM_DEBUG(llvm::dbgs() << "semi-affine expressions not yet supported\n");
+ return false;
+ }
+ if (localVarCst.getNumLocalIds() > 0) {
+ LLVM_DEBUG(llvm::dbgs()
+ << "loop bounds with mod/floordiv expr's not yet supported\n");
+ return false;
+ }
+
+ for (const auto &flatExpr : flatExprs) {
+ SmallVector<int64_t, 4> ineq(getNumCols(), 0);
+ ineq[pos] = lower ? 1 : -1;
+ for (unsigned j = 0, e = boundMap.getNumInputs(); j < e; j++) {
+ ineq[positions[j]] = lower ? -flatExpr[j] : flatExpr[j];
+ }
+ // Constant term.
+ ineq[getNumCols() - 1] =
+ lower ? -flatExpr[flatExpr.size() - 1]
+ // Upper bound in flattenedExpr is an exclusive one.
+ : flatExpr[flatExpr.size() - 1] - step;
+ addInequality(ineq);
+ }
+ return true;
+ };
+
+ if (forOp->hasConstantLowerBound()) {
+ addConstantLowerBound(pos, forOp->getConstantLowerBound());
+ } else {
+ // Non-constant lower bound case.
+ if (!addLowerOrUpperBound(/*lower=*/true))
+ return false;
+ }
+
+ if (forOp->hasConstantUpperBound()) {
+ addConstantUpperBound(pos, forOp->getConstantUpperBound() - step);
+ return true;
+ }
+ // Non-constant upper bound case.
+ return addLowerOrUpperBound(/*lower=*/false);
+}
+
// Searches for a constraint with a non-zero coefficient at 'colIdx' in
// equality (isEq=true) or inequality (isEq=false) constraints.
// Returns true and sets row found in search in 'rowIdx'.
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 545735fd6fd..c0deb805bdf 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -23,9 +23,9 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/VectorAnalysis.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Instruction.h"
@@ -147,8 +147,7 @@ bool mlir::isAccessInvariant(const Value &iv, const Value &index) {
auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
// We need yet another level of indirection because the `dim` index of the
// access may not correspond to the `dim` index of composeOp.
- return !composeOp->getAsAffineValueMap().isFunctionOf(
- 0, const_cast<Value *>(&iv));
+ return !(AffineValueMap(composeOp).isFunctionOf(0, const_cast<Value *>(&iv)));
}
llvm::DenseSet<const Value *>
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index b86651793f9..8a0cb44f0cc 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -21,9 +21,9 @@
//===----------------------------------------------------------------------===//
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/Passes.h"
#include "mlir/Analysis/Utils.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
diff --git a/mlir/lib/Analysis/MemRefDependenceCheck.cpp b/mlir/lib/Analysis/MemRefDependenceCheck.cpp
index 0b5c9b997a5..93d4fde1fd9 100644
--- a/mlir/lib/Analysis/MemRefDependenceCheck.cpp
+++ b/mlir/lib/Analysis/MemRefDependenceCheck.cpp
@@ -20,9 +20,9 @@
//===----------------------------------------------------------------------===//
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/Passes.h"
#include "mlir/Analysis/Utils.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 4e176f63503..9947f1621b5 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -24,7 +24,7 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
-#include "mlir/IR/AffineStructures.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/StandardOps/StandardOps.h"
@@ -185,7 +185,7 @@ bool MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
// bounds expressions involve outer loops or other symbols.
// TODO(bondhugula): rewrite this to use getInstIndexSet; this way
// conditionals will be handled when the latter supports it.
- if (!addAffineForOpDomain(loop, &cst))
+ if (!cst.addAffineForOpDomain(loop))
return false;
} else {
// Has to be a valid symbol.
diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp
index 5cfb1461590..9081183cb3a 100644
--- a/mlir/lib/IR/AffineExpr.cpp
+++ b/mlir/lib/IR/AffineExpr.cpp
@@ -19,7 +19,6 @@
#include "AffineExprDetail.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/STLExtras.h"
@@ -336,126 +335,39 @@ AffineExpr mlir::toAffineExpr(ArrayRef<int64_t> eq, unsigned numDims,
return expr;
}
-namespace {
-
-// This class is used to flatten a pure affine expression (AffineExpr,
-// which is in a tree form) into a sum of products (w.r.t constants) when
-// possible, and in that process simplifying the expression. For a modulo,
-// floordiv, or a ceildiv expression, an additional identifier, called a local
-// identifier, is introduced to rewrite the expression as a sum of product
-// affine expression. Each local identifier is always and by construction a
-// floordiv of a pure add/mul affine function of dimensional, symbolic, and
-// other local identifiers, in a non-mutually recursive way. Hence, every local
-// identifier can ultimately always be recovered as an affine function of
-// dimensional and symbolic identifiers (involving floordiv's); note however
-// that by AffineExpr construction, some floordiv combinations are converted to
-// mod's. The result of the flattening is a flattened expression and a set of
-// constraints involving just the local variables.
-//
-// d2 + (d0 + d1) floordiv 4 is flattened to d2 + q where 'q' is the local
-// variable introduced, with localVarCst containing 4*q <= d0 + d1 <= 4*q + 3.
-//
-// The simplification performed includes the accumulation of contributions for
-// each dimensional and symbolic identifier together, the simplification of
-// floordiv/ceildiv/mod expressions and other simplifications that in turn
-// happen as a result. A simplification that this flattening naturally performs
-// is of simplifying the numerator and denominator of floordiv/ceildiv, and
-// folding a modulo expression to a zero, if possible. Three examples are below:
-//
- // (((d0 + 3 * d1) + d0) - 2 * d1) - d0 simplified to d0 + d1
-// (d0 - d0 mod 4 + 4) mod 4 simplified to 0
-// (3*d0 + 2*d1 + d0) floordiv 2 + d1 simplified to 2*d0 + 2*d1
-//
-// The way the flattening works for the second example is as follows: d0 % 4 is
-// replaced by d0 - 4*q with q being introduced: the expression then simplifies
-// to: (d0 - (d0 - 4q) + 4) = 4q + 4, modulo of which w.r.t 4 simplifies to
-// zero. Note that an affine expression may not always be expressible purely as
-// a sum of products involving just the original dimensional and symbolic
-// identifiers due to the presence of modulo/floordiv/ceildiv expressions that
-// may not be eliminated after simplification; in such cases, the final
-// expression can be reconstructed by replacing the local identifiers with their
-// corresponding explicit form stored in 'localExprs' (note that each of the
-// explicit forms itself would have been simplified).
-//
-// The expression walk method here performs a linear time post order walk that
-// performs the above simplifications through visit methods, with partial
-// results being stored in 'operandExprStack'. When a parent expr is visited,
-// the flattened expressions corresponding to its two operands would already be
-// on the stack - the parent expression looks at the two flattened expressions
-// and combines the two. It pops off the operand expressions and pushes the
-// combined result (although this is done in-place on its LHS operand expr).
-// When the walk is completed, the flattened form of the top-level expression
-// would be left on the stack.
-//
-// A flattener can be repeatedly used for multiple affine expressions that bind
-// to the same operands, for example, for all result expressions of an
-// AffineMap or AffineValueMap. In such cases, using it for multiple expressions
-// is more efficient than creating a new flattener for each expression since
- // common identical div and mod expressions appearing across different
-// expressions are mapped to the same local identifier (same column position in
-// 'localVarCst').
-struct AffineExprFlattener : public AffineExprVisitor<AffineExprFlattener> {
-public:
- // Flattened expression layout: [dims, symbols, locals, constant]
- // Stack that holds the LHS and RHS operands while visiting a binary op expr.
- // In future, consider adding a prepass to determine how big the SmallVector's
- // will be, and linearize this to std::vector<int64_t> to prevent
- // SmallVector moves on re-allocation.
- std::vector<SmallVector<int64_t, 8>> operandExprStack;
- // Constraints connecting newly introduced local variables (for mod's and
- // div's) to existing (dimensional and symbolic) ones. These are always
- // inequalities.
- FlatAffineConstraints localVarCst;
-
- unsigned numDims;
- unsigned numSymbols;
- // Number of newly introduced identifiers to flatten mod/floordiv/ceildiv
- // expressions that could not be simplified.
- unsigned numLocals;
- // AffineExpr's corresponding to the floordiv/ceildiv/mod expressions for
- // which new identifiers were introduced; if the latter do not get canceled
- // out, these expressions can be readily used to reconstruct the AffineExpr
- // (tree) form. Note that these expressions themselves would have been
- // simplified (recursively) by this pass. Eg. d0 + (d0 + 2*d1 + d0) ceildiv 4
- // will be simplified to d0 + q, where q = (d0 + d1) ceildiv 2. (d0 + d1)
- // ceildiv 2 would be the local expression stored for q.
- SmallVector<AffineExpr, 4> localExprs;
- MLIRContext *context;
-
- AffineExprFlattener(unsigned numDims, unsigned numSymbols,
- MLIRContext *context)
- : numDims(numDims), numSymbols(numSymbols), numLocals(0),
- context(context) {
- operandExprStack.reserve(8);
- localVarCst.reset(numDims, numSymbols, numLocals);
- }
+SimpleAffineExprFlattener::SimpleAffineExprFlattener(unsigned numDims,
+ unsigned numSymbols,
+ MLIRContext *context)
+ : numDims(numDims), numSymbols(numSymbols), numLocals(0), context(context) {
+ operandExprStack.reserve(8);
+}
- void visitMulExpr(AffineBinaryOpExpr expr) {
- assert(operandExprStack.size() >= 2);
- // This is a pure affine expr; the RHS will be a constant.
- assert(expr.getRHS().isa<AffineConstantExpr>());
- // Get the RHS constant.
- auto rhsConst = operandExprStack.back()[getConstantIndex()];
- operandExprStack.pop_back();
- // Update the LHS in place instead of pop and push.
- auto &lhs = operandExprStack.back();
- for (unsigned i = 0, e = lhs.size(); i < e; i++) {
- lhs[i] *= rhsConst;
- }
+void SimpleAffineExprFlattener::visitMulExpr(AffineBinaryOpExpr expr) {
+ assert(operandExprStack.size() >= 2);
+ // This is a pure affine expr; the RHS will be a constant.
+ assert(expr.getRHS().isa<AffineConstantExpr>());
+ // Get the RHS constant.
+ auto rhsConst = operandExprStack.back()[getConstantIndex()];
+ operandExprStack.pop_back();
+ // Update the LHS in place instead of pop and push.
+ auto &lhs = operandExprStack.back();
+ for (unsigned i = 0, e = lhs.size(); i < e; i++) {
+ lhs[i] *= rhsConst;
}
+}
- void visitAddExpr(AffineBinaryOpExpr expr) {
- assert(operandExprStack.size() >= 2);
- const auto &rhs = operandExprStack.back();
- auto &lhs = operandExprStack[operandExprStack.size() - 2];
- assert(lhs.size() == rhs.size());
- // Update the LHS in place.
- for (unsigned i = 0, e = rhs.size(); i < e; i++) {
- lhs[i] += rhs[i];
- }
- // Pop off the RHS.
- operandExprStack.pop_back();
+void SimpleAffineExprFlattener::visitAddExpr(AffineBinaryOpExpr expr) {
+ assert(operandExprStack.size() >= 2);
+ const auto &rhs = operandExprStack.back();
+ auto &lhs = operandExprStack[operandExprStack.size() - 2];
+ assert(lhs.size() == rhs.size());
+ // Update the LHS in place.
+ for (unsigned i = 0, e = rhs.size(); i < e; i++) {
+ lhs[i] += rhs[i];
}
+ // Pop off the RHS.
+ operandExprStack.pop_back();
+}
//
// t = expr mod c <=> t = expr - c*q and c*q <= expr <= c*q + c - 1
@@ -463,86 +375,85 @@ public:
// A mod expression "expr mod c" is thus flattened by introducing a new local
// variable q (= expr floordiv c), such that expr mod c is replaced with
// 'expr - c * q' and c * q <= expr <= c * q + c - 1 are added to localVarCst.
- void visitModExpr(AffineBinaryOpExpr expr) {
- assert(operandExprStack.size() >= 2);
- // This is a pure affine expr; the RHS will be a constant.
- assert(expr.getRHS().isa<AffineConstantExpr>());
- auto rhsConst = operandExprStack.back()[getConstantIndex()];
- operandExprStack.pop_back();
- auto &lhs = operandExprStack.back();
- // TODO(bondhugula): handle modulo by zero case when this issue is fixed
- // at the other places in the IR.
- assert(rhsConst > 0 && "RHS constant has to be positive");
-
- // Check if the LHS expression is a multiple of modulo factor.
- unsigned i, e;
- for (i = 0, e = lhs.size(); i < e; i++)
- if (lhs[i] % rhsConst != 0)
- break;
- // If yes, modulo expression here simplifies to zero.
- if (i == lhs.size()) {
- std::fill(lhs.begin(), lhs.end(), 0);
- return;
- }
-
- // Add a local variable for the quotient, i.e., expr % c is replaced by
- // (expr - q * c) where q = expr floordiv c. Do this while canceling out
- // the GCD of expr and c.
- SmallVector<int64_t, 8> floorDividend(lhs);
- uint64_t gcd = rhsConst;
- for (unsigned i = 0, e = lhs.size(); i < e; i++)
- gcd = llvm::GreatestCommonDivisor64(gcd, std::abs(lhs[i]));
- // Simplify the numerator and the denominator.
- if (gcd != 1) {
- for (unsigned i = 0, e = floorDividend.size(); i < e; i++)
- floorDividend[i] = floorDividend[i] / static_cast<int64_t>(gcd);
- }
- int64_t floorDivisor = rhsConst / static_cast<int64_t>(gcd);
-
- // Construct the AffineExpr form of the floordiv to store in localExprs.
- auto dividendExpr =
- toAffineExpr(floorDividend, numDims, numSymbols, localExprs, context);
- auto divisorExpr = getAffineConstantExpr(floorDivisor, context);
- auto floorDivExpr = dividendExpr.floorDiv(divisorExpr);
- int loc;
- if ((loc = findLocalId(floorDivExpr)) == -1) {
- addLocalFloorDivId(floorDividend, floorDivisor, floorDivExpr);
- // Set result at top of stack to "lhs - rhsConst * q".
- lhs[getLocalVarStartIndex() + numLocals - 1] = -rhsConst;
- } else {
- // Reuse the existing local id.
- lhs[getLocalVarStartIndex() + loc] = -rhsConst;
- }
+void SimpleAffineExprFlattener::visitModExpr(AffineBinaryOpExpr expr) {
+ assert(operandExprStack.size() >= 2);
+ // This is a pure affine expr; the RHS will be a constant.
+ assert(expr.getRHS().isa<AffineConstantExpr>());
+ auto rhsConst = operandExprStack.back()[getConstantIndex()];
+ operandExprStack.pop_back();
+ auto &lhs = operandExprStack.back();
+ // TODO(bondhugula): handle modulo by zero case when this issue is fixed
+ // at the other places in the IR.
+ assert(rhsConst > 0 && "RHS constant has to be positive");
+
+ // Check if the LHS expression is a multiple of modulo factor.
+ unsigned i, e;
+ for (i = 0, e = lhs.size(); i < e; i++)
+ if (lhs[i] % rhsConst != 0)
+ break;
+ // If yes, modulo expression here simplifies to zero.
+ if (i == lhs.size()) {
+ std::fill(lhs.begin(), lhs.end(), 0);
+ return;
}
- void visitCeilDivExpr(AffineBinaryOpExpr expr) {
- visitDivExpr(expr, /*isCeil=*/true);
- }
- void visitFloorDivExpr(AffineBinaryOpExpr expr) {
- visitDivExpr(expr, /*isCeil=*/false);
+ // Add a local variable for the quotient, i.e., expr % c is replaced by
+ // (expr - q * c) where q = expr floordiv c. Do this while canceling out
+ // the GCD of expr and c.
+ SmallVector<int64_t, 8> floorDividend(lhs);
+ uint64_t gcd = rhsConst;
+ for (unsigned i = 0, e = lhs.size(); i < e; i++)
+ gcd = llvm::GreatestCommonDivisor64(gcd, std::abs(lhs[i]));
+ // Simplify the numerator and the denominator.
+ if (gcd != 1) {
+ for (unsigned i = 0, e = floorDividend.size(); i < e; i++)
+ floorDividend[i] = floorDividend[i] / static_cast<int64_t>(gcd);
}
+ int64_t floorDivisor = rhsConst / static_cast<int64_t>(gcd);
- void visitDimExpr(AffineDimExpr expr) {
- operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
- auto &eq = operandExprStack.back();
- assert(expr.getPosition() < numDims && "Inconsistent number of dims");
- eq[getDimStartIndex() + expr.getPosition()] = 1;
+ // Construct the AffineExpr form of the floordiv to store in localExprs.
+ auto dividendExpr =
+ toAffineExpr(floorDividend, numDims, numSymbols, localExprs, context);
+ auto divisorExpr = getAffineConstantExpr(floorDivisor, context);
+ auto floorDivExpr = dividendExpr.floorDiv(divisorExpr);
+ int loc;
+ if ((loc = findLocalId(floorDivExpr)) == -1) {
+ addLocalFloorDivId(floorDividend, floorDivisor, floorDivExpr);
+ // Set result at top of stack to "lhs - rhsConst * q".
+ lhs[getLocalVarStartIndex() + numLocals - 1] = -rhsConst;
+ } else {
+ // Reuse the existing local id.
+ lhs[getLocalVarStartIndex() + loc] = -rhsConst;
}
+}
- void visitSymbolExpr(AffineSymbolExpr expr) {
- operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
- auto &eq = operandExprStack.back();
- assert(expr.getPosition() < numSymbols && "inconsistent number of symbols");
- eq[getSymbolStartIndex() + expr.getPosition()] = 1;
- }
+void SimpleAffineExprFlattener::visitCeilDivExpr(AffineBinaryOpExpr expr) {
+ visitDivExpr(expr, /*isCeil=*/true);
+}
+void SimpleAffineExprFlattener::visitFloorDivExpr(AffineBinaryOpExpr expr) {
+ visitDivExpr(expr, /*isCeil=*/false);
+}
- void visitConstantExpr(AffineConstantExpr expr) {
- operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
- auto &eq = operandExprStack.back();
- eq[getConstantIndex()] = expr.getValue();
- }
+void SimpleAffineExprFlattener::visitDimExpr(AffineDimExpr expr) {
+ operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
+ auto &eq = operandExprStack.back();
+ assert(expr.getPosition() < numDims && "Inconsistent number of dims");
+ eq[getDimStartIndex() + expr.getPosition()] = 1;
+}
+
+void SimpleAffineExprFlattener::visitSymbolExpr(AffineSymbolExpr expr) {
+ operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
+ auto &eq = operandExprStack.back();
+ assert(expr.getPosition() < numSymbols && "inconsistent number of symbols");
+ eq[getSymbolStartIndex() + expr.getPosition()] = 1;
+}
+
+void SimpleAffineExprFlattener::visitConstantExpr(AffineConstantExpr expr) {
+ operandExprStack.emplace_back(SmallVector<int64_t, 32>(getNumCols(), 0));
+ auto &eq = operandExprStack.back();
+ eq[getConstantIndex()] = expr.getValue();
+}
-private:
// t = expr floordiv c <=> t = q, c * q <= expr <= c * q + c - 1
// A floordiv is thus flattened by introducing a new local variable q, and
// replacing that expression with 'q' while adding the constraints
@@ -551,97 +462,86 @@ private:
//
// A ceildiv is similarly flattened:
// t = expr ceildiv c <=> t = (expr + c - 1) floordiv c
- void visitDivExpr(AffineBinaryOpExpr expr, bool isCeil) {
- assert(operandExprStack.size() >= 2);
- assert(expr.getRHS().isa<AffineConstantExpr>());
-
- // This is a pure affine expr; the RHS is a positive constant.
- int64_t rhsConst = operandExprStack.back()[getConstantIndex()];
- // TODO(bondhugula): handle division by zero at the same time the issue is
- // fixed at other places.
- assert(rhsConst > 0 && "RHS constant has to be positive");
- operandExprStack.pop_back();
- auto &lhs = operandExprStack.back();
-
- // Simplify the floordiv, ceildiv if possible by canceling out the greatest
- // common divisors of the numerator and denominator.
- uint64_t gcd = std::abs(rhsConst);
+void SimpleAffineExprFlattener::visitDivExpr(AffineBinaryOpExpr expr,
+ bool isCeil) {
+ assert(operandExprStack.size() >= 2);
+ assert(expr.getRHS().isa<AffineConstantExpr>());
+
+ // This is a pure affine expr; the RHS is a positive constant.
+ int64_t rhsConst = operandExprStack.back()[getConstantIndex()];
+  // TODO(bondhugula): handle division by zero at the same time the issue is
+  // fixed in other places.
+ assert(rhsConst > 0 && "RHS constant has to be positive");
+ operandExprStack.pop_back();
+ auto &lhs = operandExprStack.back();
+
+ // Simplify the floordiv, ceildiv if possible by canceling out the greatest
+ // common divisors of the numerator and denominator.
+ uint64_t gcd = std::abs(rhsConst);
+ for (unsigned i = 0, e = lhs.size(); i < e; i++)
+ gcd = llvm::GreatestCommonDivisor64(gcd, std::abs(lhs[i]));
+ // Simplify the numerator and the denominator.
+ if (gcd != 1) {
for (unsigned i = 0, e = lhs.size(); i < e; i++)
- gcd = llvm::GreatestCommonDivisor64(gcd, std::abs(lhs[i]));
- // Simplify the numerator and the denominator.
- if (gcd != 1) {
- for (unsigned i = 0, e = lhs.size(); i < e; i++)
- lhs[i] = lhs[i] / static_cast<int64_t>(gcd);
- }
- int64_t divisor = rhsConst / static_cast<int64_t>(gcd);
- // If the divisor becomes 1, the updated LHS is the result. (The
- // divisor can't be negative since rhsConst is positive).
- if (divisor == 1)
- return;
-
- // If the divisor cannot be simplified to one, we will have to retain
- // the ceil/floor expr (simplified up until here). Add an existential
- // quantifier to express its result, i.e., expr1 div expr2 is replaced
- // by a new identifier, q.
- auto a = toAffineExpr(lhs, numDims, numSymbols, localExprs, context);
- auto b = getAffineConstantExpr(divisor, context);
-
- int loc;
- auto divExpr = isCeil ? a.ceilDiv(b) : a.floorDiv(b);
- if ((loc = findLocalId(divExpr)) == -1) {
- if (!isCeil) {
- SmallVector<int64_t, 8> dividend(lhs);
- addLocalFloorDivId(dividend, divisor, divExpr);
- } else {
- // lhs ceildiv c <=> (lhs + c - 1) floordiv c
- SmallVector<int64_t, 8> dividend(lhs);
- dividend.back() += divisor - 1;
- addLocalFloorDivId(dividend, divisor, divExpr);
- }
+ lhs[i] = lhs[i] / static_cast<int64_t>(gcd);
+ }
+ int64_t divisor = rhsConst / static_cast<int64_t>(gcd);
+ // If the divisor becomes 1, the updated LHS is the result. (The
+ // divisor can't be negative since rhsConst is positive).
+ if (divisor == 1)
+ return;
+
+ // If the divisor cannot be simplified to one, we will have to retain
+ // the ceil/floor expr (simplified up until here). Add an existential
+ // quantifier to express its result, i.e., expr1 div expr2 is replaced
+ // by a new identifier, q.
+ auto a = toAffineExpr(lhs, numDims, numSymbols, localExprs, context);
+ auto b = getAffineConstantExpr(divisor, context);
+
+ int loc;
+ auto divExpr = isCeil ? a.ceilDiv(b) : a.floorDiv(b);
+ if ((loc = findLocalId(divExpr)) == -1) {
+ if (!isCeil) {
+ SmallVector<int64_t, 8> dividend(lhs);
+ addLocalFloorDivId(dividend, divisor, divExpr);
+ } else {
+ // lhs ceildiv c <=> (lhs + c - 1) floordiv c
+ SmallVector<int64_t, 8> dividend(lhs);
+ dividend.back() += divisor - 1;
+ addLocalFloorDivId(dividend, divisor, divExpr);
}
- // Set the expression on stack to the local var introduced to capture the
- // result of the division (floor or ceil).
- std::fill(lhs.begin(), lhs.end(), 0);
- if (loc == -1)
- lhs[getLocalVarStartIndex() + numLocals - 1] = 1;
- else
- lhs[getLocalVarStartIndex() + loc] = 1;
}
+ // Set the expression on stack to the local var introduced to capture the
+ // result of the division (floor or ceil).
+ std::fill(lhs.begin(), lhs.end(), 0);
+ if (loc == -1)
+ lhs[getLocalVarStartIndex() + numLocals - 1] = 1;
+ else
+ lhs[getLocalVarStartIndex() + loc] = 1;
+}
// Add a local identifier (needed to flatten a mod, floordiv, ceildiv expr).
// The local identifier added is always a floordiv of a pure add/mul affine
// function of other identifiers, coefficients of which are specified in
// dividend and with respect to a positive constant divisor. localExpr is the
// simplified tree expression (AffineExpr) corresponding to the quantifier.
- void addLocalFloorDivId(ArrayRef<int64_t> dividend, int64_t divisor,
- AffineExpr localExpr) {
- assert(divisor > 0 && "positive constant divisor expected");
- for (auto &subExpr : operandExprStack)
- subExpr.insert(subExpr.begin() + getLocalVarStartIndex() + numLocals, 0);
- localExprs.push_back(localExpr);
- numLocals++;
- // Update localVarCst.
- localVarCst.addLocalFloorDiv(dividend, divisor);
- }
-
- int findLocalId(AffineExpr localExpr) {
- SmallVectorImpl<AffineExpr>::iterator it;
- if ((it = std::find(localExprs.begin(), localExprs.end(), localExpr)) ==
- localExprs.end())
- return -1;
- return it - localExprs.begin();
- }
-
- inline unsigned getNumCols() const {
- return numDims + numSymbols + numLocals + 1;
- }
- inline unsigned getConstantIndex() const { return getNumCols() - 1; }
- inline unsigned getLocalVarStartIndex() const { return numDims + numSymbols; }
- inline unsigned getSymbolStartIndex() const { return numDims; }
- inline unsigned getDimStartIndex() const { return 0; }
-};
+void SimpleAffineExprFlattener::addLocalFloorDivId(ArrayRef<int64_t> dividend,
+ int64_t divisor,
+ AffineExpr localExpr) {
+ assert(divisor > 0 && "positive constant divisor expected");
+ for (auto &subExpr : operandExprStack)
+ subExpr.insert(subExpr.begin() + getLocalVarStartIndex() + numLocals, 0);
+ localExprs.push_back(localExpr);
+ numLocals++;
+  // dividend and divisor are ignored; overrides of this method use them.
+}
-} // end anonymous namespace
+int SimpleAffineExprFlattener::findLocalId(AffineExpr localExpr) {
+ SmallVectorImpl<AffineExpr>::iterator it;
+ if ((it = llvm::find(localExprs, localExpr)) == localExprs.end())
+ return -1;
+ return it - localExprs.begin();
+}
/// Simplify the affine expression by flattening it and reconstructing it.
AffineExpr mlir::simplifyAffineExpr(AffineExpr expr, unsigned numDims,
@@ -651,7 +551,7 @@ AffineExpr mlir::simplifyAffineExpr(AffineExpr expr, unsigned numDims,
if (!expr.isPureAffine())
return expr;
- AffineExprFlattener flattener(numDims, numSymbols, expr.getContext());
+ SimpleAffineExprFlattener flattener(numDims, numSymbols, expr.getContext());
flattener.walkPostOrder(expr);
ArrayRef<int64_t> flattenedExpr = flattener.operandExprStack.back();
auto simplifiedExpr = toAffineExpr(flattenedExpr, numDims, numSymbols,
@@ -667,17 +567,13 @@ AffineExpr mlir::simplifyAffineExpr(AffineExpr expr, unsigned numDims,
// handled yet).
static bool getFlattenedAffineExprs(
ArrayRef<AffineExpr> exprs, unsigned numDims, unsigned numSymbols,
- std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
- FlatAffineConstraints *localVarCst) {
+ std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs) {
if (exprs.empty()) {
- localVarCst->reset(numDims, numSymbols);
return true;
}
- flattenedExprs->clear();
- flattenedExprs->reserve(exprs.size());
-
- AffineExprFlattener flattener(numDims, numSymbols, exprs[0].getContext());
+ SimpleAffineExprFlattener flattener(numDims, numSymbols,
+ exprs[0].getContext());
// Use the same flattener to simplify each expression successively. This way
// local identifiers / expressions are shared.
for (auto expr : exprs) {
@@ -687,12 +583,10 @@ static bool getFlattenedAffineExprs(
flattener.walkPostOrder(expr);
}
+ flattenedExprs->clear();
assert(flattener.operandExprStack.size() == exprs.size());
- flattenedExprs->insert(flattenedExprs->end(),
- flattener.operandExprStack.begin(),
+ flattenedExprs->assign(flattener.operandExprStack.begin(),
flattener.operandExprStack.end());
- if (localVarCst)
- localVarCst->clearAndCopyFrom(flattener.localVarCst);
return true;
}
@@ -700,13 +594,12 @@ static bool getFlattenedAffineExprs(
// Flattens 'expr' into 'flattenedExpr'. Returns true on success or false
// if 'expr' was unable to be flattened (semi-affine expressions not handled
// yet).
-bool mlir::getFlattenedAffineExpr(AffineExpr expr, unsigned numDims,
- unsigned numSymbols,
- llvm::SmallVectorImpl<int64_t> *flattenedExpr,
- FlatAffineConstraints *localVarCst) {
+bool mlir::getFlattenedAffineExpr(
+ AffineExpr expr, unsigned numDims, unsigned numSymbols,
+ llvm::SmallVectorImpl<int64_t> *flattenedExpr) {
std::vector<SmallVector<int64_t, 8>> flattenedExprs;
- bool ret = ::getFlattenedAffineExprs({expr}, numDims, numSymbols,
- &flattenedExprs, localVarCst);
+ bool ret =
+ ::getFlattenedAffineExprs({expr}, numDims, numSymbols, &flattenedExprs);
*flattenedExpr = flattenedExprs[0];
return ret;
}
@@ -715,25 +608,20 @@ bool mlir::getFlattenedAffineExpr(AffineExpr expr, unsigned numDims,
/// if 'expr' was unable to be flattened (i.e., semi-affine expressions not
/// handled yet).
bool mlir::getFlattenedAffineExprs(
- AffineMap map, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
- FlatAffineConstraints *localVarCst) {
+ AffineMap map, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs) {
if (map.getNumResults() == 0) {
- localVarCst->reset(map.getNumDims(), map.getNumSymbols());
return true;
}
return ::getFlattenedAffineExprs(map.getResults(), map.getNumDims(),
- map.getNumSymbols(), flattenedExprs,
- localVarCst);
+ map.getNumSymbols(), flattenedExprs);
}
bool mlir::getFlattenedAffineExprs(
- IntegerSet set, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
- FlatAffineConstraints *localVarCst) {
+ IntegerSet set,
+ std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs) {
if (set.getNumConstraints() == 0) {
- localVarCst->reset(set.getNumDims(), set.getNumSymbols());
return true;
}
return ::getFlattenedAffineExprs(set.getConstraints(), set.getNumDims(),
- set.getNumSymbols(), flattenedExprs,
- localVarCst);
+ set.getNumSymbols(), flattenedExprs);
}
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 5083bc4d586..4fb6f34ed53 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -22,8 +22,8 @@
//===----------------------------------------------------------------------===//
#include "mlir/AffineOps/AffineOps.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/Utils.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 5dbefb875da..677710b00a6 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -21,11 +21,11 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 2253d1d354a..240b2b6d9b6 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -21,8 +21,8 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
diff --git a/mlir/lib/Transforms/SimplifyAffineStructures.cpp b/mlir/lib/Transforms/SimplifyAffineStructures.cpp
index 4ddfd9f06fb..d0fdcb5527f 100644
--- a/mlir/lib/Transforms/SimplifyAffineStructures.cpp
+++ b/mlir/lib/Transforms/SimplifyAffineStructures.cpp
@@ -19,7 +19,7 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/IR/AffineStructures.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Instruction.h"
#include "mlir/IR/IntegerSet.h"
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 6b1a0be3bd3..2a7738924a7 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -23,10 +23,10 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 519885b3a50..80dc49f1aab 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -24,9 +24,9 @@
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
+#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/Dominance.h"
#include "mlir/Analysis/Utils.h"
-#include "mlir/IR/AffineStructures.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Module.h"
#include "mlir/StandardOps/StandardOps.h"
OpenPOWER on IntegriCloud