summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Transforms/Utils
diff options
context:
space:
mode:
authorChris Lattner <clattner@google.com>2018-12-27 14:35:10 -0800
committerjpienaar <jpienaar@google.com>2019-03-29 14:40:06 -0700
commit3f190312f8f7f09b5910bc77e80268402732ce6b (patch)
tree1ac0989c889d04e1acb0370952ed3bf1f141e17d /mlir/lib/Transforms/Utils
parent776b035646d809d8b31662363e797f4d7f26c223 (diff)
downloadbcm5719-llvm-3f190312f8f7f09b5910bc77e80268402732ce6b.tar.gz
bcm5719-llvm-3f190312f8f7f09b5910bc77e80268402732ce6b.zip
Merge SSAValue, CFGValue, and MLValue together into a single Value class, which
is the new base of the SSA value hierarchy. This CL also standardizes all the nomenclature and comments to use 'Value' where appropriate. This also eliminates a large number of cast<MLValue>(x)'s, which is very soothing. This is step 11/n towards merging instructions and statements, NFC. PiperOrigin-RevId: 227064624
Diffstat (limited to 'mlir/lib/Transforms/Utils')
-rw-r--r--mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp2
-rw-r--r--mlir/lib/Transforms/Utils/LoopUtils.cpp16
-rw-r--r--mlir/lib/Transforms/Utils/LoweringUtils.cpp28
-rw-r--r--mlir/lib/Transforms/Utils/Utils.cpp39
4 files changed, 40 insertions, 45 deletions
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 0af7e52b5b1..9d955fb6a81 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -217,7 +217,7 @@ void GreedyPatternRewriteDriver::simplifyFunction(Function *currentFunction,
// If we already have a canonicalized version of this constant, just
// reuse it. Otherwise create a new one.
- SSAValue *cstValue;
+ Value *cstValue;
auto it = uniquedConstants.find({resultConstants[i], res->getType()});
if (it != uniquedConstants.end())
cstValue = it->second->getResult(0);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 5a5617f3fb1..e8fc5e7ca14 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -31,7 +31,6 @@
#include "mlir/StandardOps/StandardOps.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Debug.h"
-
#define DEBUG_TYPE "LoopUtils"
using namespace mlir;
@@ -108,8 +107,7 @@ bool mlir::promoteIfSingleIteration(ForStmt *forStmt) {
forStmt->replaceAllUsesWith(constOp);
} else {
const AffineBound lb = forStmt->getLowerBound();
- SmallVector<SSAValue *, 4> lbOperands(lb.operand_begin(),
- lb.operand_end());
+ SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
MLFuncBuilder builder(forStmt->getBlock(), StmtBlock::iterator(forStmt));
auto affineApplyOp = builder.create<AffineApplyOp>(
forStmt->getLoc(), lb.getMap(), lbOperands);
@@ -149,8 +147,8 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Statement *>>>
&stmtGroupQueue,
unsigned offset, ForStmt *srcForStmt, MLFuncBuilder *b) {
- SmallVector<MLValue *, 4> lbOperands(srcForStmt->getLowerBoundOperands());
- SmallVector<MLValue *, 4> ubOperands(srcForStmt->getUpperBoundOperands());
+ SmallVector<Value *, 4> lbOperands(srcForStmt->getLowerBoundOperands());
+ SmallVector<Value *, 4> ubOperands(srcForStmt->getUpperBoundOperands());
assert(lbMap.getNumInputs() == lbOperands.size());
assert(ubMap.getNumInputs() == ubOperands.size());
@@ -176,7 +174,7 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
srcForStmt->getStep() * shift)),
loopChunk)
->getResult(0);
- operandMap[srcForStmt] = cast<MLValue>(ivRemap);
+ operandMap[srcForStmt] = ivRemap;
} else {
operandMap[srcForStmt] = loopChunk;
}
@@ -380,7 +378,7 @@ bool mlir::loopUnrollByFactor(ForStmt *forStmt, uint64_t unrollFactor) {
// Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
if (getLargestDivisorOfTripCount(*forStmt) % unrollFactor != 0) {
- DenseMap<const MLValue *, MLValue *> operandMap;
+ DenseMap<const Value *, Value *> operandMap;
MLFuncBuilder builder(forStmt->getBlock(), ++StmtBlock::iterator(forStmt));
auto *cleanupForStmt = cast<ForStmt>(builder.clone(*forStmt, operandMap));
auto clLbMap = getCleanupLoopLowerBound(*forStmt, unrollFactor, &builder);
@@ -414,7 +412,7 @@ bool mlir::loopUnrollByFactor(ForStmt *forStmt, uint64_t unrollFactor) {
// Unroll the contents of 'forStmt' (append unrollFactor-1 additional copies).
for (unsigned i = 1; i < unrollFactor; i++) {
- DenseMap<const MLValue *, MLValue *> operandMap;
+ DenseMap<const Value *, Value *> operandMap;
// If the induction variable is used, create a remapping to the value for
// this unrolled instance.
@@ -425,7 +423,7 @@ bool mlir::loopUnrollByFactor(ForStmt *forStmt, uint64_t unrollFactor) {
auto *ivUnroll =
builder.create<AffineApplyOp>(forStmt->getLoc(), bumpMap, forStmt)
->getResult(0);
- operandMap[forStmt] = cast<MLValue>(ivUnroll);
+ operandMap[forStmt] = ivUnroll;
}
// Clone the original body of 'forStmt'.
diff --git a/mlir/lib/Transforms/Utils/LoweringUtils.cpp b/mlir/lib/Transforms/Utils/LoweringUtils.cpp
index c8ac881dba7..8457ce4ce28 100644
--- a/mlir/lib/Transforms/Utils/LoweringUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoweringUtils.cpp
@@ -32,17 +32,17 @@ using namespace mlir;
namespace {
// Visit affine expressions recursively and build the sequence of instructions
-// that correspond to it. Visitation functions return an SSAValue of the
+// that correspond to it. Visitation functions return a Value of the
// expression subtree they visited or `nullptr` on error.
class AffineApplyExpander
- : public AffineExprVisitor<AffineApplyExpander, SSAValue *> {
+ : public AffineExprVisitor<AffineApplyExpander, Value *> {
public:
// This internal class expects arguments to be non-null, checks must be
// performed at the call site.
AffineApplyExpander(FuncBuilder *builder, AffineApplyOp *op)
: builder(*builder), applyOp(*op), loc(op->getLoc()) {}
- template <typename OpTy> SSAValue *buildBinaryExpr(AffineBinaryOpExpr expr) {
+ template <typename OpTy> Value *buildBinaryExpr(AffineBinaryOpExpr expr) {
auto lhs = visit(expr.getLHS());
auto rhs = visit(expr.getRHS());
if (!lhs || !rhs)
@@ -51,33 +51,33 @@ public:
return op->getResult();
}
- SSAValue *visitAddExpr(AffineBinaryOpExpr expr) {
+ Value *visitAddExpr(AffineBinaryOpExpr expr) {
return buildBinaryExpr<AddIOp>(expr);
}
- SSAValue *visitMulExpr(AffineBinaryOpExpr expr) {
+ Value *visitMulExpr(AffineBinaryOpExpr expr) {
return buildBinaryExpr<MulIOp>(expr);
}
// TODO(zinenko): implement when the standard operators are made available.
- SSAValue *visitModExpr(AffineBinaryOpExpr) {
+ Value *visitModExpr(AffineBinaryOpExpr) {
builder.getContext()->emitError(loc, "unsupported binary operator: mod");
return nullptr;
}
- SSAValue *visitFloorDivExpr(AffineBinaryOpExpr) {
+ Value *visitFloorDivExpr(AffineBinaryOpExpr) {
builder.getContext()->emitError(loc,
"unsupported binary operator: floor_div");
return nullptr;
}
- SSAValue *visitCeilDivExpr(AffineBinaryOpExpr) {
+ Value *visitCeilDivExpr(AffineBinaryOpExpr) {
builder.getContext()->emitError(loc,
"unsupported binary operator: ceil_div");
return nullptr;
}
- SSAValue *visitConstantExpr(AffineConstantExpr expr) {
+ Value *visitConstantExpr(AffineConstantExpr expr) {
auto valueAttr =
builder.getIntegerAttr(builder.getIndexType(), expr.getValue());
auto op =
@@ -85,7 +85,7 @@ public:
return op->getResult();
}
- SSAValue *visitDimExpr(AffineDimExpr expr) {
+ Value *visitDimExpr(AffineDimExpr expr) {
assert(expr.getPosition() < applyOp.getNumOperands() &&
"affine dim position out of range");
// FIXME: this assumes a certain order of AffineApplyOp operands, the
@@ -93,7 +93,7 @@ public:
return applyOp.getOperand(expr.getPosition());
}
- SSAValue *visitSymbolExpr(AffineSymbolExpr expr) {
+ Value *visitSymbolExpr(AffineSymbolExpr expr) {
// FIXME: this assumes a certain order of AffineApplyOp operands, the
// cleaner interface would be to separate them at the op level.
assert(expr.getPosition() + applyOp.getAffineMap().getNumDims() <
@@ -114,8 +114,8 @@ private:
// Given an affine expression `expr` extracted from `op`, build the sequence of
// primitive instructions that correspond to the affine expression in the
// `builder`.
-static SSAValue *expandAffineExpr(FuncBuilder *builder, AffineExpr expr,
- AffineApplyOp *op) {
+static mlir::Value *expandAffineExpr(FuncBuilder *builder, AffineExpr expr,
+ AffineApplyOp *op) {
auto expander = AffineApplyExpander(builder, op);
return expander.visit(expr);
}
@@ -127,7 +127,7 @@ bool mlir::expandAffineApply(AffineApplyOp *op) {
FuncBuilder builder(op->getOperation());
auto affineMap = op->getAffineMap();
for (auto numberedExpr : llvm::enumerate(affineMap.getResults())) {
- SSAValue *expanded = expandAffineExpr(&builder, numberedExpr.value(), op);
+ Value *expanded = expandAffineExpr(&builder, numberedExpr.value(), op);
if (!expanded)
return true;
op->getResult(numberedExpr.index())->replaceAllUsesWith(expanded);
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 2818e8c2e4f..624a8a758b5 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -31,7 +31,6 @@
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/MathExtras.h"
#include "llvm/ADT/DenseMap.h"
-
using namespace mlir;
/// Return true if this operation dereferences one or more memref's.
@@ -61,13 +60,12 @@ static bool isMemRefDereferencingOp(const Operation &op) {
// extra operands, note that 'indexRemap' would just be applied to the existing
// indices (%i, %j).
//
-// TODO(mlir-team): extend this for SSAValue / CFGFunctions. Can also be easily
+// TODO(mlir-team): extend this for Value / CFGFunctions. Can also be easily
// extended to add additional indices at any position.
-bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
- MLValue *newMemRef,
- ArrayRef<SSAValue *> extraIndices,
+bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
+ ArrayRef<Value *> extraIndices,
AffineMap indexRemap,
- ArrayRef<SSAValue *> extraOperands,
+ ArrayRef<Value *> extraOperands,
const Statement *domStmtFilter) {
unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank();
(void)newMemRefRank; // unused in opt mode
@@ -128,16 +126,15 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
// operation.
assert(extraIndex->getDefiningStmt()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
- assert((cast<MLValue>(extraIndex)->isValidDim() ||
- cast<MLValue>(extraIndex)->isValidSymbol()) &&
+ assert((extraIndex->isValidDim() || extraIndex->isValidSymbol()) &&
"invalid memory op index");
- state.operands.push_back(cast<MLValue>(extraIndex));
+ state.operands.push_back(extraIndex);
}
// Construct new indices as a remap of the old ones if a remapping has been
// provided. The indices of a memref come right after it, i.e.,
// at position memRefOperandPos + 1.
- SmallVector<SSAValue *, 4> remapOperands;
+ SmallVector<Value *, 4> remapOperands;
remapOperands.reserve(oldMemRefRank + extraOperands.size());
remapOperands.insert(remapOperands.end(), extraOperands.begin(),
extraOperands.end());
@@ -149,11 +146,11 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
remapOperands);
// Remapped indices.
for (auto *index : remapOp->getOperation()->getResults())
- state.operands.push_back(cast<MLValue>(index));
+ state.operands.push_back(index);
} else {
// No remapping specified.
for (auto *index : remapOperands)
- state.operands.push_back(cast<MLValue>(index));
+ state.operands.push_back(index);
}
// Insert the remaining operands unmodified.
@@ -191,9 +188,9 @@ bool mlir::replaceAllMemRefUsesWith(const MLValue *oldMemRef,
// composed AffineApplyOp are returned in output parameter 'results'.
OperationStmt *
mlir::createComposedAffineApplyOp(FuncBuilder *builder, Location loc,
- ArrayRef<MLValue *> operands,
+ ArrayRef<Value *> operands,
ArrayRef<OperationStmt *> affineApplyOps,
- SmallVectorImpl<SSAValue *> *results) {
+ SmallVectorImpl<Value *> *results) {
// Create identity map with same number of dimensions as number of operands.
auto map = builder->getMultiDimIdentityMap(operands.size());
// Initialize AffineValueMap with identity map.
@@ -208,7 +205,7 @@ mlir::createComposedAffineApplyOp(FuncBuilder *builder, Location loc,
// Compose affine maps from all ancestor AffineApplyOps.
// Create new AffineApplyOp from 'valueMap'.
unsigned numOperands = valueMap.getNumOperands();
- SmallVector<SSAValue *, 4> outOperands(numOperands);
+ SmallVector<Value *, 4> outOperands(numOperands);
for (unsigned i = 0; i < numOperands; ++i) {
outOperands[i] = valueMap.getOperand(i);
}
@@ -252,7 +249,7 @@ mlir::createComposedAffineApplyOp(FuncBuilder *builder, Location loc,
/// otherwise.
OperationStmt *mlir::createAffineComputationSlice(OperationStmt *opStmt) {
// Collect all operands that are results of affine apply ops.
- SmallVector<MLValue *, 4> subOperands;
+ SmallVector<Value *, 4> subOperands;
subOperands.reserve(opStmt->getNumOperands());
for (auto *operand : opStmt->getOperands()) {
auto *defStmt = operand->getDefiningStmt();
@@ -285,7 +282,7 @@ OperationStmt *mlir::createAffineComputationSlice(OperationStmt *opStmt) {
return nullptr;
FuncBuilder builder(opStmt);
- SmallVector<SSAValue *, 4> results;
+ SmallVector<Value *, 4> results;
auto *affineApplyStmt = createComposedAffineApplyOp(
&builder, opStmt->getLoc(), subOperands, affineApplyOps, &results);
assert(results.size() == subOperands.size() &&
@@ -295,7 +292,7 @@ OperationStmt *mlir::createAffineComputationSlice(OperationStmt *opStmt) {
// affine apply op above instead of existing ones (subOperands). So, they
// differ from opStmt's operands only for those operands in 'subOperands', for
// which they will be replaced by the corresponding one from 'results'.
- SmallVector<MLValue *, 4> newOperands(opStmt->getOperands());
+ SmallVector<Value *, 4> newOperands(opStmt->getOperands());
for (unsigned i = 0, e = newOperands.size(); i < e; i++) {
// Replace the subOperands from among the new operands.
unsigned j, f;
@@ -304,7 +301,7 @@ OperationStmt *mlir::createAffineComputationSlice(OperationStmt *opStmt) {
break;
}
if (j < subOperands.size()) {
- newOperands[i] = cast<MLValue>(results[j]);
+ newOperands[i] = results[j];
}
}
@@ -326,7 +323,7 @@ void mlir::forwardSubstitute(OpPointer<AffineApplyOp> affineApplyOp) {
// into any uses which are AffineApplyOps.
for (unsigned resultIndex = 0, e = opStmt->getNumResults(); resultIndex < e;
++resultIndex) {
- const MLValue *result = opStmt->getResult(resultIndex);
+ const Value *result = opStmt->getResult(resultIndex);
for (auto it = result->use_begin(); it != result->use_end();) {
StmtOperand &use = *(it++);
auto *useStmt = use.getOwner();
@@ -347,7 +344,7 @@ void mlir::forwardSubstitute(OpPointer<AffineApplyOp> affineApplyOp) {
// Create new AffineApplyOp from 'valueMap'.
unsigned numOperands = valueMap.getNumOperands();
- SmallVector<SSAValue *, 4> operands(numOperands);
+ SmallVector<Value *, 4> operands(numOperands);
for (unsigned i = 0; i < numOperands; ++i) {
operands[i] = valueMap.getOperand(i);
}
OpenPOWER on IntegriCloud