Diffstat (limited to 'mlir/lib')
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp                     | 14
-rw-r--r--  mlir/lib/Analysis/AffineStructures.cpp                   |  3
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp                       | 12
-rw-r--r--  mlir/lib/EDSC/MLIREmitter.cpp                            |  6
-rw-r--r--  mlir/lib/IR/BuiltinOps.cpp                               | 20
-rw-r--r--  mlir/lib/Transforms/ComposeAffineMaps.cpp                |  9
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp                    |  3
-rw-r--r--  mlir/lib/Transforms/LoopUnrollAndJam.cpp                 |  6
-rw-r--r--  mlir/lib/Transforms/LowerAffine.cpp                      | 13
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp               |  7
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp             |  5
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp                  | 18
-rw-r--r--  mlir/lib/Transforms/Utils/Utils.cpp                      |  4
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp |  4
14 files changed, 40 insertions, 84 deletions
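Every hunk below follows the same theme: affine_apply is restricted to a single result, so call sites stop indexing into a result list and the op can stand in for its one value. The following is a minimal, self-contained C++ sketch of that calling-convention shift; ToyValue, MultiResultOp, and SingleResultOp are hypothetical stand-ins, not the actual MLIR classes.

    // Toy illustration only; these are made-up stand-ins for mlir::Value and a
    // single-result op wrapper, not the real MLIR API.
    #include <cassert>
    #include <vector>

    struct ToyValue {
      int id;
    };

    // Old style: an op may carry several results, so callers pick an index.
    struct MultiResultOp {
      std::vector<ToyValue> results;
      ToyValue *getResult(unsigned i) { return &results[i]; }
      unsigned getNumResults() const { return static_cast<unsigned>(results.size()); }
    };

    // New style: exactly one result, plus an implicit conversion to it, so the
    // op can be passed anywhere a ToyValue* is expected.
    struct SingleResultOp {
      ToyValue result;
      ToyValue *getResult() { return &result; }
      operator ToyValue *() { return &result; }  // lets callers drop getResult(0)
    };

    void use(ToyValue *) {}

    int main() {
      MultiResultOp oldOp{{ToyValue{7}}};
      use(oldOp.getResult(0));  // before: explicit result indexing everywhere

      SingleResultOp newOp{ToyValue{7}};
      use(newOp);               // after: the op stands in for its only value
      assert(newOp.getResult()->id == 7);
      return 0;
    }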
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index a4d969bc203..0153546a4c6 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -1474,17 +1474,6 @@ AffineMap AffineApplyNormalizer::renumber(const AffineApplyOp &app) {
return renumber(normalizer);
}
-static unsigned getIndexOf(Value *v, const AffineApplyOp &op) {
- unsigned numResults = op.getNumResults();
- for (unsigned i = 0; i < numResults; ++i) {
- if (v == op.getResult(i)) {
- return i;
- }
- }
- llvm_unreachable("value is not a result of AffineApply");
- return static_cast<unsigned>(-1);
-}
-
AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
ArrayRef<Value *> operands)
: AffineApplyNormalizer() {
@@ -1511,9 +1500,8 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
} else {
auto *inst = t->getDefiningInst();
auto app = inst->dyn_cast<AffineApplyOp>();
- unsigned idx = getIndexOf(t, *app);
auto tmpMap = renumber(*app);
- exprs.push_back(tmpMap.getResult(idx));
+ exprs.push_back(tmpMap.getResult(0));
}
}
diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index 7aa23bbe480..2ea4091f82b 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -105,8 +105,7 @@ AffineValueMap::AffineValueMap(const AffineApplyOp &op)
: map(op.getAffineMap()) {
for (auto *operand : op.getOperands())
operands.push_back(const_cast<Value *>(operand));
- for (unsigned i = 0, e = op.getNumResults(); i < e; i++)
- results.push_back(const_cast<Value *>(op.getResult(i)));
+ results.push_back(const_cast<Value *>(op.getResult()));
}
AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<Value *> operands)
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 640984bf866..219f356807a 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -145,17 +145,7 @@ bool mlir::isAccessInvariant(const Value &iv, const Value &index) {
auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
// We need yet another level of indirection because the `dim` index of the
// access may not correspond to the `dim` index of composeOp.
- unsigned idx = std::numeric_limits<unsigned>::max();
- unsigned numResults = composeOp->getNumResults();
- for (unsigned i = 0; i < numResults; ++i) {
- if (&index == composeOp->getResult(i)) {
- idx = i;
- break;
- }
- }
- assert(idx < std::numeric_limits<unsigned>::max());
- return !AffineValueMap(*composeOp)
- .isFunctionOf(idx, &const_cast<Value &>(iv));
+ return !AffineValueMap(*composeOp).isFunctionOf(0, const_cast<Value *>(&iv));
}
llvm::DenseSet<const Value *>
diff --git a/mlir/lib/EDSC/MLIREmitter.cpp b/mlir/lib/EDSC/MLIREmitter.cpp
index c2a6dc1f90a..2becf90c944 100644
--- a/mlir/lib/EDSC/MLIREmitter.cpp
+++ b/mlir/lib/EDSC/MLIREmitter.cpp
@@ -95,8 +95,7 @@ Value *add(FuncBuilder *builder, Location location, Value *a, Value *b) {
auto d0 = getAffineDimExpr(0, context);
auto d1 = getAffineDimExpr(1, context);
auto map = AffineMap::get(2, 0, {d0 + d1}, {});
- return makeComposedAffineApply(builder, location, map, {a, b})
- ->getResult(0);
+ return makeComposedAffineApply(builder, location, map, {a, b});
} else if (isIntElement(*a)) {
return builder->create<AddIOp>(location, a, b)->getResult();
}
@@ -110,8 +109,7 @@ Value *sub(FuncBuilder *builder, Location location, Value *a, Value *b) {
auto d0 = getAffineDimExpr(0, context);
auto d1 = getAffineDimExpr(1, context);
auto map = AffineMap::get(2, 0, {d0 - d1}, {});
- return makeComposedAffineApply(builder, location, map, {a, b})
- ->getResult(0);
+ return makeComposedAffineApply(builder, location, map, {a, b});
} else if (isIntElement(*a)) {
return builder->create<SubIOp>(location, a, b)->getResult();
}
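The emitter hunks above build one-result maps such as (d0, d1) -> (d0 + d1) and return the composed apply directly. As a hedged illustration of what such a single-result map computes for concrete operands, here is a toy evaluator that does not use MLIR's AffineExpr/AffineMap machinery at all:

    // Toy evaluator for the single-result maps built above; illustrative only.
    #include <cstdint>
    #include <cstdio>
    #include <functional>

    using AffineFn = std::function<int64_t(int64_t, int64_t)>;

    int64_t applySingleResultMap(const AffineFn &map, int64_t d0, int64_t d1) {
      return map(d0, d1);  // exactly one result, so the value is returned directly
    }

    int main() {
      AffineFn addMap = [](int64_t d0, int64_t d1) { return d0 + d1; };
      AffineFn subMap = [](int64_t d0, int64_t d1) { return d0 - d1; };
      std::printf("add: %lld\n", (long long)applySingleResultMap(addMap, 5, 3)); // 8
      std::printf("sub: %lld\n", (long long)applySingleResultMap(subMap, 5, 3)); // 2
      return 0;
    }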
diff --git a/mlir/lib/IR/BuiltinOps.cpp b/mlir/lib/IR/BuiltinOps.cpp
index 5a27ea9d109..68543cfb035 100644
--- a/mlir/lib/IR/BuiltinOps.cpp
+++ b/mlir/lib/IR/BuiltinOps.cpp
@@ -119,9 +119,6 @@ void AffineApplyOp::print(OpAsmPrinter *p) const {
}
bool AffineApplyOp::verify() const {
- if (getNumResults() != 1)
- return emitOpError("multi-result affine_apply is not supported");
-
// Check that affine map attribute was specified.
auto affineMapAttr = getAttrOfType<AffineMapAttr>("map");
if (!affineMapAttr)
@@ -136,8 +133,8 @@ bool AffineApplyOp::verify() const {
"operand count and affine map dimension and symbol count must match");
// Verify that result count matches affine map result count.
- if (getNumResults() != map.getNumResults())
- return emitOpError("result count and affine map result count must match");
+ if (map.getNumResults() != 1)
+ return emitOpError("mapping must produce one value");
return false;
}
@@ -163,14 +160,13 @@ bool AffineApplyOp::isValidSymbol() const {
return true;
}
-bool AffineApplyOp::constantFold(ArrayRef<Attribute> operandConstants,
- SmallVectorImpl<Attribute> &results,
- MLIRContext *context) const {
+Attribute AffineApplyOp::constantFold(ArrayRef<Attribute> operands,
+ MLIRContext *context) const {
auto map = getAffineMap();
- if (map.constantFold(operandConstants, results))
- return true;
- // Return false on success.
- return false;
+ SmallVector<Attribute, 1> result;
+ if (map.constantFold(operands, result))
+ return Attribute();
+ return result[0];
}
namespace {
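The constantFold change above swaps a fill-a-vector-and-signal-failure contract for a single nullable return value. A minimal sketch of the new shape, using std::optional as a stand-in for a nullable Attribute (hypothetical types, not the MLIR API):

    // Sketch of the folding contract: one result, null means "could not fold".
    #include <cstdio>
    #include <optional>

    using ToyAttribute = std::optional<long>;  // stand-in for a nullable Attribute

    ToyAttribute constantFoldAdd(ToyAttribute lhs, ToyAttribute rhs) {
      if (!lhs || !rhs)
        return std::nullopt;  // analogous to `return Attribute();`
      return *lhs + *rhs;     // analogous to `return result[0];`
    }

    int main() {
      if (auto folded = constantFoldAdd(2, 3))
        std::printf("folded to %ld\n", *folded);  // folded to 5
      if (!constantFoldAdd(std::nullopt, 3))
        std::printf("no fold possible\n");
      return 0;
    }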
diff --git a/mlir/lib/Transforms/ComposeAffineMaps.cpp b/mlir/lib/Transforms/ComposeAffineMaps.cpp
index 4752928d062..2457d868ae5 100644
--- a/mlir/lib/Transforms/ComposeAffineMaps.cpp
+++ b/mlir/lib/Transforms/ComposeAffineMaps.cpp
@@ -78,10 +78,7 @@ PassResult ComposeAffineMaps::runOnFunction(Function *f) {
FuncBuilder b(m.first);
auto newApp = makeComposedAffineApply(&b, app->getLoc(),
app->getAffineMap(), operands);
- unsigned idx = 0;
- for (auto *v : app->getResults()) {
- v->replaceAllUsesWith(newApp->getResult(idx++));
- }
+ app->replaceAllUsesWith(newApp);
}
{
auto pattern = Op(affineApplyOp);
@@ -89,9 +86,7 @@ PassResult ComposeAffineMaps::runOnFunction(Function *f) {
std::reverse(apps.begin(), apps.end());
for (auto m : apps) {
auto app = cast<OperationInst>(m.first)->cast<AffineApplyOp>();
- bool hasNonEmptyUse = llvm::any_of(
- app->getResults(), [](Value *r) { return !r->use_empty(); });
- if (!hasNonEmptyUse) {
+ if (app->use_empty()) {
m.first->erase();
}
}
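With one result per affine_apply, replacing uses and testing for dead ops collapses to replaceAllUsesWith and use_empty on the op itself. A toy use-list model of that pattern (the real MLIR use-lists are intrusive and more involved):

    // Toy model of the replace-all-uses / use_empty pattern; illustrative only.
    #include <cassert>
    #include <vector>

    struct ToyValue {
      std::vector<ToyValue **> uses;  // locations that point at this value
      bool use_empty() const { return uses.empty(); }
      void replaceAllUsesWith(ToyValue *other) {
        for (ToyValue **use : uses) {
          *use = other;
          other->uses.push_back(use);
        }
        uses.clear();
      }
    };

    int main() {
      ToyValue oldApp, newApp;
      ToyValue *operand = &oldApp;         // a single use of oldApp
      oldApp.uses.push_back(&operand);

      oldApp.replaceAllUsesWith(&newApp);  // one call, no per-result loop needed
      assert(operand == &newApp);
      assert(oldApp.use_empty());          // now safe to erase the old op
      return 0;
    }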
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 04eb38e9fc9..dd16bd06dde 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -280,8 +280,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, ForInst *forInst,
// corresponding dimension on the memory region (stored in 'offset').
auto map = top.getAffineMap(
cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset, {});
- memIndices.push_back(
- b->create<AffineApplyOp>(loc, map, outerIVs)->getResult(0));
+ memIndices.push_back(b->create<AffineApplyOp>(loc, map, outerIVs));
}
// The fast buffer is DMAed into at location zero; addressing is relative.
bufIndices.push_back(zeroIndex);
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index a8ec57c0426..7deaf850362 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -231,10 +231,8 @@ bool mlir::loopUnrollJamByFactor(ForInst *forInst, uint64_t unrollJamFactor) {
// iv' = iv + i, i = 1 to unrollJamFactor-1.
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step}, {});
- auto *ivUnroll =
- builder
- .create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV)
- ->getResult(0);
+ auto ivUnroll = builder.create<AffineApplyOp>(forInst->getLoc(),
+ bumpMap, forInstIV);
operandMapping.map(forInstIV, ivUnroll);
}
// Clone the sub-block being unroll-jammed.
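The bump map (d0) -> (d0 + i * step) gives each cloned body copy its own induction value. A small worked example of the arithmetic, with made-up loop bounds and no MLIR types:

    // Worked example of the unroll-jam induction-variable bump: copy 0 keeps the
    // original iv, copies 1..unrollJamFactor-1 see iv + i*step. Illustrative only.
    #include <cstdio>

    int main() {
      const long lb = 0, ub = 16, step = 1;
      const long unrollJamFactor = 4;

      // The unroll-jammed loop advances by step*unrollJamFactor per iteration.
      for (long iv = lb; iv < ub; iv += step * unrollJamFactor) {
        for (long i = 0; i < unrollJamFactor; ++i) {
          long ivUnroll = iv + i * step;  // what the bump map (d0 + i*step) encodes
          std::printf("body copy %ld sees iv = %ld\n", i, ivUnroll);
        }
      }
      return 0;
    }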
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 94f300bd16a..ab37ff63bad 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -562,13 +562,12 @@ bool LowerAffinePass::lowerAffineApply(AffineApplyOp *op) {
llvm::to_vector<8>(op->getOperands()));
if (!maybeExpandedMap)
return true;
- for (auto pair : llvm::zip(op->getResults(), *maybeExpandedMap)) {
- Value *original = std::get<0>(pair);
- Value *expanded = std::get<1>(pair);
- if (!expanded)
- return true;
- original->replaceAllUsesWith(expanded);
- }
+
+ Value *original = op->getResult();
+ Value *expanded = (*maybeExpandedMap)[0];
+ if (!expanded)
+ return true;
+ original->replaceAllUsesWith(expanded);
op->erase();
return false;
}
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 7dd3cecdfed..4360a4e1b96 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -376,11 +376,10 @@ reindexAffineIndices(FuncBuilder *b, VectorType hwVectorType,
// Create a bunch of single result maps.
return functional::map(
- [b, numIndices, memrefIndices](AffineExpr expr) {
+ [b, numIndices, memrefIndices](AffineExpr expr) -> Value * {
auto map = AffineMap::get(numIndices, 0, expr, {});
- auto app = makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(),
- map, memrefIndices);
- return app->getResult(0);
+ return makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(), map,
+ memrefIndices);
},
affineExprs);
}
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 0019714b6a3..811741d08d1 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -126,9 +126,8 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
// replaceAllMemRefUsesWith will always succeed unless the forInst body has
// non-deferencing uses of the memref.
- if (!replaceAllMemRefUsesWith(oldMemRef, newMemRef, ivModTwoOp->getResult(0),
- AffineMap(), {},
- &*forInst->getBody()->begin())) {
+ if (!replaceAllMemRefUsesWith(oldMemRef, newMemRef, {ivModTwoOp}, AffineMap(),
+ {}, &*forInst->getBody()->begin())) {
LLVM_DEBUG(llvm::dbgs()
<< "memref replacement for double buffering failed\n";);
ivModTwoOp->getInstruction()->erase();
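The double-buffering transformation indexes the doubled memref with iv mod 2, so consecutive iterations alternate between the two buffer halves. A toy sketch of just that indexing (illustrative only; the actual pass builds an affine_apply for it):

    // Sketch of the extra leading index produced by the ivModTwoOp apply.
    #include <cstdio>

    int main() {
      const int numIterations = 6;
      for (int iv = 0; iv < numIterations; ++iv) {
        int bufferIndex = iv % 2;  // alternates 0, 1, 0, 1, ...
        std::printf("iteration %d uses buffer half %d\n", iv, bufferIndex);
      }
      return 0;
    }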
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 03673eaa535..59da2b0a56e 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -117,7 +117,7 @@ bool mlir::promoteIfSingleIteration(ForInst *forInst) {
} else {
auto affineApplyOp = builder.create<AffineApplyOp>(
forInst->getLoc(), lb.getMap(), lbOperands);
- iv->replaceAllUsesWith(affineApplyOp->getResult(0));
+ iv->replaceAllUsesWith(affineApplyOp);
}
}
}
@@ -177,12 +177,11 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
// shift.
if (!srcIV->use_empty() && shift != 0) {
auto b = FuncBuilder::getForInstBodyBuilder(loopChunk);
- auto *ivRemap = b.create<AffineApplyOp>(
- srcForInst->getLoc(),
- b.getSingleDimShiftAffineMap(-static_cast<int64_t>(
- srcForInst->getStep() * shift)),
- loopChunkIV)
- ->getResult(0);
+ auto ivRemap = b.create<AffineApplyOp>(
+ srcForInst->getLoc(),
+ b.getSingleDimShiftAffineMap(
+ -static_cast<int64_t>(srcForInst->getStep() * shift)),
+ loopChunkIV);
operandMap.map(srcIV, ivRemap);
} else {
operandMap.map(srcIV, loopChunkIV);
@@ -432,9 +431,8 @@ bool mlir::loopUnrollByFactor(ForInst *forInst, uint64_t unrollFactor) {
// iv' = iv + 1/2/3...unrollFactor-1;
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step}, {});
- auto *ivUnroll =
- builder.create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV)
- ->getResult(0);
+ auto ivUnroll =
+ builder.create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV);
operandMap.map(forInstIV, ivUnroll);
}
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 03c2a9df1e4..0e890c780f0 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -136,7 +136,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
indexRemap.getNumSymbols(), resultExpr, {});
auto afOp = builder.create<AffineApplyOp>(opInst->getLoc(),
singleResMap, remapOperands);
- state.operands.push_back(afOp->getResult(0));
+ state.operands.push_back(afOp);
}
} else {
// No remapping specified.
@@ -266,7 +266,7 @@ void mlir::createAffineComputationSlice(
break;
}
if (j < subOperands.size()) {
- newOperands[i] = (*sliceOps)[j]->getResult(0);
+ newOperands[i] = (*sliceOps)[j];
}
}
for (unsigned idx = 0, e = newOperands.size(); idx < e; idx++) {
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 0a199f008d6..a01b8fdf216 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -240,9 +240,7 @@ static bool affineApplyOp(const Instruction &inst) {
static bool singleResultAffineApplyOpWithoutUses(const Instruction &inst) {
const auto &opInst = cast<OperationInst>(inst);
auto app = opInst.dyn_cast<AffineApplyOp>();
- return app && (app->getNumResults() == 1) &&
- app->getResult(0)->getUses().end() ==
- app->getResult(0)->getUses().begin();
+ return app && app->use_empty();
}
void VectorizerTestPass::testNormalizeMaps(Function *f) {