-rw-r--r--  mlir/g3doc/LangRef.md                                      | 72
-rw-r--r--  mlir/g3doc/Rationale.md                                    | 29
-rw-r--r--  mlir/include/mlir/IR/BuiltinOps.h                          | 18
-rw-r--r--  mlir/lib/Analysis/AffineAnalysis.cpp                       | 14
-rw-r--r--  mlir/lib/Analysis/AffineStructures.cpp                     |  3
-rw-r--r--  mlir/lib/Analysis/LoopAnalysis.cpp                         | 12
-rw-r--r--  mlir/lib/EDSC/MLIREmitter.cpp                              |  6
-rw-r--r--  mlir/lib/IR/BuiltinOps.cpp                                 | 20
-rw-r--r--  mlir/lib/Transforms/ComposeAffineMaps.cpp                  |  9
-rw-r--r--  mlir/lib/Transforms/DmaGeneration.cpp                      |  3
-rw-r--r--  mlir/lib/Transforms/LoopUnrollAndJam.cpp                   |  6
-rw-r--r--  mlir/lib/Transforms/LowerAffine.cpp                        | 13
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp                 |  7
-rw-r--r--  mlir/lib/Transforms/PipelineDataTransfer.cpp               |  5
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp                    | 18
-rw-r--r--  mlir/lib/Transforms/Utils/Utils.cpp                        |  4
-rw-r--r--  mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp   |  4
-rw-r--r--  mlir/test/IR/invalid-ops.mlir                              |  2
18 files changed, 100 insertions, 145 deletions
diff --git a/mlir/g3doc/LangRef.md b/mlir/g3doc/LangRef.md
index 64df9dca908..c310726d2b7 100644
--- a/mlir/g3doc/LangRef.md
+++ b/mlir/g3doc/LangRef.md
@@ -252,7 +252,7 @@ loops and if instructions), the result of a
[`affine_apply`](#'affine_apply'-operation) operation that recursively takes as
arguments any symbolic identifiers. Dimensions may be bound not only to anything
that a symbol is bound to, but also to induction variables of enclosing
-[for instructions](#'for'-instruction), and the results of an
+[for instructions](#'for'-instruction), and the result of an
[`affine_apply` operation](#'affine_apply'-operation) (which recursively may use
other dimensions and symbols).
@@ -1038,15 +1038,15 @@ nullary mapping function that returns the constant value (e.g. `()->(-42)()`).
Example showing reverse iteration of the inner loop:
```mlir {.mlir}
-#map57 = (d0, d1)[s0] -> (d0, s0 - d1 - 1)
+#map57 = (d0)[s0] -> (s0 - d0 - 1)
func @simple_example(%A: memref<?x?xf32>, %B: memref<?x?xf32>) {
%N = dim %A, 0 : memref<?x?xf32>
for %i = 0 to %N step 1 {
for %j = 0 to %N { // implicitly steps by 1
- %0 = affine_apply #map57(%i, %j)[%N]
- %tmp = call @F1(%A, %0#0, %0#1) : (memref<?x?xf32>, index, index)->(f32)
- call @F2(%tmp, %B, %0#0, %0#1) : (f32, memref<?x?xf32>, index, index)->()
+ %0 = affine_apply #map57(%j)[%N]
+ %tmp = call @F1(%A, %i, %0) : (memref<?x?xf32>, index, index)->(f32)
+ call @F2(%tmp, %B, %i, %0) : (f32, memref<?x?xf32>, index, index)->()
}
}
return
@@ -1085,11 +1085,11 @@ Example:
func @reduced_domain_example(%A, %X, %N) : (memref<10xi32>, i32, i32) {
for %i = 0 to %N {
for %j = 0 to %N {
- %0 = affine_apply #map42(%i, %j)
- %tmp = call @S1(%X, %0#0, %0#1)
+ %0 = affine_apply #map42(%j)
+ %tmp = call @S1(%X, %i, %0)
if #set(%i, %j)[%N] {
%1 = affine_apply #map43(%i, %j)
- call @S2(%tmp, %A, %1#0, %1#1)
+ call @S2(%tmp, %A, %i, %1)
}
}
}
@@ -1242,17 +1242,16 @@ operation ::= ssa-id `=` `affine_apply` affine-map dim-and-symbol-use-list
```
The `affine_apply` instruction applies an [affine mapping](#affine-expressions)
-to a list of SSA values, yielding another list of SSA values. The number of
-dimension and symbol arguments to affine_apply must be equal to the respective
-number of dimensional and symbolic inputs to the affine mapping, and the number
-of results is the dimensionality of the range of the affine mapping. The input
-SSA values must all have 'index' type, and the results are all of 'index' type.
+to a list of SSA values, yielding a single SSA value. The number of dimension
+and symbol arguments to affine_apply must be equal to the respective number of
+dimensional and symbolic inputs to the affine mapping; the `affine_apply`
+instruction always returns one value. The input operands and result must all
+have 'index' type.
Example:
```mlir {.mlir}
-#map10 = (d0, d1) -> (floordiv(d0,8), floordiv(d1,128),
- d0 mod 8, d1 mod 128)
+#map10 = (d0, d1) -> (floordiv(d0,8) + floordiv(d1,128))
...
%1 = affine_apply #map10 (%s, %t)
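The shape of this change is easiest to see side by side: a map that used to yield a tuple of values is split into one single-result map, and one `affine_apply`, per result expression. A minimal sketch in the same notation (the map names here are illustrative, not from the patch):

```mlir {.mlir}
// Before: one multi-result apply, with uses written as %0#0 and %0#1.
#pair = (d0, d1) -> (d0 + d1, d0 - d1)
%0 = affine_apply #pair(%i, %j)

// After: one single-result map and one apply per result expression.
#sum  = (d0, d1) -> (d0 + d1)
#diff = (d0, d1) -> (d0 - d1)
%s = affine_apply #sum(%i, %j)
%d = affine_apply #diff(%i, %j)
```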
@@ -1572,34 +1571,33 @@ The arity of indices is the rank of the memref (i.e., if the memref loaded from
is of rank 3, then 3 indices are required for the load following the memref
identifier).
-In an ML Function, the indices of a load are restricted to SSA values bound to
-surrounding loop induction variables, [symbols](#dimensions-and-symbols),
-results of a [`constant` operation](#'constant'-operation), or the results of an
+In an `if` or `for` body, the indices of a load are restricted to SSA values
+bound to surrounding loop induction variables,
+[symbols](#dimensions-and-symbols), results of a
+[`constant` operation](#'constant'-operation), or the result of an
`affine_apply` operation that can in turn take as arguments all of the
-aforementioned SSA values or the recursively results of such an `affine_apply`
+aforementioned SSA values or, recursively, the result of such an `affine_apply`
operation.
Example:
```mlir {.mlir}
-#remap1 = (d0, d1) -> (3*d0, d1+1)
-#remap2 = (d0) -> (2*d0 + 1)
- ...
-%1 = affine_apply #remap1(%i, %j)
-%12 = load %A[%1#0, %1#1] : memref<8x?xi32, #layout, hbm>
+%1 = affine_apply (d0, d1) -> (3*d0) (%i, %j)
+%2 = affine_apply (d0, d1) -> (d1+1) (%i, %j)
+%12 = load %A[%1, %2] : memref<8x?xi32, #layout, hbm>
// Example of an indirect load (treated as non-affine)
-%2 = affine_apply #remap2(%12)
-%13 = load %A[%2, %1#1] : memref<4x?xi32, #layout, hbm>
+%3 = affine_apply (d0) -> (2*d0 + 1)(%12)
+%13 = load %A[%3, %2] : memref<4x?xi32, #layout, hbm>
```
**Context:** The `load` and `store` instructions are specifically crafted to
-fully resolve a reference to an element of a memref, and (in an ML function) the
-compiler can follow use-def chains (e.g. through
+fully resolve a reference to an element of a memref, and (in polyhedral `if` and
+`for` instructions) the compiler can follow use-def chains (e.g. through
[`affine_apply`](#'affine_apply'-operation) operations) to precisely analyze
references at compile-time using polyhedral techniques. This is possible because
-of the [restrictions on dimensions and symbols](#dimensions-and-symbols) in ML
-functions.
+of the [restrictions on dimensions and symbols](#dimensions-and-symbols) in
+these contexts.
#### 'store' operation {#'store'-operation}
@@ -1616,10 +1614,10 @@ provided within brackets need to match the rank of the memref.
In an ML Function, the indices of a store are restricted to SSA values bound to
surrounding loop induction variables, [symbols](#dimensions-and-symbols),
-results of a [`constant` operation](#'constant'-operation), or the results of an
+results of a [`constant` operation](#'constant'-operation), or the result of an
[`affine_apply`](#'affine_apply'-operation) operation that can in turn take as
-arguments all of the aforementioned SSA values or the recursively results of
-such an `affine_apply` operation.
+arguments all of the aforementioned SSA values or, recursively, the result of
+such an `affine_apply` operation.
Example:
@@ -1628,12 +1626,12 @@ store %100, %A[%1, 1023] : memref<4x?xf32, #layout, hbm>
```
**Context:** The `load` and `store` instructions are specifically crafted to
-fully resolve a reference to a scalar member of a memref, and (in an ML
-function) the compiler can follow use-def chains (e.g. through
+fully resolve a reference to an element of a memref, and (in polyhedral `if` and
+`for` instructions) the compiler can follow use-def chains (e.g. through
[`affine_apply`](#'affine_apply'-operation) operations) to precisely analyze
references at compile-time using polyhedral techniques. This is possible because
-of the [restrictions on dimensions and symbols](#dimensions-and-symbols) in ML
-functions.
+of the [restrictions on dimensions and symbols](#dimensions-and-symbols) in
+these contexts.
#### 'tensor_load' operation {#'tensor_load'-operation}
diff --git a/mlir/g3doc/Rationale.md b/mlir/g3doc/Rationale.md
index d8bd21ce76b..f2ed4f81c27 100644
--- a/mlir/g3doc/Rationale.md
+++ b/mlir/g3doc/Rationale.md
@@ -116,10 +116,10 @@ n-ranked tensor. This disallows the equivalent of pointer arithmetic or the
ability to index into the same memref in other ways (something which C arrays
allow, for example). Furthermore, in affine constructs, the compiler can
follow use-def chains (e.g. through
-[affine_apply instructions](https://docs.google.com/document/d/1lwJ3o6MrkAa-jiqEwoORBLW3bAI1f4ONofjRqMK1-YU/edit?ts=5b208818#heading=h.kt8lzanb487r))
-to precisely analyze references at compile-time using polyhedral techniques.
-This is possible because of the
-[restrictions on dimensions and symbols](https://docs.google.com/document/d/1lwJ3o6MrkAa-jiqEwoORBLW3bAI1f4ONofjRqMK1-YU/edit?ts=5b208818#heading=h.fnmv1awabfj).
+[affine_apply instructions](LangRef.md#'affine_apply'-operation)) to precisely
+analyze references at compile-time using polyhedral techniques. This is possible
+because of the
+[restrictions on dimensions and symbols](LangRef.md#dimensions-and-symbols).
A scalar of element-type (a primitive type or a vector type) that is stored in
memory is modeled as a 0-d memref. This is also necessary for scalars that are
@@ -634,12 +634,13 @@ in a dilated convolution.
// S4 = h_pad_low, S5 = w_pad_low
// %out0 = %oh * %h_stride + %kh * %h_kernel_dilation - %h_pad_low
// %out1 = %ow * %w_stride + %kw * %w_kernel_dilation - %w_pad_low
-#map1 = (d0, d1, d2, d3) [S0, S1, S2, S3, S4, S5] -> (d0 * S0 + d2 * S2 - %S4,
- d1 * S1 + d3 * S3 - %S5)
+#map1_0 = (d0, d1, d2, d3) [S0, S1, S2, S3, S4, S5] -> (d0 * S0 + d2 * S2 - S4)
+#map1_1 = (d0, d1, d2, d3) [S0, S1, S2, S3, S4, S5] -> (d1 * S1 + d3 * S3 - S5)
// Semi-affine map to undilated input coordinate space.
// d0 = input_h, d1 = input_w, S0 = h_base_dilation, S1 = w_base_dilation.
-#map2 = (d0, d1) [S0, S1] -> (d0 / S0, d1 / S1)
+#map2_0 = (d0, d1) [S0, S1] -> (d0 / S0)
+#map2_1 = (d0, d1) [S0, S1] -> (d1 / S1)
// Conv2D shapes:
// input: [batch, input_height, input_width, input_feature]
@@ -655,19 +656,21 @@ func @conv2d(memref<16x1024x1024x3xf32, #lm0, vmem> %input,
for %kh = 0 to %kernel_height {
for %kw = 0 to %kernel_width {
for %if = 0 to %input_feature {
- %0 = affine_apply #map0 (%b, %oh, %ow, %of, %kh, %kw, %if)
// Calculate input indices.
- %1 = affine_apply #map1 (%0#1, %0#2, %0#4, %0#5)
+ %1_0 = affine_apply #map1_0 (%oh, %ow, %kh, %kw)
+ [%h_stride, %w_stride, %h_kernel_dilation, %w_kernel_dilation,
+ %h_pad_low, %w_pad_low]
+ %1_1 = affine_apply #map1_1 (%oh, %ow, %kh, %kw)
[%h_stride, %w_stride, %h_kernel_dilation, %w_kernel_dilation,
%h_pad_low, %w_pad_low]
// Check if access is not in padding.
- if #domain(%1#0, %1#1)
+ if #domain(%1_0, %1_1)
[%h_base_dilation, %w_kernel_dilation, %h_bound, %w_bound] {
- %2 = affine_apply #map2 (%1#0, %1#1)
+ %2_0 = affine_apply #map2_0 (%1_0, %1_1)
+ %2_1 = affine_apply #map2_1 (%1_0, %1_1)
// Compute: output[output_indices] += input[input_indices] * kernel[kernel_indices]
- call @multiply_accumulate(%input, %kernel, %output, %0#0, %0#1, %0#2, %0#3,
- %0#4, %0#5, %0#6, %2#0, %2#1)
+ call @multiply_accumulate(%input, %kernel, %output, %b, %oh, %ow, %of, %kh, %kw, %if, %2_0, %2_1)
}
}
}
diff --git a/mlir/include/mlir/IR/BuiltinOps.h b/mlir/include/mlir/IR/BuiltinOps.h
index 189e3e248ea..883319251de 100644
--- a/mlir/include/mlir/IR/BuiltinOps.h
+++ b/mlir/include/mlir/IR/BuiltinOps.h
@@ -37,10 +37,10 @@ public:
};
/// The "affine_apply" operation applies an affine map to a list of operands,
-/// yielding a list of results. The operand and result list sizes must be the
-/// same. All operands and results are of type 'Index'. This operation
-/// requires a single affine map attribute named "map".
-/// For example:
+/// yielding a single result. The operand list must be the same size as the
+/// number of arguments to the affine mapping. All operands and the result are
+/// of type 'Index'. This operation requires a single affine map attribute named
+/// "map". For example:
///
/// %y = "affine_apply" (%x) { map: (d0) -> (d0 + 1) } :
/// (index) -> (index)
@@ -50,9 +50,8 @@ public:
/// #map42 = (d0)->(d0+1)
/// %y = affine_apply #map42(%x)
///
-class AffineApplyOp
- : public Op<AffineApplyOp, OpTrait::VariadicOperands,
- OpTrait::VariadicResults, OpTrait::HasNoSideEffect> {
+class AffineApplyOp : public Op<AffineApplyOp, OpTrait::VariadicOperands,
+ OpTrait::OneResult, OpTrait::HasNoSideEffect> {
public:
/// Builds an affine apply op with the specified map and operands.
static void build(Builder *builder, OperationState *result, AffineMap map,
@@ -75,9 +74,8 @@ public:
static bool parse(OpAsmParser *parser, OperationState *result);
void print(OpAsmPrinter *p) const;
bool verify() const;
- bool constantFold(ArrayRef<Attribute> operandConstants,
- SmallVectorImpl<Attribute> &results,
- MLIRContext *context) const;
+ Attribute constantFold(ArrayRef<Attribute> operands,
+ MLIRContext *context) const;
static void getCanonicalizationPatterns(OwningRewritePatternList &results,
MLIRContext *context);
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index a4d969bc203..0153546a4c6 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -1474,17 +1474,6 @@ AffineMap AffineApplyNormalizer::renumber(const AffineApplyOp &app) {
return renumber(normalizer);
}
-static unsigned getIndexOf(Value *v, const AffineApplyOp &op) {
- unsigned numResults = op.getNumResults();
- for (unsigned i = 0; i < numResults; ++i) {
- if (v == op.getResult(i)) {
- return i;
- }
- }
- llvm_unreachable("value is not a result of AffineApply");
- return static_cast<unsigned>(-1);
-}
-
AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
ArrayRef<Value *> operands)
: AffineApplyNormalizer() {
@@ -1511,9 +1500,8 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
} else {
auto *inst = t->getDefiningInst();
auto app = inst->dyn_cast<AffineApplyOp>();
- unsigned idx = getIndexOf(t, *app);
auto tmpMap = renumber(*app);
- exprs.push_back(tmpMap.getResult(idx));
+ exprs.push_back(tmpMap.getResult(0));
}
}
diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index 7aa23bbe480..2ea4091f82b 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -105,8 +105,7 @@ AffineValueMap::AffineValueMap(const AffineApplyOp &op)
: map(op.getAffineMap()) {
for (auto *operand : op.getOperands())
operands.push_back(const_cast<Value *>(operand));
- for (unsigned i = 0, e = op.getNumResults(); i < e; i++)
- results.push_back(const_cast<Value *>(op.getResult(i)));
+ results.push_back(const_cast<Value *>(op.getResult()));
}
AffineValueMap::AffineValueMap(AffineMap map, ArrayRef<Value *> operands)
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 640984bf866..219f356807a 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -145,17 +145,7 @@ bool mlir::isAccessInvariant(const Value &iv, const Value &index) {
auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
// We need yet another level of indirection because the `dim` index of the
// access may not correspond to the `dim` index of composeOp.
- unsigned idx = std::numeric_limits<unsigned>::max();
- unsigned numResults = composeOp->getNumResults();
- for (unsigned i = 0; i < numResults; ++i) {
- if (&index == composeOp->getResult(i)) {
- idx = i;
- break;
- }
- }
- assert(idx < std::numeric_limits<unsigned>::max());
- return !AffineValueMap(*composeOp)
- .isFunctionOf(idx, &const_cast<Value &>(iv));
+ return !AffineValueMap(*composeOp).isFunctionOf(0, const_cast<Value *>(&iv));
}
llvm::DenseSet<const Value *>
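With a single result per `affine_apply`, the deleted result-index search is unnecessary: if the queried index value is produced by an apply, it is necessarily result 0 of its defining op. For intuition, a hedged sketch of what `isAccessInvariant` decides (loop bounds and shapes illustrative):

```mlir {.mlir}
for %i = 0 to 100 {
  for %j = 0 to 100 {
    %k = affine_apply (d0) -> (d0 + 1)(%j)
    // %k is a function of %j but not of %i, so the access below is
    // invariant with respect to %i and variant with respect to %j.
    %v = load %A[%k] : memref<101xf32>
  }
}
```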
diff --git a/mlir/lib/EDSC/MLIREmitter.cpp b/mlir/lib/EDSC/MLIREmitter.cpp
index c2a6dc1f90a..2becf90c944 100644
--- a/mlir/lib/EDSC/MLIREmitter.cpp
+++ b/mlir/lib/EDSC/MLIREmitter.cpp
@@ -95,8 +95,7 @@ Value *add(FuncBuilder *builder, Location location, Value *a, Value *b) {
auto d0 = getAffineDimExpr(0, context);
auto d1 = getAffineDimExpr(1, context);
auto map = AffineMap::get(2, 0, {d0 + d1}, {});
- return makeComposedAffineApply(builder, location, map, {a, b})
- ->getResult(0);
+ return makeComposedAffineApply(builder, location, map, {a, b});
} else if (isIntElement(*a)) {
return builder->create<AddIOp>(location, a, b)->getResult();
}
@@ -110,8 +109,7 @@ Value *sub(FuncBuilder *builder, Location location, Value *a, Value *b) {
auto d0 = getAffineDimExpr(0, context);
auto d1 = getAffineDimExpr(1, context);
auto map = AffineMap::get(2, 0, {d0 - d1}, {});
- return makeComposedAffineApply(builder, location, map, {a, b})
- ->getResult(0);
+ return makeComposedAffineApply(builder, location, map, {a, b});
} else if (isIntElement(*a)) {
return builder->create<SubIOp>(location, a, b)->getResult();
}
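Both `add` and `sub` can now return the apply directly because a one-result op converts implicitly to its unique result `Value *`. At the IR level, the index-typed path of `add` emits roughly the following (a sketch, with illustrative names):

```mlir {.mlir}
// edsc::add(a, b) where %a and %b have 'index' type:
%s = affine_apply (d0, d1) -> (d0 + d1)(%a, %b)
// With integer elements it emits an addi instead of an affine_apply.
```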
diff --git a/mlir/lib/IR/BuiltinOps.cpp b/mlir/lib/IR/BuiltinOps.cpp
index 5a27ea9d109..68543cfb035 100644
--- a/mlir/lib/IR/BuiltinOps.cpp
+++ b/mlir/lib/IR/BuiltinOps.cpp
@@ -119,9 +119,6 @@ void AffineApplyOp::print(OpAsmPrinter *p) const {
}
bool AffineApplyOp::verify() const {
- if (getNumResults() != 1)
- return emitOpError("multi-result affine_apply is not supported");
-
// Check that affine map attribute was specified.
auto affineMapAttr = getAttrOfType<AffineMapAttr>("map");
if (!affineMapAttr)
@@ -136,8 +133,8 @@ bool AffineApplyOp::verify() const {
"operand count and affine map dimension and symbol count must match");
// Verify that result count matches affine map result count.
- if (getNumResults() != map.getNumResults())
- return emitOpError("result count and affine map result count must match");
+ if (map.getNumResults() != 1)
+ return emitOpError("mapping must produce one value");
return false;
}
@@ -163,14 +160,13 @@ bool AffineApplyOp::isValidSymbol() const {
return true;
}
-bool AffineApplyOp::constantFold(ArrayRef<Attribute> operandConstants,
- SmallVectorImpl<Attribute> &results,
- MLIRContext *context) const {
+Attribute AffineApplyOp::constantFold(ArrayRef<Attribute> operands,
+ MLIRContext *context) const {
auto map = getAffineMap();
- if (map.constantFold(operandConstants, results))
- return true;
- // Return false on success.
- return false;
+ SmallVector<Attribute, 1> result;
+ if (map.constantFold(operands, result))
+ return Attribute();
+ return result[0];
}
namespace {
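The new contract is: return a null `Attribute` when the map cannot be folded, and the single folded value otherwise. For a sense of what this enables, an apply whose operands are constants folds away entirely; a hedged sketch in the generic syntax used by the tests (values illustrative):

```mlir {.mlir}
%c = "constant"() {value: 41} : () -> index
%x = "affine_apply"(%c) {map: (d0) -> (d0 + 1)} : (index) -> (index)
// Constant folding replaces uses of %x with an index constant 42.
```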
diff --git a/mlir/lib/Transforms/ComposeAffineMaps.cpp b/mlir/lib/Transforms/ComposeAffineMaps.cpp
index 4752928d062..2457d868ae5 100644
--- a/mlir/lib/Transforms/ComposeAffineMaps.cpp
+++ b/mlir/lib/Transforms/ComposeAffineMaps.cpp
@@ -78,10 +78,7 @@ PassResult ComposeAffineMaps::runOnFunction(Function *f) {
FuncBuilder b(m.first);
auto newApp = makeComposedAffineApply(&b, app->getLoc(),
app->getAffineMap(), operands);
- unsigned idx = 0;
- for (auto *v : app->getResults()) {
- v->replaceAllUsesWith(newApp->getResult(idx++));
- }
+ app->replaceAllUsesWith(newApp);
}
{
auto pattern = Op(affineApplyOp);
@@ -89,9 +86,7 @@ PassResult ComposeAffineMaps::runOnFunction(Function *f) {
std::reverse(apps.begin(), apps.end());
for (auto m : apps) {
auto app = cast<OperationInst>(m.first)->cast<AffineApplyOp>();
- bool hasNonEmptyUse = llvm::any_of(
- app->getResults(), [](Value *r) { return !r->use_empty(); });
- if (!hasNonEmptyUse) {
+ if (app->use_empty()) {
m.first->erase();
}
}
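`makeComposedAffineApply` collapses a chain of single-result applies into one op, and `replaceAllUsesWith` on the op itself is now well-defined because there is exactly one result to forward. A hedged sketch of the rewrite this pass performs (maps illustrative):

```mlir {.mlir}
// Before composition:
%a = affine_apply (d0) -> (d0 + 1)(%i)
%b = affine_apply (d0) -> (d0 * 2)(%a)
// After: one apply computing the flattened, composed expression.
%b = affine_apply (d0) -> (d0 * 2 + 2)(%i)
```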
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 04eb38e9fc9..dd16bd06dde 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -280,8 +280,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, ForInst *forInst,
// corresponding dimension on the memory region (stored in 'offset').
auto map = top.getAffineMap(
cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset, {});
- memIndices.push_back(
- b->create<AffineApplyOp>(loc, map, outerIVs)->getResult(0));
+ memIndices.push_back(b->create<AffineApplyOp>(loc, map, outerIVs));
}
// The fast buffer is DMAed into at location zero; addressing is relative.
bufIndices.push_back(zeroIndex);
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index a8ec57c0426..7deaf850362 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -231,10 +231,8 @@ bool mlir::loopUnrollJamByFactor(ForInst *forInst, uint64_t unrollJamFactor) {
// iv' = iv + i, i = 1 to unrollJamFactor-1.
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step}, {});
- auto *ivUnroll =
- builder
- .create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV)
- ->getResult(0);
+ auto ivUnroll = builder.create<AffineApplyOp>(forInst->getLoc(),
+ bumpMap, forInstIV);
operandMapping.map(forInstIV, ivUnroll);
}
// Clone the sub-block being unroll-jammed.
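The bump map built above materializes the unrolled copies of the induction variable. For an unroll-jam factor of 4 on a step-1 loop, the jammed body sees roughly the following (bounds illustrative):

```mlir {.mlir}
for %i = 0 to 128 step 4 {
  // ... copy 0 of the body uses %i directly ...
  %i1 = affine_apply (d0) -> (d0 + 1)(%i) // copy 1
  %i2 = affine_apply (d0) -> (d0 + 2)(%i) // copy 2
  %i3 = affine_apply (d0) -> (d0 + 3)(%i) // copy 3
}
```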
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 94f300bd16a..ab37ff63bad 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -562,13 +562,12 @@ bool LowerAffinePass::lowerAffineApply(AffineApplyOp *op) {
llvm::to_vector<8>(op->getOperands()));
if (!maybeExpandedMap)
return true;
- for (auto pair : llvm::zip(op->getResults(), *maybeExpandedMap)) {
- Value *original = std::get<0>(pair);
- Value *expanded = std::get<1>(pair);
- if (!expanded)
- return true;
- original->replaceAllUsesWith(expanded);
- }
+
+ Value *original = op->getResult();
+ Value *expanded = (*maybeExpandedMap)[0];
+ if (!expanded)
+ return true;
+ original->replaceAllUsesWith(expanded);
op->erase();
return false;
}
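`expandAffineMap` rewrites the map's one result expression in terms of standard arithmetic ops, so exactly one replacement value is expected. Roughly, and only as an illustrative sketch of the standard-ops lowering:

```mlir {.mlir}
// Before lowering:
%y = affine_apply (d0) -> (d0 * 3 + 1)(%x)
// After lowering, approximately:
%c3 = "constant"() {value: 3} : () -> index
%m = muli %x, %c3 : index
%c1 = "constant"() {value: 1} : () -> index
%y = addi %m, %c1 : index
```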
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 7dd3cecdfed..4360a4e1b96 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -376,11 +376,10 @@ reindexAffineIndices(FuncBuilder *b, VectorType hwVectorType,
// Create a bunch of single result maps.
return functional::map(
- [b, numIndices, memrefIndices](AffineExpr expr) {
+ [b, numIndices, memrefIndices](AffineExpr expr) -> Value * {
auto map = AffineMap::get(numIndices, 0, expr, {});
- auto app = makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(),
- map, memrefIndices);
- return app->getResult(0);
+ return makeComposedAffineApply(b, b->getInsertionPoint()->getLoc(), map,
+ memrefIndices);
},
affineExprs);
}
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 0019714b6a3..811741d08d1 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -126,9 +126,8 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
// replaceAllMemRefUsesWith will always succeed unless the forInst body has
// non-deferencing uses of the memref.
- if (!replaceAllMemRefUsesWith(oldMemRef, newMemRef, ivModTwoOp->getResult(0),
- AffineMap(), {},
- &*forInst->getBody()->begin())) {
+ if (!replaceAllMemRefUsesWith(oldMemRef, newMemRef, {ivModTwoOp}, AffineMap(),
+ {}, &*forInst->getBody()->begin())) {
LLVM_DEBUG(llvm::dbgs()
<< "memref replacement for double buffering failed\n";);
ivModTwoOp->getInstruction()->erase();
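`ivModTwoOp` computes the ping-pong selector for the doubled buffer, and it can now be passed directly as the extra index because the op converts to its sole result. The rewrite it drives looks roughly like this (shapes illustrative):

```mlir {.mlir}
// %buf : memref<64xf32> is replaced by a doubled %buf2 : memref<2x64xf32>;
// every access gains a leading index that alternates between 0 and 1.
%p = affine_apply (d0) -> (d0 mod 2)(%i)
%v = load %buf2[%p, %j] : memref<2x64xf32>
```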
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 03673eaa535..59da2b0a56e 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -117,7 +117,7 @@ bool mlir::promoteIfSingleIteration(ForInst *forInst) {
} else {
auto affineApplyOp = builder.create<AffineApplyOp>(
forInst->getLoc(), lb.getMap(), lbOperands);
- iv->replaceAllUsesWith(affineApplyOp->getResult(0));
+ iv->replaceAllUsesWith(affineApplyOp);
}
}
}
@@ -177,12 +177,11 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
// shift.
if (!srcIV->use_empty() && shift != 0) {
auto b = FuncBuilder::getForInstBodyBuilder(loopChunk);
- auto *ivRemap = b.create<AffineApplyOp>(
- srcForInst->getLoc(),
- b.getSingleDimShiftAffineMap(-static_cast<int64_t>(
- srcForInst->getStep() * shift)),
- loopChunkIV)
- ->getResult(0);
+ auto ivRemap = b.create<AffineApplyOp>(
+ srcForInst->getLoc(),
+ b.getSingleDimShiftAffineMap(
+ -static_cast<int64_t>(srcForInst->getStep() * shift)),
+ loopChunkIV);
operandMap.map(srcIV, ivRemap);
} else {
operandMap.map(srcIV, loopChunkIV);
@@ -432,9 +431,8 @@ bool mlir::loopUnrollByFactor(ForInst *forInst, uint64_t unrollFactor) {
// iv' = iv + 1/2/3...unrollFactor-1;
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step}, {});
- auto *ivUnroll =
- builder.create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV)
- ->getResult(0);
+ auto ivUnroll =
+ builder.create<AffineApplyOp>(forInst->getLoc(), bumpMap, forInstIV);
operandMap.map(forInstIV, ivUnroll);
}
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 03c2a9df1e4..0e890c780f0 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -136,7 +136,7 @@ bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
indexRemap.getNumSymbols(), resultExpr, {});
auto afOp = builder.create<AffineApplyOp>(opInst->getLoc(),
singleResMap, remapOperands);
- state.operands.push_back(afOp->getResult(0));
+ state.operands.push_back(afOp);
}
} else {
// No remapping specified.
@@ -266,7 +266,7 @@ void mlir::createAffineComputationSlice(
break;
}
if (j < subOperands.size()) {
- newOperands[i] = (*sliceOps)[j]->getResult(0);
+ newOperands[i] = (*sliceOps)[j];
}
}
for (unsigned idx = 0, e = newOperands.size(); idx < e; idx++) {
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 0a199f008d6..a01b8fdf216 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -240,9 +240,7 @@ static bool affineApplyOp(const Instruction &inst) {
static bool singleResultAffineApplyOpWithoutUses(const Instruction &inst) {
const auto &opInst = cast<OperationInst>(inst);
auto app = opInst.dyn_cast<AffineApplyOp>();
- return app && (app->getNumResults() == 1) &&
- app->getResult(0)->getUses().end() ==
- app->getResult(0)->getUses().begin();
+ return app && app->use_empty();
}
void VectorizerTestPass::testNormalizeMaps(Function *f) {
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index d49407bc9cc..5fa9cb9f956 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -62,7 +62,7 @@ func @affine_apply_wrong_result_count() {
^bb0:
%i = "constant"() {value: 0} : () -> index
%j = "constant"() {value: 1} : () -> index
- %x = "affine_apply" (%i, %j) {map: (d0, d1) -> ((d0 + 1), (d1 + 2))} : (index,index) -> (index) // expected-error {{'affine_apply' op result count and affine map result count must match}}
+ %x = "affine_apply" (%i, %j) {map: (d0, d1) -> ((d0 + 1), (d1 + 2))} : (index,index) -> (index) // expected-error {{'affine_apply' op mapping must produce one value}}
return
}