author     Uday Bondhugula <udayb@iisc.ac.in>                 2019-09-27 11:57:52 -0700
committer  A. Unique TensorFlower <gardener@tensorflow.org>   2019-09-27 11:58:24 -0700
commit     74eabdd14eb3ed697d6dc4d72c3423ec81dd80f4 (patch)
tree       9d43312d5e2477b49aeefdbe382d046e1941d53c
parent     ddf737c5da728f25c5e0413bc737d04b2d92df96 (diff)
NFC - clean up op accessor usage, std.load/store op verify, other stale info
- also remove stale terminology/references in docs

Signed-off-by: Uday Bondhugula <uday@polymagelabs.com>

Closes tensorflow/mlir#148

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/mlir/pull/148 from bondhugula:cleanup e846b641a3c2936e874138aff480a23cdbf66591
PiperOrigin-RevId: 271618279
-rw-r--r--  mlir/g3doc/Rationale.md                          43
-rw-r--r--  mlir/include/mlir/Analysis/NestedMatcher.h        2
-rw-r--r--  mlir/include/mlir/Dialect/AffineOps/AffineOps.h   4
-rw-r--r--  mlir/include/mlir/Dialect/StandardOps/Ops.td     29
-rw-r--r--  mlir/lib/Dialect/AffineOps/AffineOps.cpp         34
-rw-r--r--  mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp         25
-rw-r--r--  mlir/lib/Dialect/StandardOps/Ops.cpp             26
-rw-r--r--  mlir/lib/Transforms/LoopTiling.cpp                2
-rw-r--r--  mlir/lib/Transforms/LowerAffine.cpp               2
-rw-r--r--  mlir/lib/Transforms/MaterializeVectors.cpp        2
-rw-r--r--  mlir/lib/Transforms/Utils/LoopFusionUtils.cpp     6
-rw-r--r--  mlir/lib/Transforms/Utils/LoopUtils.cpp           5
-rw-r--r--  mlir/test/IR/core-ops.mlir                        2
13 files changed, 79 insertions, 103 deletions
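
The bulk of the cleanup below replaces `op.getOperation()->getX(...)` with the equivalent `op.getX(...)`: the Op base classes forward the common `Operation` accessors, so the explicit hop through `getOperation()` is redundant. A minimal standalone sketch of that forwarding pattern, using hypothetical stand-in types rather than the real MLIR classes:

```cpp
#include <cassert>
#include <vector>

// Hypothetical stand-ins for mlir::Operation and the Op base class,
// illustrating why `op.getOperand(i)` and
// `op.getOperation()->getOperand(i)` are interchangeable.
struct Value {};

struct Operation {
  std::vector<Value *> operands;
  Value *getOperand(unsigned idx) { return operands[idx]; }
  unsigned getNumOperands() {
    return static_cast<unsigned>(operands.size());
  }
};

class OpState {
public:
  explicit OpState(Operation *op) : state(op) {}
  Operation *getOperation() { return state; }
  // Forwarding accessors, as the real Op base classes provide.
  Value *getOperand(unsigned idx) { return getOperation()->getOperand(idx); }
  unsigned getNumOperands() { return getOperation()->getNumOperands(); }

private:
  Operation *state;
};

int main() {
  Value v;
  Operation inner;
  inner.operands.push_back(&v);
  OpState op(&inner);
  // Both spellings resolve to the same operand; the diff keeps the shorter one.
  assert(op.getOperand(0) == op.getOperation()->getOperand(0));
  assert(op.getNumOperands() == 1);
  return 0;
}
```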
diff --git a/mlir/g3doc/Rationale.md b/mlir/g3doc/Rationale.md
index 3122e3cb24d..ba94267a572 100644
--- a/mlir/g3doc/Rationale.md
+++ b/mlir/g3doc/Rationale.md
@@ -211,12 +211,12 @@ appear in subscripts, sizes of aggregate types and affine expressions. They are
also tightly coupled with `affine.apply` and load/store operations; having
`index` type is a necessary precondition of a value to be acceptable by these
operations. While it may be useful to have `memref<?xindex>` to express indirect
-accesses in MLFunctions, e.g. sparse matrix manipulations or lookup tables, it
-creates problems MLIR is not ready to address yet. MLIR needs to internally
-store constants of aggregate types and emit code operating on values of those
-types, which are subject to target-specific size and alignment constraints.
-Since MLIR does not have a target description mechanism at the moment, it cannot
-reliably emit such code. Moreover, some platforms may not support vectors of
+accesses, e.g. sparse matrix manipulations or lookup tables, it creates problems
+MLIR is not ready to address yet. MLIR needs to internally store constants of
+aggregate types and emit code operating on values of those types, which are
+subject to target-specific size and alignment constraints. Since MLIR does not
+have a target description mechanism at the moment, it cannot reliably emit such
+code. Moreover, some platforms may not support vectors of
type equivalent to `index`.
Indirect access use cases can be alternatively supported by providing and
@@ -721,9 +721,9 @@ in a dilated convolution.
// input: [batch, input_height, input_width, input_feature]
// kernel: [kernel_height, kernel_width, input_feature, output_feature]
// output: [batch, output_height, output_width, output_feature]
-func @conv2d(memref<16x1024x1024x3xf32, #lm0, vmem> %input,
- memref<5x5x3x32xf32, #lm0, vmem> %kernel,
- memref<16x512x512x32xf32, #lm0, vmem> %output) {
+func @conv2d(memref<16x1024x1024x3xf32, #lm0, /*scratchpad=*/1> %input,
+ memref<5x5x3x32xf32, #lm0, /*scratchpad=*/1> %kernel,
+ memref<16x512x512x32xf32, #lm0, /*scratchpad=*/1> %output) {
affine.for %b = 0 to %batch {
affine.for %oh = 0 to %output_height {
affine.for %ow = 0 to %output_width {
@@ -794,14 +794,13 @@ At a high level, we have two alternatives here:
explicitly propagate the schedule into domains and model all the cleanup
code. An example and more detail on the schedule tree form is in the next
section.
-1. Having two different forms of MLFunctions: an affine loop tree form
+1. Having two different forms of "affine regions": an affine loop tree form
(AffineLoopTreeFunction) and a polyhedral schedule tree form as two
- different forms of MLFunctions. Or in effect, having four different forms
- for functions in MLIR instead of three: CFG Function,
- AffineLoopTreeFunction, Polyhedral Schedule Tree function, and external
- functions.
+ different forms. Or in effect, having four different forms for functions in
+ MLIR instead of three: CFG Function, AffineLoopTreeFunction, Polyhedral
+ Schedule Tree function, and external functions.
-#### Schedule Tree Representation for MLFunctions
+#### Schedule Tree Representation for Affine Regions
This representation is based on a simplified form of the domain/schedule
representation used by the polyhedral compiler community. Domains represent what
@@ -826,15 +825,15 @@ func @matmul(%A, %B, %C, %M, %N, %K) : (...) { // %M, N, K are symbols
mldim %t1 : {S1,S2,S3,S4,S5} floordiv (i, 128) {
mldim %t2 : {S1,S2,S3,S4,S5} floordiv (j, 128) {
// (%i, %j) = affine.apply (d0, d1) -> (128*d0, 128*d1) (%t1, %t2)
- call dma_hbm_to_vmem(%C, %i, %j, %M, %N, %K)
+ call dma_mem_to_scratchpad(%C, %i, %j, %M, %N, %K)
with @intset_ij(%i, %j) [%M, %N, %K]
mldim %t3 : {S2,S3,S4,S5} floordiv (k, 128) {
// (%i, %j, %k) = affine.apply (d0, d1, d2)
// -> (128*d0, 128*d1, 128*d2) (%t1, %t2, %t3)
- call dma_hbm_to_vmem(%A, ...) with #inset_ijk (%i, %j, %k) [%M, %N, %K]
+ call dma_mem_to_scratchpad(%A, ...) with #inset_ijk (%i, %j, %k) [%M, %N, %K]
// (%i, %j, %k) = affine.apply (d0, d1, d2)
// -> (128*d0, 128*d1, 128*d2) (%t1, %t2, %t3)
- call dma_hbm_to_vmem(%B, ...) with #inset_ijk (%i, %j, %k) [%M, %N, %K]
+ call dma_mem_to_scratchpad(%B, ...) with #inset_ijk (%i, %j, %k) [%M, %N, %K]
mldim %t4 : {S4} i mod 128 {
mldim %t5 : {S4} j mod 128 {
mldim %t6 : {S4} k mod 128 {
@@ -846,7 +845,7 @@ func @matmul(%A, %B, %C, %M, %N, %K) : (...) { // %M, N, K are symbols
} // end mldim t4
} // end mldim t3
// (%i, %j) = affine.apply (d0, d1) -> (128*d0, 128*d1) (%t1, %t2)
- call $dma_vmem_to_hbm_C ... with #intset(%i, %j) [%M, %N, %K]
+ call $dma_scratchpad_to_mem_C ... with #intset(%i, %j) [%M, %N, %K]
} // end mldim t2
} // end mldim t1
return
@@ -978,15 +977,15 @@ Example:
```mlir {.mlir}
##rel9 ( ) [s0] -> (r0, r1) : 0 <= r0 <= 1023, 0 <= r1 <= s0 - 1
-func @cblas_reduce_ffi(memref<1024 x ? x f32, #layout_map0, hbm> %M) -> f32 [
+func @cblas_reduce_ffi(memref<1024 x ? x f32, #layout_map0, /*mem=*/0> %M) -> f32 [
reads: {%M, ##rel9() }
writes: /* empty */
may_reads: /* empty */
may_writes: /* empty */
]
-func @dma_hbm_to_vmem(memref<1024 x f32, #layout_map0, hbm> %a,
- offset, memref<1024 x f32, #layout_map0, vmem> %b,
+func @dma_mem_to_scratchpad(memref<1024 x f32, #layout_map0, /*mem=*/0> %a,
+ offset, memref<1024 x f32, #layout_map0, 1> %b,
memref<1024 x f32, #layout_map0> %c
) [
reads: {%M, ##rel9() }
diff --git a/mlir/include/mlir/Analysis/NestedMatcher.h b/mlir/include/mlir/Analysis/NestedMatcher.h
index b07b73a023a..dd4022a2617 100644
--- a/mlir/include/mlir/Analysis/NestedMatcher.h
+++ b/mlir/include/mlir/Analysis/NestedMatcher.h
@@ -1,4 +1,4 @@
-//===- NestedMacher.h - Nested matcher for MLFunction -----------*- C++ -*-===//
+//===- NestedMacher.h - Nested matcher for Function -------------*- C++ -*-===//
//
// Copyright 2019 The MLIR Authors.
//
diff --git a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
index 75b83e72aa5..9f2bbddb548 100644
--- a/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/Dialect/AffineOps/AffineOps.h
@@ -580,9 +580,7 @@ public:
AffineValueMap getAsAffineValueMap();
unsigned getNumOperands() { return opEnd - opStart; }
- Value *getOperand(unsigned idx) {
- return op.getOperation()->getOperand(opStart + idx);
- }
+ Value *getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
using operand_iterator = AffineForOp::operand_iterator;
using operand_range = AffineForOp::operand_range;
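
The class in this hunk addresses a window [opStart, opEnd) of the parent loop's operand list, which is why every accessor offsets by `opStart`. A small runnable sketch of that windowed-view idea over a plain vector (hypothetical names, not the real MLIR class):

```cpp
#include <cassert>
#include <vector>

// Sketch: a view over the sub-range [opStart, opEnd) of a parent
// operand list, mirroring how the accessor above computes
// `opStart + idx` and `opEnd - opStart`.
struct OperandWindow {
  const std::vector<int> &parentOperands;
  unsigned opStart, opEnd;

  unsigned getNumOperands() const { return opEnd - opStart; }
  int getOperand(unsigned idx) const { return parentOperands[opStart + idx]; }
};

int main() {
  std::vector<int> operands = {10, 20, 30, 40};
  OperandWindow bound{operands, /*opStart=*/1, /*opEnd=*/3};
  assert(bound.getNumOperands() == 2);
  assert(bound.getOperand(0) == 20); // parent operand at opStart + 0
  return 0;
}
```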
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index 78a5f493448..1c548f28522 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -300,7 +300,8 @@ def CallIndirectOp : Std_Op<"call_indirect", [CallOpInterface]> {
let hasCanonicalizer = 1;
}
-def CmpIOp : Std_Op<"cmpi", [NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
+def CmpIOp : Std_Op<"cmpi",
+ [NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
let summary = "integer comparison operation";
let description = [{
The "cmpi" operation compares its two operands according to the integer
@@ -345,7 +346,8 @@ def CmpIOp : Std_Op<"cmpi", [NoSideEffect, SameTypeOperands, SameOperandsAndResu
let hasFolder = 1;
}
-def CmpFOp : Std_Op<"cmpf", [NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
+def CmpFOp : Std_Op<"cmpf",
+ [NoSideEffect, SameTypeOperands, SameOperandsAndResultShape]> {
let summary = "floating-point comparison operation";
let description = [{
The "cmpf" operation compares its two operands according to the float
@@ -431,12 +433,12 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
/// Return the destination if the condition is true.
Block *getTrueDest() {
- return getOperation()->getSuccessor(trueIndex);
+ return getSuccessor(trueIndex);
}
/// Return the destination if the condition is false.
Block *getFalseDest() {
- return getOperation()->getSuccessor(falseIndex);
+ return getSuccessor(falseIndex);
}
// Accessors for operands to the 'true' destination.
@@ -461,7 +463,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
unsigned getNumTrueOperands() {
- return getOperation()->getNumSuccessorOperands(trueIndex);
+ return getNumSuccessorOperands(trueIndex);
}
/// Erase the operand at 'index' from the true operand list.
@@ -488,7 +490,7 @@ def CondBranchOp : Std_Op<"cond_br", [Terminator]> {
}
unsigned getNumFalseOperands() {
- return getOperation()->getNumSuccessorOperands(falseIndex);
+ return getNumSuccessorOperands(falseIndex);
}
/// Erase the operand at 'index' from the false operand list.
@@ -624,8 +626,7 @@ def ExtractElementOp : Std_Op<"extract_element", [NoSideEffect]> {
Value *getAggregate() { return getOperand(0); }
operand_range getIndices() {
- return {getOperation()->operand_begin() + 1,
- getOperation()->operand_end()};
+ return {operand_begin() + 1, operand_end()};
}
}];
@@ -698,9 +699,7 @@ def LoadOp : Std_Op<"load"> {
return getMemRef()->getType().cast<MemRefType>();
}
- operand_range getIndices() {
- return {getOperation()->operand_begin() + 1, getOperation()->operand_end()};
- }
+ operand_range getIndices() { return {operand_begin() + 1, operand_end()}; }
}];
let hasCanonicalizer = 1;
@@ -843,7 +842,8 @@ def SelectOp : Std_Op<"select", [NoSideEffect, SameOperandsAndResultShape]> {
let hasFolder = 1;
}
-def SignExtendIOp : Std_Op<"sexti", [NoSideEffect, SameOperandsAndResultShape]> {
+def SignExtendIOp : Std_Op<"sexti",
+ [NoSideEffect, SameOperandsAndResultShape]> {
let summary = "integer sign extension operation";
let description = [{
The integer sign extension operation takes an integer input of
@@ -930,7 +930,8 @@ def StoreOp : Std_Op<"store"> {
store %v, %A[%i, %j] : memref<4x128xf32, (d0, d1) -> (d0, d1), 0>
}];
- let arguments = (ins AnyType:$value, AnyMemRef:$memref, Variadic<Index>:$indices);
+ let arguments = (ins AnyType:$value, AnyMemRef:$memref,
+ Variadic<Index>:$indices);
let builders = [OpBuilder<
"Builder *, OperationState &result, Value *valueToStore, Value *memref", [{
@@ -948,7 +949,7 @@ def StoreOp : Std_Op<"store"> {
}
operand_range getIndices() {
- return {getOperation()->operand_begin() + 2, getOperation()->operand_end()};
+ return {operand_begin() + 2, operand_end()};
}
}];
diff --git a/mlir/lib/Dialect/AffineOps/AffineOps.cpp b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
index 94ce1386a9a..f2f9f92cb82 100644
--- a/mlir/lib/Dialect/AffineOps/AffineOps.cpp
+++ b/mlir/lib/Dialect/AffineOps/AffineOps.cpp
@@ -188,7 +188,7 @@ void AffineApplyOp::build(Builder *builder, OperationState &result,
ParseResult AffineApplyOp::parse(OpAsmParser &parser, OperationState &result) {
auto &builder = parser.getBuilder();
- auto affineIntTy = builder.getIndexType();
+ auto indexTy = builder.getIndexType();
AffineMapAttr mapAttr;
unsigned numDims;
@@ -204,7 +204,7 @@ ParseResult AffineApplyOp::parse(OpAsmParser &parser, OperationState &result) {
"dimension or symbol index mismatch");
}
- result.types.append(map.getNumResults(), affineIntTy);
+ result.types.append(map.getNumResults(), indexTy);
return success();
}
@@ -1139,7 +1139,7 @@ static ParseResult parseBound(bool isLower, OperationState &result,
return p.emitError(p.getNameLoc(),
"expected only one loop bound operand");
- // TODO: improve error message when SSA value is not an affine integer.
+ // TODO: improve error message when SSA value is not of index type.
// Currently it is 'use of value ... expects different type than prior uses'
if (p.resolveOperand(boundOpInfos.front(), builder.getIndexType(),
result.operands))
@@ -1754,7 +1754,7 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
ParseResult AffineLoadOp::parse(OpAsmParser &parser, OperationState &result) {
auto &builder = parser.getBuilder();
- auto affineIntTy = builder.getIndexType();
+ auto indexTy = builder.getIndexType();
MemRefType type;
OpAsmParser::OperandType memrefInfo;
@@ -1767,7 +1767,7 @@ ParseResult AffineLoadOp::parse(OpAsmParser &parser, OperationState &result) {
parser.parseOptionalAttributeDict(result.attributes) ||
parser.parseColonType(type) ||
parser.resolveOperand(memrefInfo, type, result.operands) ||
- parser.resolveOperands(mapOperands, affineIntTy, result.operands) ||
+ parser.resolveOperands(mapOperands, indexTy, result.operands) ||
parser.addTypeToList(type.getElementType(), result.types));
}
@@ -1845,24 +1845,24 @@ void AffineStoreOp::build(Builder *builder, OperationState &result,
}
ParseResult AffineStoreOp::parse(OpAsmParser &parser, OperationState &result) {
- auto affineIntTy = parser.getBuilder().getIndexType();
+ auto indexTy = parser.getBuilder().getIndexType();
MemRefType type;
OpAsmParser::OperandType storeValueInfo;
OpAsmParser::OperandType memrefInfo;
AffineMapAttr mapAttr;
SmallVector<OpAsmParser::OperandType, 1> mapOperands;
- return failure(
- parser.parseOperand(storeValueInfo) || parser.parseComma() ||
- parser.parseOperand(memrefInfo) ||
- parser.parseAffineMapOfSSAIds(mapOperands, mapAttr, getMapAttrName(),
- result.attributes) ||
- parser.parseOptionalAttributeDict(result.attributes) ||
- parser.parseColonType(type) ||
- parser.resolveOperand(storeValueInfo, type.getElementType(),
- result.operands) ||
- parser.resolveOperand(memrefInfo, type, result.operands) ||
- parser.resolveOperands(mapOperands, affineIntTy, result.operands));
+ return failure(parser.parseOperand(storeValueInfo) || parser.parseComma() ||
+ parser.parseOperand(memrefInfo) ||
+ parser.parseAffineMapOfSSAIds(mapOperands, mapAttr,
+ getMapAttrName(),
+ result.attributes) ||
+ parser.parseOptionalAttributeDict(result.attributes) ||
+ parser.parseColonType(type) ||
+ parser.resolveOperand(storeValueInfo, type.getElementType(),
+ result.operands) ||
+ parser.resolveOperand(memrefInfo, type, result.operands) ||
+ parser.resolveOperands(mapOperands, indexTy, result.operands));
}
void AffineStoreOp::print(OpAsmPrinter &p) {
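
The reformatted parsers above all rely on one idiom: each parse step returns true on error, the steps are chained with short-circuiting `||`, and the combined flag is wrapped in `failure(...)`. A reduced sketch of that control flow with stand-in functions (not the real OpAsmParser API):

```cpp
#include <cstdio>

// Stand-in for LogicalResult/ParseResult semantics: `true` means failure,
// matching the convention the chained parser calls above depend on.
struct LogicalResult {
  bool failed;
};
LogicalResult failure(bool isFailure = true) { return {isFailure}; }

bool parseOperand() { return false; }  // succeeds
bool parseColonType() { return true; } // fails
bool resolveOperand() {
  std::printf("resolveOperand reached\n"); // never printed: short-circuited
  return false;
}

LogicalResult parseOp() {
  // Stops at the first failing step; later steps are not evaluated.
  return failure(parseOperand() || parseColonType() || resolveOperand());
}

int main() {
  std::printf("parse %s\n", parseOp().failed ? "failed" : "succeeded");
  return 0;
}
```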
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index bd15869e0c6..f3251fcff87 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -406,14 +406,14 @@ static ParseResult parseLoadOp(OpAsmParser &parser, OperationState &result) {
SmallVector<OpAsmParser::OperandType, 4> indexInfo;
ViewType type;
- auto affineIntTy = parser.getBuilder().getIndexType();
+ auto indexTy = parser.getBuilder().getIndexType();
return failure(
parser.parseOperand(viewInfo) ||
parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
parser.parseOptionalAttributeDict(result.attributes) ||
parser.parseColonType(type) ||
parser.resolveOperand(viewInfo, type, result.operands) ||
- parser.resolveOperands(indexInfo, affineIntTy, result.operands) ||
+ parser.resolveOperands(indexInfo, indexTy, result.operands) ||
parser.addTypeToList(type.getElementType(), result.types));
}
@@ -438,15 +438,14 @@ static void print(OpAsmPrinter &p, RangeOp op) {
static ParseResult parseRangeOp(OpAsmParser &parser, OperationState &result) {
SmallVector<OpAsmParser::OperandType, 3> rangeInfo(3);
RangeType type;
- auto affineIntTy = parser.getBuilder().getIndexType();
- return failure(
- parser.parseOperand(rangeInfo[0]) || parser.parseColon() ||
- parser.parseOperand(rangeInfo[1]) || parser.parseColon() ||
- parser.parseOperand(rangeInfo[2]) ||
- parser.parseOptionalAttributeDict(result.attributes) ||
- parser.parseColonType(type) ||
- parser.resolveOperands(rangeInfo, affineIntTy, result.operands) ||
- parser.addTypeToList(type, result.types));
+ auto indexTy = parser.getBuilder().getIndexType();
+ return failure(parser.parseOperand(rangeInfo[0]) || parser.parseColon() ||
+ parser.parseOperand(rangeInfo[1]) || parser.parseColon() ||
+ parser.parseOperand(rangeInfo[2]) ||
+ parser.parseOptionalAttributeDict(result.attributes) ||
+ parser.parseColonType(type) ||
+ parser.resolveOperands(rangeInfo, indexTy, result.operands) ||
+ parser.addTypeToList(type, result.types));
}
//===----------------------------------------------------------------------===//
@@ -538,7 +537,7 @@ static ParseResult parseStoreOp(OpAsmParser &parser, OperationState &result) {
SmallVector<OpAsmParser::OperandType, 4> indexInfo;
ViewType viewType;
- auto affineIntTy = parser.getBuilder().getIndexType();
+ auto indexTy = parser.getBuilder().getIndexType();
return failure(
parser.parseOperand(storeValueInfo) || parser.parseComma() ||
parser.parseOperand(viewInfo) ||
@@ -548,7 +547,7 @@ static ParseResult parseStoreOp(OpAsmParser &parser, OperationState &result) {
parser.resolveOperand(storeValueInfo, viewType.getElementType(),
result.operands) ||
parser.resolveOperand(viewInfo, viewType, result.operands) ||
- parser.resolveOperands(indexInfo, affineIntTy, result.operands));
+ parser.resolveOperands(indexInfo, indexTy, result.operands));
}
static LogicalResult verify(linalg::StoreOp op) {
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index 3af8f1432fc..023bbaf366f 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -358,7 +358,7 @@ static LogicalResult verify(AllocOp op) {
// the affine map, plus the number of dynamic dimensions specified in the
// memref type.
unsigned numDynamicDims = memRefType.getNumDynamicDims();
- if (op.getOperation()->getNumOperands() != numDynamicDims + numSymbols)
+ if (op.getNumOperands() != numDynamicDims + numSymbols)
return op.emitOpError(
"operand count does not equal dimension plus symbol operand count");
@@ -474,11 +474,9 @@ static void print(OpAsmPrinter &p, BranchOp op) {
p.printSuccessorAndUseList(op.getOperation(), 0);
}
-Block *BranchOp::getDest() { return getOperation()->getSuccessor(0); }
+Block *BranchOp::getDest() { return getSuccessor(0); }
-void BranchOp::setDest(Block *block) {
- return getOperation()->setSuccessor(block, 0);
-}
+void BranchOp::setDest(Block *block) { return setSuccessor(block, 0); }
void BranchOp::eraseOperand(unsigned index) {
getOperation()->eraseSuccessorOperand(0, index);
@@ -1754,17 +1752,9 @@ static LogicalResult verify(LoadOp op) {
if (op.getType() != op.getMemRefType().getElementType())
return op.emitOpError("result type must match element type of memref");
- if (op.getMemRefType().getRank() != op.getNumOperands() - 1)
+ if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
return op.emitOpError("incorrect number of indices for load");
- for (auto *idx : op.getIndices())
- if (!idx->getType().isIndex())
- return op.emitOpError("index to load must have 'index' type");
-
- // TODO: Verify we have the right number of indices.
-
- // TODO: in Function verify that the indices are parameters, IV's, or the
- // result of an affine.apply.
return success();
}
@@ -2133,14 +2123,6 @@ static LogicalResult verify(StoreOp op) {
if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
return op.emitOpError("store index operand count not equal to memref rank");
- for (auto *idx : op.getIndices())
- if (!idx->getType().isIndex())
- return op.emitOpError("index to load must have 'index' type");
-
- // TODO: Verify we have the right number of indices.
-
- // TODO: in Function verify that the indices are parameters, IV's, or the
- // result of an affine.apply.
return success();
}
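
The per-index `'index'` type checks deleted above are redundant once the indices are declared as `Variadic<Index>:$indices` in ODS (see the Ops.td hunk earlier), which already enforces the element type; what remains is the operand-count check. A tiny sketch of that remaining arithmetic, with hypothetical helper names:

```cpp
#include <cassert>

// load operands:  [memref, idx0 ... idx(r-1)]         -> 1 + rank
// store operands: [value, memref, idx0 ... idx(r-1)]  -> 2 + rank
bool loadOperandCountOk(unsigned numOperands, unsigned memrefRank) {
  return numOperands == 1 + memrefRank;
}
bool storeOperandCountOk(unsigned numOperands, unsigned memrefRank) {
  return numOperands == 2 + memrefRank;
}

int main() {
  // e.g. load %A[%i, %j] on a rank-2 memref: 3 operands in total.
  assert(loadOperandCountOk(3, 2));
  // e.g. store %v, %A[%i, %j]: 4 operands in total.
  assert(storeOperandCountOk(4, 2));
  return 0;
}
```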
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index d90e727b0ac..af1ecd06ee6 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -190,7 +190,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
// Check if the supplied for op's are all successively nested.
for (unsigned i = 1, e = band.size(); i < e; i++) {
- assert(band[i].getOperation()->getParentOp() == band[i - 1].getOperation());
+ assert(band[i].getParentOp() == band[i - 1].getOperation());
}
auto origLoops = band;
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 045cf04274c..72d52b9a4f5 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -335,7 +335,7 @@ public:
// Now we just have to handle the condition logic.
auto integerSet = op.getIntegerSet();
Value *zeroConstant = rewriter.create<ConstantIndexOp>(loc, 0);
- SmallVector<Value *, 8> operands(op.getOperation()->getOperands());
+ SmallVector<Value *, 8> operands(op.getOperands());
auto operandsRef = llvm::makeArrayRef(operands);
// Calculate cond as a conjunction without short-circuiting.
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 08bd6644111..a0b60dd3648 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -721,7 +721,7 @@ static bool materialize(FuncOp f, const SetVector<Operation *> &terminators,
if (fail) {
return true;
}
- LLVM_DEBUG(dbgs() << "\nMLFunction is now\n");
+ LLVM_DEBUG(dbgs() << "\nFunction is now\n");
LLVM_DEBUG(f.print(dbgs()));
}
return false;
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index 8f96cc23fb9..7b2c323b806 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -187,7 +187,7 @@ static bool
gatherLoadsAndStores(AffineForOp forOp,
SmallVectorImpl<Operation *> &loadAndStoreOps) {
bool hasIfOp = false;
- forOp.getOperation()->walk([&](Operation *op) {
+ forOp.walk([&](Operation *op) {
if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op))
loadAndStoreOps.push_back(op);
else if (isa<AffineIfOp>(op))
@@ -261,7 +261,7 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
auto *childForOp = forOp.getOperation();
- auto *parentForOp = forOp.getOperation()->getParentOp();
+ auto *parentForOp = forOp.getParentOp();
if (!llvm::isa<FuncOp>(parentForOp)) {
if (!isa<AffineForOp>(parentForOp)) {
LLVM_DEBUG(llvm::dbgs() << "Expected parent AffineForOp");
@@ -444,7 +444,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
// forwarding to remove.
unsigned storeCount = 0;
llvm::SmallDenseSet<Value *, 4> storeMemrefs;
- srcForOp.getOperation()->walk([&](Operation *op) {
+ srcForOp.walk([&](Operation *op) {
if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
storeMemrefs.insert(storeOp.getMemRef());
++storeCount;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index f7dd02ff877..1872044b0fb 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -367,10 +367,7 @@ void getPerfectlyNestedLoopsImpl(
unsigned maxLoops = std::numeric_limits<unsigned>::max()) {
for (unsigned i = 0; i < maxLoops; ++i) {
forOps.push_back(rootForOp);
- // FIXME: ForOp and AffineForOp currently provide different names to access
- // the region ("region" and "getRegion"). Remove this generic access when
- // AffineForOp moves to ODS and also gets "region".
- Block &body = rootForOp.getOperation()->getRegion(0).front();
+ Block &body = rootForOp.region().front();
if (body.begin() != std::prev(body.end(), 2))
return;
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 6d9a0c1de4f..82c04e05f3d 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -462,7 +462,7 @@ func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>) {
func @test_dimop(%arg0: tensor<4x4x?xf32>) {
// CHECK: %0 = dim %arg0, 2 : tensor<4x4x?xf32>
%0 = dim %arg0, 2 : tensor<4x4x?xf32>
- // use dim as an affine_int to ensure type correctness
+ // use dim as an index to ensure type correctness
%1 = affine.apply (d0) -> (d0)(%0)
return
}