author     Nicolas Vasilache <ntv@google.com>   2020-01-06 22:14:14 -0500
committer  Nicolas Vasilache <ntv@google.com>   2020-01-06 22:21:19 -0500
commit     e3750cafdb17f5b7431f18c0229bfb597dde4c88 (patch)
tree       dcc7e49a8544cf7c5f886a83bf9164d801b3cbe3 /mlir/include
parent     d877229b5b21c833e2344eda7c07f9c49d4dc453 (diff)
[mlir][Linalg] Add a linalg.reshape op
Summary:
This diff adds a new operation to linalg to allow reshaping of an existing view into a new view in the same buffer at the same offset.

More specifically:
The `linalg.reshape` op produces a new view whose sizes are a reassociation of the original `view`. Depending on whether or not the reassociated MemRefType is contiguous, the resulting memref may require explicit alloc and copies.

A reassociation is defined as a continuous grouping of dimensions and is represented with an affine map array attribute. In the future, non-continuous groupings may be allowed (i.e. permutations, reindexings, etc.).

For now, it is assumed that either:
  1. a reassociation produces and consumes contiguous MemRefType or,
  2. the reshape op will be folded into its consumers (by changing the shape of the computations).
All other cases are undefined behavior and a reshape op may not lower to LLVM if it cannot be proven statically that it does not require alloc+copy.

A reshape may either collapse or expand dimensions, depending on the relationship between source and target memref ranks. The verification rule is that the reassociation maps are applied to the memref with the larger rank to obtain the memref with the smaller rank. In the case of a dimension expansion, the reassociation maps can be interpreted as inverse maps.

Examples:

```mlir
// Dimension collapse (i, j) -> i' and k -> k'
%1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
  memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
```

```mlir
// Dimension expansion i -> (i', j') and (k) -> (k')
%1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
  memref<?x?xf32, stride_spec> into memref<?x?x?xf32, stride_spec_2>
```

The relevant invalid and roundtripping tests are added.

Reviewers: AlexEichenberger, ftynse, rriddle, asaadaldien, yangjunpro

Subscribers: kiszk, merge_guards_bot, mehdi_amini, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D72168
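As an illustration only (not part of the patch), here is a rough C++ sketch of how the collapse example above might be constructed programmatically with the builder this diff declares in LinalgOps.td. The names `builder`, `loc`, and `view` are assumed to be in scope, and the exact `AffineMap::get` overload may vary between MLIR revisions.

```cpp
// Hedged sketch: build the collapse example above from C++.
// Assumes `builder` (OpBuilder), `loc` (Location) and `view` (a Value of
// type memref<?x?x?xf32, stride_spec>) already exist in the calling code.
MLIRContext *ctx = builder.getContext();
AffineExpr i = getAffineDimExpr(0, ctx);
AffineExpr j = getAffineDimExpr(1, ctx);
AffineExpr k = getAffineDimExpr(2, ctx);
// Reassociation: group (i, j) -> i' and (k) -> k'.
AffineMap ij = AffineMap::get(/*dimCount=*/3, /*symbolCount=*/0, {i, j});
AffineMap kk = AffineMap::get(/*dimCount=*/3, /*symbolCount=*/0, {k});
ArrayAttr reassociation = builder.getAffineMapArrayAttr({ij, kk});
// Uses the custom OpBuilder added in this diff; the result memref type is
// expected to be computed from `view` and the reassociation.
Value collapsed =
    builder.create<linalg::ReshapeOp>(loc, view, reassociation);
```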
Diffstat (limited to 'mlir/include')
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h |  1
-rw-r--r--  mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td   | 52
-rw-r--r--  mlir/include/mlir/IR/AffineExpr.h                  | 19
-rw-r--r--  mlir/include/mlir/IR/StandardTypes.h               | 14
4 files changed, 76 insertions, 10 deletions
diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h b/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
index b04c11f22bb..42b286d504f 100644
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/Intrinsics.h
@@ -17,6 +17,7 @@ namespace edsc {
namespace intrinsics {
using linalg_fill = OperationBuilder<linalg::FillOp>;
+using linalg_reshape = OperationBuilder<linalg::ReshapeOp>;
using linalg_yield = OperationBuilder<linalg::YieldOp>;
} // namespace intrinsics
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
index d517c0a61aa..e3fab873046 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -58,6 +58,58 @@ def Linalg_RangeOp :
let verifier = ?;
}
+def Linalg_ReshapeOp : Linalg_Op<"reshape", [NoSideEffect]>,
+ Arguments<(ins AnyStridedMemRef:$view, AffineMapArrayAttr:$reassociation)>,
+ Results<(outs AnyStridedMemRef)> {
+ let summary = "linalg.reshape produces a new view into the operand view";
+ let description = [{
+ The `linalg.reshape` op produces a new view whose sizes are a reassociation
+ of the original `view`. Depending on whether or not the reassociated
+ MemRefType is contiguous, the resulting memref may require explicit alloc
+ and copies.
+
+ A reassociation is defined as a continuous grouping of dimensions and is
+ represented with an affine map array attribute. In the future, non-continuous
+ groupings may be allowed (i.e. permutations, reindexings, etc.).
+
+ For now, it is assumed that either:
+ 1. a reassociation produces and consumes contiguous MemRefType or,
+ 2. the reshape op will be folded into its consumers (by changing the shape
+ of the computations).
+ All other cases are undefined behavior and a reshape op may not lower to
+ LLVM if it cannot be proven statically that it does not require alloc+copy.
+
+ A reshape may either collapse or expand dimensions, depending on the
+ relationship between source and target memref ranks. The verification rule
+ is that the reassociation maps are applied to the memref with the larger
+ rank to obtain the memref with the smaller rank. In the case of a dimension
+ expansion, the reassociation maps can be interpreted as inverse maps.
+
+ Examples:
+
+ ```mlir
+ // Dimension collapse (i, j) -> i' and k -> k'
+ %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+ memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
+ ```
+
+ ```mlir
+ // Dimension expansion i -> (i', j') and (k) -> (k')
+ %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+ memref<?x?xf32, stride_spec> into memref<?x?x?xf32, stride_spec_2>
+ ```
+ }];
+
+ let builders = [OpBuilder<
+ "Builder *b, OperationState &result, Value view, "
+ "ArrayAttr reassociation, ArrayRef<NamedAttribute> attrs = {}">];
+
+ let extraClassDeclaration = [{
+ static StringRef getReassociationAttrName() { return "reassociation"; }
+ MemRefType getViewType() { return view().getType().cast<MemRefType>(); }
+ }];
+}
+
def Linalg_SliceOp : Linalg_Op<"slice", [NoSideEffect]>,
Arguments<(ins AnyStridedMemRef:$view,
Variadic<AnyTypeOf<[Range, Index]>>:$indexings)>,
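For illustration, a hedged C++ sketch of how a transformation might consume the accessors declared in the `extraClassDeclaration` above. `inspectReshape` is a hypothetical helper, and `reassociation()` is the attribute getter that ODS conventionally generates for the `$reassociation` argument.

```cpp
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "llvm/Support/raw_ostream.h"

// Hedged sketch: query a linalg.reshape op using the accessors declared in
// the TableGen definition above.
void inspectReshape(mlir::linalg::ReshapeOp op) {
  mlir::MemRefType srcTy = op.getViewType();      // from extraClassDeclaration
  mlir::ArrayAttr maps = op.reassociation();      // ODS-generated getter
  llvm::errs() << "source rank: " << srcTy.getRank()
               << ", reassociation groups: " << maps.size() << "\n";
}
```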
diff --git a/mlir/include/mlir/IR/AffineExpr.h b/mlir/include/mlir/IR/AffineExpr.h
index 7059489ed4c..3f3c82f0568 100644
--- a/mlir/include/mlir/IR/AffineExpr.h
+++ b/mlir/include/mlir/IR/AffineExpr.h
@@ -87,6 +87,7 @@ public:
template <typename U> bool isa() const;
template <typename U> U dyn_cast() const;
+ template <typename U> U dyn_cast_or_null() const;
template <typename U> U cast() const;
MLIRContext *getContext() const;
@@ -226,25 +227,23 @@ AffineExpr toAffineExpr(ArrayRef<int64_t> eq, unsigned numDims,
raw_ostream &operator<<(raw_ostream &os, AffineExpr &expr);
template <typename U> bool AffineExpr::isa() const {
- if (std::is_same<U, AffineBinaryOpExpr>::value) {
+ if (std::is_same<U, AffineBinaryOpExpr>::value)
return getKind() <= AffineExprKind::LAST_AFFINE_BINARY_OP;
- }
- if (std::is_same<U, AffineDimExpr>::value) {
+ if (std::is_same<U, AffineDimExpr>::value)
return getKind() == AffineExprKind::DimId;
- }
- if (std::is_same<U, AffineSymbolExpr>::value) {
+ if (std::is_same<U, AffineSymbolExpr>::value)
return getKind() == AffineExprKind::SymbolId;
- }
- if (std::is_same<U, AffineConstantExpr>::value) {
+ if (std::is_same<U, AffineConstantExpr>::value)
return getKind() == AffineExprKind::Constant;
- }
}
template <typename U> U AffineExpr::dyn_cast() const {
- if (isa<U>()) {
+ if (isa<U>())
return U(expr);
- }
return U(nullptr);
}
+template <typename U> U AffineExpr::dyn_cast_or_null() const {
+ return (!*this || !isa<U>()) ? U(nullptr) : U(expr);
+}
template <typename U> U AffineExpr::cast() const {
assert(isa<U>());
return U(expr);
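A small hedged sketch of the new `dyn_cast_or_null` in use; `printStride` is a hypothetical helper. The point is only that a null `AffineExpr` no longer needs a separate check before querying its kind, unlike with `dyn_cast`.

```cpp
#include "mlir/IR/AffineExpr.h"
#include "llvm/Support/raw_ostream.h"

// Hedged sketch: classify a possibly-null stride expression in one step.
void printStride(mlir::AffineExpr stride) {
  if (auto cst = stride.dyn_cast_or_null<mlir::AffineConstantExpr>())
    llvm::outs() << "static stride: " << cst.getValue() << "\n";
  else
    llvm::outs() << "dynamic or unknown stride\n";
}
```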
diff --git a/mlir/include/mlir/IR/StandardTypes.h b/mlir/include/mlir/IR/StandardTypes.h
index 89ffc45e547..4c5dbad4550 100644
--- a/mlir/include/mlir/IR/StandardTypes.h
+++ b/mlir/include/mlir/IR/StandardTypes.h
@@ -16,6 +16,7 @@ struct fltSemantics;
} // namespace llvm
namespace mlir {
+class AffineExpr;
class AffineMap;
class FloatType;
class IndexType;
@@ -245,6 +246,9 @@ public:
/// Whether the given dimension size indicates a dynamic dimension.
static constexpr bool isDynamic(int64_t dSize) { return dSize < 0; }
+ static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) {
+ return dStrideOrOffset == kDynamicStrideOrOffset;
+ }
};
/// Vector types represent multi-dimensional SIMD vectors, and have a fixed
@@ -548,6 +552,9 @@ public:
LogicalResult getStridesAndOffset(MemRefType t,
SmallVectorImpl<int64_t> &strides,
int64_t &offset);
+LogicalResult getStridesAndOffset(MemRefType t,
+ SmallVectorImpl<AffineExpr> &strides,
+ AffineExpr &offset);
/// Given a list of strides (in which MemRefType::getDynamicStrideOrOffset()
/// represents a dynamic value), return the single result AffineMap which
@@ -569,6 +576,13 @@ LogicalResult getStridesAndOffset(MemRefType t,
AffineMap makeStridedLinearLayoutMap(ArrayRef<int64_t> strides, int64_t offset,
MLIRContext *context);
+/// Return a version of `t` with identity layout if it can be determined
+/// statically that the layout is the canonical contiguous strided layout.
+/// Otherwise pass `t`'s layout into `simplifyAffineMap` and return a copy of
+/// `t` with simplified layout.
+MemRefType canonicalizeStridedLayout(MemRefType t);
+
+/// Return true if the layout for `t` is compatible with strided semantics.
bool isStrided(MemRefType t);
} // end namespace mlir
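Finally, a hedged sketch combining the declarations touched in this file: extracting strides with `getStridesAndOffset`, classifying them with the new `isDynamicStrideOrOffset`, and canonicalizing the layout. `reportStrides` is a hypothetical helper, and the includes reflect the `StandardTypes.h` layout in this tree.

```cpp
#include "mlir/IR/StandardTypes.h"
#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

// Hedged sketch: print each stride of a strided memref ("?" for dynamic
// strides) and the canonicalized type, which folds to the identity layout
// when the memref is statically contiguous.
void reportStrides(mlir::MemRefType t) {
  llvm::SmallVector<int64_t, 4> strides;
  int64_t offset;
  if (mlir::failed(mlir::getStridesAndOffset(t, strides, offset)))
    return; // layout is not strided
  for (int64_t s : strides) {
    if (mlir::ShapedType::isDynamicStrideOrOffset(s))
      llvm::outs() << "? ";
    else
      llvm::outs() << s << " ";
  }
  llvm::outs() << "\ncanonical type: " << mlir::canonicalizeStridedLayout(t)
               << "\n";
}
```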