author     River Riddle <riverriddle@google.com>    2020-01-11 08:54:04 -0800
committer  River Riddle <riverriddle@google.com>    2020-01-11 08:54:39 -0800
commit     2bdf33cc4c733342fc83081bc7410ac5e9a24f55 (patch)
tree       3306d769c2bbabda1060928e0cea79d021ea9da2 /mlir/examples
parent     1d641daf260308815d014d1bf1b424a1ed1e7277 (diff)
[mlir] NFC: Remove Value::operator* and Value::operator-> now that Value is properly value-typed.

Summary: These were temporary methods used to simplify the transition.

Reviewed By: antiagainst

Differential Revision: https://reviews.llvm.org/D72548
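For context, the call-site migration this change implies looks like the following. This is a minimal sketch, not code from the commit; it assumes the MLIR API at this revision (January 2020), where mlir::Value is value-typed and exposes its members directly:

    // Sketch only: shows the accessor change this commit applies across the
    // Toy examples.
    #include "mlir/IR/Value.h"

    void example(mlir::Value v) {
      // Before: pointer-like access through the temporary operators.
      //   mlir::Type ty = v->getType();
      //   mlir::Operation *def = (*v).getDefiningOp();
      // After: members are reached directly with '.':
      mlir::Type ty = v.getType();
      mlir::Operation *def = v.getDefiningOp(); // null if 'v' is a block argument
      (void)ty;
      (void)def;
    }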
Diffstat (limited to 'mlir/examples')
-rw-r--r--  mlir/examples/toy/Ch2/mlir/Dialect.cpp      5
-rw-r--r--  mlir/examples/toy/Ch3/mlir/Dialect.cpp      5
-rw-r--r--  mlir/examples/toy/Ch3/mlir/ToyCombine.cpp   2
-rw-r--r--  mlir/examples/toy/Ch3/mlir/ToyCombine.td    4
-rw-r--r--  mlir/examples/toy/Ch4/mlir/Dialect.cpp     17
-rw-r--r--  mlir/examples/toy/Ch4/mlir/ToyCombine.cpp   2
-rw-r--r--  mlir/examples/toy/Ch4/mlir/ToyCombine.td    4
-rw-r--r--  mlir/examples/toy/Ch5/mlir/Dialect.cpp     17
-rw-r--r--  mlir/examples/toy/Ch5/mlir/ToyCombine.cpp   2
-rw-r--r--  mlir/examples/toy/Ch5/mlir/ToyCombine.td    4
-rw-r--r--  mlir/examples/toy/Ch6/mlir/Dialect.cpp     17
-rw-r--r--  mlir/examples/toy/Ch6/mlir/ToyCombine.cpp   2
-rw-r--r--  mlir/examples/toy/Ch6/mlir/ToyCombine.td    4
-rw-r--r--  mlir/examples/toy/Ch7/mlir/Dialect.cpp     26
-rw-r--r--  mlir/examples/toy/Ch7/mlir/MLIRGen.cpp      4
-rw-r--r--  mlir/examples/toy/Ch7/mlir/ToyCombine.cpp   2
-rw-r--r--  mlir/examples/toy/Ch7/mlir/ToyCombine.td    4
17 files changed, 58 insertions, 63 deletions
diff --git a/mlir/examples/toy/Ch2/mlir/Dialect.cpp b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
index 6b4d669d18e..f9301605b46 100644
--- a/mlir/examples/toy/Ch2/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
@@ -54,8 +54,7 @@ void ConstantOp::build(mlir::Builder *builder, mlir::OperationState &state,
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
@@ -158,7 +157,7 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch3/mlir/Dialect.cpp b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
index 6b4d669d18e..f9301605b46 100644
--- a/mlir/examples/toy/Ch3/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
@@ -54,8 +54,7 @@ void ConstantOp::build(mlir::Builder *builder, mlir::OperationState &state,
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
@@ -158,7 +157,7 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
index e3205402179..e261e77b02e 100644
--- a/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch3/mlir/ToyCombine.cpp
@@ -41,7 +41,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
diff --git a/mlir/examples/toy/Ch3/mlir/ToyCombine.td b/mlir/examples/toy/Ch3/mlir/ToyCombine.td
index e6e33e84d7e..fc7ffafa430 100644
--- a/mlir/examples/toy/Ch3/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch3/mlir/ToyCombine.td
@@ -41,7 +41,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
@@ -54,7 +54,7 @@ def FoldConstantReshapeOptPattern : Pat<
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
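In the DRR patterns above, $0 and $1 bind to mlir::Value, so the C++ snippets inside NativeCodeCall and CPred must also switch from '->' to '.'. As a rough illustration (hypothetical names, not the actual tblgen-generated code), the TypesAreIdentical constraint now expands to a check like:

    // Illustrative expansion only; assumes both captured values are mlir::Value.
    static bool typesAreIdentical(mlir::Value res, mlir::Value arg) {
      return res.getType() == arg.getType();
    }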
diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
index 0a9ded0c3d3..c0bd6f79aa1 100644
--- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
@@ -53,7 +53,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
@@ -104,8 +104,7 @@ void ConstantOp::build(mlir::Builder *builder, mlir::OperationState &state,
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
@@ -142,14 +141,14 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
@@ -183,7 +182,7 @@ void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
@@ -233,13 +232,13 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
index 82c247c1be2..3c41958ed31 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
@@ -46,7 +46,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
diff --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.td b/mlir/examples/toy/Ch4/mlir/ToyCombine.td
index e6e33e84d7e..fc7ffafa430 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.td
@@ -41,7 +41,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
@@ -54,7 +54,7 @@ def FoldConstantReshapeOptPattern : Pat<
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
index 0a9ded0c3d3..c0bd6f79aa1 100644
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -53,7 +53,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
@@ -104,8 +104,7 @@ void ConstantOp::build(mlir::Builder *builder, mlir::OperationState &state,
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
@@ -142,14 +141,14 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
@@ -183,7 +182,7 @@ void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
@@ -233,13 +232,13 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
index 82c247c1be2..3c41958ed31 100644
--- a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
@@ -46,7 +46,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
diff --git a/mlir/examples/toy/Ch5/mlir/ToyCombine.td b/mlir/examples/toy/Ch5/mlir/ToyCombine.td
index e6e33e84d7e..fc7ffafa430 100644
--- a/mlir/examples/toy/Ch5/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch5/mlir/ToyCombine.td
@@ -41,7 +41,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
@@ -54,7 +54,7 @@ def FoldConstantReshapeOptPattern : Pat<
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
index 0a9ded0c3d3..c0bd6f79aa1 100644
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -53,7 +53,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
@@ -104,8 +104,7 @@ void ConstantOp::build(mlir::Builder *builder, mlir::OperationState &state,
static mlir::LogicalResult verify(ConstantOp op) {
// If the return type of the constant is not an unranked tensor, the shape
// must match the shape of the attribute holding the data.
- auto resultType =
- op.getResult()->getType().dyn_cast<mlir::RankedTensorType>();
+ auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
if (!resultType)
return success();
@@ -142,14 +141,14 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
@@ -183,7 +182,7 @@ void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
@@ -233,13 +232,13 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
index 82c247c1be2..3c41958ed31 100644
--- a/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch6/mlir/ToyCombine.cpp
@@ -46,7 +46,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
diff --git a/mlir/examples/toy/Ch6/mlir/ToyCombine.td b/mlir/examples/toy/Ch6/mlir/ToyCombine.td
index e6e33e84d7e..fc7ffafa430 100644
--- a/mlir/examples/toy/Ch6/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch6/mlir/ToyCombine.td
@@ -41,7 +41,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
@@ -54,7 +54,7 @@ def FoldConstantReshapeOptPattern : Pat<
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
index 7e37f61a473..619185d3bba 100644
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -54,7 +54,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
// Replace the values directly with the return operands.
assert(returnOp.getNumOperands() == valuesToRepl.size());
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
- valuesToRepl[it.index()]->replaceAllUsesWith(it.value());
+ valuesToRepl[it.index()].replaceAllUsesWith(it.value());
}
/// Attempts to materialize a conversion for a type mismatch between a call
@@ -171,16 +171,16 @@ static mlir::LogicalResult verifyConstantForType(mlir::Type type,
/// Verifier for the constant operation. This corresponds to the `::verify(...)`
/// in the op definition.
static mlir::LogicalResult verify(ConstantOp op) {
- return verifyConstantForType(op.getResult()->getType(), op.value(), op);
+ return verifyConstantForType(op.getResult().getType(), op.value(), op);
}
static mlir::LogicalResult verify(StructConstantOp op) {
- return verifyConstantForType(op.getResult()->getType(), op.value(), op);
+ return verifyConstantForType(op.getResult().getType(), op.value(), op);
}
/// Infer the output shape of the ConstantOp, this is required by the shape
/// inference interface.
-void ConstantOp::inferShapes() { getResult()->setType(value().getType()); }
+void ConstantOp::inferShapes() { getResult().setType(value().getType()); }
//===----------------------------------------------------------------------===//
// AddOp
@@ -193,14 +193,14 @@ void AddOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the AddOp, this is required by the shape inference
/// interface.
-void AddOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void AddOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// CastOp
/// Infer the output shape of the CastOp, this is required by the shape
/// inference interface.
-void CastOp::inferShapes() { getResult()->setType(getOperand()->getType()); }
+void CastOp::inferShapes() { getResult().setType(getOperand().getType()); }
//===----------------------------------------------------------------------===//
// GenericCallOp
@@ -234,7 +234,7 @@ void MulOp::build(mlir::Builder *builder, mlir::OperationState &state,
/// Infer the output shape of the MulOp, this is required by the shape inference
/// interface.
-void MulOp::inferShapes() { getResult()->setType(getOperand(0)->getType()); }
+void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
//===----------------------------------------------------------------------===//
// ReturnOp
@@ -280,7 +280,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
void StructAccessOp::build(mlir::Builder *b, mlir::OperationState &state,
mlir::Value input, size_t index) {
// Extract the result type from the input type.
- StructType structTy = input->getType().cast<StructType>();
+ StructType structTy = input.getType().cast<StructType>();
assert(index < structTy.getNumElementTypes());
mlir::Type resultType = structTy.getElementTypes()[index];
@@ -289,12 +289,12 @@ void StructAccessOp::build(mlir::Builder *b, mlir::OperationState &state,
}
static mlir::LogicalResult verify(StructAccessOp op) {
- StructType structTy = op.input()->getType().cast<StructType>();
+ StructType structTy = op.input().getType().cast<StructType>();
size_t index = op.index().getZExtValue();
if (index >= structTy.getNumElementTypes())
return op.emitOpError()
<< "index should be within the range of the input struct type";
- mlir::Type resultType = op.getResult()->getType();
+ mlir::Type resultType = op.getResult().getType();
if (resultType != structTy.getElementTypes()[index])
return op.emitOpError() << "must have the same result type as the struct "
"element referred to by the index";
@@ -311,13 +311,13 @@ void TransposeOp::build(mlir::Builder *builder, mlir::OperationState &state,
}
void TransposeOp::inferShapes() {
- auto arrayTy = getOperand()->getType().cast<RankedTensorType>();
+ auto arrayTy = getOperand().getType().cast<RankedTensorType>();
SmallVector<int64_t, 2> dims(llvm::reverse(arrayTy.getShape()));
- getResult()->setType(RankedTensorType::get(dims, arrayTy.getElementType()));
+ getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
}
static mlir::LogicalResult verify(TransposeOp op) {
- auto inputType = op.getOperand()->getType().dyn_cast<RankedTensorType>();
+ auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
auto resultType = op.getType().dyn_cast<RankedTensorType>();
if (!inputType || !resultType)
return mlir::success();
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
index 3d543f69bdc..c1bcee7e1b9 100644
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -585,11 +585,11 @@ private:
mlir::Type type = getType(varType, vardecl.loc());
if (!type)
return nullptr;
- if (type != value->getType()) {
+ if (type != value.getType()) {
emitError(loc(vardecl.loc()))
<< "struct type of initializer is different than the variable "
"declaration. Got "
- << value->getType() << ", but expected " << type;
+ << value.getType() << ", but expected " << type;
return nullptr;
}
diff --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
index c688a53d86f..088603bb8c6 100644
--- a/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.cpp
@@ -64,7 +64,7 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
// Look through the input of the current transpose.
mlir::Value transposeInput = op.getOperand();
TransposeOp transposeInputOp =
- llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+ llvm::dyn_cast_or_null<TransposeOp>(transposeInput.getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
diff --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.td b/mlir/examples/toy/Ch7/mlir/ToyCombine.td
index e6e33e84d7e..fc7ffafa430 100644
--- a/mlir/examples/toy/Ch7/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.td
@@ -41,7 +41,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
// Reshape(Constant(x)) = x'
def ReshapeConstant :
- NativeCodeCall<"$0.reshape(($1->getType()).cast<ShapedType>())">;
+ NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
def FoldConstantReshapeOptPattern : Pat<
(ReshapeOp:$res (ConstantOp $arg)),
(ConstantOp (ReshapeConstant $arg, $res))>;
@@ -54,7 +54,7 @@ def FoldConstantReshapeOptPattern : Pat<
// on operand properties.
// Reshape(x) = x, where input and output shapes are identical
-def TypesAreIdentical : Constraint<CPred<"$0->getType() == $1->getType()">>;
+def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
def RedundantReshapeOptPattern : Pat<
(ReshapeOp:$res $arg), (replaceWithValue $arg),
[(TypesAreIdentical $res, $arg)]>;