summaryrefslogtreecommitdiffstats
path: root/mlir/lib/Dialect/StandardOps/Ops.cpp
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-12-22 21:59:55 -0800
committerA. Unique TensorFlower <gardener@tensorflow.org>2019-12-22 22:00:23 -0800
commit35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch)
treed083d37d993a774239081509a50e3e6c65366421 /mlir/lib/Dialect/StandardOps/Ops.cpp
parent22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff)
downloadbcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.tar.gz
bcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.zip
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step to refactoring the representation of OpResult as proposed in: https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics. PiperOrigin-RevId: 286844725
Diffstat (limited to 'mlir/lib/Dialect/StandardOps/Ops.cpp')
-rw-r--r--mlir/lib/Dialect/StandardOps/Ops.cpp66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index 4116f6f14ae..94166b5a7dd 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -81,7 +81,7 @@ struct StdInlinerInterface : public DialectInlinerInterface {
/// Handle the given inlined terminator by replacing it with a new operation
/// as necessary.
void handleTerminator(Operation *op,
- ArrayRef<Value *> valuesToRepl) const final {
+ ArrayRef<ValuePtr> valuesToRepl) const final {
// Only "std.return" needs to be handled here.
auto returnOp = cast<ReturnOp>(op);
@@ -184,7 +184,7 @@ void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
// dimension operands parsed.
// Returns 'false' on success and 'true' on error.
ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
- SmallVectorImpl<Value *> &operands,
+ SmallVectorImpl<ValuePtr> &operands,
unsigned &numDims) {
SmallVector<OpAsmParser::OperandType, 8> opInfos;
if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
@@ -325,7 +325,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
PatternRewriter &rewriter) const override {
// Check to see if any dimensions operands are constants. If so, we can
// substitute and drop them.
- if (llvm::none_of(alloc.getOperands(), [](Value *operand) {
+ if (llvm::none_of(alloc.getOperands(), [](ValuePtr operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -336,8 +336,8 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocOp> {
// and keep track of the resultant memref type to build.
SmallVector<int64_t, 4> newShapeConstants;
newShapeConstants.reserve(memrefType.getRank());
- SmallVector<Value *, 4> newOperands;
- SmallVector<Value *, 4> droppedOperands;
+ SmallVector<ValuePtr, 4> newOperands;
+ SmallVector<ValuePtr, 4> droppedOperands;
unsigned dynamicDimPos = 0;
for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
@@ -429,7 +429,7 @@ struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
static ParseResult parseBranchOp(OpAsmParser &parser, OperationState &result) {
Block *dest;
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
if (parser.parseSuccessorAndUseList(dest, destOperands))
return failure();
result.addSuccessor(dest, destOperands);
@@ -623,7 +623,7 @@ static Type getI1SameShape(Builder *build, Type type) {
//===----------------------------------------------------------------------===//
static void buildCmpIOp(Builder *build, OperationState &result,
- CmpIPredicate predicate, Value *lhs, Value *rhs) {
+ CmpIPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -777,7 +777,7 @@ CmpFPredicate CmpFOp::getPredicateByName(StringRef name) {
}
static void buildCmpFOp(Builder *build, OperationState &result,
- CmpFPredicate predicate, Value *lhs, Value *rhs) {
+ CmpFPredicate predicate, ValuePtr lhs, ValuePtr rhs) {
result.addOperands({lhs, rhs});
result.types.push_back(getI1SameShape(build, lhs->getType()));
result.addAttribute(
@@ -946,7 +946,7 @@ struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
static ParseResult parseCondBranchOp(OpAsmParser &parser,
OperationState &result) {
- SmallVector<Value *, 4> destOperands;
+ SmallVector<ValuePtr, 4> destOperands;
Block *dest;
OpAsmParser::OperandType condInfo;
@@ -1088,7 +1088,7 @@ OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
}
void ConstantOp::getAsmResultNames(
- function_ref<void(Value *, StringRef)> setNameFn) {
+ function_ref<void(ValuePtr, StringRef)> setNameFn) {
Type type = getType();
if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
IntegerType intTy = type.dyn_cast<IntegerType>();
@@ -1183,7 +1183,7 @@ struct SimplifyDeadDealloc : public OpRewritePattern<DeallocOp> {
PatternMatchResult matchAndRewrite(DeallocOp dealloc,
PatternRewriter &rewriter) const override {
// Check that the memref operand's defining operation is an AllocOp.
- Value *memref = dealloc.memref();
+ ValuePtr memref = dealloc.memref();
if (!isa_and_nonnull<AllocOp>(memref->getDefiningOp()))
return matchFailure();
@@ -1362,11 +1362,11 @@ OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
// ---------------------------------------------------------------------------
void DmaStartOp::build(Builder *builder, OperationState &result,
- Value *srcMemRef, ValueRange srcIndices,
- Value *destMemRef, ValueRange destIndices,
- Value *numElements, Value *tagMemRef,
- ValueRange tagIndices, Value *stride,
- Value *elementsPerStride) {
+ ValuePtr srcMemRef, ValueRange srcIndices,
+ ValuePtr destMemRef, ValueRange destIndices,
+ ValuePtr numElements, ValuePtr tagMemRef,
+ ValueRange tagIndices, ValuePtr stride,
+ ValuePtr elementsPerStride) {
result.addOperands(srcMemRef);
result.addOperands(srcIndices);
result.addOperands(destMemRef);
@@ -1507,8 +1507,8 @@ LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
// ---------------------------------------------------------------------------
void DmaWaitOp::build(Builder *builder, OperationState &result,
- Value *tagMemRef, ValueRange tagIndices,
- Value *numElements) {
+ ValuePtr tagMemRef, ValueRange tagIndices,
+ ValuePtr numElements) {
result.addOperands(tagMemRef);
result.addOperands(tagIndices);
result.addOperands(numElements);
@@ -2025,7 +2025,7 @@ static LogicalResult verify(SelectOp op) {
}
OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
- auto *condition = getCondition();
+ auto condition = getCondition();
// select true, %0, %1 => %0
if (matchPattern(condition, m_One()))
@@ -2357,7 +2357,7 @@ static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
static void print(OpAsmPrinter &p, ViewOp op) {
p << op.getOperationName() << ' ' << *op.getOperand(0) << '[';
- auto *dynamicOffset = op.getDynamicOffset();
+ auto dynamicOffset = op.getDynamicOffset();
if (dynamicOffset != nullptr)
p.printOperand(dynamicOffset);
p << "][" << op.getDynamicSizes() << ']';
@@ -2365,7 +2365,7 @@ static void print(OpAsmPrinter &p, ViewOp op) {
p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
}
-Value *ViewOp::getDynamicOffset() {
+ValuePtr ViewOp::getDynamicOffset() {
int64_t offset;
SmallVector<int64_t, 4> strides;
auto result =
@@ -2440,7 +2440,7 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
PatternMatchResult matchAndRewrite(ViewOp viewOp,
PatternRewriter &rewriter) const override {
// Return if none of the operands are constants.
- if (llvm::none_of(viewOp.getOperands(), [](Value *operand) {
+ if (llvm::none_of(viewOp.getOperands(), [](ValuePtr operand) {
return matchPattern(operand, m_ConstantIndex());
}))
return matchFailure();
@@ -2457,11 +2457,11 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
return matchFailure();
- SmallVector<Value *, 4> newOperands;
- SmallVector<Value *, 4> droppedOperands;
+ SmallVector<ValuePtr, 4> newOperands;
+ SmallVector<ValuePtr, 4> droppedOperands;
// Fold dynamic offset operand if it is produced by a constant.
- auto *dynamicOffset = viewOp.getDynamicOffset();
+ auto dynamicOffset = viewOp.getDynamicOffset();
int64_t newOffset = oldOffset;
unsigned dynamicOffsetOperandCount = 0;
if (dynamicOffset != nullptr) {
@@ -2576,7 +2576,7 @@ static Type inferSubViewResultType(MemRefType memRefType) {
memRefType.getMemorySpace());
}
-void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source,
+void mlir::SubViewOp::build(Builder *b, OperationState &result, ValuePtr source,
ValueRange offsets, ValueRange sizes,
ValueRange strides, Type resultType,
ArrayRef<NamedAttribute> attrs) {
@@ -2590,7 +2590,7 @@ void mlir::SubViewOp::build(Builder *b, OperationState &result, Value *source,
}
void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType,
- Value *source) {
+ ValuePtr source) {
build(b, result, source, /*offsets=*/{}, /*sizes=*/{}, /*strides=*/{},
resultType);
}
@@ -2826,7 +2826,7 @@ public:
// Follow all or nothing approach for shapes for now. If all the operands
// for sizes are constants then fold it into the type of the result memref.
if (subViewType.hasStaticShape() ||
- llvm::any_of(subViewOp.sizes(), [](Value *operand) {
+ llvm::any_of(subViewOp.sizes(), [](ValuePtr operand) {
return !matchPattern(operand, m_ConstantIndex());
})) {
return matchFailure();
@@ -2842,7 +2842,7 @@ public:
subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- ArrayRef<Value *>(), subViewOp.strides(), newMemRefType);
+ ArrayRef<ValuePtr>(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.sizes(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2871,7 +2871,7 @@ public:
failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) ||
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
- llvm::any_of(subViewOp.strides(), [](Value *stride) {
+ llvm::any_of(subViewOp.strides(), [](ValuePtr stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2892,7 +2892,7 @@ public:
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
- subViewOp.sizes(), ArrayRef<Value *>(), newMemRefType);
+ subViewOp.sizes(), ArrayRef<ValuePtr>(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
subViewOp.strides(), subViewOp, newSubViewOp, subViewOp.getType());
@@ -2922,7 +2922,7 @@ public:
llvm::is_contained(baseStrides,
MemRefType::getDynamicStrideOrOffset()) ||
baseOffset == MemRefType::getDynamicStrideOrOffset() ||
- llvm::any_of(subViewOp.offsets(), [](Value *stride) {
+ llvm::any_of(subViewOp.offsets(), [](ValuePtr stride) {
return !matchPattern(stride, m_ConstantIndex());
})) {
return matchFailure();
@@ -2943,7 +2943,7 @@ public:
MemRefType::get(subViewType.getShape(), subViewType.getElementType(),
layoutMap, subViewType.getMemorySpace());
auto newSubViewOp = rewriter.create<SubViewOp>(
- subViewOp.getLoc(), subViewOp.source(), ArrayRef<Value *>(),
+ subViewOp.getLoc(), subViewOp.source(), ArrayRef<ValuePtr>(),
subViewOp.sizes(), subViewOp.strides(), newMemRefType);
// Insert a memref_cast for compatibility of the uses of the op.
rewriter.replaceOpWithNewOp<MemRefCastOp>(
OpenPOWER on IntegriCloud